Diffstat (limited to 'drivers/gpu')
969 files changed, 20383 insertions, 22783 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 19fc4ce62a94..1461652921be 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -15,6 +15,9 @@ menuconfig DRM select I2C_ALGOBIT select DMA_SHARED_BUFFER select SYNC_FILE +# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate +# device and dmabuf fd. Let's make sure that is available for our userspace. + select KCMP help Kernel-level support for the Direct Rendering Infrastructure (DRI) introduced in XFree86 4.0. If you say Y here, you need to select @@ -349,8 +352,6 @@ source "drivers/gpu/drm/vc4/Kconfig" source "drivers/gpu/drm/etnaviv/Kconfig" -source "drivers/gpu/drm/arc/Kconfig" - source "drivers/gpu/drm/hisilicon/Kconfig" source "drivers/gpu/drm/mediatek/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 926adef289db..5eb5bf7c16e3 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -44,7 +44,8 @@ drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ drm_kms_helper_common.o drm_dp_dual_mode_helper.o \ drm_simple_kms_helper.o drm_modeset_helper.o \ - drm_scdc_helper.o drm_gem_framebuffer_helper.o \ + drm_scdc_helper.o drm_gem_atomic_helper.o \ + drm_gem_framebuffer_helper.o \ drm_atomic_state_helper.o drm_damage_helper.o \ drm_format_helper.o drm_self_refresh_helper.o @@ -110,7 +111,6 @@ obj-y += panel/ obj-y += bridge/ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/ -obj-$(CONFIG_DRM_ARCPGU)+= arc/ obj-y += hisilicon/ obj-$(CONFIG_DRM_ZTE) += zte/ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index efe6b5ca5185..a037c223c251 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1090,7 +1090,7 @@ static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev) return &adev->ddev; } -static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) +static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev) { return container_of(bdev, struct amdgpu_device, mman.bdev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c index 3107b9575929..5af464933976 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c @@ -40,13 +40,13 @@ static atomic_t fence_seq = ATOMIC_INIT(0); * All the BOs in a process share an eviction fence. When process X wants * to map VRAM memory but TTM can't find enough space, TTM will attempt to * evict BOs from its LRU list. TTM checks if the BO is valuable to evict - * by calling ttm_bo_driver->eviction_valuable(). + * by calling ttm_device_funcs->eviction_valuable(). * - * ttm_bo_driver->eviction_valuable() - will return false if the BO belongs + * ttm_device_funcs->eviction_valuable() - will return false if the BO belongs * to process X. Otherwise, it will return true to indicate BO can be * evicted by TTM. * - * If ttm_bo_driver->eviction_valuable returns true, then TTM will continue + * If ttm_device_funcs->eviction_valuable returns true, then TTM will continue * the evcition process for that BO by calling ttm_bo_evict --> amdgpu_bo_move * --> amdgpu_copy_buffer(). This sets up job in GPU scheduler. 
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index ab516a1a54cd..e93850f2f3b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -120,6 +120,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) */ #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14) +static size_t amdgpu_amdkfd_acc_size(uint64_t size) +{ + size >>= PAGE_SHIFT; + size *= sizeof(dma_addr_t) + sizeof(void *); + + return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) + + __roundup_pow_of_two(sizeof(struct ttm_tt)) + + PAGE_ALIGN(size); +} + static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 domain, bool sg) { @@ -128,8 +138,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed; int ret = 0; - acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, - sizeof(struct amdgpu_bo)); + acc_size = amdgpu_amdkfd_acc_size(size); vram_needed = 0; if (domain == AMDGPU_GEM_DOMAIN_GTT) { @@ -176,8 +185,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev, { size_t acc_size; - acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, - sizeof(struct amdgpu_bo)); + acc_size = amdgpu_amdkfd_acc_size(size); spin_lock(&kfd_mem_limit.mem_limit_lock); if (domain == AMDGPU_GEM_DOMAIN_GTT) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index a5e98d0142d7..27b19503773b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -295,7 +295,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) continue; status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (!ACPI_FAILURE(status)) { + if (ACPI_SUCCESS(status)) { found = true; break; } @@ -308,7 +308,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) continue; status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (!ACPI_FAILURE(status)) { + if (ACPI_SUCCESS(status)) { found = true; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 6d16f58ac91e..1a4809d9e850 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -485,7 +485,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, num_hw_submission, amdgpu_job_hang_limit, - timeout, ring->name); + timeout, NULL, ring->name); if (r) { DRM_ERROR("Failed to create scheduler on ring %s.\n", ring->name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index f1ede4b43d07..5807cad833d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -71,7 +71,7 @@ */ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) { - struct page *dummy_page = ttm_bo_glob.dummy_read_page; + struct page *dummy_page = ttm_glob.dummy_read_page; if (adev->dummy_page_addr) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index ff48101bab55..759b34799221 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -28,7 +28,7 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -static void amdgpu_job_timedout(struct drm_sched_job *s_job) +static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job 
*s_job) { struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); @@ -41,7 +41,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", s_job->sched->name); - return; + return DRM_GPU_SCHED_STAT_NOMINAL; } amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); @@ -53,10 +53,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) if (amdgpu_device_should_recover_gpu(ring->adev)) { amdgpu_device_gpu_recover(ring->adev, job); + return DRM_GPU_SCHED_STAT_NOMINAL; } else { drm_sched_suspend_timeout(&ring->sched); if (amdgpu_sriov_vf(adev)) adev->virt.tdr_debug = true; + return DRM_GPU_SCHED_STAT_NOMINAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index de52a99916f8..ac1bb5089260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -523,7 +523,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, }; struct amdgpu_bo *bo; unsigned long page_align, size = bp->size; - size_t acc_size; int r; /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */ @@ -546,9 +545,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, *bo_ptr = NULL; - acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, - sizeof(struct amdgpu_bo)); - bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -577,8 +573,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, bo->tbo.priority = 1; r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type, - &bo->placement, page_align, &ctx, acc_size, - NULL, bp->resv, &amdgpu_bo_destroy); + &bo->placement, page_align, &ctx, NULL, + bp->resv, &amdgpu_bo_destroy); if (unlikely(r != 0)) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b51d0100532f..1c6131489a85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -61,10 +61,10 @@ #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 -static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev, +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); -static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm); static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev, @@ -570,7 +570,7 @@ out: * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() */ -static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) +static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct drm_mm_node *mm_node = mem->mm_node; @@ -818,7 +818,7 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) * * Called by amdgpu_ttm_backend_bind() **/ -static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, +static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); @@ -856,7 +856,7 @@ release_sg: /* * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages */ -static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, +static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt 
*ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); @@ -940,7 +940,7 @@ gart_bind_fail: * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). * This handles binding GTT memory to the device address space. */ -static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev, +static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem) { @@ -1080,7 +1080,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and * ttm_tt_destroy(). */ -static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, +static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); @@ -1105,7 +1105,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, gtt->bound = false; } -static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev, +static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1159,7 +1159,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, * Map the pages of a ttm_tt object to an address space visible * to the underlying device. */ -static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, +static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { @@ -1203,7 +1203,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, * Unmaps pages of a ttm_tt object from the device address space and * unpopulates the page array backing it. */ -static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, +static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1523,7 +1523,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo) amdgpu_bo_move_notify(bo, false, NULL); } -static struct ttm_bo_driver amdgpu_bo_driver = { +static struct ttm_device_funcs amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, @@ -1705,7 +1705,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, + r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, adev_to_drm(adev)->vma_offset_manager, adev->need_swiotlb, @@ -1853,7 +1853,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); - ttm_bo_device_release(&adev->mman.bdev); + ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; DRM_INFO("amdgpu: ttm finalized\n"); } @@ -1929,7 +1929,7 @@ unlock: return ret; } -static struct vm_operations_struct amdgpu_ttm_vm_ops = { +static const struct vm_operations_struct amdgpu_ttm_vm_ops = { .fault = amdgpu_ttm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index e62992797557..dec0db8b0b13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -60,7 +60,7 @@ struct amdgpu_gtt_mgr { }; struct amdgpu_mman { - struct ttm_bo_device bdev; + struct 
ttm_device bdev; bool initialized; void __iomem *aper_base_kaddr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2b0f1445be65..e8cafc97eada 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm_bo_base *bo_base; if (vm->bulk_moveable) { - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); return; } memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); list_for_each_entry(bo_base, &vm->idle, vm_status) { struct amdgpu_bo *bo = bo_base->bo; @@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, &bo->shadow->tbo.mem, &vm->lru_bulk_move); } - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); vm->bulk_moveable = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 7944781e1086..ea825b4f8ee8 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1862,7 +1862,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1981,8 +1980,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 1b6ff0470011..a360a6dec198 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1904,7 +1904,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -2023,8 +2022,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 83a88385b762..ef124ac853b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1820,7 +1820,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1929,8 +1928,8 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 224b30214427..c98650183828 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1791,7 +1791,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1902,8 +1901,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ddfdfaf44439..00edf78975b1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -990,8 +990,7 @@ static void event_mall_stutter(struct work_struct *work) else dm->active_vblank_irq_count--; - dc_allow_idle_optimizations( - dm->dc, dm->active_vblank_irq_count == 0); + dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); @@ -4689,7 +4688,6 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, const struct drm_framebuffer *fb = plane_state->fb; const struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane_state->fb); - struct drm_format_name_buf format_name; int ret; memset(plane_info, 0, sizeof(*plane_info)); @@ -4737,8 +4735,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, break; default: DRM_ERROR( - "Unsupported screen format %s\n", - drm_get_format_name(fb->format->format, &format_name)); + "Unsupported screen format %p4cc\n", + &fb->format->format); return -EINVAL; } @@ -6730,8 +6728,10 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state, } static int dm_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct amdgpu_device *adev = drm_to_adev(plane->dev); struct dc *dc = adev->dm.dc; struct dm_plane_state *dm_plane_state; @@ -6739,23 +6739,24 @@ static int dm_plane_atomic_check(struct drm_plane *plane, struct drm_crtc_state *new_crtc_state; int ret; - trace_amdgpu_dm_plane_atomic_check(state); + trace_amdgpu_dm_plane_atomic_check(new_plane_state); - dm_plane_state = to_dm_plane_state(state); + dm_plane_state = to_dm_plane_state(new_plane_state); if (!dm_plane_state->dc_state) return 0; new_crtc_state = - drm_atomic_get_new_crtc_state(state->state, state->crtc); + drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); if (!new_crtc_state) return -EINVAL; - ret = dm_plane_helper_check_state(state, new_crtc_state); + ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); if (ret) return ret; - ret = fill_dc_scaling_info(state, &scaling_info); + ret = fill_dc_scaling_info(new_plane_state, &scaling_info); if (ret) return ret; @@ -6766,7 +6767,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane, } static int dm_plane_atomic_async_check(struct drm_plane *plane, - struct drm_plane_state *new_plane_state) + struct drm_atomic_state *state) { /* Only support async updates on cursor planes. 
*/ if (plane->type != DRM_PLANE_TYPE_CURSOR) @@ -6776,10 +6777,12 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane, } static void dm_plane_atomic_async_update(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_plane_state *old_state = - drm_atomic_get_old_plane_state(new_state->state, plane); + drm_atomic_get_old_plane_state(state, plane); trace_amdgpu_dm_atomic_update_cursor(new_state); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 0fbdfff87835..622a5bf9737f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1621,12 +1621,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } +static void calculate_wm_set_for_vlevel( + int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + + vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, 
pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); +} + static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 3908ad929176..6ee9dd833b85 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -187,10 +187,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .ack = NULL }; -static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = { - .set = NULL, - .ack = NULL -}; static const struct irq_source_info_funcs vline0_irq_info_funcs = { .set = NULL, diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig deleted file mode 100644 index e8f3d63e0b91..000000000000 --- a/drivers/gpu/drm/arc/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config DRM_ARCPGU - tristate "ARC PGU" - depends on DRM && OF - select DRM_KMS_CMA_HELPER - select DRM_KMS_HELPER - help - Choose this option if you have an ARC PGU controller. - - If M is selected the module will be called arcpgu. 
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile deleted file mode 100644 index c7028b7427b3..000000000000 --- a/drivers/gpu/drm/arc/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o -obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h deleted file mode 100644 index 6aac44b953ad..000000000000 --- a/drivers/gpu/drm/arc/arcpgu.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * ARC PGU DRM driver. - * - * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) - */ - -#ifndef _ARCPGU_H_ -#define _ARCPGU_H_ - -struct arcpgu_drm_private { - void __iomem *regs; - struct clk *clk; - struct drm_framebuffer *fb; - struct drm_crtc crtc; - struct drm_plane *plane; -}; - -#define crtc_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, crtc) - -static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu, - unsigned int reg, u32 value) -{ - iowrite32(value, arcpgu->regs + reg); -} - -static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu, - unsigned int reg) -{ - return ioread32(arcpgu->regs + reg); -} - -int arc_pgu_setup_crtc(struct drm_device *dev); -int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np); -int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np); - -#endif diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c deleted file mode 100644 index 895cdd991af6..000000000000 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ /dev/null @@ -1,217 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * ARC PGU DRM driver. - * - * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) - */ - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_device.h> -#include <drm/drm_fb_cma_helper.h> -#include <drm/drm_gem_cma_helper.h> -#include <drm/drm_plane_helper.h> -#include <drm/drm_probe_helper.h> -#include <linux/clk.h> -#include <linux/platform_data/simplefb.h> - -#include "arcpgu.h" -#include "arcpgu_regs.h" - -#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1)) - -static const u32 arc_pgu_supported_formats[] = { - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_ARGB8888, -}; - -static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc) -{ - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - const struct drm_framebuffer *fb = crtc->primary->state->fb; - uint32_t pixel_format = fb->format->format; - u32 format = DRM_FORMAT_INVALID; - int i; - u32 reg_ctrl; - - for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) { - if (arc_pgu_supported_formats[i] == pixel_format) - format = arc_pgu_supported_formats[i]; - } - - if (WARN_ON(format == DRM_FORMAT_INVALID)) - return; - - reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL); - if (format == DRM_FORMAT_RGB565) - reg_ctrl &= ~ARCPGU_MODE_XRGB8888; - else - reg_ctrl |= ARCPGU_MODE_XRGB8888; - arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl); -} - -static const struct drm_crtc_funcs arc_pgu_crtc_funcs = { - .destroy = drm_crtc_cleanup, - .set_config = drm_atomic_helper_set_config, - .page_flip = drm_atomic_helper_page_flip, - .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, -}; - -static enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc, - const struct drm_display_mode *mode) -{ - struct 
arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - long rate, clk_rate = mode->clock * 1000; - long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */ - - rate = clk_round_rate(arcpgu->clk, clk_rate); - if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0)) - return MODE_OK; - - return MODE_NOCLOCK; -} - -static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc) -{ - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - struct drm_display_mode *m = &crtc->state->adjusted_mode; - u32 val; - - arc_pgu_write(arcpgu, ARCPGU_REG_FMT, - ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal)); - - arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC, - ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay, - m->crtc_hsync_end - m->crtc_hdisplay)); - - arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC, - ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay, - m->crtc_vsync_end - m->crtc_vdisplay)); - - arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE, - ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start, - m->crtc_vblank_end - m->crtc_vblank_start)); - - val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL); - - if (m->flags & DRM_MODE_FLAG_PVSYNC) - val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST; - else - val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST); - - if (m->flags & DRM_MODE_FLAG_PHSYNC) - val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST; - else - val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST); - - arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val); - arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0); - arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1); - - arc_pgu_set_pxl_fmt(crtc); - - clk_set_rate(arcpgu->clk, m->crtc_clock * 1000); -} - -static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - - clk_prepare_enable(arcpgu->clk); - arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, - arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) | - ARCPGU_CTRL_ENABLE_MASK); -} - -static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - - clk_disable_unprepare(arcpgu->clk); - arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, - arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) & - ~ARCPGU_CTRL_ENABLE_MASK); -} - -static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = { - .mode_valid = arc_pgu_crtc_mode_valid, - .mode_set_nofb = arc_pgu_crtc_mode_set_nofb, - .atomic_enable = arc_pgu_crtc_atomic_enable, - .atomic_disable = arc_pgu_crtc_atomic_disable, -}; - -static void arc_pgu_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct arcpgu_drm_private *arcpgu; - struct drm_gem_cma_object *gem; - - if (!plane->state->crtc || !plane->state->fb) - return; - - arcpgu = crtc_to_arcpgu_priv(plane->state->crtc); - gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); - arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr); -} - -static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = { - .atomic_update = arc_pgu_plane_atomic_update, -}; - -static const struct drm_plane_funcs arc_pgu_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_plane_cleanup, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, -}; - -static struct drm_plane 
*arc_pgu_plane_init(struct drm_device *drm) -{ - struct arcpgu_drm_private *arcpgu = drm->dev_private; - struct drm_plane *plane = NULL; - int ret; - - plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL); - if (!plane) - return ERR_PTR(-ENOMEM); - - ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs, - arc_pgu_supported_formats, - ARRAY_SIZE(arc_pgu_supported_formats), - NULL, - DRM_PLANE_TYPE_PRIMARY, NULL); - if (ret) - return ERR_PTR(ret); - - drm_plane_helper_add(plane, &arc_pgu_plane_helper_funcs); - arcpgu->plane = plane; - - return plane; -} - -int arc_pgu_setup_crtc(struct drm_device *drm) -{ - struct arcpgu_drm_private *arcpgu = drm->dev_private; - struct drm_plane *primary; - int ret; - - primary = arc_pgu_plane_init(drm); - if (IS_ERR(primary)) - return PTR_ERR(primary); - - ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL, - &arc_pgu_crtc_funcs, NULL); - if (ret) { - drm_plane_cleanup(primary); - return ret; - } - - drm_crtc_helper_add(&arcpgu->crtc, &arc_pgu_crtc_helper_funcs); - return 0; -} diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c deleted file mode 100644 index 077d006b1fbf..000000000000 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ /dev/null @@ -1,224 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * ARC PGU DRM driver. - * - * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) - */ - -#include <linux/clk.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_debugfs.h> -#include <drm/drm_device.h> -#include <drm/drm_drv.h> -#include <drm/drm_fb_cma_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_of.h> -#include <drm/drm_probe_helper.h> -#include <linux/dma-mapping.h> -#include <linux/module.h> -#include <linux/of_reserved_mem.h> -#include <linux/platform_device.h> - -#include "arcpgu.h" -#include "arcpgu_regs.h" - -static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { - .fb_create = drm_gem_fb_create, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, -}; - -static void arcpgu_setup_mode_config(struct drm_device *drm) -{ - drm_mode_config_init(drm); - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - drm->mode_config.max_width = 1920; - drm->mode_config.max_height = 1080; - drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs; -} - -DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops); - -static int arcpgu_load(struct drm_device *drm) -{ - struct platform_device *pdev = to_platform_device(drm->dev); - struct arcpgu_drm_private *arcpgu; - struct device_node *encoder_node = NULL, *endpoint_node = NULL; - struct resource *res; - int ret; - - arcpgu = devm_kzalloc(&pdev->dev, sizeof(*arcpgu), GFP_KERNEL); - if (arcpgu == NULL) - return -ENOMEM; - - drm->dev_private = arcpgu; - - arcpgu->clk = devm_clk_get(drm->dev, "pxlclk"); - if (IS_ERR(arcpgu->clk)) - return PTR_ERR(arcpgu->clk); - - arcpgu_setup_mode_config(drm); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - arcpgu->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(arcpgu->regs)) - return PTR_ERR(arcpgu->regs); - - dev_info(drm->dev, "arc_pgu ID: 0x%x\n", - arc_pgu_read(arcpgu, ARCPGU_REG_ID)); - - /* Get the optional framebuffer memory resource */ - ret = of_reserved_mem_device_init(drm->dev); - if (ret && ret != -ENODEV) - return ret; - - if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32))) - return -ENODEV; - - if (arc_pgu_setup_crtc(drm) < 0) - 
return -ENODEV; - - /* - * There is only one output port inside each device. It is linked with - * encoder endpoint. - */ - endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); - if (endpoint_node) { - encoder_node = of_graph_get_remote_port_parent(endpoint_node); - of_node_put(endpoint_node); - } - - if (encoder_node) { - ret = arcpgu_drm_hdmi_init(drm, encoder_node); - of_node_put(encoder_node); - if (ret < 0) - return ret; - } else { - dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n"); - ret = arcpgu_drm_sim_init(drm, NULL); - if (ret < 0) - return ret; - } - - drm_mode_config_reset(drm); - drm_kms_helper_poll_init(drm); - - platform_set_drvdata(pdev, drm); - return 0; -} - -static int arcpgu_unload(struct drm_device *drm) -{ - drm_kms_helper_poll_fini(drm); - drm_atomic_helper_shutdown(drm); - drm_mode_config_cleanup(drm); - - return 0; -} - -#ifdef CONFIG_DEBUG_FS -static int arcpgu_show_pxlclock(struct seq_file *m, void *arg) -{ - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *drm = node->minor->dev; - struct arcpgu_drm_private *arcpgu = drm->dev_private; - unsigned long clkrate = clk_get_rate(arcpgu->clk); - unsigned long mode_clock = arcpgu->crtc.mode.crtc_clock * 1000; - - seq_printf(m, "hw : %lu\n", clkrate); - seq_printf(m, "mode: %lu\n", mode_clock); - return 0; -} - -static struct drm_info_list arcpgu_debugfs_list[] = { - { "clocks", arcpgu_show_pxlclock, 0 }, -}; - -static void arcpgu_debugfs_init(struct drm_minor *minor) -{ - drm_debugfs_create_files(arcpgu_debugfs_list, - ARRAY_SIZE(arcpgu_debugfs_list), - minor->debugfs_root, minor); -} -#endif - -static const struct drm_driver arcpgu_drm_driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .name = "arcpgu", - .desc = "ARC PGU Controller", - .date = "20160219", - .major = 1, - .minor = 0, - .patchlevel = 0, - .fops = &arcpgu_drm_ops, - DRM_GEM_CMA_DRIVER_OPS, -#ifdef CONFIG_DEBUG_FS - .debugfs_init = arcpgu_debugfs_init, -#endif -}; - -static int arcpgu_probe(struct platform_device *pdev) -{ - struct drm_device *drm; - int ret; - - drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev); - if (IS_ERR(drm)) - return PTR_ERR(drm); - - ret = arcpgu_load(drm); - if (ret) - goto err_unref; - - ret = drm_dev_register(drm, 0); - if (ret) - goto err_unload; - - drm_fbdev_generic_setup(drm, 16); - - return 0; - -err_unload: - arcpgu_unload(drm); - -err_unref: - drm_dev_put(drm); - - return ret; -} - -static int arcpgu_remove(struct platform_device *pdev) -{ - struct drm_device *drm = platform_get_drvdata(pdev); - - drm_dev_unregister(drm); - arcpgu_unload(drm); - drm_dev_put(drm); - - return 0; -} - -static const struct of_device_id arcpgu_of_table[] = { - {.compatible = "snps,arcpgu"}, - {} -}; - -MODULE_DEVICE_TABLE(of, arcpgu_of_table); - -static struct platform_driver arcpgu_platform_driver = { - .probe = arcpgu_probe, - .remove = arcpgu_remove, - .driver = { - .name = "arcpgu", - .of_match_table = arcpgu_of_table, - }, -}; - -module_platform_driver(arcpgu_platform_driver); - -MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>"); -MODULE_DESCRIPTION("ARC PGU DRM driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c deleted file mode 100644 index 52839934f2fb..000000000000 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * ARC PGU DRM driver. - * - * Copyright (C) 2016 Synopsys, Inc. 
(www.synopsys.com) - */ - -#include <drm/drm_bridge.h> -#include <drm/drm_crtc.h> -#include <drm/drm_encoder.h> -#include <drm/drm_device.h> - -#include "arcpgu.h" - -static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) -{ - struct drm_encoder *encoder; - struct drm_bridge *bridge; - - int ret = 0; - - encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); - if (encoder == NULL) - return -ENOMEM; - - /* Locate drm bridge from the hdmi encoder DT node */ - bridge = of_drm_find_bridge(np); - if (!bridge) - return -EPROBE_DEFER; - - encoder->possible_crtcs = 1; - encoder->possible_clones = 0; - ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - if (ret) - return ret; - - /* Link drm_bridge to encoder */ - ret = drm_bridge_attach(encoder, bridge, NULL, 0); - if (ret) - drm_encoder_cleanup(encoder); - - return ret; -} diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h deleted file mode 100644 index b689a382d556..000000000000 --- a/drivers/gpu/drm/arc/arcpgu_regs.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * ARC PGU DRM driver. - * - * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) - */ - -#ifndef _ARC_PGU_REGS_H_ -#define _ARC_PGU_REGS_H_ - -#define ARCPGU_REG_CTRL 0x00 -#define ARCPGU_REG_STAT 0x04 -#define ARCPGU_REG_FMT 0x10 -#define ARCPGU_REG_HSYNC 0x14 -#define ARCPGU_REG_VSYNC 0x18 -#define ARCPGU_REG_ACTIVE 0x1c -#define ARCPGU_REG_BUF0_ADDR 0x40 -#define ARCPGU_REG_STRIDE 0x50 -#define ARCPGU_REG_START_SET 0x84 - -#define ARCPGU_REG_ID 0x3FC - -#define ARCPGU_CTRL_ENABLE_MASK 0x02 -#define ARCPGU_CTRL_VS_POL_MASK 0x1 -#define ARCPGU_CTRL_VS_POL_OFST 0x3 -#define ARCPGU_CTRL_HS_POL_MASK 0x1 -#define ARCPGU_CTRL_HS_POL_OFST 0x4 -#define ARCPGU_MODE_XRGB8888 BIT(2) -#define ARCPGU_STAT_BUSY_MASK 0x02 - -#endif diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c deleted file mode 100644 index 37d961668dfe..000000000000 --- a/drivers/gpu/drm/arc/arcpgu_sim.c +++ /dev/null @@ -1,108 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * ARC PGU DRM driver. - * - * Copyright (C) 2016 Synopsys, Inc. 
(www.synopsys.com) - */ - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_device.h> -#include <drm/drm_probe_helper.h> - -#include "arcpgu.h" - -#define XRES_DEF 640 -#define YRES_DEF 480 - -#define XRES_MAX 8192 -#define YRES_MAX 8192 - - -struct arcpgu_drm_connector { - struct drm_connector connector; -}; - -static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) -{ - int count; - - count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); - drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); - return count; -} - -static void arcpgu_drm_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - -static const struct drm_connector_helper_funcs -arcpgu_drm_connector_helper_funcs = { - .get_modes = arcpgu_drm_connector_get_modes, -}; - -static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = arcpgu_drm_connector_destroy, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np) -{ - struct arcpgu_drm_connector *arcpgu_connector; - struct drm_encoder *encoder; - struct drm_connector *connector; - int ret; - - encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); - if (encoder == NULL) - return -ENOMEM; - - encoder->possible_crtcs = 1; - encoder->possible_clones = 0; - - ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, - DRM_MODE_ENCODER_VIRTUAL, NULL); - if (ret) - return ret; - - arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), - GFP_KERNEL); - if (!arcpgu_connector) { - ret = -ENOMEM; - goto error_encoder_cleanup; - } - - connector = &arcpgu_connector->connector; - drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); - - ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - if (ret < 0) { - dev_err(drm->dev, "failed to initialize drm connector\n"); - goto error_encoder_cleanup; - } - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret < 0) { - dev_err(drm->dev, "could not attach connector to encoder\n"); - drm_connector_unregister(connector); - goto error_connector_cleanup; - } - - return 0; - -error_connector_cleanup: - drm_connector_cleanup(connector); - -error_encoder_cleanup: - drm_encoder_cleanup(encoder); - return ret; -} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h index 32273cf18f7c..cf7a183f773d 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h @@ -82,17 +82,6 @@ struct komeda_format_caps_table { extern u64 komeda_supported_modifiers[]; -static inline const char *komeda_get_format_name(u32 fourcc, u64 modifier) -{ - struct drm_format_name_buf buf; - static char name[64]; - - snprintf(name, sizeof(name), "%s with modifier: 0x%llx.", - drm_get_format_name(fourcc, &buf), modifier); - - return name; -} - const struct komeda_format_caps * komeda_get_format_caps(struct komeda_format_caps_table *table, u32 fourcc, u64 modifier); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index 170f9dc8ec19..3c372d2deb0a 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -276,8 +276,8 @@ bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type, supported = komeda_format_mod_supported(&mdev->fmt_tbl, layer_type, fourcc, modifier, rot); if (!supported) - DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %s.\n", - layer_type, komeda_get_format_name(fourcc, modifier)); + DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %p4cc with modifier: 0x%llx.\n", + layer_type, &fourcc, modifier); return supported; } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 034ee08482e0..aeda4e5ec4f4 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -73,6 +73,7 @@ static const struct drm_driver komeda_kms_driver = { static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; + bool fence_cookie = dma_fence_begin_signalling(); drm_atomic_helper_commit_modeset_disables(dev, old_state); @@ -85,6 +86,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_wait_for_flip_done(dev, old_state); + dma_fence_end_signalling(fence_cookie); + drm_atomic_helper_cleanup_planes(dev, old_state); } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c index 98e915e325dd..d63d83800a8a 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c @@ -49,10 +49,8 @@ komeda_plane_init_data_flow(struct drm_plane_state *st, dflow->rot = drm_rotation_simplify(st->rotation, caps->supported_rots); if (!has_bits(dflow->rot, caps->supported_rots)) { - DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %s.\n", - dflow->rot, - komeda_get_format_name(caps->fourcc, - fb->modifier)); + DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %p4cc with modifier: 0x%llx.\n", + dflow->rot, &caps->fourcc, fb->modifier); return -EINVAL; } @@ -71,20 +69,23 @@ komeda_plane_init_data_flow(struct drm_plane_state *st, */ static int komeda_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct komeda_plane *kplane = to_kplane(plane); - struct komeda_plane_state *kplane_st = to_kplane_st(state); + struct komeda_plane_state *kplane_st = to_kplane_st(new_plane_state); struct komeda_layer *layer = kplane->layer; struct drm_crtc_state *crtc_st; struct komeda_crtc_state *kcrtc_st; struct komeda_data_flow_cfg dflow; int err; - if (!state->crtc || !state->fb) + if (!new_plane_state->crtc || !new_plane_state->fb) return 0; - crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_st = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); if (IS_ERR(crtc_st) || !crtc_st->enable) { DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n"); return -EINVAL; @@ -96,7 +97,7 @@ komeda_plane_atomic_check(struct drm_plane *plane, kcrtc_st = to_kcrtc_st(crtc_st); - err = komeda_plane_init_data_flow(state, kcrtc_st, &dflow); + err = komeda_plane_init_data_flow(new_plane_state, kcrtc_st, &dflow); if (err) return err; @@ -115,7 +116,7 @@ komeda_plane_atomic_check(struct drm_plane 
*plane, */ static void komeda_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { } diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index a3234bfb0917..7adb065169e9 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -229,12 +229,14 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { }; static int hdlcd_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); int i; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - u32 src_h = state->src_h >> 16; + u32 src_h = new_plane_state->src_h >> 16; /* only the HDLCD_REG_FB_LINE_COUNT register has a limit */ if (src_h >= HDLCD_MAX_YRES) { @@ -242,23 +244,27 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) { + for_each_new_crtc_in_state(state, crtc, crtc_state, + i) { /* we cannot disable the plane while the CRTC is active */ - if (!state->fb && crtc_state->active) + if (!new_plane_state->fb && crtc_state->active) return -EINVAL; - return drm_atomic_helper_check_plane_state(state, crtc_state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - false, true); + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); } return 0; } static void hdlcd_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_framebuffer *fb = plane->state->fb; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; struct hdlcd_drm_private *hdlcd; u32 dest_h; dma_addr_t scanout_start; @@ -266,8 +272,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, if (!fb) return; - dest_h = drm_rect_height(&plane->state->dst); - scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0); + dest_h = drm_rect_height(&new_plane_state->dst); + scanout_start = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0); hdlcd = plane->dev->dev_private; hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index fceda010d65a..d83c7366b348 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -234,6 +234,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state) struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int i; + bool fence_cookie = dma_fence_begin_signalling(); pm_runtime_get_sync(drm->dev); @@ -260,6 +261,8 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state) malidp_atomic_commit_hw_done(state); + dma_fence_end_signalling(fence_cookie); + pm_runtime_put(drm->dev); drm_atomic_helper_cleanup_planes(drm, state); diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 7d0e7b031e44..f5847a79dd7e 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -151,11 +151,8 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE, fb->format->format, !!fb->modifier); if (mw_state->format == MALIDP_INVALID_FORMAT_ID) { - struct 
drm_format_name_buf format_name; - - DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(fb->format->format, - &format_name)); + DRM_DEBUG_KMS("Invalid pixel format %p4cc\n", + &fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 351a85088d0e..ddbba67f0283 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -502,20 +502,22 @@ static void malidp_de_prefetch_settings(struct malidp_plane *mp, } static int malidp_de_plane_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct malidp_plane *mp = to_malidp_plane(plane); - struct malidp_plane_state *ms = to_malidp_plane_state(state); - bool rotated = state->rotation & MALIDP_ROTATED_MASK; + struct malidp_plane_state *ms = to_malidp_plane_state(new_plane_state); + bool rotated = new_plane_state->rotation & MALIDP_ROTATED_MASK; struct drm_framebuffer *fb; - u16 pixel_alpha = state->pixel_blend_mode; + u16 pixel_alpha = new_plane_state->pixel_blend_mode; int i, ret; unsigned int block_w, block_h; - if (!state->crtc || WARN_ON(!state->fb)) + if (!new_plane_state->crtc || WARN_ON(!new_plane_state->fb)) return 0; - fb = state->fb; + fb = new_plane_state->fb; ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map, mp->layer->id, fb->format->format, @@ -541,15 +543,15 @@ static int malidp_de_plane_check(struct drm_plane *plane, DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes"); return -EINVAL; } - if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) { + if ((new_plane_state->src_x >> 16) % block_w || (new_plane_state->src_y >> 16) % block_h) { DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes"); return -EINVAL; } - if ((state->crtc_w > mp->hwdev->max_line_size) || - (state->crtc_h > mp->hwdev->max_line_size) || - (state->crtc_w < mp->hwdev->min_line_size) || - (state->crtc_h < mp->hwdev->min_line_size)) + if ((new_plane_state->crtc_w > mp->hwdev->max_line_size) || + (new_plane_state->crtc_h > mp->hwdev->max_line_size) || + (new_plane_state->crtc_w < mp->hwdev->min_line_size) || + (new_plane_state->crtc_h < mp->hwdev->min_line_size)) return -EINVAL; /* @@ -559,15 +561,15 @@ static int malidp_de_plane_check(struct drm_plane *plane, */ if (ms->n_planes == 3 && !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) && - (state->fb->pitches[1] != state->fb->pitches[2])) + (new_plane_state->fb->pitches[1] != new_plane_state->fb->pitches[2])) return -EINVAL; - ret = malidp_se_check_scaling(mp, state); + ret = malidp_se_check_scaling(mp, new_plane_state); if (ret) return ret; /* validate the rotation constraints for each layer */ - if (state->rotation != DRM_MODE_ROTATE_0) { + if (new_plane_state->rotation != DRM_MODE_ROTATE_0) { if (mp->layer->rot == ROTATE_NONE) return -EINVAL; if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier)) @@ -588,11 +590,11 @@ static int malidp_de_plane_check(struct drm_plane *plane, } ms->rotmem_size = 0; - if (state->rotation & MALIDP_ROTATED_MASK) { + if (new_plane_state->rotation & MALIDP_ROTATED_MASK) { int val; - val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w, - state->crtc_h, + val = mp->hwdev->hw->rotmem_required(mp->hwdev, new_plane_state->crtc_w, + new_plane_state->crtc_h, fb->format->format, !!(fb->modifier)); if (val < 0) @@ -602,7 +604,7 @@ static int 
malidp_de_plane_check(struct drm_plane *plane, } /* HW can't support plane + pixel blending */ - if ((state->alpha != DRM_BLEND_ALPHA_OPAQUE) && + if ((new_plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE) && (pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) && fb->format->has_alpha) return -EINVAL; @@ -789,13 +791,16 @@ static void malidp_de_set_plane_afbc(struct drm_plane *plane) } static void malidp_de_plane_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct malidp_plane *mp; struct malidp_plane_state *ms = to_malidp_plane_state(plane->state); - struct drm_plane_state *state = plane->state; - u16 pixel_alpha = state->pixel_blend_mode; - u8 plane_alpha = state->alpha >> 8; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + u16 pixel_alpha = new_state->pixel_blend_mode; + u8 plane_alpha = new_state->alpha >> 8; u32 src_w, src_h, dest_w, dest_h, val; int i; struct drm_framebuffer *fb = plane->state->fb; @@ -811,12 +816,12 @@ static void malidp_de_plane_update(struct drm_plane *plane, src_h = fb->height; } else { /* convert src values from Q16 fixed point to integer */ - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + src_w = new_state->src_w >> 16; + src_h = new_state->src_h >> 16; } - dest_w = state->crtc_w; - dest_h = state->crtc_h; + dest_w = new_state->crtc_w; + dest_h = new_state->crtc_h; val = malidp_hw_read(mp->hwdev, mp->layer->base); val = (val & ~LAYER_FORMAT_MASK) | ms->format; @@ -828,7 +833,7 @@ static void malidp_de_plane_update(struct drm_plane *plane, malidp_de_set_mmu_control(mp, ms); malidp_de_set_plane_pitches(mp, ms->n_planes, - state->fb->pitches); + new_state->fb->pitches); if ((plane->state->color_encoding != old_state->color_encoding) || (plane->state->color_range != old_state->color_range)) @@ -841,8 +846,8 @@ static void malidp_de_plane_update(struct drm_plane *plane, malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h), mp->layer->base + MALIDP_LAYER_COMP_SIZE); - malidp_hw_write(mp->hwdev, LAYER_H_VAL(state->crtc_x) | - LAYER_V_VAL(state->crtc_y), + malidp_hw_write(mp->hwdev, LAYER_H_VAL(new_state->crtc_x) | + LAYER_V_VAL(new_state->crtc_y), mp->layer->base + MALIDP_LAYER_OFFSET); if (mp->layer->id == DE_SMART) { @@ -864,19 +869,19 @@ static void malidp_de_plane_update(struct drm_plane *plane, val &= ~LAYER_ROT_MASK; /* setup the rotation and axis flip bits */ - if (state->rotation & DRM_MODE_ROTATE_MASK) + if (new_state->rotation & DRM_MODE_ROTATE_MASK) val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) << LAYER_ROT_OFFSET; - if (state->rotation & DRM_MODE_REFLECT_X) + if (new_state->rotation & DRM_MODE_REFLECT_X) val |= LAYER_H_FLIP; - if (state->rotation & DRM_MODE_REFLECT_Y) + if (new_state->rotation & DRM_MODE_REFLECT_Y) val |= LAYER_V_FLIP; val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff)); - if (state->alpha != DRM_BLEND_ALPHA_OPAQUE) { + if (new_state->alpha != DRM_BLEND_ALPHA_OPAQUE) { val |= LAYER_COMP_PLANE; - } else if (state->fb->format->has_alpha) { + } else if (new_state->fb->format->has_alpha) { /* We only care about blend mode if the format has alpha */ switch (pixel_alpha) { case DRM_MODE_BLEND_PREMULTI: @@ -890,9 +895,9 @@ static void malidp_de_plane_update(struct drm_plane *plane, val |= LAYER_ALPHA(plane_alpha); val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK); - if (state->crtc) { + if (new_state->crtc) { struct 
malidp_crtc_state *m = - to_malidp_crtc_state(state->crtc->state); + to_malidp_crtc_state(new_state->crtc->state); if (m->scaler_config.scale_enable && m->scaler_config.plane_src_id == mp->layer->id) @@ -907,7 +912,7 @@ static void malidp_de_plane_update(struct drm_plane *plane, } static void malidp_de_plane_disable(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { struct malidp_plane *mp = to_malidp_plane(plane); diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 6346b890279a..d3e3e5fdc390 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -66,9 +66,12 @@ static inline u32 armada_csc(struct drm_plane_state *state) /* === Plane support === */ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct armada_crtc *dcrtc; struct armada_regs *regs; unsigned int idx; @@ -76,62 +79,64 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane, DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name); - if (!state->fb || WARN_ON(!state->crtc)) + if (!new_state->fb || WARN_ON(!new_state->crtc)) return; DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n", plane->base.id, plane->name, - state->crtc->base.id, state->crtc->name, - state->fb->base.id, - old_state->visible, state->visible); + new_state->crtc->base.id, new_state->crtc->name, + new_state->fb->base.id, + old_state->visible, new_state->visible); - dcrtc = drm_to_armada_crtc(state->crtc); + dcrtc = drm_to_armada_crtc(new_state->crtc); regs = dcrtc->regs + dcrtc->regs_idx; idx = 0; - if (!old_state->visible && state->visible) + if (!old_state->visible && new_state->visible) armada_reg_queue_mod(regs, idx, 0, CFG_PDWN16x66 | CFG_PDWN32x66, LCD_SPU_SRAM_PARA1); - val = armada_src_hw(state); + val = armada_src_hw(new_state); if (armada_src_hw(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN); - val = armada_dst_yx(state); + val = armada_dst_yx(new_state); if (armada_dst_yx(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN); - val = armada_dst_hw(state); + val = armada_dst_hw(new_state); if (armada_dst_hw(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN); /* FIXME: overlay on an interlaced display */ - if (old_state->src.x1 != state->src.x1 || - old_state->src.y1 != state->src.y1 || - old_state->fb != state->fb || - state->crtc->state->mode_changed) { + if (old_state->src.x1 != new_state->src.x1 || + old_state->src.y1 != new_state->src.y1 || + old_state->fb != new_state->fb || + new_state->crtc->state->mode_changed) { const struct drm_format_info *format; u16 src_x; - armada_reg_queue_set(regs, idx, armada_addr(state, 0, 0), + armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0), LCD_SPU_DMA_START_ADDR_Y0); - armada_reg_queue_set(regs, idx, armada_addr(state, 0, 1), + armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 1), LCD_SPU_DMA_START_ADDR_U0); - armada_reg_queue_set(regs, idx, armada_addr(state, 0, 2), + armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 2), LCD_SPU_DMA_START_ADDR_V0); - armada_reg_queue_set(regs, idx, 
armada_addr(state, 1, 0), + armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0), LCD_SPU_DMA_START_ADDR_Y1); - armada_reg_queue_set(regs, idx, armada_addr(state, 1, 1), + armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 1), LCD_SPU_DMA_START_ADDR_U1); - armada_reg_queue_set(regs, idx, armada_addr(state, 1, 2), + armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 2), LCD_SPU_DMA_START_ADDR_V1); - val = armada_pitch(state, 0) << 16 | armada_pitch(state, 0); + val = armada_pitch(new_state, 0) << 16 | armada_pitch(new_state, + 0); armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC); - val = armada_pitch(state, 1) << 16 | armada_pitch(state, 2); + val = armada_pitch(new_state, 1) << 16 | armada_pitch(new_state, + 2); armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV); - cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) | - CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) | + cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) | + CFG_DMA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod) | CFG_CBSH_ENA; - if (state->visible) + if (new_state->visible) cfg |= CFG_DMA_ENA; /* @@ -139,28 +144,28 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane, * U/V planes to swap. Compensate for it by also toggling * the UV swap. */ - format = state->fb->format; - src_x = state->src.x1 >> 16; + format = new_state->fb->format; + src_x = new_state->src.x1 >> 16; if (format->num_planes == 1 && src_x & (format->hsub - 1)) cfg ^= CFG_DMA_MOD(CFG_SWAPUV); - if (to_armada_plane_state(state)->interlace) + if (to_armada_plane_state(new_state)->interlace) cfg |= CFG_DMA_FTOGGLE; cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU | CFG_YUV2RGB) | CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE | CFG_DMA_ENA; - } else if (old_state->visible != state->visible) { - cfg = state->visible ? CFG_DMA_ENA : 0; + } else if (old_state->visible != new_state->visible) { + cfg = new_state->visible ? 
CFG_DMA_ENA : 0; cfg_mask = CFG_DMA_ENA; } else { cfg = cfg_mask = 0; } - if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) || - drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) { + if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) || + drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) { cfg_mask |= CFG_DMA_HSMOOTH; - if (drm_rect_width(&state->src) >> 16 != - drm_rect_width(&state->dst)) + if (drm_rect_width(&new_state->src) >> 16 != + drm_rect_width(&new_state->dst)) cfg |= CFG_DMA_HSMOOTH; } @@ -168,41 +173,41 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane, armada_reg_queue_mod(regs, idx, cfg, cfg_mask, LCD_SPU_DMA_CTRL0); - val = armada_spu_contrast(state); - if ((!old_state->visible && state->visible) || + val = armada_spu_contrast(new_state); + if ((!old_state->visible && new_state->visible) || armada_spu_contrast(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST); - val = armada_spu_saturation(state); - if ((!old_state->visible && state->visible) || + val = armada_spu_saturation(new_state); + if ((!old_state->visible && new_state->visible) || armada_spu_saturation(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION); - if (!old_state->visible && state->visible) + if (!old_state->visible && new_state->visible) armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE); - val = armada_csc(state); - if ((!old_state->visible && state->visible) || + val = armada_csc(new_state); + if ((!old_state->visible && new_state->visible) || armada_csc(old_state) != val) armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK, LCD_SPU_IOPAD_CONTROL); - val = drm_to_overlay_state(state)->colorkey_yr; - if ((!old_state->visible && state->visible) || + val = drm_to_overlay_state(new_state)->colorkey_yr; + if ((!old_state->visible && new_state->visible) || drm_to_overlay_state(old_state)->colorkey_yr != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y); - val = drm_to_overlay_state(state)->colorkey_ug; - if ((!old_state->visible && state->visible) || + val = drm_to_overlay_state(new_state)->colorkey_ug; + if ((!old_state->visible && new_state->visible) || drm_to_overlay_state(old_state)->colorkey_ug != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U); - val = drm_to_overlay_state(state)->colorkey_vb; - if ((!old_state->visible && state->visible) || + val = drm_to_overlay_state(new_state)->colorkey_vb; + if ((!old_state->visible && new_state->visible) || drm_to_overlay_state(old_state)->colorkey_vb != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V); - val = drm_to_overlay_state(state)->colorkey_mode; - if ((!old_state->visible && state->visible) || + val = drm_to_overlay_state(new_state)->colorkey_mode; + if ((!old_state->visible && new_state->visible) || drm_to_overlay_state(old_state)->colorkey_mode != val) armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK, LCD_SPU_DMA_CTRL1); - val = drm_to_overlay_state(state)->colorkey_enable; - if (((!old_state->visible && state->visible) || + val = drm_to_overlay_state(new_state)->colorkey_enable; + if (((!old_state->visible && new_state->visible) || drm_to_overlay_state(old_state)->colorkey_enable != val) && dcrtc->variant->has_spu_adv_reg) armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY | @@ -212,8 +217,10 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane, } static void 
armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct armada_crtc *dcrtc; struct armada_regs *regs; unsigned int idx = 0; diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c index e7cc2b343bcb..40209e49f34a 100644 --- a/drivers/gpu/drm/armada/armada_plane.c +++ b/drivers/gpu/drm/armada/armada_plane.c @@ -106,59 +106,67 @@ void armada_drm_plane_cleanup_fb(struct drm_plane *plane, } int armada_drm_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct armada_plane_state *st = to_armada_plane_state(state); - struct drm_crtc *crtc = state->crtc; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct armada_plane_state *st = to_armada_plane_state(new_plane_state); + struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; bool interlace; int ret; - if (!state->fb || WARN_ON(!state->crtc)) { - state->visible = false; + if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) { + new_plane_state->visible = false; return 0; } - if (state->state) - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + if (state) + crtc_state = drm_atomic_get_existing_crtc_state(state, + crtc); else crtc_state = crtc->state; - ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + 0, INT_MAX, true, false); if (ret) return ret; interlace = crtc_state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE; if (interlace) { - if ((state->dst.y1 | state->dst.y2) & 1) + if ((new_plane_state->dst.y1 | new_plane_state->dst.y2) & 1) return -EINVAL; - st->src_hw = drm_rect_height(&state->src) >> 17; - st->dst_yx = state->dst.y1 >> 1; - st->dst_hw = drm_rect_height(&state->dst) >> 1; + st->src_hw = drm_rect_height(&new_plane_state->src) >> 17; + st->dst_yx = new_plane_state->dst.y1 >> 1; + st->dst_hw = drm_rect_height(&new_plane_state->dst) >> 1; } else { - st->src_hw = drm_rect_height(&state->src) >> 16; - st->dst_yx = state->dst.y1; - st->dst_hw = drm_rect_height(&state->dst); + st->src_hw = drm_rect_height(&new_plane_state->src) >> 16; + st->dst_yx = new_plane_state->dst.y1; + st->dst_hw = drm_rect_height(&new_plane_state->dst); } st->src_hw <<= 16; - st->src_hw |= drm_rect_width(&state->src) >> 16; + st->src_hw |= drm_rect_width(&new_plane_state->src) >> 16; st->dst_yx <<= 16; - st->dst_yx |= state->dst.x1 & 0x0000ffff; + st->dst_yx |= new_plane_state->dst.x1 & 0x0000ffff; st->dst_hw <<= 16; - st->dst_hw |= drm_rect_width(&state->dst) & 0x0000ffff; + st->dst_hw |= drm_rect_width(&new_plane_state->dst) & 0x0000ffff; - armada_drm_plane_calc(state, st->addrs, st->pitches, interlace); + armada_drm_plane_calc(new_plane_state, st->addrs, st->pitches, + interlace); st->interlace = interlace; return 0; } static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct armada_crtc *dcrtc; struct armada_regs *regs; u32 cfg, cfg_mask, val; @@ -166,71 +174,72 @@ static void 
armada_drm_primary_plane_atomic_update(struct drm_plane *plane, DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name); - if (!state->fb || WARN_ON(!state->crtc)) + if (!new_state->fb || WARN_ON(!new_state->crtc)) return; DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n", plane->base.id, plane->name, - state->crtc->base.id, state->crtc->name, - state->fb->base.id, - old_state->visible, state->visible); + new_state->crtc->base.id, new_state->crtc->name, + new_state->fb->base.id, + old_state->visible, new_state->visible); - dcrtc = drm_to_armada_crtc(state->crtc); + dcrtc = drm_to_armada_crtc(new_state->crtc); regs = dcrtc->regs + dcrtc->regs_idx; idx = 0; - if (!old_state->visible && state->visible) { + if (!old_state->visible && new_state->visible) { val = CFG_PDWN64x66; - if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420) + if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420) val |= CFG_PDWN256x24; armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1); } - val = armada_src_hw(state); + val = armada_src_hw(new_state); if (armada_src_hw(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN); - val = armada_dst_yx(state); + val = armada_dst_yx(new_state); if (armada_dst_yx(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN); - val = armada_dst_hw(state); + val = armada_dst_hw(new_state); if (armada_dst_hw(old_state) != val) armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN); - if (old_state->src.x1 != state->src.x1 || - old_state->src.y1 != state->src.y1 || - old_state->fb != state->fb || - state->crtc->state->mode_changed) { - armada_reg_queue_set(regs, idx, armada_addr(state, 0, 0), + if (old_state->src.x1 != new_state->src.x1 || + old_state->src.y1 != new_state->src.y1 || + old_state->fb != new_state->fb || + new_state->crtc->state->mode_changed) { + armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0), LCD_CFG_GRA_START_ADDR0); - armada_reg_queue_set(regs, idx, armada_addr(state, 1, 0), + armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0), LCD_CFG_GRA_START_ADDR1); - armada_reg_queue_mod(regs, idx, armada_pitch(state, 0), 0xffff, + armada_reg_queue_mod(regs, idx, armada_pitch(new_state, 0), + 0xffff, LCD_CFG_GRA_PITCH); } - if (old_state->fb != state->fb || - state->crtc->state->mode_changed) { - cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) | - CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod); - if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420) + if (old_state->fb != new_state->fb || + new_state->crtc->state->mode_changed) { + cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) | + CFG_GRA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod); + if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420) cfg |= CFG_PALETTE_ENA; - if (state->visible) + if (new_state->visible) cfg |= CFG_GRA_ENA; - if (to_armada_plane_state(state)->interlace) + if (to_armada_plane_state(new_state)->interlace) cfg |= CFG_GRA_FTOGGLE; cfg_mask = CFG_GRAFORMAT | CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU | CFG_YUV2RGB) | CFG_PALETTE_ENA | CFG_GRA_FTOGGLE | CFG_GRA_ENA; - } else if (old_state->visible != state->visible) { - cfg = state->visible ? CFG_GRA_ENA : 0; + } else if (old_state->visible != new_state->visible) { + cfg = new_state->visible ? 
CFG_GRA_ENA : 0; cfg_mask = CFG_GRA_ENA; } else { cfg = cfg_mask = 0; } - if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) || - drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) { + if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) || + drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) { cfg_mask |= CFG_GRA_HSMOOTH; - if (drm_rect_width(&state->src) >> 16 != - drm_rect_width(&state->dst)) + if (drm_rect_width(&new_state->src) >> 16 != + drm_rect_width(&new_state->dst)) cfg |= CFG_GRA_HSMOOTH; } @@ -242,8 +251,10 @@ static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane, } static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct armada_crtc *dcrtc; struct armada_regs *regs; unsigned int idx = 0; diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h index 2707ec781941..51dab8d8da22 100644 --- a/drivers/gpu/drm/armada/armada_plane.h +++ b/drivers/gpu/drm/armada/armada_plane.h @@ -26,7 +26,7 @@ int armada_drm_plane_prepare_fb(struct drm_plane *plane, void armada_drm_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state); int armada_drm_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); void armada_plane_reset(struct drm_plane *plane); struct drm_plane_state *armada_plane_duplicate_state(struct drm_plane *plane); void armada_plane_destroy_state(struct drm_plane *plane, diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h index f1e7e56abc02..96501152bafa 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx.h +++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h @@ -11,6 +11,11 @@ struct aspeed_gfx { struct reset_control *rst; struct regmap *scu; + u32 dac_reg; + u32 vga_scratch_reg; + u32 throd_val; + u32 scan_line_max; + struct drm_simple_display_pipe pipe; struct drm_connector connector; }; @@ -100,6 +105,3 @@ int aspeed_gfx_create_output(struct drm_device *drm); /* CRT_THROD */ #define CRT_THROD_LOW(x) (x) #define CRT_THROD_HIGH(x) ((x) << 8) - -/* Default Threshold Seting */ -#define G5_CRT_THROD_VAL (CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3C)) diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c index e54686c31a90..098f96d4d50d 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c @@ -9,8 +9,8 @@ #include <drm/drm_device.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_panel.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> @@ -59,8 +59,8 @@ static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv) u32 ctrl1 = readl(priv->base + CRT_CTRL1); u32 ctrl2 = readl(priv->base + CRT_CTRL2); - /* SCU2C: set DAC source for display output to Graphics CRT (GFX) */ - regmap_update_bits(priv->scu, 0x2c, BIT(16), BIT(16)); + /* Set DAC source for display output to Graphics CRT (GFX) */ + regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), BIT(16)); writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1); writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2); @@ -74,7 +74,7 @@ static void aspeed_gfx_disable_controller(struct 
aspeed_gfx *priv) writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1); writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2); - regmap_update_bits(priv->scu, 0x2c, BIT(16), 0); + regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), 0); } static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv) @@ -127,7 +127,8 @@ static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv) * Terminal Count: memory size of one scan line */ d_offset = m->hdisplay * bpp / 8; - t_count = (m->hdisplay * bpp + 127) / 128; + t_count = DIV_ROUND_UP(m->hdisplay * bpp, priv->scan_line_max); + writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count), priv->base + CRT_OFFSET); @@ -135,7 +136,7 @@ static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv) * Threshold: FIFO thresholds of refill and stop (16 byte chunks * per line, rounded up) */ - writel(G5_CRT_THROD_VAL, priv->base + CRT_THROD); + writel(priv->throd_val, priv->base + CRT_THROD); } static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe, @@ -219,7 +220,7 @@ static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = { .enable = aspeed_gfx_pipe_enable, .disable = aspeed_gfx_pipe_disable, .update = aspeed_gfx_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, .enable_vblank = aspeed_gfx_enable_vblank, .disable_vblank = aspeed_gfx_disable_vblank, }; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 457ec04950f7..b53fee6f1c17 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -7,6 +7,7 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -57,6 +58,34 @@ * which is available under NDA from ASPEED. 
*/ +struct aspeed_gfx_config { + u32 dac_reg; /* DAC register in SCU */ + u32 vga_scratch_reg; /* VGA scratch register in SCU */ + u32 throd_val; /* Default Threshold Seting */ + u32 scan_line_max; /* Max memory size of one scan line */ +}; + +static const struct aspeed_gfx_config ast2400_config = { + .dac_reg = 0x2c, + .vga_scratch_reg = 0x50, + .throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12), + .scan_line_max = 64, +}; + +static const struct aspeed_gfx_config ast2500_config = { + .dac_reg = 0x2c, + .vga_scratch_reg = 0x50, + .throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c), + .scan_line_max = 128, +}; + +static const struct of_device_id aspeed_gfx_match[] = { + { .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config }, + { .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config }, + { }, +}; +MODULE_DEVICE_TABLE(of, aspeed_gfx_match); + static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = { .fb_create = drm_gem_fb_create, .atomic_check = drm_atomic_helper_check, @@ -97,12 +126,13 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) return IRQ_NONE; } - - static int aspeed_gfx_load(struct drm_device *drm) { struct platform_device *pdev = to_platform_device(drm->dev); struct aspeed_gfx *priv = to_aspeed_gfx(drm); + struct device_node *np = pdev->dev.of_node; + const struct aspeed_gfx_config *config; + const struct of_device_id *match; struct resource *res; int ret; @@ -111,10 +141,23 @@ static int aspeed_gfx_load(struct drm_device *drm) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu"); + match = of_match_device(aspeed_gfx_match, &pdev->dev); + if (!match) + return -EINVAL; + config = match->data; + + priv->dac_reg = config->dac_reg; + priv->vga_scratch_reg = config->vga_scratch_reg; + priv->throd_val = config->throd_val; + priv->scan_line_max = config->scan_line_max; + + priv->scu = syscon_regmap_lookup_by_phandle(np, "syscon"); if (IS_ERR(priv->scu)) { - dev_err(&pdev->dev, "failed to find SCU regmap\n"); - return PTR_ERR(priv->scu); + priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu"); + if (IS_ERR(priv->scu)) { + dev_err(&pdev->dev, "failed to find SCU regmap\n"); + return PTR_ERR(priv->scu); + } } ret = of_reserved_mem_device_init(drm->dev); @@ -202,14 +245,6 @@ static const struct drm_driver aspeed_gfx_driver = { .minor = 0, }; -static const struct of_device_id aspeed_gfx_match[] = { - { .compatible = "aspeed,ast2500-gfx" }, - { } -}; - -#define ASPEED_SCU_VGA0 0x50 -#define ASPEED_SCU_MISC_CTRL 0x2c - static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -224,7 +259,7 @@ static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr, if (val > 3) return -EINVAL; - rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16); + rc = regmap_update_bits(priv->scu, priv->dac_reg, 0x30000, val << 16); if (rc < 0) return 0; @@ -237,7 +272,7 @@ static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, c u32 reg; int rc; - rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, ®); + rc = regmap_read(priv->scu, priv->dac_reg, ®); if (rc) return rc; @@ -252,7 +287,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) u32 reg; int rc; - rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, ®); + rc = regmap_read(priv->scu, priv->vga_scratch_reg, ®); if (rc) return rc; @@ -284,7 +319,7 @@ static int aspeed_gfx_probe(struct 
platform_device *pdev) if (ret) return ret; - dev_set_drvdata(&pdev->dev, priv); + platform_set_drvdata(pdev, priv); ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); if (ret) diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 2265a8a624dd..438a2d05b115 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -3,7 +3,6 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. -ast-y := ast_cursor.o ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o \ - ast_dp501.o +ast-y := ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o obj-$(CONFIG_DRM_AST) := ast.o diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c deleted file mode 100644 index fac1ee79c372..000000000000 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * Parts based on xf86-video-ast - * Copyright (c) 2005 ASPEED Technology Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - */ -/* - * Authors: Dave Airlie <airlied@redhat.com> - */ - -#include <drm/drm_gem_vram_helper.h> -#include <drm/drm_managed.h> - -#include "ast_drv.h" - -static void ast_cursor_fini(struct ast_private *ast) -{ - size_t i; - struct drm_gem_vram_object *gbo; - - for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { - gbo = ast->cursor.gbo[i]; - drm_gem_vram_unpin(gbo); - drm_gem_vram_put(gbo); - } -} - -static void ast_cursor_release(struct drm_device *dev, void *ptr) -{ - struct ast_private *ast = to_ast_private(dev); - - ast_cursor_fini(ast); -} - -/* - * Allocate cursor BOs and pin them at the end of VRAM. 
- */ -int ast_cursor_init(struct ast_private *ast) -{ - struct drm_device *dev = &ast->base; - size_t size, i; - struct drm_gem_vram_object *gbo; - int ret; - - size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); - - for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { - gbo = drm_gem_vram_create(dev, size, 0); - if (IS_ERR(gbo)) { - ret = PTR_ERR(gbo); - goto err_drm_gem_vram_put; - } - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM | - DRM_GEM_VRAM_PL_FLAG_TOPDOWN); - if (ret) { - drm_gem_vram_put(gbo); - goto err_drm_gem_vram_put; - } - ast->cursor.gbo[i] = gbo; - } - - return drmm_add_action_or_reset(dev, ast_cursor_release, NULL); - -err_drm_gem_vram_put: - while (i) { - --i; - gbo = ast->cursor.gbo[i]; - drm_gem_vram_unpin(gbo); - drm_gem_vram_put(gbo); - } - return ret; -} - -static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height) -{ - union { - u32 ul; - u8 b[4]; - } srcdata32[2], data32; - union { - u16 us; - u8 b[2]; - } data16; - u32 csum = 0; - s32 alpha_dst_delta, last_alpha_dst_delta; - u8 __iomem *dstxor; - const u8 *srcxor; - int i, j; - u32 per_pixel_copy, two_pixel_copy; - - alpha_dst_delta = AST_MAX_HWC_WIDTH << 1; - last_alpha_dst_delta = alpha_dst_delta - (width << 1); - - srcxor = src; - dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta; - per_pixel_copy = width & 1; - two_pixel_copy = width >> 1; - - for (j = 0; j < height; j++) { - for (i = 0; i < two_pixel_copy; i++) { - srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; - srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; - data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); - data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); - data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4); - data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4); - - writel(data32.ul, dstxor); - csum += data32.ul; - - dstxor += 4; - srcxor += 8; - - } - - for (i = 0; i < per_pixel_copy; i++) { - srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; - data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); - data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); - writew(data16.us, dstxor); - csum += (u32)data16.us; - - dstxor += 2; - srcxor += 4; - } - dstxor += last_alpha_dst_delta; - } - - /* write checksum + signature */ - dst += AST_HWC_SIZE; - writel(csum, dst); - writel(width, dst + AST_HWC_SIGNATURE_SizeX); - writel(height, dst + AST_HWC_SIGNATURE_SizeY); - writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX); - writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); -} - -int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb) -{ - struct drm_device *dev = &ast->base; - struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index]; - struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]); - struct dma_buf_map src_map, dst_map; - void __iomem *dst; - void *src; - int ret; - - if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) || - drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT)) - return -EINVAL; - - ret = drm_gem_vram_vmap(src_gbo, &src_map); - if (ret) - return ret; - src = src_map.vaddr; /* TODO: Use mapping abstraction properly */ - - ret = drm_gem_vram_vmap(dst_gbo, &dst_map); - if (ret) - goto err_drm_gem_vram_vunmap; - dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */ - - /* do data transfer to cursor BO */ - update_cursor_image(dst, src, fb->width, fb->height); - - drm_gem_vram_vunmap(dst_gbo, &dst_map); - drm_gem_vram_vunmap(src_gbo, 
&src_map); - - return 0; - -err_drm_gem_vram_vunmap: - drm_gem_vram_vunmap(src_gbo, &src_map); - return ret; -} - -static void ast_cursor_set_base(struct ast_private *ast, u64 address) -{ - u8 addr0 = (address >> 3) & 0xff; - u8 addr1 = (address >> 11) & 0xff; - u8 addr2 = (address >> 19) & 0xff; - - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2); -} - -void ast_cursor_page_flip(struct ast_private *ast) -{ - struct drm_device *dev = &ast->base; - struct drm_gem_vram_object *gbo; - s64 off; - - gbo = ast->cursor.gbo[ast->cursor.next_index]; - - off = drm_gem_vram_offset(gbo); - if (drm_WARN_ON_ONCE(dev, off < 0)) - return; /* Bug: we didn't pin the cursor HW BO to VRAM. */ - - ast_cursor_set_base(ast, off); - - ++ast->cursor.next_index; - ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo); -} - -static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y, - u8 x_offset, u8 y_offset) -{ - u8 x0 = (x & 0x00ff); - u8 x1 = (x & 0x0f00) >> 8; - u8 y0 = (y & 0x00ff); - u8 y1 = (y & 0x0700) >> 8; - - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1); -} - -void ast_cursor_show(struct ast_private *ast, int x, int y, - unsigned int offset_x, unsigned int offset_y) -{ - struct drm_device *dev = &ast->base; - struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index]; - struct dma_buf_map map; - u8 x_offset, y_offset; - u8 __iomem *dst; - u8 __iomem *sig; - u8 jreg; - int ret; - - ret = drm_gem_vram_vmap(gbo, &map); - if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret)) - return; - dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */ - - sig = dst + AST_HWC_SIZE; - writel(x, sig + AST_HWC_SIGNATURE_X); - writel(y, sig + AST_HWC_SIGNATURE_Y); - - drm_gem_vram_vunmap(gbo, &map); - - if (x < 0) { - x_offset = (-x) + offset_x; - x = 0; - } else { - x_offset = offset_x; - } - if (y < 0) { - y_offset = (-y) + offset_y; - y = 0; - } else { - y_offset = offset_y; - } - - ast_cursor_set_location(ast, x, y, x_offset, y_offset); - - /* dummy write to fire HWC */ - jreg = 0x02 | - 0x01; /* enable ARGB4444 cursor */ - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg); -} - -void ast_cursor_hide(struct ast_private *ast) -{ - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00); -} diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index ea8164e7a6dc..01837bea18c2 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/pci.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> @@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); drm_dev_unregister(dev); + drm_atomic_helper_shutdown(dev); } static int ast_drm_freeze(struct drm_device *dev) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index f871fc36c2f7..e82ab8628770 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -81,6 +81,9 @@ enum ast_tx_chip { #define AST_DRAM_4Gx16 7 
#define AST_DRAM_8Gx16 8 +/* + * Cursor plane + */ #define AST_MAX_HWC_WIDTH 64 #define AST_MAX_HWC_HEIGHT 64 @@ -99,6 +102,28 @@ enum ast_tx_chip { #define AST_HWC_SIGNATURE_HOTSPOTX 0x14 #define AST_HWC_SIGNATURE_HOTSPOTY 0x18 +struct ast_cursor_plane { + struct drm_plane base; + + struct { + struct drm_gem_vram_object *gbo; + struct dma_buf_map map; + u64 off; + } hwc[AST_DEFAULT_HWC_NUM]; + + unsigned int next_hwc_index; +}; + +static inline struct ast_cursor_plane * +to_ast_cursor_plane(struct drm_plane *plane) +{ + return container_of(plane, struct ast_cursor_plane, base); +} + +/* + * Connector with i2c channel + */ + struct ast_i2c_chan { struct i2c_adapter adapter; struct drm_device *dev; @@ -116,6 +141,10 @@ to_ast_connector(struct drm_connector *connector) return container_of(connector, struct ast_connector, base); } +/* + * Device + */ + struct ast_private { struct drm_device base; @@ -130,13 +159,8 @@ struct ast_private { int fb_mtrr; - struct { - struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM]; - unsigned int next_index; - } cursor; - struct drm_plane primary_plane; - struct drm_plane cursor_plane; + struct ast_cursor_plane cursor_plane; struct drm_crtc crtc; struct drm_encoder encoder; struct ast_connector connector; @@ -179,6 +203,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv, #define AST_IO_VGAIR1_VREFRESH BIT(3) +#define AST_IO_VGACRCB_HWC_ENABLED BIT(1) +#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ + #define __ast_read(x) \ static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \ u##x val = 0;\ @@ -314,12 +341,4 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); u8 ast_get_dp501_max_clk(struct drm_device *dev); void ast_init_3rdtx(struct drm_device *dev); -/* ast_cursor.c */ -int ast_cursor_init(struct ast_private *ast); -int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb); -void ast_cursor_page_flip(struct ast_private *ast); -void ast_cursor_show(struct ast_private *ast, int x, int y, - unsigned int offset_x, unsigned int offset_y); -void ast_cursor_hide(struct ast_private *ast); - #endif diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 988b270fea5e..36d9575aa27b 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -37,6 +37,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_plane_helper.h> @@ -535,48 +536,54 @@ static const uint32_t ast_primary_plane_formats[] = { }; static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; struct ast_crtc_state *ast_crtc_state; int ret; - if (!state->crtc) + if (!new_plane_state->crtc) return 0; - crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, false, true); if (ret) return ret; - if (!state->visible) + if (!new_plane_state->visible) return 0; ast_crtc_state = 
to_ast_crtc_state(crtc_state); - ast_crtc_state->format = state->fb->format; + ast_crtc_state->format = new_plane_state->fb->format; return 0; } static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct drm_device *dev = plane->dev; struct ast_private *ast = to_ast_private(dev); - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_gem_vram_object *gbo; s64 gpu_addr; - struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *fb = new_state->fb; struct drm_framebuffer *old_fb = old_state->fb; if (!old_fb || (fb->format != old_fb->format)) { - struct drm_crtc_state *crtc_state = state->crtc->state; + struct drm_crtc_state *crtc_state = new_state->crtc->state; struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info; @@ -597,7 +604,7 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane, static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct ast_private *ast = to_ast_private(plane->dev); @@ -621,55 +628,161 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = { .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, }; +static int ast_primary_plane_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_plane *primary_plane = &ast->primary_plane; + int ret; + + ret = drm_universal_plane_init(dev, primary_plane, 0x01, + &ast_primary_plane_funcs, + ast_primary_plane_formats, + ARRAY_SIZE(ast_primary_plane_formats), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret); + return ret; + } + drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs); + + return 0; +} + /* * Cursor plane */ -static const uint32_t ast_cursor_plane_formats[] = { - DRM_FORMAT_ARGB8888, -}; +static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height) +{ + union { + u32 ul; + u8 b[4]; + } srcdata32[2], data32; + union { + u16 us; + u8 b[2]; + } data16; + u32 csum = 0; + s32 alpha_dst_delta, last_alpha_dst_delta; + u8 __iomem *dstxor; + const u8 *srcxor; + int i, j; + u32 per_pixel_copy, two_pixel_copy; + + alpha_dst_delta = AST_MAX_HWC_WIDTH << 1; + last_alpha_dst_delta = alpha_dst_delta - (width << 1); + + srcxor = src; + dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta; + per_pixel_copy = width & 1; + two_pixel_copy = width >> 1; + + for (j = 0; j < height; j++) { + for (i = 0; i < two_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; + data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); + data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); + data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4); + data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4); + + writel(data32.ul, dstxor); + csum += data32.ul; + + dstxor += 4; + srcxor += 8; + + } -static int -ast_cursor_plane_helper_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) + for (i = 0; i < per_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + 
data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); + data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); + writew(data16.us, dstxor); + csum += (u32)data16.us; + + dstxor += 2; + srcxor += 4; + } + dstxor += last_alpha_dst_delta; + } + + /* write checksum + signature */ + dst += AST_HWC_SIZE; + writel(csum, dst); + writel(width, dst + AST_HWC_SIGNATURE_SizeX); + writel(height, dst + AST_HWC_SIGNATURE_SizeY); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); +} + +static void ast_set_cursor_base(struct ast_private *ast, u64 address) { - struct drm_framebuffer *fb = new_state->fb; - struct drm_crtc *crtc = new_state->crtc; - struct ast_private *ast; - int ret; + u8 addr0 = (address >> 3) & 0xff; + u8 addr1 = (address >> 11) & 0xff; + u8 addr2 = (address >> 19) & 0xff; - if (!crtc || !fb) - return 0; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2); +} - ast = to_ast_private(plane->dev); +static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y, + u8 x_offset, u8 y_offset) +{ + u8 x0 = (x & 0x00ff); + u8 x1 = (x & 0x0f00) >> 8; + u8 y0 = (y & 0x00ff); + u8 y1 = (y & 0x0700) >> 8; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1); +} - ret = ast_cursor_blit(ast, fb); - if (ret) - return ret; +static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled) +{ + static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP | + AST_IO_VGACRCB_HWC_ENABLED); - return 0; + u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP; + + if (enabled) + vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, mask, vgacrcb); } +static const uint32_t ast_cursor_plane_formats[] = { + DRM_FORMAT_ARGB8888, +}; + static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_framebuffer *fb = state->fb; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; struct drm_crtc_state *crtc_state; int ret; - if (!state->crtc) + if (!new_plane_state->crtc) return 0; - crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true); if (ret) return ret; - if (!state->visible) + if (!new_plane_state->visible) return 0; if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT) @@ -680,51 +793,192 @@ static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane, static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; - struct drm_framebuffer *fb = state->fb; + struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane); + struct drm_plane_state *old_state = 
drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state); + struct drm_framebuffer *fb = new_state->fb; struct ast_private *ast = to_ast_private(plane->dev); + struct dma_buf_map dst_map = + ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map; + u64 dst_off = + ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off; + struct dma_buf_map src_map = shadow_plane_state->map[0]; unsigned int offset_x, offset_y; + u16 x, y; + u8 x_offset, y_offset; + u8 __iomem *dst; + u8 __iomem *sig; + const u8 *src; + + src = src_map.vaddr; /* TODO: Use mapping abstraction properly */ + dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */ + sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */ + + /* + * Do data transfer to HW cursor BO. If a new cursor image was installed, + * point the scanout engine to dst_gbo's offset and page-flip the HWC buffers. + */ + + ast_update_cursor_image(dst, src, fb->width, fb->height); + + if (new_state->fb != old_state->fb) { + ast_set_cursor_base(ast, dst_off); + + ++ast_cursor_plane->next_hwc_index; + ast_cursor_plane->next_hwc_index %= ARRAY_SIZE(ast_cursor_plane->hwc); + } + + /* + * Update location in HWC signature and registers. + */ + + writel(new_state->crtc_x, sig + AST_HWC_SIGNATURE_X); + writel(new_state->crtc_y, sig + AST_HWC_SIGNATURE_Y); offset_x = AST_MAX_HWC_WIDTH - fb->width; - offset_y = AST_MAX_HWC_WIDTH - fb->height; + offset_y = AST_MAX_HWC_HEIGHT - fb->height; - if (state->fb != old_state->fb) { - /* A new cursor image was installed. */ - ast_cursor_page_flip(ast); + if (new_state->crtc_x < 0) { + x_offset = (-new_state->crtc_x) + offset_x; + x = 0; + } else { + x_offset = offset_x; + x = new_state->crtc_x; + } + if (new_state->crtc_y < 0) { + y_offset = (-new_state->crtc_y) + offset_y; + y = 0; + } else { + y_offset = offset_y; + y = new_state->crtc_y; } - ast_cursor_show(ast, state->crtc_x, state->crtc_y, - offset_x, offset_y); + ast_set_cursor_location(ast, x, y, x_offset, y_offset); + + /* Dummy write to enable HWC and make the HW pick-up the changes. 
*/ + ast_set_cursor_enabled(ast, true); } static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct ast_private *ast = to_ast_private(plane->dev); - ast_cursor_hide(ast); + ast_set_cursor_enabled(ast, false); } static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = { - .prepare_fb = ast_cursor_plane_helper_prepare_fb, - .cleanup_fb = NULL, /* not required for cursor plane */ + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, .atomic_check = ast_cursor_plane_helper_atomic_check, .atomic_update = ast_cursor_plane_helper_atomic_update, .atomic_disable = ast_cursor_plane_helper_atomic_disable, }; +static void ast_cursor_plane_destroy(struct drm_plane *plane) +{ + struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane); + size_t i; + struct drm_gem_vram_object *gbo; + struct dma_buf_map map; + + for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) { + gbo = ast_cursor_plane->hwc[i].gbo; + map = ast_cursor_plane->hwc[i].map; + drm_gem_vram_vunmap(gbo, &map); + drm_gem_vram_unpin(gbo); + drm_gem_vram_put(gbo); + } + + drm_plane_cleanup(plane); +} + static const struct drm_plane_funcs ast_cursor_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_plane_cleanup, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .destroy = ast_cursor_plane_destroy, + DRM_GEM_SHADOW_PLANE_FUNCS, }; +static int ast_cursor_plane_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane; + struct drm_plane *cursor_plane = &ast_cursor_plane->base; + size_t size, i; + struct drm_gem_vram_object *gbo; + struct dma_buf_map map; + int ret; + s64 off; + + /* + * Allocate backing storage for cursors. The BOs are permanently + * pinned to the top end of the VRAM. + */ + + size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); + + for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) { + gbo = drm_gem_vram_create(dev, size, 0); + if (IS_ERR(gbo)) { + ret = PTR_ERR(gbo); + goto err_hwc; + } + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM | + DRM_GEM_VRAM_PL_FLAG_TOPDOWN); + if (ret) + goto err_drm_gem_vram_put; + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) + goto err_drm_gem_vram_unpin; + off = drm_gem_vram_offset(gbo); + if (off < 0) { + ret = off; + goto err_drm_gem_vram_vunmap; + } + ast_cursor_plane->hwc[i].gbo = gbo; + ast_cursor_plane->hwc[i].map = map; + ast_cursor_plane->hwc[i].off = off; + } + + /* + * Create the cursor plane. The plane's destroy callback will release + * the backing storages' BO memory. 
+ */ + + ret = drm_universal_plane_init(dev, cursor_plane, 0x01, + &ast_cursor_plane_funcs, + ast_cursor_plane_formats, + ARRAY_SIZE(ast_cursor_plane_formats), + NULL, DRM_PLANE_TYPE_CURSOR, NULL); + if (ret) { + drm_err(dev, "drm_universal_plane failed(): %d\n", ret); + goto err_hwc; + } + drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs); + + return 0; + +err_hwc: + while (i) { + --i; + gbo = ast_cursor_plane->hwc[i].gbo; + map = ast_cursor_plane->hwc[i].map; +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(gbo, &map); +err_drm_gem_vram_unpin: + drm_gem_vram_unpin(gbo); +err_drm_gem_vram_put: + drm_gem_vram_put(gbo); + } + return ret; +} + /* * CRTC */ @@ -917,7 +1171,7 @@ static int ast_crtc_init(struct drm_device *dev) int ret; ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane, - &ast->cursor_plane, &ast_crtc_funcs, + &ast->cursor_plane.base, &ast_crtc_funcs, NULL); if (ret) return ret; @@ -1109,10 +1363,6 @@ int ast_mode_config_init(struct ast_private *ast) struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; - ret = ast_cursor_init(ast); - if (ret) - return ret; - ret = drmm_mode_config_init(dev); if (ret) return ret; @@ -1138,30 +1388,14 @@ int ast_mode_config_init(struct ast_private *ast) dev->mode_config.helper_private = &ast_mode_config_helper_funcs; - memset(&ast->primary_plane, 0, sizeof(ast->primary_plane)); - ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01, - &ast_primary_plane_funcs, - ast_primary_plane_formats, - ARRAY_SIZE(ast_primary_plane_formats), - NULL, DRM_PLANE_TYPE_PRIMARY, NULL); - if (ret) { - drm_err(dev, "ast: drm_universal_plane_init() failed: %d\n", ret); + + ret = ast_primary_plane_init(ast); + if (ret) return ret; - } - drm_plane_helper_add(&ast->primary_plane, - &ast_primary_plane_helper_funcs); - ret = drm_universal_plane_init(dev, &ast->cursor_plane, 0x01, - &ast_cursor_plane_funcs, - ast_cursor_plane_formats, - ARRAY_SIZE(ast_cursor_plane_formats), - NULL, DRM_PLANE_TYPE_CURSOR, NULL); - if (ret) { - drm_err(dev, "drm_universal_plane_failed(): %d\n", ret); + ret = ast_cursor_plane_init(ast); + if (ret) return ret; - } - drm_plane_helper_add(&ast->cursor_plane, - &ast_cursor_plane_helper_funcs); ast_crtc_init(dev); ast_encoder_init(dev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 98fb53b75f77..65af56e47129 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -557,103 +557,10 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data) return IRQ_HANDLED; } -struct atmel_hlcdc_dc_commit { - struct work_struct work; - struct drm_device *dev; - struct drm_atomic_state *state; -}; - -static void -atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit) -{ - struct drm_device *dev = commit->dev; - struct atmel_hlcdc_dc *dc = dev->dev_private; - struct drm_atomic_state *old_state = commit->state; - - /* Apply the atomic update. */ - drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, 0); - drm_atomic_helper_commit_modeset_enables(dev, old_state); - - drm_atomic_helper_wait_for_vblanks(dev, old_state); - - drm_atomic_helper_cleanup_planes(dev, old_state); - - drm_atomic_state_put(old_state); - - /* Complete the commit, wake up any waiter. 
*/ - spin_lock(&dc->commit.wait.lock); - dc->commit.pending = false; - wake_up_all_locked(&dc->commit.wait); - spin_unlock(&dc->commit.wait.lock); - - kfree(commit); -} - -static void atmel_hlcdc_dc_atomic_work(struct work_struct *work) -{ - struct atmel_hlcdc_dc_commit *commit = - container_of(work, struct atmel_hlcdc_dc_commit, work); - - atmel_hlcdc_dc_atomic_complete(commit); -} - -static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool async) -{ - struct atmel_hlcdc_dc *dc = dev->dev_private; - struct atmel_hlcdc_dc_commit *commit; - int ret; - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) - return ret; - - /* Allocate the commit object. */ - commit = kzalloc(sizeof(*commit), GFP_KERNEL); - if (!commit) { - ret = -ENOMEM; - goto error; - } - - INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work); - commit->dev = dev; - commit->state = state; - - spin_lock(&dc->commit.wait.lock); - ret = wait_event_interruptible_locked(dc->commit.wait, - !dc->commit.pending); - if (ret == 0) - dc->commit.pending = true; - spin_unlock(&dc->commit.wait.lock); - - if (ret) - goto err_free; - - /* We have our own synchronization through the commit lock. */ - BUG_ON(drm_atomic_helper_swap_state(state, false) < 0); - - /* Swap state succeeded, this is the point of no return. */ - drm_atomic_state_get(state); - if (async) - queue_work(dc->wq, &commit->work); - else - atmel_hlcdc_dc_atomic_complete(commit); - - return 0; - -err_free: - kfree(commit); -error: - drm_atomic_helper_cleanup_planes(dev, state); - return ret; -} - static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = drm_gem_fb_create, .atomic_check = drm_atomic_helper_check, - .atomic_commit = atmel_hlcdc_dc_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev) @@ -712,11 +619,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) if (!dc) return -ENOMEM; - dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0); - if (!dc->wq) - return -ENOMEM; - - init_waitqueue_head(&dc->commit.wait); dc->desc = match->data; dc->hlcdc = dev_get_drvdata(dev->dev->parent); dev->dev_private = dc; @@ -724,7 +626,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = clk_prepare_enable(dc->hlcdc->periph_clk); if (ret) { dev_err(dev->dev, "failed to enable periph_clk\n"); - goto err_destroy_wq; + return ret; } pm_runtime_enable(dev->dev); @@ -761,9 +663,6 @@ err_periph_clk_disable: pm_runtime_disable(dev->dev); clk_disable_unprepare(dc->hlcdc->periph_clk); -err_destroy_wq: - destroy_workqueue(dc->wq); - return ret; } @@ -771,7 +670,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; - flush_workqueue(dc->wq); drm_kms_helper_poll_fini(dev); drm_atomic_helper_shutdown(dev); drm_mode_config_cleanup(dev); @@ -784,7 +682,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev) pm_runtime_disable(dev->dev); clk_disable_unprepare(dc->hlcdc->periph_clk); - destroy_workqueue(dc->wq); } static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h index 469d4507e576..5b5c774e0edf 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -331,9 +331,7 @@ struct atmel_hlcdc_dc_desc { * @crtc: CRTC provided by the display controller * @planes: instantiated planes * @layers: active 
HLCDC layers - * @wq: display controller workqueue * @suspend: used to store the HLCDC state when entering suspend - * @commit: used for async commit handling */ struct atmel_hlcdc_dc { const struct atmel_hlcdc_dc_desc *desc; @@ -341,15 +339,10 @@ struct atmel_hlcdc_dc { struct atmel_hlcdc *hlcdc; struct drm_crtc *crtc; struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS]; - struct workqueue_struct *wq; struct { u32 imr; struct drm_atomic_state *state; } suspend; - struct { - wait_queue_head_t wait; - bool pending; - } commit; }; extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 15bc93163833..a077d93c78d7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -593,22 +593,23 @@ atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane, } static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, - struct drm_plane_state *s) + struct drm_atomic_state *state) { + struct drm_plane_state *s = drm_atomic_get_new_plane_state(state, p); struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); - struct atmel_hlcdc_plane_state *state = + struct atmel_hlcdc_plane_state *hstate = drm_plane_state_to_atmel_hlcdc_plane_state(s); const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc; - struct drm_framebuffer *fb = state->base.fb; + struct drm_framebuffer *fb = hstate->base.fb; const struct drm_display_mode *mode; struct drm_crtc_state *crtc_state; int ret; int i; - if (!state->base.crtc || WARN_ON(!fb)) + if (!hstate->base.crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc); mode = &crtc_state->adjusted_mode; ret = drm_atomic_helper_check_plane_state(s, crtc_state, @@ -617,101 +618,101 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, if (ret || !s->visible) return ret; - state->src_x = s->src.x1; - state->src_y = s->src.y1; - state->src_w = drm_rect_width(&s->src); - state->src_h = drm_rect_height(&s->src); - state->crtc_x = s->dst.x1; - state->crtc_y = s->dst.y1; - state->crtc_w = drm_rect_width(&s->dst); - state->crtc_h = drm_rect_height(&s->dst); + hstate->src_x = s->src.x1; + hstate->src_y = s->src.y1; + hstate->src_w = drm_rect_width(&s->src); + hstate->src_h = drm_rect_height(&s->src); + hstate->crtc_x = s->dst.x1; + hstate->crtc_y = s->dst.y1; + hstate->crtc_w = drm_rect_width(&s->dst); + hstate->crtc_h = drm_rect_height(&s->dst); - if ((state->src_x | state->src_y | state->src_w | state->src_h) & + if ((hstate->src_x | hstate->src_y | hstate->src_w | hstate->src_h) & SUBPIXEL_MASK) return -EINVAL; - state->src_x >>= 16; - state->src_y >>= 16; - state->src_w >>= 16; - state->src_h >>= 16; + hstate->src_x >>= 16; + hstate->src_y >>= 16; + hstate->src_w >>= 16; + hstate->src_h >>= 16; - state->nplanes = fb->format->num_planes; - if (state->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES) + hstate->nplanes = fb->format->num_planes; + if (hstate->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES) return -EINVAL; - for (i = 0; i < state->nplanes; i++) { + for (i = 0; i < hstate->nplanes; i++) { unsigned int offset = 0; int xdiv = i ? fb->format->hsub : 1; int ydiv = i ? 
fb->format->vsub : 1; - state->bpp[i] = fb->format->cpp[i]; - if (!state->bpp[i]) + hstate->bpp[i] = fb->format->cpp[i]; + if (!hstate->bpp[i]) return -EINVAL; - switch (state->base.rotation & DRM_MODE_ROTATE_MASK) { + switch (hstate->base.rotation & DRM_MODE_ROTATE_MASK) { case DRM_MODE_ROTATE_90: - offset = (state->src_y / ydiv) * + offset = (hstate->src_y / ydiv) * fb->pitches[i]; - offset += ((state->src_x + state->src_w - 1) / - xdiv) * state->bpp[i]; - state->xstride[i] = -(((state->src_h - 1) / ydiv) * + offset += ((hstate->src_x + hstate->src_w - 1) / + xdiv) * hstate->bpp[i]; + hstate->xstride[i] = -(((hstate->src_h - 1) / ydiv) * fb->pitches[i]) - - (2 * state->bpp[i]); - state->pstride[i] = fb->pitches[i] - state->bpp[i]; + (2 * hstate->bpp[i]); + hstate->pstride[i] = fb->pitches[i] - hstate->bpp[i]; break; case DRM_MODE_ROTATE_180: - offset = ((state->src_y + state->src_h - 1) / + offset = ((hstate->src_y + hstate->src_h - 1) / ydiv) * fb->pitches[i]; - offset += ((state->src_x + state->src_w - 1) / - xdiv) * state->bpp[i]; - state->xstride[i] = ((((state->src_w - 1) / xdiv) - 1) * - state->bpp[i]) - fb->pitches[i]; - state->pstride[i] = -2 * state->bpp[i]; + offset += ((hstate->src_x + hstate->src_w - 1) / + xdiv) * hstate->bpp[i]; + hstate->xstride[i] = ((((hstate->src_w - 1) / xdiv) - 1) * + hstate->bpp[i]) - fb->pitches[i]; + hstate->pstride[i] = -2 * hstate->bpp[i]; break; case DRM_MODE_ROTATE_270: - offset = ((state->src_y + state->src_h - 1) / + offset = ((hstate->src_y + hstate->src_h - 1) / ydiv) * fb->pitches[i]; - offset += (state->src_x / xdiv) * state->bpp[i]; - state->xstride[i] = ((state->src_h - 1) / ydiv) * + offset += (hstate->src_x / xdiv) * hstate->bpp[i]; + hstate->xstride[i] = ((hstate->src_h - 1) / ydiv) * fb->pitches[i]; - state->pstride[i] = -fb->pitches[i] - state->bpp[i]; + hstate->pstride[i] = -fb->pitches[i] - hstate->bpp[i]; break; case DRM_MODE_ROTATE_0: default: - offset = (state->src_y / ydiv) * fb->pitches[i]; - offset += (state->src_x / xdiv) * state->bpp[i]; - state->xstride[i] = fb->pitches[i] - - ((state->src_w / xdiv) * - state->bpp[i]); - state->pstride[i] = 0; + offset = (hstate->src_y / ydiv) * fb->pitches[i]; + offset += (hstate->src_x / xdiv) * hstate->bpp[i]; + hstate->xstride[i] = fb->pitches[i] - + ((hstate->src_w / xdiv) * + hstate->bpp[i]); + hstate->pstride[i] = 0; break; } - state->offsets[i] = offset + fb->offsets[i]; + hstate->offsets[i] = offset + fb->offsets[i]; } /* * Swap width and size in case of 90 or 270 degrees rotation */ - if (drm_rotation_90_or_270(state->base.rotation)) { - swap(state->src_w, state->src_h); + if (drm_rotation_90_or_270(hstate->base.rotation)) { + swap(hstate->src_w, hstate->src_h); } if (!desc->layout.size && - (mode->hdisplay != state->crtc_w || - mode->vdisplay != state->crtc_h)) + (mode->hdisplay != hstate->crtc_w || + mode->vdisplay != hstate->crtc_h)) return -EINVAL; - if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) && + if ((hstate->crtc_h != hstate->src_h || hstate->crtc_w != hstate->src_w) && (!desc->layout.memsize || - state->base.fb->format->has_alpha)) + hstate->base.fb->format->has_alpha)) return -EINVAL; return 0; } static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); @@ -730,27 +731,29 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p, } static void atmel_hlcdc_plane_atomic_update(struct 
drm_plane *p, - struct drm_plane_state *old_s) + struct drm_atomic_state *state) { + struct drm_plane_state *new_s = drm_atomic_get_new_plane_state(state, + p); struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); - struct atmel_hlcdc_plane_state *state = - drm_plane_state_to_atmel_hlcdc_plane_state(p->state); + struct atmel_hlcdc_plane_state *hstate = + drm_plane_state_to_atmel_hlcdc_plane_state(new_s); u32 sr; - if (!p->state->crtc || !p->state->fb) + if (!new_s->crtc || !new_s->fb) return; - if (!state->base.visible) { - atmel_hlcdc_plane_atomic_disable(p, old_s); + if (!hstate->base.visible) { + atmel_hlcdc_plane_atomic_disable(p, state); return; } - atmel_hlcdc_plane_update_pos_and_size(plane, state); - atmel_hlcdc_plane_update_general_settings(plane, state); - atmel_hlcdc_plane_update_format(plane, state); - atmel_hlcdc_plane_update_clut(plane, state); - atmel_hlcdc_plane_update_buffers(plane, state); - atmel_hlcdc_plane_update_disc_area(plane, state); + atmel_hlcdc_plane_update_pos_and_size(plane, hstate); + atmel_hlcdc_plane_update_general_settings(plane, hstate); + atmel_hlcdc_plane_update_format(plane, hstate); + atmel_hlcdc_plane_update_clut(plane, hstate); + atmel_hlcdc_plane_update_buffers(plane, hstate); + atmel_hlcdc_plane_update_disc_area(plane, hstate); /* Enable the overrun interrupts. */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IER, diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index d0c65610ebb5..989a05bc8197 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2457,7 +2457,7 @@ clk_disable: static int cdns_mhdp_remove(struct platform_device *pdev) { - struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev); + struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev); unsigned long timeout = msecs_to_jiffies(100); bool stop_fw = false; int ret; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 0c98d27f84ac..fee27952ec6d 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -14,6 +14,7 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/wait.h> +#include <linux/workqueue.h> #include <sound/hdmi-codec.h> @@ -36,6 +37,7 @@ struct lt9611uxc { struct mutex ocm_lock; struct wait_queue_head wq; + struct work_struct work; struct device_node *dsi0_node; struct device_node *dsi1_node; @@ -52,6 +54,8 @@ struct lt9611uxc { bool hpd_supported; bool edid_read; + /* can be accessed from different threads, so protect this with ocm_lock */ + bool hdmi_connected; uint8_t fw_version; }; @@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id) if (irq_status) regmap_write(lt9611uxc->regmap, 0xb022, 0); - lt9611uxc_unlock(lt9611uxc); - - if (irq_status & BIT(0)) + if (irq_status & BIT(0)) { lt9611uxc->edid_read = !!(hpd_status & BIT(0)); + wake_up_all(<9611uxc->wq); + } if (irq_status & BIT(1)) { - if (lt9611uxc->connector.dev) - drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); - else - drm_bridge_hpd_notify(<9611uxc->bridge, !!(hpd_status & BIT(1))); + lt9611uxc->hdmi_connected = hpd_status & BIT(1); + schedule_work(<9611uxc->work); } + lt9611uxc_unlock(lt9611uxc); + return IRQ_HANDLED; } +static void lt9611uxc_hpd_work(struct work_struct *work) +{ + struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work); 
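/*
 * Minimal sketch, not part of the patch above: with the reworked helper
 * vtable used throughout this series, plane callbacks receive the whole
 * struct drm_atomic_state and look up their own state from it. The driver
 * prefix "foo" and this callback are hypothetical.
 */
#include <drm/drm_atomic.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	if (!new_state->crtc || !new_state->fb)
		return;

	/*
	 * Program scanout from new_state; the old state, if needed, comes
	 * from drm_atomic_get_old_plane_state(state, plane).
	 */
}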
+ bool connected; + + if (lt9611uxc->connector.dev) + drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); + else { + + mutex_lock(<9611uxc->ocm_lock); + connected = lt9611uxc->hdmi_connected; + mutex_unlock(<9611uxc->ocm_lock); + + drm_bridge_hpd_notify(<9611uxc->bridge, + connected ? + connector_status_connected : + connector_status_disconnected); + } +} + static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) { gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); @@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); unsigned int reg_val = 0; int ret; - int connected = 1; + bool connected = true; + + lt9611uxc_lock(lt9611uxc); if (lt9611uxc->hpd_supported) { - lt9611uxc_lock(lt9611uxc); ret = regmap_read(lt9611uxc->regmap, 0xb023, ®_val); - lt9611uxc_unlock(lt9611uxc); if (ret) dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret); else connected = reg_val & BIT(1); } + lt9611uxc->hdmi_connected = connected; + + lt9611uxc_unlock(lt9611uxc); return connected ? connector_status_connected : connector_status_disconnected; @@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc) { return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read, - msecs_to_jiffies(100)); + msecs_to_jiffies(500)); } static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) @@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, ret = lt9611uxc_wait_for_edid(lt9611uxc); if (ret < 0) { dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret); - return ERR_PTR(ret); + return NULL; + } else if (ret == 0) { + dev_err(lt9611uxc->dev, "wait for EDID timeout\n"); + return NULL; } return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); @@ -926,6 +956,8 @@ retry: lt9611uxc->fw_version = ret; init_waitqueue_head(<9611uxc->wq); + INIT_WORK(<9611uxc->work, lt9611uxc_hpd_work); + ret = devm_request_threaded_irq(dev, client->irq, NULL, lt9611uxc_irq_thread_handler, IRQF_ONESHOT, "lt9611uxc", lt9611uxc); @@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client) struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client); disable_irq(client->irq); + flush_scheduled_work(); lt9611uxc_audio_exit(lt9611uxc); drm_bridge_remove(<9611uxc->bridge); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index dda60051854b..5b4547e0f775 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -53,6 +53,45 @@ void __drm_crtc_commit_free(struct kref *kref) EXPORT_SYMBOL(__drm_crtc_commit_free); /** + * drm_crtc_commit_wait - Waits for a commit to complete + * @commit: &drm_crtc_commit to wait for + * + * Waits for a given &drm_crtc_commit to be programmed into the + * hardware and flipped to. + * + * Returns: + * + * 0 on success, a negative error code otherwise. + */ +int drm_crtc_commit_wait(struct drm_crtc_commit *commit) +{ + unsigned long timeout = 10 * HZ; + int ret; + + if (!commit) + return 0; + + ret = wait_for_completion_timeout(&commit->hw_done, timeout); + if (!ret) { + DRM_ERROR("hw_done timed out\n"); + return -ETIMEDOUT; + } + + /* + * Currently no support for overwriting flips, hence + * stall for previous one to execute completely. 
+ */ + ret = wait_for_completion_timeout(&commit->flip_done, timeout); + if (!ret) { + DRM_ERROR("flip_done timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} +EXPORT_SYMBOL(drm_crtc_commit_wait); + +/** * drm_atomic_state_default_release - * release memory initialized by drm_atomic_state_init * @state: atomic state @@ -578,13 +617,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, ret = drm_plane_check_pixel_format(plane, fb->format->format, fb->modifier); if (ret) { - struct drm_format_name_buf format_name; - - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n", + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", plane->base.id, plane->name, - drm_get_format_name(fb->format->format, - &format_name), - fb->modifier); + &fb->format->format, fb->modifier); return ret; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 560aaecba31b..47ced8bf7e36 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -902,7 +902,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_check) continue; - ret = funcs->atomic_check(plane, new_plane_state); + ret = funcs->atomic_check(plane, state); if (ret) { DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", plane->base.id, plane->name); @@ -1742,7 +1742,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev, return -EBUSY; } - return funcs->atomic_async_check(plane, new_plane_state); + return funcs->atomic_async_check(plane, state); } EXPORT_SYMBOL(drm_atomic_helper_async_check); @@ -1772,7 +1772,7 @@ void drm_atomic_helper_async_commit(struct drm_device *dev, struct drm_framebuffer *old_fb = plane->state->fb; funcs = plane->helper_private; - funcs->atomic_async_update(plane, plane_state); + funcs->atomic_async_update(plane, state); /* * ->atomic_async_update() is supposed to update the @@ -2202,70 +2202,27 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state) struct drm_plane_state *old_plane_state; struct drm_connector *conn; struct drm_connector_state *old_conn_state; - struct drm_crtc_commit *commit; int i; long ret; for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { - commit = old_crtc_state->commit; - - if (!commit) - continue; - - ret = wait_for_completion_timeout(&commit->hw_done, - 10*HZ); - if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", - crtc->base.id, crtc->name); - - /* Currently no support for overwriting flips, hence - * stall for previous one to execute completely. */ - ret = wait_for_completion_timeout(&commit->flip_done, - 10*HZ); - if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + ret = drm_crtc_commit_wait(old_crtc_state->commit); + if (ret) + DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n", crtc->base.id, crtc->name); } for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { - commit = old_conn_state->commit; - - if (!commit) - continue; - - ret = wait_for_completion_timeout(&commit->hw_done, - 10*HZ); - if (ret == 0) - DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n", - conn->base.id, conn->name); - - /* Currently no support for overwriting flips, hence - * stall for previous one to execute completely. 
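/*
 * Minimal usage sketch, not part of the patch above: with the new
 * drm_crtc_commit_wait() helper, a driver's commit-tail (or helper code
 * like the hunk below) waits for the previous commit with one call
 * instead of open-coding the hw_done and flip_done waits.
 * "foo_wait_for_previous_commits" is hypothetical.
 */
#include <drm/drm_atomic.h>

static void foo_wait_for_previous_commits(struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i, ret;

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		/* drm_crtc_commit_wait() returns 0 for a NULL commit */
		ret = drm_crtc_commit_wait(old_crtc_state->commit);
		if (ret)
			DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n",
				  crtc->base.id, crtc->name);
	}
}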
*/ - ret = wait_for_completion_timeout(&commit->flip_done, - 10*HZ); - if (ret == 0) - DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n", + ret = drm_crtc_commit_wait(old_conn_state->commit); + if (ret) + DRM_ERROR("[CONNECTOR:%d:%s] commit wait timed out\n", conn->base.id, conn->name); } for_each_old_plane_in_state(old_state, plane, old_plane_state, i) { - commit = old_plane_state->commit; - - if (!commit) - continue; - - ret = wait_for_completion_timeout(&commit->hw_done, - 10*HZ); - if (ret == 0) - DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n", - plane->base.id, plane->name); - - /* Currently no support for overwriting flips, hence - * stall for previous one to execute completely. */ - ret = wait_for_completion_timeout(&commit->flip_done, - 10*HZ); - if (ret == 0) - DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n", + ret = drm_crtc_commit_wait(old_plane_state->commit); + if (ret) + DRM_ERROR("[PLANE:%d:%s] commit wait timed out\n", plane->base.id, plane->name); } } @@ -2571,9 +2528,9 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, no_disable) continue; - funcs->atomic_disable(plane, old_plane_state); + funcs->atomic_disable(plane, old_state); } else if (new_plane_state->crtc || disabling) { - funcs->atomic_update(plane, old_plane_state); + funcs->atomic_update(plane, old_state); } } @@ -2645,10 +2602,10 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) && plane_funcs->atomic_disable) - plane_funcs->atomic_disable(plane, old_plane_state); + plane_funcs->atomic_disable(plane, old_state); else if (new_plane_state->crtc || drm_atomic_plane_disabling(old_plane_state, new_plane_state)) - plane_funcs->atomic_update(plane, old_plane_state); + plane_funcs->atomic_update(plane, old_state); } if (crtc_funcs && crtc_funcs->atomic_flush) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index b7e9e1c2564c..ced09c7c06f9 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -7,6 +7,7 @@ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> */ +#include "drm/drm_modeset_lock.h" #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -1181,9 +1182,11 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp struct drm_device *dev = client->dev; struct drm_connector *connector; struct drm_mode_set *modeset; + struct drm_modeset_acquire_ctx ctx; int j; + int ret; - drm_modeset_lock_all(dev); + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); drm_client_for_each_modeset(modeset, client) { if (!modeset->crtc->enabled) continue; @@ -1195,7 +1198,7 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp dev->mode_config.dpms_property, dpms_mode); } } - drm_modeset_unlock_all(dev); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); } /** diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 9c4f9947b194..26a77a735905 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -735,11 +735,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, fb->format->format, fb->modifier); if (ret) { - struct drm_format_name_buf format_name; - - DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n", - drm_get_format_name(fb->format->format, - &format_name), + DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n", + &fb->format->format, fb->modifier); goto out; } diff --git 
a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h index 25ce42e79995..61e09f8a8d0f 100644 --- a/drivers/gpu/drm/drm_crtc_helper_internal.h +++ b/drivers/gpu/drm/drm_crtc_helper_internal.h @@ -32,16 +32,6 @@ #include <drm/drm_encoder.h> #include <drm/drm_modes.h> -/* drm_fb_helper.c */ -#ifdef CONFIG_DRM_FBDEV_EMULATION -int drm_fb_helper_modinit(void); -#else -static inline int drm_fb_helper_modinit(void) -{ - return 0; -} -#endif - /* drm_dp_aux_dev.c */ #ifdef CONFIG_DRM_DP_AUX_CHARDEV int drm_dp_aux_dev_init(void); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e82b596d646c..42a0c6888c33 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg) req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE; drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; } static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, @@ -2302,11 +2303,10 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, } if (port->pdt != DP_PEER_DEVICE_NONE && - drm_dp_mst_is_end_device(port->pdt, port->mcs)) { + drm_dp_mst_is_end_device(port->pdt, port->mcs) && + port->port_num >= DP_MST_LOGICAL_PORT_0) port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); - drm_connector_set_tile_property(port->connector); - } drm_connector_register(port->connector); return; @@ -2823,15 +2823,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, req_type = txmsg->msg[0] & 0x7f; if (req_type == DP_CONNECTION_STATUS_NOTIFY || - req_type == DP_RESOURCE_STATUS_NOTIFY) + req_type == DP_RESOURCE_STATUS_NOTIFY || + req_type == DP_CLEAR_PAYLOAD_ID_TABLE) hdr->broadcast = 1; else hdr->broadcast = 0; hdr->path_msg = txmsg->path_msg; - hdr->lct = mstb->lct; - hdr->lcr = mstb->lct - 1; - if (mstb->lct > 1) - memcpy(hdr->rad, mstb->rad, mstb->lct / 2); + if (hdr->broadcast) { + hdr->lct = 1; + hdr->lcr = 6; + } else { + hdr->lct = mstb->lct; + hdr->lcr = mstb->lct - 1; + } + + memcpy(hdr->rad, mstb->rad, hdr->lct / 2); return 0; } @@ -4224,6 +4230,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector, switch (port->pdt) { case DP_PEER_DEVICE_NONE: + break; case DP_PEER_DEVICE_MST_BRANCHING: if (!port->mcs) ret = connector_status_connected; @@ -4232,9 +4239,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector, case DP_PEER_DEVICE_SST_SINK: ret = connector_status_connected; /* for logical ports - cache the EDID */ - if (port->port_num >= 8 && !port->cached_edid) { + if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid) port->cached_edid = drm_get_edid(connector, &port->aux.ddc); - } break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) @@ -5119,11 +5125,16 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, if (!found) return 0; - /* This should never happen, as it means we tried to - * set a mode before querying the full_pbn + /* + * This could happen if the sink deasserted its HPD line, but + * the branch device still reports it as attached (PDT != NONE). 
*/ - if (WARN_ON(!port->full_pbn)) + if (!port->full_pbn) { + drm_dbg_atomic(port->mgr->dev, + "[MSTB:%p] [MST PORT:%p] no BW available for the port\n", + port->parent, port); return -EINVAL; + } pbn_used = vcpi->pbn; } else { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 20d22e41d7ce..c2f78dee9f2d 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -61,7 +61,7 @@ static struct idr drm_minors_idr; * prefer to embed struct drm_device into their own device * structure and call drm_dev_init() themselves. */ -static bool drm_core_init_complete = false; +static bool drm_core_init_complete; static struct dentry *drm_debugfs_root; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index a44c3a438059..f6baa2046124 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2048,7 +2048,7 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) if (shadow) vfree(shadow); - else + else if (fb_helper->buffer) drm_client_buffer_vunmap(fb_helper->buffer); drm_client_framebuffer_delete(fb_helper->buffer); @@ -2514,24 +2514,3 @@ void drm_fbdev_generic_setup(struct drm_device *dev, drm_client_register(&fb_helper->client); } EXPORT_SYMBOL(drm_fbdev_generic_setup); - -/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT) - * but the module doesn't depend on any fb console symbols. At least - * attempt to load fbcon to avoid leaving the system without a usable console. - */ -int __init drm_fb_helper_modinit(void) -{ -#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT) - const char name[] = "fbcon"; - struct module *fbcon; - - mutex_lock(&module_mutex); - fbcon = find_module(name); - mutex_unlock(&module_mutex); - - if (!fbcon) - request_module_nowait(name); -#endif - return 0; -} -EXPORT_SYMBOL(drm_fb_helper_modinit); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 6b116bfd747c..7efbccffc2ea 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -775,20 +775,19 @@ void drm_event_cancel_free(struct drm_device *dev, EXPORT_SYMBOL(drm_event_cancel_free); /** - * drm_send_event_locked - send DRM event to file descriptor + * drm_send_event_helper - send DRM event to file descriptor * @dev: DRM device * @e: DRM event to deliver + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC + * time domain * - * This function sends the event @e, initialized with drm_event_reserve_init(), - * to its associated userspace DRM file. Callers must already hold - * &drm_device.event_lock, see drm_send_event() for the unlocked version. - * - * Note that the core will take care of unlinking and disarming events when the - * corresponding DRM file is closed. Drivers need not worry about whether the - * DRM file for this event still exists and can call this function upon - * completion of the asynchronous work unconditionally. + * This helper function sends the event @e, initialized with + * drm_event_reserve_init(), to its associated userspace DRM file. + * The timestamp variant of dma_fence_signal is used when the caller + * sends a valid timestamp. 
*/ -void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +void drm_send_event_helper(struct drm_device *dev, + struct drm_pending_event *e, ktime_t timestamp) { assert_spin_locked(&dev->event_lock); @@ -799,7 +798,10 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) } if (e->fence) { - dma_fence_signal(e->fence); + if (timestamp) + dma_fence_signal_timestamp(e->fence, timestamp); + else + dma_fence_signal(e->fence); dma_fence_put(e->fence); } @@ -814,6 +816,48 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) wake_up_interruptible_poll(&e->file_priv->event_wait, EPOLLIN | EPOLLRDNORM); } + +/** + * drm_send_event_timestamp_locked - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC + * time domain + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. Callers must already hold + * &drm_device.event_lock. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. + */ +void drm_send_event_timestamp_locked(struct drm_device *dev, + struct drm_pending_event *e, ktime_t timestamp) +{ + drm_send_event_helper(dev, e, timestamp); +} +EXPORT_SYMBOL(drm_send_event_timestamp_locked); + +/** + * drm_send_event_locked - send DRM event to file descriptor + * @dev: DRM device + * @e: DRM event to deliver + * + * This function sends the event @e, initialized with drm_event_reserve_init(), + * to its associated userspace DRM file. Callers must already hold + * &drm_device.event_lock, see drm_send_event() for the unlocked version. + * + * Note that the core will take care of unlinking and disarming events when the + * corresponding DRM file is closed. Drivers need not worry about whether the + * DRM file for this event still exists and can call this function upon + * completion of the asynchronous work unconditionally. 
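/*
 * Minimal sketch, not part of the patch above: a page-flip completion
 * handler can pass a hardware vblank timestamp to the new
 * drm_send_event_timestamp_locked() so the fence attached to the event is
 * signalled with that time rather than the current time.
 * "foo_send_flip_done" and "hw_vblank_time" are hypothetical.
 */
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>

static void foo_send_flip_done(struct drm_device *dev,
			       struct drm_pending_vblank_event *e,
			       ktime_t hw_vblank_time)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_send_event_timestamp_locked(dev, &e->base, hw_vblank_time);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}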
+ */ +void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) +{ + drm_send_event_helper(dev, e, 0); +} EXPORT_SYMBOL(drm_send_event_locked); /** @@ -836,7 +880,7 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e) unsigned long irqflags; spin_lock_irqsave(&dev->event_lock, irqflags); - drm_send_event_locked(dev, e); + drm_send_event_helper(dev, e, 0); spin_unlock_irqrestore(&dev->event_lock, irqflags); } EXPORT_SYMBOL(drm_send_event); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index aca62ed51e82..4d01464b6f95 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -177,11 +177,8 @@ static int framebuffer_check(struct drm_device *dev, /* check if the format is supported at all */ if (!__drm_format_info(r->pixel_format)) { - struct drm_format_name_buf format_name; - - DRM_DEBUG_KMS("bad framebuffer format %s\n", - drm_get_format_name(r->pixel_format, - &format_name)); + DRM_DEBUG_KMS("bad framebuffer format %p4cc\n", + &r->pixel_format); return -EINVAL; } @@ -1160,14 +1157,12 @@ EXPORT_SYMBOL(drm_framebuffer_plane_height); void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, const struct drm_framebuffer *fb) { - struct drm_format_name_buf format_name; unsigned int i; drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm); drm_printf_indent(p, indent, "refcount=%u\n", drm_framebuffer_read_refcount(fb)); - drm_printf_indent(p, indent, "format=%s\n", - drm_get_format_name(fb->format->format, &format_name)); + drm_printf_indent(p, indent, "format=%p4cc\n", &fb->format->format); drm_printf_indent(p, indent, "modifier=0x%llx\n", fb->modifier); drm_printf_indent(p, indent, "size=%ux%u\n", fb->width, fb->height); drm_printf_indent(p, indent, "layers:\n"); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index c2ce78c4edc3..9989425e9875 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1212,6 +1212,7 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) return 0; } +EXPORT_SYMBOL(drm_gem_vmap); void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { @@ -1224,6 +1225,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) /* Always set the mapping to NULL. Callers may rely on this. */ dma_buf_map_clear(map); } +EXPORT_SYMBOL(drm_gem_vunmap); /** * drm_gem_lock_reservations - Sets up the ww context and acquires diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c new file mode 100644 index 000000000000..a005c5a0ba46 --- /dev/null +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/dma-resv.h> + +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_atomic_uapi.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "drm_internal.h" + +/** + * DOC: overview + * + * The GEM atomic helpers library implements generic atomic-commit + * functions for drivers that use GEM objects. Currently, it provides + * synchronization helpers, and plane state and framebuffer BO mappings + * for planes with shadow buffers. + * + * Before scanout, a plane's framebuffer needs to be synchronized with + * possible writers that draw into the framebuffer. 
All drivers should + * call drm_gem_plane_helper_prepare_fb() from their implementation of + * struct &drm_plane_helper.prepare_fb . It sets the plane's fence from + * the framebuffer so that the DRM core can synchronize access automatically. + * + * drm_gem_plane_helper_prepare_fb() can also be used directly as + * implementation of prepare_fb. For drivers based on + * struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb() + * provides equivalent functionality. + * + * .. code-block:: c + * + * #include <drm/drm_gem_atomic_helper.h> + * + * struct drm_plane_helper_funcs driver_plane_helper_funcs = { + * ..., + * . prepare_fb = drm_gem_plane_helper_prepare_fb, + * }; + * + * struct drm_simple_display_pipe_funcs driver_pipe_funcs = { + * ..., + * . prepare_fb = drm_gem_simple_display_pipe_prepare_fb, + * }; + * + * A driver using a shadow buffer copies the content of the shadow buffers + * into the HW's framebuffer memory during an atomic update. This requires + * a mapping of the shadow buffer into kernel address space. The mappings + * cannot be established by commit-tail functions, such as atomic_update, + * as this would violate locking rules around dma_buf_vmap(). + * + * The helpers for shadow-buffered planes establish and release mappings, + * and provide struct drm_shadow_plane_state, which stores the plane's mapping + * for commit-tail functons. + * + * Shadow-buffered planes can easily be enabled by using the provided macros + * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS. + * These macros set up the plane and plane-helper callbacks to point to the + * shadow-buffer helpers. + * + * .. code-block:: c + * + * #include <drm/drm_gem_atomic_helper.h> + * + * struct drm_plane_funcs driver_plane_funcs = { + * ..., + * DRM_GEM_SHADOW_PLANE_FUNCS, + * }; + * + * struct drm_plane_helper_funcs driver_plane_helper_funcs = { + * ..., + * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + * }; + * + * In the driver's atomic-update function, shadow-buffer mappings are available + * from the plane state. Use to_drm_shadow_plane_state() to upcast from + * struct drm_plane_state. + * + * .. code-block:: c + * + * void driver_plane_atomic_update(struct drm_plane *plane, + * struct drm_plane_state *old_plane_state) + * { + * struct drm_plane_state *plane_state = plane->state; + * struct drm_shadow_plane_state *shadow_plane_state = + * to_drm_shadow_plane_state(plane_state); + * + * // access shadow buffer via shadow_plane_state->map + * } + * + * A mapping address for each of the framebuffer's buffer object is stored in + * struct &drm_shadow_plane_state.map. The mappings are valid while the state + * is being used. + * + * Drivers that use struct drm_simple_display_pipe can use + * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the rsp + * callbacks. Access to shadow-buffer mappings is similar to regular + * atomic_update. + * + * .. 
code-block:: c + * + * struct drm_simple_display_pipe_funcs driver_pipe_funcs = { + * ..., + * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, + * }; + * + * void driver_pipe_enable(struct drm_simple_display_pipe *pipe, + * struct drm_crtc_state *crtc_state, + * struct drm_plane_state *plane_state) + * { + * struct drm_shadow_plane_state *shadow_plane_state = + * to_drm_shadow_plane_state(plane_state); + * + * // access shadow buffer via shadow_plane_state->map + * } + */ + +/* + * Plane Helpers + */ + +/** + * drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer + * @plane: Plane + * @state: Plane state the fence will be attached to + * + * This function extracts the exclusive fence from &drm_gem_object.resv and + * attaches it to plane state for the atomic helper to wait on. This is + * necessary to correctly implement implicit synchronization for any buffers + * shared as a struct &dma_buf. This function can be used as the + * &drm_plane_helper_funcs.prepare_fb callback. + * + * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple + * GEM based framebuffer drivers which have their buffers always pinned in + * memory. + * + * See drm_atomic_set_fence_for_plane() for a discussion of implicit and + * explicit fencing in atomic modeset updates. + */ +int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct drm_gem_object *obj; + struct dma_fence *fence; + + if (!state->fb) + return 0; + + obj = drm_gem_fb_get_obj(state->fb, 0); + fence = dma_resv_get_excl_rcu(obj->resv); + drm_atomic_set_fence_for_plane(state, fence); + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb); + +/** + * drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe + * @pipe: Simple display pipe + * @plane_state: Plane state + * + * This function uses drm_gem_plane_helper_prepare_fb() to extract the exclusive fence + * from &drm_gem_object.resv and attaches it to plane state for the atomic + * helper to wait on. This is necessary to correctly implement implicit + * synchronization for any buffers shared as a struct &dma_buf. Drivers can use + * this as their &drm_simple_display_pipe_funcs.prepare_fb callback. + * + * See drm_atomic_set_fence_for_plane() for a discussion of implicit and + * explicit fencing in atomic modeset updates. + */ +int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state) +{ + return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state); +} +EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb); + +/* + * Shadow-buffered Planes + */ + +/** + * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state + * @plane: the plane + * + * This function implements struct &drm_plane_funcs.atomic_duplicate_state for + * shadow-buffered planes. It assumes the existing state to be of type + * struct drm_shadow_plane_state and it allocates the new state to be of this + * type. + * + * The function does not duplicate existing mappings of the shadow buffers. + * Mappings are maintained during the atomic commit by the plane's prepare_fb + * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb() + * for corresponding helpers. + * + * Returns: + * A pointer to a new plane state on success, or NULL otherwise. 
+ */ +struct drm_plane_state * +drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane) +{ + struct drm_plane_state *plane_state = plane->state; + struct drm_shadow_plane_state *new_shadow_plane_state; + + if (!plane_state) + return NULL; + + new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL); + if (!new_shadow_plane_state) + return NULL; + __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base); + + return &new_shadow_plane_state->base; +} +EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state); + +/** + * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state + * @plane: the plane + * @plane_state: the plane state of type struct drm_shadow_plane_state + * + * This function implements struct &drm_plane_funcs.atomic_destroy_state + * for shadow-buffered planes. It expects that mappings of shadow buffers + * have been released already. + */ +void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct drm_shadow_plane_state *shadow_plane_state = + to_drm_shadow_plane_state(plane_state); + + __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base); + kfree(shadow_plane_state); +} +EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state); + +/** + * drm_gem_reset_shadow_plane - resets a shadow-buffered plane + * @plane: the plane + * + * This function implements struct &drm_plane_funcs.reset_plane for + * shadow-buffered planes. It assumes the current plane state to be + * of type struct drm_shadow_plane and it allocates the new state of + * this type. + */ +void drm_gem_reset_shadow_plane(struct drm_plane *plane) +{ + struct drm_shadow_plane_state *shadow_plane_state; + + if (plane->state) { + drm_gem_destroy_shadow_plane_state(plane, plane->state); + plane->state = NULL; /* must be set to NULL here */ + } + + shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL); + if (!shadow_plane_state) + return; + __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); +} +EXPORT_SYMBOL(drm_gem_reset_shadow_plane); + +/** + * drm_gem_prepare_shadow_fb - prepares shadow framebuffers + * @plane: the plane + * @plane_state: the plane state of type struct drm_shadow_plane_state + * + * This function implements struct &drm_plane_helper_funcs.prepare_fb. It + * maps all buffer objects of the plane's framebuffer into kernel address + * space and stores them in &struct drm_shadow_plane_state.map. The + * framebuffer will be synchronized as part of the atomic commit. + * + * See drm_gem_cleanup_shadow_fb() for cleanup. + * + * Returns: + * 0 on success, or a negative errno code otherwise. 
+ */ +int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state) +{ + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_gem_object *obj; + struct dma_buf_map map; + int ret; + size_t i; + + if (!fb) + return 0; + + ret = drm_gem_plane_helper_prepare_fb(plane, plane_state); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(shadow_plane_state->map); ++i) { + obj = drm_gem_fb_get_obj(fb, i); + if (!obj) + continue; + ret = drm_gem_vmap(obj, &map); + if (ret) + goto err_drm_gem_vunmap; + shadow_plane_state->map[i] = map; + } + + return 0; + +err_drm_gem_vunmap: + while (i) { + --i; + obj = drm_gem_fb_get_obj(fb, i); + if (!obj) + continue; + drm_gem_vunmap(obj, &shadow_plane_state->map[i]); + } + return ret; +} +EXPORT_SYMBOL(drm_gem_prepare_shadow_fb); + +/** + * drm_gem_cleanup_shadow_fb - releases shadow framebuffers + * @plane: the plane + * @plane_state: the plane state of type struct drm_shadow_plane_state + * + * This function implements struct &drm_plane_helper_funcs.cleanup_fb. + * This function unmaps all buffer objects of the plane's framebuffer. + * + * See drm_gem_prepare_shadow_fb() for more inforamtion. + */ +void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state) +{ + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + size_t i = ARRAY_SIZE(shadow_plane_state->map); + struct drm_gem_object *obj; + + if (!fb) + return; + + while (i) { + --i; + obj = drm_gem_fb_get_obj(fb, i); + if (!obj) + continue; + drm_gem_vunmap(obj, &shadow_plane_state->map[i]); + } +} +EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb); + +/** + * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers + * @pipe: the simple display pipe + * @plane_state: the plane state of type struct drm_shadow_plane_state + * + * This function implements struct drm_simple_display_funcs.prepare_fb. It + * maps all buffer objects of the plane's framebuffer into kernel address + * space and stores them in struct drm_shadow_plane_state.map. The + * framebuffer will be synchronized as part of the atomic commit. + * + * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state) +{ + return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state); +} +EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb); + +/** + * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers + * @pipe: the simple display pipe + * @plane_state: the plane state of type struct drm_shadow_plane_state + * + * This function implements struct drm_simple_display_funcs.cleanup_fb. + * This function unmaps all buffer objects of the plane's framebuffer. + * + * See drm_gem_simple_kms_prepare_shadow_fb(). + */ +void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state) +{ + drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state); +} +EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb); + +/** + * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane + * @pipe: the simple display pipe + * + * This function implements struct drm_simple_display_funcs.reset_plane + * for shadow-buffered planes. 
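/*
 * Minimal sketch, not part of the patch above: a shadow-buffered plane's
 * atomic_update copies from the mapping that drm_gem_prepare_shadow_fb()
 * established into the hardware framebuffer. "foo_copy_to_vram" is a
 * hypothetical driver helper; a system-memory (non-iomem) mapping is
 * assumed.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_gem_atomic_helper.h>

/* hypothetical driver helper that blits into the hardware framebuffer */
void foo_copy_to_vram(struct drm_plane *plane, struct drm_framebuffer *fb,
		      const void *vaddr);

static void foo_shadow_plane_atomic_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(new_state);

	if (!new_state->fb)
		return;

	/* map[0] stays valid for as long as this plane state is in use */
	foo_copy_to_vram(plane, new_state->fb,
			 shadow_plane_state->map[0].vaddr);
}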
+ */ +void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe) +{ + drm_gem_reset_shadow_plane(&pipe->plane); +} +EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane); + +/** + * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state + * @pipe: the simple display pipe + * + * This function implements struct drm_simple_display_funcs.duplicate_plane_state + * for shadow-buffered planes. It does not duplicate existing mappings of the shadow + * buffers. Mappings are maintained during the atomic commit by the plane's prepare_fb + * and cleanup_fb helpers. + * + * Returns: + * A pointer to a new plane state on success, or NULL otherwise. + */ +struct drm_plane_state * +drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe) +{ + return drm_gem_duplicate_shadow_plane_state(&pipe->plane); +} +EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state); + +/** + * drm_gem_simple_kms_destroy_shadow_plane_state - resets shadow-buffered plane state + * @pipe: the simple display pipe + * @plane_state: the plane state of type struct drm_shadow_plane_state + * + * This function implements struct drm_simple_display_funcs.destroy_plane_state + * for shadow-buffered planes. It expects that mappings of shadow buffers + * have been released already. + */ +void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state) +{ + drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state); +} +EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 109d11fb4cd4..5ed2067cebb6 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -5,13 +5,8 @@ * Copyright (C) 2017 Noralf Trønnes */ -#include <linux/dma-buf.h> -#include <linux/dma-fence.h> -#include <linux/dma-resv.h> #include <linux/slab.h> -#include <drm/drm_atomic.h> -#include <drm/drm_atomic_uapi.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> @@ -19,7 +14,6 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> -#include <drm/drm_simple_kms_helper.h> #define AFBC_HEADER_SIZE 16 #define AFBC_TH_LAYOUT_ALIGNMENT 8 @@ -432,60 +426,3 @@ int drm_gem_fb_afbc_init(struct drm_device *dev, return 0; } EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init); - -/** - * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer - * @plane: Plane - * @state: Plane state the fence will be attached to - * - * This function extracts the exclusive fence from &drm_gem_object.resv and - * attaches it to plane state for the atomic helper to wait on. This is - * necessary to correctly implement implicit synchronization for any buffers - * shared as a struct &dma_buf. This function can be used as the - * &drm_plane_helper_funcs.prepare_fb callback. - * - * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple - * gem based framebuffer drivers which have their buffers always pinned in - * memory. - * - * See drm_atomic_set_fence_for_plane() for a discussion of implicit and - * explicit fencing in atomic modeset updates. 
- */ -int drm_gem_fb_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct drm_gem_object *obj; - struct dma_fence *fence; - - if (!state->fb) - return 0; - - obj = drm_gem_fb_get_obj(state->fb, 0); - fence = dma_resv_get_excl_rcu(obj->resv); - drm_atomic_set_fence_for_plane(state, fence); - - return 0; -} -EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb); - -/** - * drm_gem_fb_simple_display_pipe_prepare_fb - prepare_fb helper for - * &drm_simple_display_pipe - * @pipe: Simple display pipe - * @plane_state: Plane state - * - * This function uses drm_gem_fb_prepare_fb() to extract the exclusive fence - * from &drm_gem_object.resv and attaches it to plane state for the atomic - * helper to wait on. This is necessary to correctly implement implicit - * synchronization for any buffers shared as a struct &dma_buf. Drivers can use - * this as their &drm_simple_display_pipe_funcs.prepare_fb callback. - * - * See drm_atomic_set_fence_for_plane() for a discussion of implicit and - * explicit fencing in atomic modeset updates. - */ -int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *plane_state) -{ - return drm_gem_fb_prepare_fb(&pipe->plane, plane_state); -} -EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 9825c378dfa6..6d625cee7a6a 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -357,13 +357,14 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, if (--shmem->vmap_use_count > 0) return; - if (obj->import_attach) + if (obj->import_attach) { dma_buf_vunmap(obj->import_attach->dmabuf, map); - else + } else { vunmap(shmem->vaddr); + drm_gem_shmem_put_pages(shmem); + } shmem->vaddr = NULL; - drm_gem_shmem_put_pages(shmem); } /* @@ -525,14 +526,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); loff_t num_pages = obj->size >> PAGE_SHIFT; + vm_fault_t ret; struct page *page; + pgoff_t page_offset; - if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages)) - return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - page = shmem->pages[vmf->pgoff]; + mutex_lock(&shmem->pages_lock); - return vmf_insert_page(vma, vmf->address, page); + if (page_offset >= num_pages || + WARN_ON_ONCE(!shmem->pages) || + shmem->madv < 0) { + ret = VM_FAULT_SIGBUS; + } else { + page = shmem->pages[page_offset]; + + ret = vmf_insert_page(vma, vmf->address, page); + } + + mutex_unlock(&shmem->pages_lock); + + return ret; } static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) @@ -581,9 +596,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) struct drm_gem_shmem_object *shmem; int ret; - /* Remove the fake offset */ - vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); - if (obj->import_attach) { /* Drop the reference drm_gem_mmap_obj() acquired.*/ drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 0b232a73c1b7..2b7c3a07956d 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -8,7 +8,7 @@ #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_framebuffer.h> -#include 
<drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_managed.h> @@ -187,9 +187,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, struct drm_gem_vram_object *gbo; struct drm_gem_object *gem; struct drm_vram_mm *vmm = dev->vram_mm; - struct ttm_bo_device *bdev; + struct ttm_device *bdev; int ret; - size_t acc_size; if (WARN_ONCE(!vmm, "VRAM MM not initialized")) return ERR_PTR(-EINVAL); @@ -216,7 +215,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, } bdev = &vmm->bdev; - acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo)); gbo->bo.bdev = bdev; drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM); @@ -226,8 +224,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, * to release gbo->bo.base and kfree gbo. */ ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device, - &gbo->placement, pg_align, false, acc_size, - NULL, NULL, ttm_buffer_object_destroy); + &gbo->placement, pg_align, false, NULL, NULL, + ttm_buffer_object_destroy); if (ret) return ERR_PTR(ret); @@ -558,7 +556,7 @@ err_drm_gem_object_put: EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb); /* - * Helpers for struct ttm_bo_driver + * Helpers for struct ttm_device_funcs */ static bool drm_is_gem_vram(struct ttm_buffer_object *bo) @@ -573,9 +571,7 @@ static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, *pl = gbo->placement; } -static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, - bool evict, - struct ttm_resource *new_mem) +static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo) { struct ttm_buffer_object *bo = &gbo->bo; struct drm_device *dev = bo->base.dev; @@ -592,16 +588,8 @@ static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) { - int ret; - - drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); - ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem); - if (ret) { - swap(*new_mem, gbo->bo.mem); - drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem); - swap(*new_mem, gbo->bo.mem); - } - return ret; + drm_gem_vram_bo_driver_move_notify(gbo); + return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem); } /* @@ -720,7 +708,7 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, goto err_drm_gem_vram_unpin; } - ret = drm_gem_fb_prepare_fb(plane, new_state); + ret = drm_gem_plane_helper_prepare_fb(plane, new_state); if (ret) goto err_drm_gem_vram_unpin; @@ -901,7 +889,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { * TTM TT */ -static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt) +static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt) { ttm_tt_destroy_common(bdev, tt); ttm_tt_fini(tt); @@ -957,7 +945,7 @@ static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo) gbo = drm_gem_vram_of_bo(bo); - drm_gem_vram_bo_driver_move_notify(gbo, false, NULL); + drm_gem_vram_bo_driver_move_notify(gbo); } static int bo_driver_move(struct ttm_buffer_object *bo, @@ -973,7 +961,7 @@ static int bo_driver_move(struct ttm_buffer_object *bo, return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem); } -static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, +static int bo_driver_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct drm_vram_mm *vmm = 
drm_vram_mm_of_bdev(bdev); @@ -993,7 +981,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, return 0; } -static struct ttm_bo_driver bo_driver = { +static struct ttm_device_funcs bo_driver = { .ttm_tt_create = bo_driver_ttm_tt_create, .ttm_tt_destroy = bo_driver_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, @@ -1044,7 +1032,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, vmm->vram_base = vram_base; vmm->vram_size = vram_size; - ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev, + ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev, dev->anon_inode->i_mapping, dev->vma_offset_manager, false, true); @@ -1062,7 +1050,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) { ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM); - ttm_bo_device_release(&vmm->bdev); + ttm_device_fini(&vmm->bdev); } /* diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index f86448ab1fe0..33390f02f5eb 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd, if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) return -EFAULT; + memset(&v, 0, sizeof(v)); + v = (struct drm_version) { .name_len = v32.name_len, .name = compat_ptr(v32.name), @@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; + + memset(&uq, 0, sizeof(uq)); + uq = (struct drm_unique){ .unique_len = uq32.unique_len, .unique = compat_ptr(uq32.unique), @@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, if (copy_from_user(&c32, argp, sizeof(c32))) return -EFAULT; + memset(&client, 0, sizeof(client)); + client.idx = c32.idx; err = drm_ioctl_kernel(file, drm_getclient, &client, 0); @@ -295,12 +302,8 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, unsigned long arg) { drm_stats32_t __user *argp = (void __user *)arg; - int err; - - err = drm_ioctl_kernel(file, drm_noop, NULL, 0); - if (err) - return err; + /* getstats is defunct, just clear */ if (clear_user(argp, sizeof(drm_stats32_t))) return -EFAULT; return 0; @@ -813,13 +816,8 @@ typedef struct drm_update_draw32 { static int compat_drm_update_draw(struct file *file, unsigned int cmd, unsigned long arg) { - drm_update_draw32_t update32; - - if (copy_from_user(&update32, (void __user *)arg, sizeof(update32))) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_noop, NULL, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); + /* update_draw is defunct */ + return 0; } #endif @@ -852,6 +850,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; + memset(&req, 0, sizeof(req)); + req.request.type = req32.request.type; req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; @@ -889,6 +889,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, struct drm_mode_fb_cmd2 req64; int err; + memset(&req64, 0, sizeof(req64)); + if (copy_from_user(&req64, argp, offsetof(drm_mode_fb_cmd232_t, modifier))) return -EFAULT; diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index 221a8528c993..f933da1656eb 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ 
-64,19 +64,18 @@ MODULE_PARM_DESC(edid_firmware, static int __init drm_kms_helper_init(void) { - int ret; - - /* Call init functions from specific kms helpers here */ - ret = drm_fb_helper_modinit(); - if (ret < 0) - goto out; - - ret = drm_dp_aux_dev_init(); - if (ret < 0) - goto out; - -out: - return ret; + /* + * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT) + * but the module doesn't depend on any fb console symbols. At least + * attempt to load fbcon to avoid leaving the system without a usable + * console. + */ + if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) && + IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) && + !IS_ENABLED(CONFIG_EXPERT)) + request_module_nowait("fbcon"); + + return drm_dp_aux_dev_init(); } static void __exit drm_kms_helper_exit(void) diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 230c4fd7131c..43a9b739bba7 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -203,7 +203,6 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); struct dma_buf_attachment *import_attach = gem->import_attach; - struct drm_format_name_buf format_name; void *src = cma_obj->vaddr; int ret = 0; @@ -225,8 +224,8 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap); break; default: - drm_err_once(fb->dev, "Format is not supported: %s\n", - drm_get_format_name(fb->format->format, &format_name)); + drm_err_once(fb->dev, "Format is not supported: %p4cc\n", + &fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 58f5dc2f6dd5..f6bdec7fa925 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data onegx1_pro = { + .width = 1200, + .height = 1920, + .bios_dates = (const char * const []){ "12/17/2020", NULL }, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = { .width = 720, .height = 1280, @@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* OneGX1 Pro */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"), + }, + .driver_data = (void *)&onegx1_pro, }, { /* VIOS LTH17 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 338650abd267..0dd43882fe7c 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -50,10 +50,8 @@ * &struct drm_plane (possibly as part of a larger structure) and registers it * with a call to drm_universal_plane_init(). * - * The type of a plane is exposed in the immutable "type" enumeration property, - * which has one of the following values: "Overlay", "Primary", "Cursor" (see - * enum drm_plane_type). A plane can be compatible with multiple CRTCs, see - * &drm_plane.possible_crtcs. 
+ * Each plane has a type, see enum drm_plane_type. A plane can be compatible + * with multiple CRTCs, see &drm_plane.possible_crtcs. * * Each CRTC must have a unique primary plane userspace can attach to enable * the CRTC. In other words, userspace must be able to attach a different @@ -73,6 +71,58 @@ * * DRM planes have a few standardized properties: * + * type: + * Immutable property describing the type of the plane. + * + * For user-space which has enabled the &DRM_CLIENT_CAP_ATOMIC capability, + * the plane type is just a hint and is mostly superseded by atomic + * test-only commits. The type hint can still be used to come up more + * easily with a plane configuration accepted by the driver. + * + * The value of this property can be one of the following: + * + * "Primary": + * To light up a CRTC, attaching a primary plane is the most likely to + * work if it covers the whole CRTC and doesn't have scaling or + * cropping set up. + * + * Drivers may support more features for the primary plane, user-space + * can find out with test-only atomic commits. + * + * Some primary planes are implicitly used by the kernel in the legacy + * IOCTLs &DRM_IOCTL_MODE_SETCRTC and &DRM_IOCTL_MODE_PAGE_FLIP. + * Therefore user-space must not mix explicit usage of any primary + * plane (e.g. through an atomic commit) with these legacy IOCTLs. + * + * "Cursor": + * To enable this plane, using a framebuffer configured without scaling + * or cropping and with the following properties is the most likely to + * work: + * + * - If the driver provides the capabilities &DRM_CAP_CURSOR_WIDTH and + * &DRM_CAP_CURSOR_HEIGHT, create the framebuffer with this size. + * Otherwise, create a framebuffer with the size 64x64. + * - If the driver doesn't support modifiers, create a framebuffer with + * a linear layout. Otherwise, use the IN_FORMATS plane property. + * + * Drivers may support more features for the cursor plane, user-space + * can find out with test-only atomic commits. + * + * Some cursor planes are implicitly used by the kernel in the legacy + * IOCTLs &DRM_IOCTL_MODE_CURSOR and &DRM_IOCTL_MODE_CURSOR2. + * Therefore user-space must not mix explicit usage of any cursor + * plane (e.g. through an atomic commit) with these legacy IOCTLs. + * + * Some drivers may support cursors even if no cursor plane is exposed. + * In this case, the legacy cursor IOCTLs can be used to configure the + * cursor. + * + * "Overlay": + * Neither primary nor cursor. + * + * Overlay planes are the only planes exposed when the + * &DRM_CLIENT_CAP_UNIVERSAL_PLANES capability is disabled. + * * IN_FORMATS: * Blob property which contains the set of buffer format and modifier * pairs supported by this plane. 
The blob is a struct @@ -719,12 +769,8 @@ static int __setplane_check(struct drm_plane *plane, ret = drm_plane_check_pixel_format(plane, fb->format->format, fb->modifier); if (ret) { - struct drm_format_name_buf format_name; - - DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n", - drm_get_format_name(fb->format->format, - &format_name), - fb->modifier); + DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n", + &fb->format->format, fb->modifier); return ret; } diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 6ce8f5cd1eb5..0b095a313c44 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -177,14 +177,16 @@ static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { }; static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *plane_state) + struct drm_atomic_state *state) { + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_simple_display_pipe *pipe; struct drm_crtc_state *crtc_state; int ret; pipe = container_of(plane, struct drm_simple_display_pipe, plane); - crtc_state = drm_atomic_get_new_crtc_state(plane_state->state, + crtc_state = drm_atomic_get_new_crtc_state(state, &pipe->crtc); ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state, @@ -204,8 +206,10 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, } static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_pstate) + struct drm_atomic_state *state) { + struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state, + plane); struct drm_simple_display_pipe *pipe; pipe = container_of(plane, struct drm_simple_display_pipe, plane); @@ -253,13 +257,47 @@ static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = { .atomic_update = drm_simple_kms_plane_atomic_update, }; +static void drm_simple_kms_plane_reset(struct drm_plane *plane) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->reset_plane) + return drm_atomic_helper_plane_reset(plane); + + return pipe->funcs->reset_plane(pipe); +} + +static struct drm_plane_state *drm_simple_kms_plane_duplicate_state(struct drm_plane *plane) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->duplicate_plane_state) + return drm_atomic_helper_plane_duplicate_state(plane); + + return pipe->funcs->duplicate_plane_state(pipe); +} + +static void drm_simple_kms_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->destroy_plane_state) + drm_atomic_helper_plane_destroy_state(plane, state); + else + pipe->funcs->destroy_plane_state(pipe, state); +} + static const struct drm_plane_funcs drm_simple_kms_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .reset = drm_simple_kms_plane_reset, + .atomic_duplicate_state = drm_simple_kms_plane_duplicate_state, + 
.atomic_destroy_state = drm_simple_kms_plane_destroy_state, .format_mod_supported = drm_simple_kms_format_mod_supported, }; diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 349146049849..6231a8214c25 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -387,6 +387,15 @@ int drm_syncobj_find_fence(struct drm_file *file_private, if (!syncobj) return -ENOENT; + /* Waiting for userspace with locks help is illegal cause that can + * trivial deadlock with page faults for example. Make lockdep complain + * about it early on. + */ + if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + might_sleep(); + lockdep_assert_none_held_once(); + } + *fence = drm_syncobj_fence_get(syncobj); if (*fence) { @@ -942,6 +951,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, uint64_t *points; uint32_t signaled_count, i; + if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) + lockdep_assert_none_held_once(); + points = kmalloc_array(count, sizeof(*points), GFP_KERNEL); if (points == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 30912d8f82a5..2bd989688eae 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -1006,7 +1006,14 @@ static void send_vblank_event(struct drm_device *dev, break; } trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq); - drm_send_event_locked(dev, &e->base); + /* + * Use the same timestamp for any associated fence signal to avoid + * mismatch in timestamps for vsync & fence events triggered by the + * same HW event. Frameworks like SurfaceFlinger in Android expects the + * retire-fence timestamp to match exactly with HW vsync as it uses it + * for its software vsync modeling. + */ + drm_send_event_timestamp_locked(dev, &e->base, now); } /** @@ -1463,20 +1470,7 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_on); -/** - * drm_vblank_restore - estimate missed vblanks and update vblank count. - * @dev: DRM device - * @pipe: CRTC index - * - * Power manamement features can cause frame counter resets between vblank - * disable and enable. Drivers can use this function in their - * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since - * the last &drm_crtc_funcs.disable_vblank using timestamps and update the - * vblank counter. - * - * This function is the legacy version of drm_crtc_vblank_restore(). - */ -void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) +static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) { ktime_t t_vblank; struct drm_vblank_crtc *vblank; @@ -1512,7 +1506,6 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) diff, diff_ns, framedur_ns, cur_vblank - vblank->last); store_vblank(dev, pipe, diff, t_vblank, cur_vblank); } -EXPORT_SYMBOL(drm_vblank_restore); /** * drm_crtc_vblank_restore - estimate missed vblanks and update vblank count. @@ -1523,9 +1516,18 @@ EXPORT_SYMBOL(drm_vblank_restore); * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since * the last &drm_crtc_funcs.disable_vblank using timestamps and update the * vblank counter. + * + * Note that drivers must have race-free high-precision timestamping support, + * i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and + * &drm_driver.vblank_disable_immediate must be set to indicate the + * time-stamping functions are race-free against vblank hardware counter + * increments. 
*/ void drm_crtc_vblank_restore(struct drm_crtc *crtc) { + WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp); + WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate); + drm_vblank_restore(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_restore); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index cd46c882269c..19826e504efc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -82,7 +82,8 @@ static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job) return fence; } -static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) +static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job + *sched_job) { struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); struct etnaviv_gpu *gpu = submit->gpu; @@ -120,9 +121,13 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) drm_sched_resubmit_jobs(&gpu->sched); + drm_sched_start(&gpu->sched, true); + return DRM_GPU_SCHED_STAT_NOMINAL; + out_no_timeout: /* restart scheduler after GPU is usable again */ drm_sched_start(&gpu->sched, true); + return DRM_GPU_SCHED_STAT_NOMINAL; } static void etnaviv_sched_free_job(struct drm_sched_job *sched_job) @@ -185,7 +190,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu) ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, etnaviv_hw_jobs_limit, etnaviv_job_hang_limit, - msecs_to_jiffies(500), dev_name(gpu->dev)); + msecs_to_jiffies(500), NULL, dev_name(gpu->dev)); if (ret) return ret; diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 951d5f708e92..6a251e3aa779 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -89,7 +89,6 @@ comment "Sub-drivers" config DRM_EXYNOS_G2D bool "G2D" depends on VIDEO_SAMSUNG_S5P_G2D=n || COMPILE_TEST - select FRAME_VECTOR help Choose this option if you want to use Exynos G2D for DRM. 
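
(Illustrative sketch, not part of the patch.) The etnaviv hunks above show the drm_sched API change this series carries through the tree: &drm_sched_backend_ops.timedout_job now returns enum drm_gpu_sched_stat instead of void, and drm_sched_init() takes one extra argument, which the converted drivers here simply pass as NULL. A minimal, hypothetical driver callback under the new signature could look like the following; "foo_gpu" and the reset step are placeholders, only the scheduler calls are taken from the hunks above.

	#include <drm/gpu_scheduler.h>

	/* Hypothetical driver state; only the embedded scheduler matters here. */
	struct foo_gpu {
		struct drm_gpu_scheduler sched;
	};

	static enum drm_gpu_sched_stat
	foo_sched_timedout_job(struct drm_sched_job *sched_job)
	{
		struct foo_gpu *gpu =
			container_of(sched_job->sched, struct foo_gpu, sched);

		/* Park the scheduler and detach the hanging job. */
		drm_sched_stop(&gpu->sched, sched_job);

		/* ... reset the hardware here (driver specific) ... */

		/* Re-queue the unfinished jobs and restart the scheduler. */
		drm_sched_resubmit_jobs(&gpu->sched);
		drm_sched_start(&gpu->sched, true);

		/* New in this series: report the outcome instead of returning void. */
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

In the hunks above, etnaviv returns DRM_GPU_SCHED_STAT_NOMINAL on both the recovery path and the no-timeout path, matching this pattern.
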
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 967a5cdc120e..1e0c5a7f206e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -205,7 +205,8 @@ struct g2d_cmdlist_userptr { dma_addr_t dma_addr; unsigned long userptr; unsigned long size; - struct frame_vector *vec; + struct page **pages; + unsigned int npages; struct sg_table *sgt; atomic_t refcount; bool in_pool; @@ -378,7 +379,6 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d, bool force) { struct g2d_cmdlist_userptr *g2d_userptr = obj; - struct page **pages; if (!obj) return; @@ -398,15 +398,9 @@ out: dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt, DMA_BIDIRECTIONAL, 0); - pages = frame_vector_pages(g2d_userptr->vec); - if (!IS_ERR(pages)) { - int i; - - for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++) - set_page_dirty_lock(pages[i]); - } - put_vaddr_frames(g2d_userptr->vec); - frame_vector_destroy(g2d_userptr->vec); + unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages, + true); + kvfree(g2d_userptr->pages); if (!g2d_userptr->out_of_list) list_del_init(&g2d_userptr->list); @@ -474,35 +468,35 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, offset = userptr & ~PAGE_MASK; end = PAGE_ALIGN(userptr + size); npages = (end - start) >> PAGE_SHIFT; - g2d_userptr->vec = frame_vector_create(npages); - if (!g2d_userptr->vec) { + g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages), + GFP_KERNEL); + if (!g2d_userptr->pages) { ret = -ENOMEM; goto err_free; } - ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, - g2d_userptr->vec); + ret = pin_user_pages_fast(start, npages, + FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM, + g2d_userptr->pages); if (ret != npages) { DRM_DEV_ERROR(g2d->dev, "failed to get user pages from userptr.\n"); if (ret < 0) - goto err_destroy_framevec; - ret = -EFAULT; - goto err_put_framevec; - } - if (frame_vector_to_pages(g2d_userptr->vec) < 0) { + goto err_destroy_pages; + npages = ret; ret = -EFAULT; - goto err_put_framevec; + goto err_unpin_pages; } + g2d_userptr->npages = npages; sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) { ret = -ENOMEM; - goto err_put_framevec; + goto err_unpin_pages; } ret = sg_alloc_table_from_pages(sgt, - frame_vector_pages(g2d_userptr->vec), + g2d_userptr->pages, npages, offset, size, GFP_KERNEL); if (ret < 0) { DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n"); @@ -538,11 +532,11 @@ err_sg_free_table: err_free_sgt: kfree(sgt); -err_put_framevec: - put_vaddr_frames(g2d_userptr->vec); +err_unpin_pages: + unpin_user_pages(g2d_userptr->pages, npages); -err_destroy_framevec: - frame_vector_destroy(g2d_userptr->vec); +err_destroy_pages: + kvfree(g2d_userptr->pages); err_free: kfree(g2d_userptr); diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index b29afced7374..df76bdee7dca 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -228,14 +228,16 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config, } static int exynos_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); struct exynos_drm_plane_state *exynos_state = - to_exynos_plane_state(state); + 
to_exynos_plane_state(new_plane_state); int ret = 0; - if (!state->crtc || !state->fb) + if (!new_plane_state->crtc || !new_plane_state->fb) return 0; /* translate state into exynos_state */ @@ -250,13 +252,14 @@ static int exynos_plane_atomic_check(struct drm_plane *plane, } static void exynos_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(state->crtc); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(new_state->crtc); struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); - if (!state->crtc) + if (!new_state->crtc) return; if (exynos_crtc->ops->update_plane) @@ -264,8 +267,9 @@ static void exynos_plane_atomic_update(struct drm_plane *plane, } static void exynos_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(old_state->crtc); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 3c6d9f3913d5..8fe953d6e0a9 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -7,6 +7,7 @@ #include <linux/regmap.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_cma_helper.h> @@ -33,11 +34,13 @@ static int fsl_dcu_drm_plane_index(struct drm_plane *plane) } static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_framebuffer *fb = state->fb; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; - if (!state->fb || !state->crtc) + if (!new_plane_state->fb || !new_plane_state->crtc) return 0; switch (fb->format->format) { @@ -57,7 +60,7 @@ static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane, } static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; unsigned int value; @@ -73,11 +76,12 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, } static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_framebuffer *fb = plane->state->fb; struct drm_gem_cma_object *gem; unsigned int alpha = DCU_LAYER_AB_NONE, bpp; @@ -125,11 +129,11 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, } regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1), - DCU_LAYER_HEIGHT(state->crtc_h) | - DCU_LAYER_WIDTH(state->crtc_w)); + DCU_LAYER_HEIGHT(new_state->crtc_h) | + DCU_LAYER_WIDTH(new_state->crtc_w)); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2), - DCU_LAYER_POSY(state->crtc_y) | - DCU_LAYER_POSX(state->crtc_x)); + DCU_LAYER_POSY(new_state->crtc_y) | 
+ DCU_LAYER_POSX(new_state->crtc_x)); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 3), gem->paddr); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index 0e23c93a1094..0cff20265f97 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig @@ -1,35 +1,13 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_GMA500 - tristate "Intel GMA5/600 KMS Framebuffer" + tristate "Intel GMA500/600/3600/3650 KMS Framebuffer" depends on DRM && PCI && X86 && MMU select DRM_KMS_HELPER - select DRM_TTM # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915 select ACPI_VIDEO if ACPI select BACKLIGHT_CLASS_DEVICE if ACPI select INPUT if ACPI help Say yes for an experimental 2D KMS framebuffer driver for the - Intel GMA500 ('Poulsbo') and other Intel IMG based graphics - devices. - -config DRM_GMA600 - bool "Intel GMA600 support (Experimental)" - depends on DRM_GMA500 - help - Say yes to include support for GMA600 (Intel Moorestown/Oaktrail) - platforms with LVDS ports. MIPI is not currently supported. - -config DRM_GMA3600 - bool "Intel GMA3600/3650 support (Experimental)" - depends on DRM_GMA500 - help - Say yes to include basic support for Intel GMA3600/3650 (Intel - Cedar Trail) platforms. - -config DRM_MEDFIELD - bool "Intel Medfield support (Experimental)" - depends on DRM_GMA500 && X86_INTEL_MID - help - Say yes to include support for the Intel Medfield platform. - + Intel GMA500 (Poulsbo), Intel GMA600 (Moorestown/Oak Trail) and + Intel GMA3600/3650 (Cedar Trail). diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile index c8f2c89be99d..63012bf2485a 100644 --- a/drivers/gpu/drm/gma500/Makefile +++ b/drivers/gpu/drm/gma500/Makefile @@ -4,53 +4,39 @@ # gma500_gfx-y += \ - accel_2d.o \ backlight.o \ + cdv_device.o \ + cdv_intel_crt.o \ + cdv_intel_display.o \ + cdv_intel_dp.o \ + cdv_intel_hdmi.o \ + cdv_intel_lvds.o \ framebuffer.o \ gem.o \ + gma_device.o \ + gma_display.o \ gtt.o \ intel_bios.o \ - intel_i2c.o \ intel_gmbus.o \ + intel_i2c.o \ + mid_bios.o \ mmu.o \ - blitter.o \ + oaktrail_device.o \ + oaktrail_crtc.o \ + oaktrail_hdmi.o \ + oaktrail_hdmi_i2c.o \ + oaktrail_lvds.o \ + oaktrail_lvds_i2c.o \ power.o \ + psb_device.o \ psb_drv.o \ - gma_display.o \ - gma_device.o \ psb_intel_display.o \ psb_intel_lvds.o \ psb_intel_modes.o \ psb_intel_sdvo.o \ psb_lid.o \ - psb_irq.o \ - psb_device.o \ - mid_bios.o - -gma500_gfx-$(CONFIG_ACPI) += opregion.o \ - -gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \ - cdv_intel_crt.o \ - cdv_intel_display.o \ - cdv_intel_hdmi.o \ - cdv_intel_lvds.o \ - cdv_intel_dp.o - -gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \ - oaktrail_crtc.o \ - oaktrail_lvds.o \ - oaktrail_lvds_i2c.o \ - oaktrail_hdmi.o \ - oaktrail_hdmi_i2c.o + psb_irq.o -gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \ - mdfld_output.o \ - mdfld_intel_display.o \ - mdfld_dsi_output.o \ - mdfld_dsi_dpi.o \ - mdfld_dsi_pkg_sender.o \ - mdfld_tpo_vid.o \ - mdfld_tmd_vid.o \ - tc35876x-dsi-lvds.o +gma500_gfx-$(CONFIG_ACPI) += opregion.o obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c deleted file mode 100644 index 437bbb6af9e6..000000000000 --- a/drivers/gpu/drm/gma500/accel_2d.c +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/************************************************************************** - * Copyright (c) 2007-2011, Intel 
Corporation. - * All Rights Reserved. - * - * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to - * develop this driver. - * - **************************************************************************/ - -#include <linux/console.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/tty.h> - -#include <drm/drm.h> -#include <drm/drm_crtc.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> - -#include "psb_drv.h" -#include "psb_reg.h" - -/** - * psb_spank - reset the 2D engine - * @dev_priv: our PSB DRM device - * - * Soft reset the graphics engine and then reload the necessary registers. - * We use this at initialisation time but it will become relevant for - * accelerated X later - */ -void psb_spank(struct drm_psb_private *dev_priv) -{ - PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET | - _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET | - _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET | - _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET); - PSB_RSGX32(PSB_CR_SOFT_RESET); - - msleep(1); - - PSB_WSGX32(0, PSB_CR_SOFT_RESET); - wmb(); - PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT, - PSB_CR_BIF_CTRL); - wmb(); - (void) PSB_RSGX32(PSB_CR_BIF_CTRL); - - msleep(1); - PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT, - PSB_CR_BIF_CTRL); - (void) PSB_RSGX32(PSB_CR_BIF_CTRL); - PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); -} diff --git a/drivers/gpu/drm/gma500/blitter.c b/drivers/gpu/drm/gma500/blitter.c deleted file mode 100644 index cb2504a4a15f..000000000000 --- a/drivers/gpu/drm/gma500/blitter.c +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014, Patrik Jakobsson - * All Rights Reserved. - * - * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com> - */ - -#include "psb_drv.h" - -#include "blitter.h" -#include "psb_reg.h" - -/* Wait for the blitter to be completely idle */ -int gma_blt_wait_idle(struct drm_psb_private *dev_priv) -{ - unsigned long stop = jiffies + HZ; - int busy = 1; - - /* NOP for Cedarview */ - if (IS_CDV(dev_priv->dev)) - return 0; - - /* First do a quick check */ - if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && - ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0)) - return 0; - - do { - busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY); - } while (busy && !time_after_eq(jiffies, stop)); - - if (busy) - return -EBUSY; - - do { - busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & - _PSB_C2B_STATUS_BUSY) != 0); - } while (busy && !time_after_eq(jiffies, stop)); - - /* If still busy, we probably have a hang */ - return (busy) ? -EBUSY : 0; -} diff --git a/drivers/gpu/drm/gma500/blitter.h b/drivers/gpu/drm/gma500/blitter.h deleted file mode 100644 index 8d67dabd9ba3..000000000000 --- a/drivers/gpu/drm/gma500/blitter.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2014, Patrik Jakobsson - * All Rights Reserved. 
- * - * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com> - */ - -#ifndef __BLITTER_H -#define __BLITTER_H - -struct drm_psb_private; - -extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 19e055dbd4c2..1342e7fb382f 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -603,7 +603,7 @@ const struct psb_ops cdv_chip_ops = { .errata = cdv_errata, .crtc_helper = &cdv_intel_helper_funcs, - .crtc_funcs = &cdv_intel_crtc_funcs, + .crtc_funcs = &gma_intel_crtc_funcs, .clock_funcs = &cdv_clock_funcs, .output_init = cdv_output_init, diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h index 37e4bdc84c03..504d717385cd 100644 --- a/drivers/gpu/drm/gma500/cdv_device.h +++ b/drivers/gpu/drm/gma500/cdv_device.h @@ -8,7 +8,6 @@ struct drm_device; struct psb_intel_mode_device; extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs; -extern const struct drm_crtc_funcs cdv_intel_crtc_funcs; extern const struct gma_clock_funcs cdv_clock_funcs; extern void cdv_intel_crt_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index c48c9d322dfb..4a9bb4994a26 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -248,8 +248,6 @@ void cdv_intel_crt_init(struct drm_device *dev, struct drm_connector *connector; struct drm_encoder *encoder; - u32 i2c_reg; - gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL); if (!gma_encoder) return; @@ -269,24 +267,13 @@ void cdv_intel_crt_init(struct drm_device *dev, gma_connector_attach_encoder(gma_connector, gma_encoder); /* Set up the DDC bus. 
*/ - i2c_reg = GPIOA; - /* Remove the following code for CDV */ - /* - if (dev_priv->crt_ddc_bus != 0) - i2c_reg = dev_priv->crt_ddc_bus; - }*/ - gma_encoder->ddc_bus = psb_intel_i2c_create(dev, - i2c_reg, "CRTDDC_A"); + gma_encoder->ddc_bus = psb_intel_i2c_create(dev, GPIOA, "CRTDDC_A"); if (!gma_encoder->ddc_bus) { dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n"); goto failed_ddc; } gma_encoder->type = INTEL_OUTPUT_ANALOG; - /* - psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT); - psb_intel_output->crtc_mask = (1 << 0) | (1 << 1); - */ connector->interlace_allowed = 0; connector->doublescan_allowed = 0; diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 5d3302249779..c3a9f6b3c848 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -582,7 +582,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, struct gma_clock_t clock; u32 dpll = 0, dspcntr, pipeconf; bool ok; - bool is_lvds = false, is_tv = false; + bool is_lvds = false; bool is_dp = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; @@ -603,9 +603,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, case INTEL_OUTPUT_LVDS: is_lvds = true; break; - case INTEL_OUTPUT_TVOUT: - is_tv = true; - break; case INTEL_OUTPUT_ANALOG: case INTEL_OUTPUT_HDMI: break; @@ -660,12 +657,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, } dpll = DPLL_VGA_MODE_DIS; - if (is_tv) { - /* XXX: just matching BIOS for now */ -/* dpll |= PLL_REF_INPUT_TVCLKINBC; */ - dpll |= 3; - } -/* dpll |= PLL_REF_INPUT_DREFCLK; */ if (is_dp || is_edp) { cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode); @@ -970,18 +961,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = { .disable = gma_crtc_disable, }; -const struct drm_crtc_funcs cdv_intel_crtc_funcs = { - .cursor_set = gma_crtc_cursor_set, - .cursor_move = gma_crtc_cursor_move, - .gamma_set = gma_crtc_gamma_set, - .set_config = gma_crtc_set_config, - .destroy = gma_crtc_destroy, - .page_flip = gma_crtc_page_flip, - .enable_vblank = psb_enable_vblank, - .disable_vblank = psb_disable_vblank, - .get_vblank_counter = psb_get_vblank_counter, -}; - const struct gma_clock_funcs cdv_clock_funcs = { .clock = cdv_intel_clock, .limit = cdv_intel_limit, diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 0d12c6ffbc40..e525689f84f0 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -22,9 +22,6 @@ * * Authors: * jim liu <jim.liu@intel.com> - * - * FIXME: - * We should probably make this generic and share it with Medfield */ #include <linux/pm_runtime.h> @@ -56,7 +53,6 @@ struct mid_intel_hdmi_priv { bool has_hdmi_audio; /* Should set this when detect hotplug */ bool hdmi_device_connected; - struct mdfld_hdmi_i2c *i2c_bus; struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */ struct drm_device *dev; }; diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index e884750bc123..df9b611b856a 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -11,7 +11,6 @@ #include <asm/set_memory.h> -#include "blitter.h" #include "psb_drv.h" @@ -229,18 +228,9 @@ void psb_gtt_unpin(struct gtt_range *gt) struct drm_device *dev = gt->gem.dev; struct drm_psb_private *dev_priv = dev->dev_private; u32 gpu_base = dev_priv->gtt.gatt_start; - int ret; - /* While holding the 
gtt_mutex no new blits can be initiated */ mutex_lock(&dev_priv->gtt_mutex); - /* Wait for any possible usage of the memory to be finished */ - ret = gma_blt_wait_idle(dev_priv); - if (ret) { - DRM_ERROR("Failed to idle the blitter, unpin failed!"); - goto out; - } - WARN_ON(!gt->in_gart); gt->in_gart--; @@ -251,7 +241,6 @@ void psb_gtt_unpin(struct gtt_range *gt) psb_gtt_detach_pages(gt); } -out: mutex_unlock(&dev_priv->gtt_mutex); } diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c index 370bd6451bd9..eb0924473a21 100644 --- a/drivers/gpu/drm/gma500/intel_gmbus.c +++ b/drivers/gpu/drm/gma500/intel_gmbus.c @@ -44,13 +44,13 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if (W && !(in_atomic() || in_dbg_master())) msleep(W); \ + if (W && !(in_dbg_master())) \ + msleep(W); \ } \ ret__; \ }) #define wait_for(COND, MS) _wait_for(COND, MS, 1) -#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) #define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg)) #define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg)) diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c deleted file mode 100644 index 684d6cf9856f..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_device.c +++ /dev/null @@ -1,564 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/************************************************************************** - * Copyright (c) 2011, Intel Corporation. - * All Rights Reserved. - * - **************************************************************************/ - -#include <linux/delay.h> -#include <linux/gpio/machine.h> - -#include <asm/intel_scu_ipc.h> - -#include "mdfld_dsi_output.h" -#include "mdfld_output.h" -#include "mid_bios.h" -#include "psb_drv.h" -#include "tc35876x-dsi-lvds.h" - -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - -#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF -#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */ -#define BLC_PWM_FREQ_CALC_CONSTANT 32 -#define MHz 1000000 -#define BRIGHTNESS_MIN_LEVEL 1 -#define BRIGHTNESS_MAX_LEVEL 100 -#define BRIGHTNESS_MASK 0xFF -#define BLC_POLARITY_NORMAL 0 -#define BLC_POLARITY_INVERSE 1 -#define BLC_ADJUSTMENT_MAX 100 - -#define MDFLD_BLC_PWM_PRECISION_FACTOR 10 -#define MDFLD_BLC_MAX_PWM_REG_FREQ 0xFFFE -#define MDFLD_BLC_MIN_PWM_REG_FREQ 0x2 - -#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) -#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT (16) - -static struct backlight_device *mdfld_backlight_device; - -int mdfld_set_brightness(struct backlight_device *bd) -{ - struct drm_device *dev = - (struct drm_device *)bl_get_data(mdfld_backlight_device); - struct drm_psb_private *dev_priv = dev->dev_private; - int level = bd->props.brightness; - - DRM_DEBUG_DRIVER("backlight level set to %d\n", level); - - /* Perform value bounds checking */ - if (level < BRIGHTNESS_MIN_LEVEL) - level = BRIGHTNESS_MIN_LEVEL; - - if (gma_power_begin(dev, false)) { - u32 adjusted_level = 0; - - /* - * Adjust the backlight level with the percent in - * dev_priv->blc_adj2 - */ - adjusted_level = level * dev_priv->blc_adj2; - adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX; - dev_priv->brightness_adjusted = adjusted_level; - - if (mdfld_get_panel_type(dev, 0) == TC35876X) { - if (dev_priv->dpi_panel_on[0] || - dev_priv->dpi_panel_on[2]) - tc35876x_brightness_control(dev, - dev_priv->brightness_adjusted); - } else { - if (dev_priv->dpi_panel_on[0]) - mdfld_dsi_brightness_control(dev, 0, - dev_priv->brightness_adjusted); - } - - if (dev_priv->dpi_panel_on[2]) - 
mdfld_dsi_brightness_control(dev, 2, - dev_priv->brightness_adjusted); - gma_power_end(dev); - } - - /* cache the brightness for later use */ - dev_priv->brightness = level; - return 0; -} - -static int mdfld_get_brightness(struct backlight_device *bd) -{ - struct drm_device *dev = - (struct drm_device *)bl_get_data(mdfld_backlight_device); - struct drm_psb_private *dev_priv = dev->dev_private; - - DRM_DEBUG_DRIVER("brightness = 0x%x \n", dev_priv->brightness); - - /* return locally cached var instead of HW read (due to DPST etc.) */ - return dev_priv->brightness; -} - -static const struct backlight_ops mdfld_ops = { - .get_brightness = mdfld_get_brightness, - .update_status = mdfld_set_brightness, -}; - -static int device_backlight_init(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = (struct drm_psb_private *) - dev->dev_private; - - dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX; - dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX; - - return 0; -} - -static int mdfld_backlight_init(struct drm_device *dev) -{ - struct backlight_properties props; - int ret = 0; - - memset(&props, 0, sizeof(struct backlight_properties)); - props.max_brightness = BRIGHTNESS_MAX_LEVEL; - props.type = BACKLIGHT_PLATFORM; - mdfld_backlight_device = backlight_device_register("mdfld-bl", - NULL, (void *)dev, &mdfld_ops, &props); - - if (IS_ERR(mdfld_backlight_device)) - return PTR_ERR(mdfld_backlight_device); - - ret = device_backlight_init(dev); - if (ret) - return ret; - - mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL; - mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL; - backlight_update_status(mdfld_backlight_device); - return 0; -} -#endif - -struct backlight_device *mdfld_get_backlight_device(void) -{ -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - return mdfld_backlight_device; -#else - return NULL; -#endif -} - -/* - * mdfld_save_display_registers - * - * Description: We are going to suspend so save current display - * register state. 
- * - * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio - */ -static int mdfld_save_display_registers(struct drm_device *dev, int pipenum) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - struct medfield_state *regs = &dev_priv->regs.mdfld; - struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum]; - const struct psb_offset *map = &dev_priv->regmap[pipenum]; - int i; - u32 *mipi_val; - - /* register */ - u32 mipi_reg = MIPI; - - switch (pipenum) { - case 0: - mipi_val = ®s->saveMIPI; - break; - case 1: - mipi_val = ®s->saveMIPI; - break; - case 2: - /* register */ - mipi_reg = MIPI_C; - /* pointer to values */ - mipi_val = ®s->saveMIPI_C; - break; - default: - DRM_ERROR("%s, invalid pipe number.\n", __func__); - return -EINVAL; - } - - /* Pipe & plane A info */ - pipe->dpll = PSB_RVDC32(map->dpll); - pipe->fp0 = PSB_RVDC32(map->fp0); - pipe->conf = PSB_RVDC32(map->conf); - pipe->htotal = PSB_RVDC32(map->htotal); - pipe->hblank = PSB_RVDC32(map->hblank); - pipe->hsync = PSB_RVDC32(map->hsync); - pipe->vtotal = PSB_RVDC32(map->vtotal); - pipe->vblank = PSB_RVDC32(map->vblank); - pipe->vsync = PSB_RVDC32(map->vsync); - pipe->src = PSB_RVDC32(map->src); - pipe->stride = PSB_RVDC32(map->stride); - pipe->linoff = PSB_RVDC32(map->linoff); - pipe->tileoff = PSB_RVDC32(map->tileoff); - pipe->size = PSB_RVDC32(map->size); - pipe->pos = PSB_RVDC32(map->pos); - pipe->surf = PSB_RVDC32(map->surf); - pipe->cntr = PSB_RVDC32(map->cntr); - pipe->status = PSB_RVDC32(map->status); - - /*save palette (gamma) */ - for (i = 0; i < 256; i++) - pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2)); - - if (pipenum == 1) { - regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL); - regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS); - - regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL); - regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL); - return 0; - } - - *mipi_val = PSB_RVDC32(mipi_reg); - return 0; -} - -/* - * mdfld_restore_display_registers - * - * Description: We are going to resume so restore display register state. - * - * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio - */ -static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum) -{ - /* To get panel out of ULPS mode. */ - u32 temp = 0; - u32 device_ready_reg = DEVICE_READY_REG; - struct drm_psb_private *dev_priv = dev->dev_private; - struct mdfld_dsi_config *dsi_config = NULL; - struct medfield_state *regs = &dev_priv->regs.mdfld; - struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum]; - const struct psb_offset *map = &dev_priv->regmap[pipenum]; - u32 i; - u32 dpll; - u32 timeout = 0; - - /* register */ - u32 mipi_reg = MIPI; - - /* values */ - u32 dpll_val = pipe->dpll; - u32 mipi_val = regs->saveMIPI; - - switch (pipenum) { - case 0: - dpll_val &= ~DPLL_VCO_ENABLE; - dsi_config = dev_priv->dsi_configs[0]; - break; - case 1: - dpll_val &= ~DPLL_VCO_ENABLE; - break; - case 2: - mipi_reg = MIPI_C; - mipi_val = regs->saveMIPI_C; - dsi_config = dev_priv->dsi_configs[1]; - break; - default: - DRM_ERROR("%s, invalid pipe number.\n", __func__); - return -EINVAL; - } - - /*make sure VGA plane is off. 
it initializes to on after reset!*/ - PSB_WVDC32(0x80000000, VGACNTRL); - - if (pipenum == 1) { - PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll); - PSB_RVDC32(map->dpll); - - PSB_WVDC32(pipe->fp0, map->fp0); - } else { - - dpll = PSB_RVDC32(map->dpll); - - if (!(dpll & DPLL_VCO_ENABLE)) { - - /* When ungating power of DPLL, needs to wait 0.5us - before enable the VCO */ - if (dpll & MDFLD_PWR_GATE_EN) { - dpll &= ~MDFLD_PWR_GATE_EN; - PSB_WVDC32(dpll, map->dpll); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - } - - PSB_WVDC32(pipe->fp0, map->fp0); - PSB_WVDC32(dpll_val, map->dpll); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - - dpll_val |= DPLL_VCO_ENABLE; - PSB_WVDC32(dpll_val, map->dpll); - PSB_RVDC32(map->dpll); - - /* wait for DSI PLL to lock */ - while (timeout < 20000 && - !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) { - udelay(150); - timeout++; - } - - if (timeout == 20000) { - DRM_ERROR("%s, can't lock DSIPLL.\n", - __func__); - return -EINVAL; - } - } - } - /* Restore mode */ - PSB_WVDC32(pipe->htotal, map->htotal); - PSB_WVDC32(pipe->hblank, map->hblank); - PSB_WVDC32(pipe->hsync, map->hsync); - PSB_WVDC32(pipe->vtotal, map->vtotal); - PSB_WVDC32(pipe->vblank, map->vblank); - PSB_WVDC32(pipe->vsync, map->vsync); - PSB_WVDC32(pipe->src, map->src); - PSB_WVDC32(pipe->status, map->status); - - /*set up the plane*/ - PSB_WVDC32(pipe->stride, map->stride); - PSB_WVDC32(pipe->linoff, map->linoff); - PSB_WVDC32(pipe->tileoff, map->tileoff); - PSB_WVDC32(pipe->size, map->size); - PSB_WVDC32(pipe->pos, map->pos); - PSB_WVDC32(pipe->surf, map->surf); - - if (pipenum == 1) { - /* restore palette (gamma) */ - /* udelay(50000); */ - for (i = 0; i < 256; i++) - PSB_WVDC32(pipe->palette[i], map->palette + (i << 2)); - - PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL); - PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS); - - /*TODO: resume HDMI port */ - - /*TODO: resume pipe*/ - - /*enable the plane*/ - PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr); - - return 0; - } - - /*set up pipe related registers*/ - PSB_WVDC32(mipi_val, mipi_reg); - - /*setup MIPI adapter + MIPI IP registers*/ - if (dsi_config) - mdfld_dsi_controller_init(dsi_config, pipenum); - - if (in_atomic() || in_interrupt()) - mdelay(20); - else - msleep(20); - - /*enable the plane*/ - PSB_WVDC32(pipe->cntr, map->cntr); - - if (in_atomic() || in_interrupt()) - mdelay(20); - else - msleep(20); - - /* LP Hold Release */ - temp = REG_READ(mipi_reg); - temp |= LP_OUTPUT_HOLD_RELEASE; - REG_WRITE(mipi_reg, temp); - mdelay(1); - - - /* Set DSI host to exit from Utra Low Power State */ - temp = REG_READ(device_ready_reg); - temp &= ~ULPS_MASK; - temp |= 0x3; - temp |= EXIT_ULPS_DEV_READY; - REG_WRITE(device_ready_reg, temp); - mdelay(1); - - temp = REG_READ(device_ready_reg); - temp &= ~ULPS_MASK; - temp |= EXITING_ULPS; - REG_WRITE(device_ready_reg, temp); - mdelay(1); - - /*enable the pipe*/ - PSB_WVDC32(pipe->conf, map->conf); - - /* restore palette (gamma) */ - /* udelay(50000); */ - for (i = 0; i < 256; i++) - PSB_WVDC32(pipe->palette[i], map->palette + (i << 2)); - - return 0; -} - -static int mdfld_save_registers(struct drm_device *dev) -{ - /* mdfld_save_cursor_overlay_registers(dev); */ - mdfld_save_display_registers(dev, 0); - mdfld_save_display_registers(dev, 2); - mdfld_disable_crtc(dev, 0); - mdfld_disable_crtc(dev, 2); - - return 0; -} - -static int mdfld_restore_registers(struct drm_device *dev) -{ - mdfld_restore_display_registers(dev, 2); - 
mdfld_restore_display_registers(dev, 0); - /* mdfld_restore_cursor_overlay_registers(dev); */ - - return 0; -} - -static int mdfld_power_down(struct drm_device *dev) -{ - /* FIXME */ - return 0; -} - -static int mdfld_power_up(struct drm_device *dev) -{ - /* FIXME */ - return 0; -} - -/* Medfield */ -static const struct psb_offset mdfld_regmap[3] = { - { - .fp0 = MRST_FPA0, - .fp1 = MRST_FPA1, - .cntr = DSPACNTR, - .conf = PIPEACONF, - .src = PIPEASRC, - .dpll = MRST_DPLL_A, - .htotal = HTOTAL_A, - .hblank = HBLANK_A, - .hsync = HSYNC_A, - .vtotal = VTOTAL_A, - .vblank = VBLANK_A, - .vsync = VSYNC_A, - .stride = DSPASTRIDE, - .size = DSPASIZE, - .pos = DSPAPOS, - .surf = DSPASURF, - .addr = MRST_DSPABASE, - .status = PIPEASTAT, - .linoff = DSPALINOFF, - .tileoff = DSPATILEOFF, - .palette = PALETTE_A, - }, - { - .fp0 = MDFLD_DPLL_DIV0, - .cntr = DSPBCNTR, - .conf = PIPEBCONF, - .src = PIPEBSRC, - .dpll = MDFLD_DPLL_B, - .htotal = HTOTAL_B, - .hblank = HBLANK_B, - .hsync = HSYNC_B, - .vtotal = VTOTAL_B, - .vblank = VBLANK_B, - .vsync = VSYNC_B, - .stride = DSPBSTRIDE, - .size = DSPBSIZE, - .pos = DSPBPOS, - .surf = DSPBSURF, - .addr = MRST_DSPBBASE, - .status = PIPEBSTAT, - .linoff = DSPBLINOFF, - .tileoff = DSPBTILEOFF, - .palette = PALETTE_B, - }, - { - .fp0 = MRST_FPA0, /* This is what the old code did ?? */ - .cntr = DSPCCNTR, - .conf = PIPECCONF, - .src = PIPECSRC, - /* No DPLL_C */ - .dpll = MRST_DPLL_A, - .htotal = HTOTAL_C, - .hblank = HBLANK_C, - .hsync = HSYNC_C, - .vtotal = VTOTAL_C, - .vblank = VBLANK_C, - .vsync = VSYNC_C, - .stride = DSPCSTRIDE, - .size = DSPBSIZE, - .pos = DSPCPOS, - .surf = DSPCSURF, - .addr = MDFLD_DSPCBASE, - .status = PIPECSTAT, - .linoff = DSPCLINOFF, - .tileoff = DSPCTILEOFF, - .palette = PALETTE_C, - }, -}; - -/* - * The GPIO lines for resetting DSI pipe 0 and 2 are available in the - * PCI device 0000:00:0c.0 on the Medfield. 
- */ -static struct gpiod_lookup_table mdfld_dsi_pipe_gpio_table = { - .table = { - GPIO_LOOKUP("0000:00:0c.0", 128, "dsi-pipe0-reset", - GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("0000:00:0c.0", 34, "dsi-pipe2-reset", - GPIO_ACTIVE_HIGH), - { }, - }, -}; - -static int mdfld_chip_setup(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - struct pci_dev *pdev = to_pci_dev(dev->dev); - - if (pci_enable_msi(pdev)) - dev_warn(dev->dev, "Enabling MSI failed!\n"); - dev_priv->regmap = mdfld_regmap; - - /* Associate the GPIO lines with the DRM device */ - mdfld_dsi_pipe_gpio_table.dev_id = dev_name(dev->dev); - gpiod_add_lookup_table(&mdfld_dsi_pipe_gpio_table); - - return mid_chip_setup(dev); -} - -const struct psb_ops mdfld_chip_ops = { - .name = "mdfld", - .pipes = 3, - .crtcs = 3, - .lvds_mask = (1 << 1), - .hdmi_mask = (1 << 1), - .cursor_needs_phys = 0, - .sgx_offset = MRST_SGX_OFFSET, - - .chip_setup = mdfld_chip_setup, - .crtc_helper = &mdfld_helper_funcs, - .crtc_funcs = &psb_intel_crtc_funcs, - - .output_init = mdfld_output_init, - -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - .backlight_init = mdfld_backlight_init, -#endif - - .save_regs = mdfld_save_registers, - .restore_regs = mdfld_restore_registers, - .save_crtc = gma_crtc_save, - .restore_crtc = gma_crtc_restore, - .power_down = mdfld_power_down, - .power_up = mdfld_power_up, -}; diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c deleted file mode 100644 index 4c5a2f7348c5..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ /dev/null @@ -1,1017 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Authors: - * jim liu <jim.liu@intel.com> - * Jackie Li<yaodong.li@intel.com> - */ - -#include <linux/delay.h> - -#include <drm/drm_simple_kms_helper.h> - -#include "mdfld_dsi_dpi.h" -#include "mdfld_dsi_pkg_sender.h" -#include "mdfld_output.h" -#include "psb_drv.h" -#include "tc35876x-dsi-lvds.h" - -static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, - int pipe); - -static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe) -{ - u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); - int timeout = 0; - - udelay(500); - - /* This will time out after approximately 2+ seconds */ - while ((timeout < 20000) && - (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) { - udelay(100); - timeout++; - } - - if (timeout == 20000) - DRM_INFO("MIPI: HS Data FIFO was never cleared!\n"); -} - -static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe) -{ - u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); - int timeout = 0; - - udelay(500); - - /* This will time out after approximately 2+ seconds */ - while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) - & DSI_FIFO_GEN_HS_CTRL_FULL)) { - udelay(100); - timeout++; - } - if (timeout == 20000) - DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n"); -} - -static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe) -{ - u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); - int timeout = 0; - - udelay(500); - - /* This will time out after approximately 2+ seconds */ - while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & - DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) { - udelay(100); - timeout++; - } - - if (timeout == 20000) - DRM_ERROR("MIPI: DPI FIFO was never cleared\n"); -} - -static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe) -{ - u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe); - int timeout = 0; - - udelay(500); - - /* This will time out after approximately 2+ seconds */ - while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) - & DSI_INTR_STATE_SPL_PKG_SENT))) { - udelay(100); - timeout++; - } - - if (timeout == 20000) - DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n"); -} - -/* For TC35876X */ - -static void dsi_set_device_ready_state(struct drm_device *dev, int state, - int pipe) -{ - REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0); -} - -static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, - int state, int pipe) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - u32 pipeconf_reg = PIPEACONF; - u32 dspcntr_reg = DSPACNTR; - - u32 dspcntr = dev_priv->dspcntr[pipe]; - u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; - - if (pipe) { - pipeconf_reg = PIPECCONF; - dspcntr_reg = DSPCCNTR; - } else - mipi &= (~0x03); - - if (state) { - /*Set up pipe */ - REG_WRITE(pipeconf_reg, BIT(31)); - - if (REG_BIT_WAIT(pipeconf_reg, 1, 30)) - dev_err(dev->dev, "%s: Pipe enable timeout\n", - __func__); - - /*Set up display plane */ - REG_WRITE(dspcntr_reg, dspcntr); - } else { - u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE; - - /* Put DSI lanes to ULPS to disable pipe */ - REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1); - REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */ - - /* LP Hold */ - REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16); - REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */ - - /* Disable display plane */ - REG_FLD_MOD(dspcntr_reg, 0, 31, 31); - - /* Flush the plane changes ??? posted write? 
*/ - REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); - REG_READ(dspbase_reg); - - /* Disable PIPE */ - REG_FLD_MOD(pipeconf_reg, 0, 31, 31); - - if (REG_BIT_WAIT(pipeconf_reg, 0, 30)) - dev_err(dev->dev, "%s: Pipe disable timeout\n", - __func__); - - if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28)) - dev_err(dev->dev, "%s: FIFO not empty\n", - __func__); - } -} - -static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder, - int pipe) -{ - struct mdfld_dsi_dpi_output *dpi_output = - MDFLD_DSI_DPI_OUTPUT(dsi_encoder); - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_encoder_get_config(dsi_encoder); - struct drm_device *dev = dsi_config->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - - if (!dev_priv->dpi_panel_on[pipe]) { - dev_err(dev->dev, "DPI panel is already off\n"); - return; - } - tc35876x_toshiba_bridge_panel_off(dev); - tc35876x_set_bridge_reset_state(dev, 1); - dsi_set_pipe_plane_enable_state(dev, 0, pipe); - mdfld_dsi_dpi_shut_down(dpi_output, pipe); - dsi_set_device_ready_state(dev, 0, pipe); -} - -static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder, - int pipe) -{ - struct mdfld_dsi_dpi_output *dpi_output = - MDFLD_DSI_DPI_OUTPUT(dsi_encoder); - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_encoder_get_config(dsi_encoder); - struct drm_device *dev = dsi_config->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - - if (dev_priv->dpi_panel_on[pipe]) { - dev_err(dev->dev, "DPI panel is already on\n"); - return; - } - - /* For resume path sequence */ - mdfld_dsi_dpi_shut_down(dpi_output, pipe); - dsi_set_device_ready_state(dev, 0, pipe); - - dsi_set_device_ready_state(dev, 1, pipe); - tc35876x_set_bridge_reset_state(dev, 0); - tc35876x_configure_lvds_bridge(dev); - mdfld_dsi_dpi_turn_on(dpi_output, pipe); /* Send turn on command */ - dsi_set_pipe_plane_enable_state(dev, 1, pipe); -} -/* End for TC35876X */ - -/* ************************************************************************* *\ - * FUNCTION: mdfld_dsi_tpo_ic_init - * - * DESCRIPTION: This function is called only by mrst_dsi_mode_set and - * restore_display_registers. since this function does not - * acquire the mutex, it is important that the calling function - * does! 
-\* ************************************************************************* */ -static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe) -{ - struct drm_device *dev = dsi_config->dev; - u32 dcsChannelNumber = dsi_config->channel_num; - u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe); - u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe); - u32 gen_ctrl_val = GEN_LONG_WRITE; - - DRM_INFO("Enter mrst init TPO MIPI display.\n"); - - gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS; - - /* Flip page order */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x00008036); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS)); - - /* 0xF0 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x005a5af0); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); - - /* Write protection key */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x005a5af1); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); - - /* 0xFC */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x005a5afc); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); - - /* 0xB7 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x770000b7); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x00000044); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS)); - - /* 0xB6 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x000a0ab6); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); - - /* 0xF2 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x081010f2); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x4a070708); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x000000c5); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); - - /* 0xF8 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x024003f8); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x01030a04); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x0e020220); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x00000004); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS)); - - /* 0xE2 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x398fc3e2); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x0000916f); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS)); - - /* 0xB0 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x000000b0); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS)); - - /* 0xF4 */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x240242f4); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x78ee2002); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x2a071050); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x507fee10); - mdfld_wait_for_HS_DATA_FIFO(dev, 
pipe); - REG_WRITE(gen_data_reg, 0x10300710); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS)); - - /* 0xBA */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x19fe07ba); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x101c0a31); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x00000010); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); - - /* 0xBB */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x28ff07bb); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x24280a31); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x00000034); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS)); - - /* 0xFB */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x535d05fb); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x1b1a2130); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x221e180e); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x131d2120); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x535d0508); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x1c1a2131); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x231f160d); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x111b2220); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x535c2008); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x1f1d2433); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x2c251a10); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x2c34372d); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x00000023); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS)); - - /* 0xFA */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x525c0bfa); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x1c1c232f); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x2623190e); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x18212625); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x545d0d0e); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x1e1d2333); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x26231a10); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x1a222725); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x545d280f); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x21202635); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x31292013); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x31393d33); - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x00000029); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS)); - - /* Set DM */ - mdfld_wait_for_HS_DATA_FIFO(dev, pipe); - REG_WRITE(gen_data_reg, 0x000100f7); - mdfld_wait_for_HS_CTRL_FIFO(dev, pipe); - REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS)); -} - -static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count, - int num_lane, 
int bpp) -{ - return (u16)((pixel_clock_count * bpp) / (num_lane * 8)); -} - -/* - * Calculate the dpi time basing on a given drm mode @mode - * return 0 on success. - * FIXME: I was using proposed mode value for calculation, may need to - * use crtc mode values later - */ -int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode, - struct mdfld_dsi_dpi_timing *dpi_timing, - int num_lane, int bpp) -{ - int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive; - int pclk_vsync, pclk_vfp, pclk_vbp; - - pclk_hactive = mode->hdisplay; - pclk_hfp = mode->hsync_start - mode->hdisplay; - pclk_hsync = mode->hsync_end - mode->hsync_start; - pclk_hbp = mode->htotal - mode->hsync_end; - - pclk_vfp = mode->vsync_start - mode->vdisplay; - pclk_vsync = mode->vsync_end - mode->vsync_start; - pclk_vbp = mode->vtotal - mode->vsync_end; - - /* - * byte clock counts were calculated by following formula - * bclock_count = pclk_count * bpp / num_lane / 8 - */ - dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count( - pclk_hsync, num_lane, bpp); - dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count( - pclk_hbp, num_lane, bpp); - dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count( - pclk_hfp, num_lane, bpp); - dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count( - pclk_hactive, num_lane, bpp); - dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count( - pclk_vsync, num_lane, bpp); - dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count( - pclk_vbp, num_lane, bpp); - dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count( - pclk_vfp, num_lane, bpp); - - return 0; -} - -void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, - int pipe) -{ - struct drm_device *dev = dsi_config->dev; - int lane_count = dsi_config->lane_count; - struct mdfld_dsi_dpi_timing dpi_timing; - struct drm_display_mode *mode = dsi_config->mode; - u32 val; - - /*un-ready device*/ - REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0); - - /*init dsi adapter before kicking off*/ - REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018); - - /*enable all interrupts*/ - REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff); - - /*set up func_prg*/ - val = lane_count; - val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET; - - switch (dsi_config->bpp) { - case 16: - val |= DSI_DPI_COLOR_FORMAT_RGB565; - break; - case 18: - val |= DSI_DPI_COLOR_FORMAT_RGB666; - break; - case 24: - val |= DSI_DPI_COLOR_FORMAT_RGB888; - break; - default: - DRM_ERROR("unsupported color format, bpp = %d\n", - dsi_config->bpp); - } - REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val); - - REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), - (mode->vtotal * mode->htotal * dsi_config->bpp / - (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK); - REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), - 0xffff & DSI_LP_RX_TIMEOUT_MASK); - - /*max value: 20 clock cycles of txclkesc*/ - REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), - 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK); - - /*min 21 txclkesc, max: ffffh*/ - REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), - 0xffff & DSI_RESET_TIMER_MASK); - - REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe), - mode->vdisplay << 16 | mode->hdisplay); - - /*set DPI timing registers*/ - mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, - dsi_config->lane_count, dsi_config->bpp); - - REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe), - dpi_timing.hsync_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_HBP_COUNT_REG(pipe), - dpi_timing.hbp_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_HFP_COUNT_REG(pipe), - dpi_timing.hfp_count & DSI_DPI_TIMING_MASK); - 
REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe), - dpi_timing.hactive_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe), - dpi_timing.vsync_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_VBP_COUNT_REG(pipe), - dpi_timing.vbp_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_VFP_COUNT_REG(pipe), - dpi_timing.vfp_count & DSI_DPI_TIMING_MASK); - - REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46); - - /*min: 7d0 max: 4e20*/ - REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0); - - /*set up video mode*/ - val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE; - REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val); - - REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000); - - REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004); - - /*TODO: figure out how to setup these registers*/ - if (mdfld_get_panel_type(dev, pipe) == TC35876X) - REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008); - else - REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408); - - REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14); - - if (mdfld_get_panel_type(dev, pipe) == TC35876X) - tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */ - - /*set device ready*/ - REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0); -} - -void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe) -{ - struct drm_device *dev = output->dev; - - /* clear special packet sent bit */ - if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT) - REG_WRITE(MIPI_INTR_STAT_REG(pipe), - DSI_INTR_STATE_SPL_PKG_SENT); - - /*send turn on package*/ - REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON); - - /*wait for SPL_PKG_SENT interrupt*/ - mdfld_wait_for_SPL_PKG_SENT(dev, pipe); - - if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT) - REG_WRITE(MIPI_INTR_STAT_REG(pipe), - DSI_INTR_STATE_SPL_PKG_SENT); - - output->panel_on = 1; - - /* FIXME the following is disabled to WA the X slow start issue - for TMD panel - if (pipe == 2) - dev_priv->dpi_panel_on2 = true; - else if (pipe == 0) - dev_priv->dpi_panel_on = true; */ -} - -static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, - int pipe) -{ - struct drm_device *dev = output->dev; - - /*if output is on, or mode setting didn't happen, ignore this*/ - if ((!output->panel_on) || output->first_boot) { - output->first_boot = 0; - return; - } - - /* Wait for dpi fifo to empty */ - mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe); - - /* Clear the special packet interrupt bit if set */ - if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT) - REG_WRITE(MIPI_INTR_STAT_REG(pipe), - DSI_INTR_STATE_SPL_PKG_SENT); - - if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN) - goto shutdown_out; - - REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN); - -shutdown_out: - output->panel_on = 0; - output->first_boot = 0; - - /* FIXME the following is disabled to WA the X slow start issue - for TMD panel - if (pipe == 2) - dev_priv->dpi_panel_on2 = false; - else if (pipe == 0) - dev_priv->dpi_panel_on = false; */ -} - -static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on) -{ - struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder); - struct mdfld_dsi_dpi_output *dpi_output = - MDFLD_DSI_DPI_OUTPUT(dsi_encoder); - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_encoder_get_config(dsi_encoder); - int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder); - struct drm_device *dev = dsi_config->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - - /*start up 
display island if it was shutdown*/ - if (!gma_power_begin(dev, true)) - return; - - if (on) { - if (mdfld_get_panel_type(dev, pipe) == TMD_VID) - mdfld_dsi_dpi_turn_on(dpi_output, pipe); - else if (mdfld_get_panel_type(dev, pipe) == TC35876X) - mdfld_dsi_configure_up(dsi_encoder, pipe); - else { - /*enable mipi port*/ - REG_WRITE(MIPI_PORT_CONTROL(pipe), - REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31)); - REG_READ(MIPI_PORT_CONTROL(pipe)); - - mdfld_dsi_dpi_turn_on(dpi_output, pipe); - mdfld_dsi_tpo_ic_init(dsi_config, pipe); - } - dev_priv->dpi_panel_on[pipe] = true; - } else { - if (mdfld_get_panel_type(dev, pipe) == TMD_VID) - mdfld_dsi_dpi_shut_down(dpi_output, pipe); - else if (mdfld_get_panel_type(dev, pipe) == TC35876X) - mdfld_dsi_configure_down(dsi_encoder, pipe); - else { - mdfld_dsi_dpi_shut_down(dpi_output, pipe); - - /*disable mipi port*/ - REG_WRITE(MIPI_PORT_CONTROL(pipe), - REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31)); - REG_READ(MIPI_PORT_CONTROL(pipe)); - } - dev_priv->dpi_panel_on[pipe] = false; - } - gma_power_end(dev); -} - -void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode) -{ - mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON); -} - -bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder); - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_encoder_get_config(dsi_encoder); - struct drm_display_mode *fixed_mode = dsi_config->fixed_mode; - - if (fixed_mode) { - adjusted_mode->hdisplay = fixed_mode->hdisplay; - adjusted_mode->hsync_start = fixed_mode->hsync_start; - adjusted_mode->hsync_end = fixed_mode->hsync_end; - adjusted_mode->htotal = fixed_mode->htotal; - adjusted_mode->vdisplay = fixed_mode->vdisplay; - adjusted_mode->vsync_start = fixed_mode->vsync_start; - adjusted_mode->vsync_end = fixed_mode->vsync_end; - adjusted_mode->vtotal = fixed_mode->vtotal; - adjusted_mode->clock = fixed_mode->clock; - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); - } - return true; -} - -void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder) -{ - mdfld_dsi_dpi_set_power(encoder, false); -} - -void mdfld_dsi_dpi_commit(struct drm_encoder *encoder) -{ - mdfld_dsi_dpi_set_power(encoder, true); -} - -/* For TC35876X */ -/* This functionality was implemented in FW in iCDK */ -/* But removed in DV0 and later. So need to add here. 
*/ -static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe) -{ - struct drm_device *dev = dsi_config->dev; - - REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018); - REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff); - REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff); - REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff); - REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14); - REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff); - REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25); - REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0); - REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000); - REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004); - REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820); - REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14); -} - -static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config, - int pipe) -{ - struct drm_device *dev = dsi_config->dev; - struct mdfld_dsi_dpi_timing dpi_timing; - struct drm_display_mode *mode = dsi_config->mode; - - mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, - dsi_config->lane_count, - dsi_config->bpp); - - REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe), - mode->vdisplay << 16 | mode->hdisplay); - REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe), - dpi_timing.hsync_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_HBP_COUNT_REG(pipe), - dpi_timing.hbp_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_HFP_COUNT_REG(pipe), - dpi_timing.hfp_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe), - dpi_timing.hactive_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe), - dpi_timing.vsync_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_VBP_COUNT_REG(pipe), - dpi_timing.vbp_count & DSI_DPI_TIMING_MASK); - REG_WRITE(MIPI_VFP_COUNT_REG(pipe), - dpi_timing.vfp_count & DSI_DPI_TIMING_MASK); -} - -static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe) -{ - struct drm_device *dev = dsi_config->dev; - int lane_count = dsi_config->lane_count; - - if (pipe) { - REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002); - REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000); - } else { - REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000); - REG_WRITE(MIPI_PORT_CONTROL(2), 0x00); - } - - REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F); - REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F); - - /* lane_count = 3 */ - REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count); - - mdfld_mipi_set_video_timing(dsi_config, pipe); -} - -static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe) -{ - struct drm_device *dev = dsi_config->dev; - struct drm_display_mode *mode = dsi_config->mode; - - REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1)); - REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1)); - REG_WRITE(HSYNC_A, - ((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1)); - - REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1)); - REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1)); - REG_WRITE(VSYNC_A, - ((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1)); - - REG_WRITE(PIPEASRC, - ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); -} -/* End for TC35876X */ - -void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder); - struct mdfld_dsi_dpi_output *dpi_output = - MDFLD_DSI_DPI_OUTPUT(dsi_encoder); - struct mdfld_dsi_config *dsi_config = - 
mdfld_dsi_encoder_get_config(dsi_encoder); - struct drm_device *dev = dsi_config->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder); - u32 pipeconf_reg = PIPEACONF; - u32 dspcntr_reg = DSPACNTR; - u32 pipeconf, dspcntr; - - u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; - - if (WARN_ON(pipe < 0)) - return; - - pipeconf = dev_priv->pipeconf[pipe]; - dspcntr = dev_priv->dspcntr[pipe]; - - if (pipe) { - pipeconf_reg = PIPECCONF; - dspcntr_reg = DSPCCNTR; - } else { - if (mdfld_get_panel_type(dev, pipe) == TC35876X) - mipi &= (~0x03); /* Use all four lanes */ - else - mipi |= 2; - } - - /*start up display island if it was shutdown*/ - if (!gma_power_begin(dev, true)) - return; - - if (mdfld_get_panel_type(dev, pipe) == TC35876X) { - /* - * The following logic is required to reset the bridge and - * configure. This also starts the DSI clock at 200MHz. - */ - tc35876x_set_bridge_reset_state(dev, 0); /*Pull High Reset */ - tc35876x_toshiba_bridge_panel_on(dev); - udelay(100); - /* Now start the DSI clock */ - REG_WRITE(MRST_DPLL_A, 0x00); - REG_WRITE(MRST_FPA0, 0xC1); - REG_WRITE(MRST_DPLL_A, 0x00800000); - udelay(500); - REG_WRITE(MRST_DPLL_A, 0x80800000); - - if (REG_BIT_WAIT(pipeconf_reg, 1, 29)) - dev_err(dev->dev, "%s: DSI PLL lock timeout\n", - __func__); - - REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008); - - mipi_set_properties(dsi_config, pipe); - mdfld_mipi_config(dsi_config, pipe); - mdfld_set_pipe_timing(dsi_config, pipe); - - REG_WRITE(DSPABASE, 0x00); - REG_WRITE(DSPASIZE, - ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); - - REG_WRITE(DSPACNTR, 0x98000000); - REG_WRITE(DSPASURF, 0x00); - - REG_WRITE(VGACNTRL, 0x80000000); - REG_WRITE(DEVICE_READY_REG, 0x00000001); - - REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000); - } else { - /*set up mipi port FIXME: do at init time */ - REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi); - } - REG_READ(MIPI_PORT_CONTROL(pipe)); - - if (mdfld_get_panel_type(dev, pipe) == TMD_VID) { - /* NOP */ - } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) { - /* set up DSI controller DPI interface */ - mdfld_dsi_dpi_controller_init(dsi_config, pipe); - - /* Configure MIPI Bridge and Panel */ - tc35876x_configure_lvds_bridge(dev); - dev_priv->dpi_panel_on[pipe] = true; - } else { - /*turn on DPI interface*/ - mdfld_dsi_dpi_turn_on(dpi_output, pipe); - } - - /*set up pipe*/ - REG_WRITE(pipeconf_reg, pipeconf); - REG_READ(pipeconf_reg); - - /*set up display plane*/ - REG_WRITE(dspcntr_reg, dspcntr); - REG_READ(dspcntr_reg); - - msleep(20); /* FIXME: this should wait for vblank */ - - if (mdfld_get_panel_type(dev, pipe) == TMD_VID) { - /* NOP */ - } else if (mdfld_get_panel_type(dev, pipe) == TC35876X) { - mdfld_dsi_dpi_turn_on(dpi_output, pipe); - } else { - /* init driver ic */ - mdfld_dsi_tpo_ic_init(dsi_config, pipe); - /*init backlight*/ - mdfld_dsi_brightness_init(dsi_config, pipe); - } - - gma_power_end(dev); -} - -/* - * Init DSI DPI encoder. 
- * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector - * return pointer of newly allocated DPI encoder, NULL on error - */ -struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, - struct mdfld_dsi_connector *dsi_connector, - const struct panel_funcs *p_funcs) -{ - struct mdfld_dsi_dpi_output *dpi_output = NULL; - struct mdfld_dsi_config *dsi_config; - struct drm_connector *connector = NULL; - struct drm_encoder *encoder = NULL; - int pipe; - u32 data; - int ret; - - pipe = dsi_connector->pipe; - - if (mdfld_get_panel_type(dev, pipe) != TC35876X) { - dsi_config = mdfld_dsi_get_config(dsi_connector); - - /* panel hard-reset */ - if (p_funcs->reset) { - ret = p_funcs->reset(dev, pipe); - if (ret) { - DRM_ERROR("Panel %d hard-reset failed\n", pipe); - return NULL; - } - } - - /* panel drvIC init */ - if (p_funcs->drv_ic_init) - p_funcs->drv_ic_init(dsi_config, pipe); - - /* panel power mode detect */ - ret = mdfld_dsi_get_power_mode(dsi_config, &data, false); - if (ret) { - DRM_ERROR("Panel %d get power mode failed\n", pipe); - dsi_connector->status = connector_status_disconnected; - } else { - DRM_INFO("pipe %d power mode 0x%x\n", pipe, data); - dsi_connector->status = connector_status_connected; - } - } - - dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL); - if (!dpi_output) { - DRM_ERROR("No memory\n"); - return NULL; - } - - dpi_output->panel_on = 0; - dpi_output->dev = dev; - if (mdfld_get_panel_type(dev, pipe) != TC35876X) - dpi_output->p_funcs = p_funcs; - dpi_output->first_boot = 1; - - /*get fixed mode*/ - dsi_config = mdfld_dsi_get_config(dsi_connector); - - /*create drm encoder object*/ - connector = &dsi_connector->base.base; - encoder = &dpi_output->base.base.base; - drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS); - drm_encoder_helper_add(encoder, - p_funcs->encoder_helper_funcs); - - /*attach to given connector*/ - drm_connector_attach_encoder(connector, encoder); - - /*set possible crtcs and clones*/ - if (dsi_connector->pipe) { - encoder->possible_crtcs = (1 << 2); - encoder->possible_clones = 0; - } else { - encoder->possible_crtcs = (1 << 0); - encoder->possible_clones = 0; - } - - dsi_connector->base.encoder = &dpi_output->base.base; - - return &dpi_output->base; -} diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h deleted file mode 100644 index 2b40663e1696..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * jim liu <jim.liu@intel.com> - * Jackie Li<yaodong.li@intel.com> - */ - -#ifndef __MDFLD_DSI_DPI_H__ -#define __MDFLD_DSI_DPI_H__ - -#include "mdfld_dsi_output.h" -#include "mdfld_output.h" - -struct mdfld_dsi_dpi_timing { - u16 hsync_count; - u16 hbp_count; - u16 hfp_count; - u16 hactive_count; - u16 vsync_count; - u16 vbp_count; - u16 vfp_count; -}; - -struct mdfld_dsi_dpi_output { - struct mdfld_dsi_encoder base; - struct drm_device *dev; - - int panel_on; - int first_boot; - - const struct panel_funcs *p_funcs; -}; - -#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\ - container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base) - -/* Export functions */ -extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode, - struct mdfld_dsi_dpi_timing *dpi_timing, - int num_lane, int bpp); -extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, - struct mdfld_dsi_connector *dsi_connector, - const struct panel_funcs *p_funcs); - -/* MDFLD DPI helper functions */ -extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode); -extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder); -extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder); -extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); -extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, - int pipe); -extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, - int pipe); -#endif /*__MDFLD_DSI_DPI_H__*/ diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c deleted file mode 100644 index 24105f45c1c4..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ /dev/null @@ -1,603 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Authors: - * jim liu <jim.liu@intel.com> - * Jackie Li<yaodong.li@intel.com> - */ - -#include <linux/delay.h> -#include <linux/moduleparam.h> -#include <linux/pm_runtime.h> -#include <linux/gpio/consumer.h> - -#include <asm/intel_scu_ipc.h> - -#include "mdfld_dsi_dpi.h" -#include "mdfld_dsi_output.h" -#include "mdfld_dsi_pkg_sender.h" -#include "mdfld_output.h" -#include "tc35876x-dsi-lvds.h" - -/* get the LABC from command line. */ -static int LABC_control = 1; - -#ifdef MODULE -module_param(LABC_control, int, 0644); -#else - -static int __init parse_LABC_control(char *arg) -{ - /* LABC control can be passed in as a cmdline parameter */ - /* to enable this feature add LABC=1 to cmdline */ - /* to disable this feature add LABC=0 to cmdline */ - if (!arg) - return -EINVAL; - - if (!strcasecmp(arg, "0")) - LABC_control = 0; - else if (!strcasecmp(arg, "1")) - LABC_control = 1; - - return 0; -} -early_param("LABC", parse_LABC_control); -#endif - -/* - * Check and see if the generic control or data buffer is empty and ready. - */ -void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg, - u32 fifo_stat) -{ - u32 GEN_BF_time_out_count; - - /* Check MIPI Adatper command registers */ - for (GEN_BF_time_out_count = 0; - GEN_BF_time_out_count < GEN_FB_TIME_OUT; - GEN_BF_time_out_count++) { - if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat) - break; - udelay(100); - } - - if (GEN_BF_time_out_count == GEN_FB_TIME_OUT) - DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n", - gen_fifo_stat_reg); -} - -/* - * Manage the DSI MIPI keyboard and display brightness. - * FIXME: this is exported to OSPM code. should work out an specific - * display interface to OSPM. - */ - -void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe) -{ - struct mdfld_dsi_pkg_sender *sender = - mdfld_dsi_get_pkg_sender(dsi_config); - struct drm_device *dev; - struct drm_psb_private *dev_priv; - u32 gen_ctrl_val; - - if (!sender) { - DRM_ERROR("No sender found\n"); - return; - } - - dev = sender->dev; - dev_priv = dev->dev_private; - - /* Set default display backlight value to 85% (0xd8)*/ - mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1, - true); - - /* Set minimum brightness setting of CABC function to 20% (0x33)*/ - mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true); - - /* Enable backlight or/and LABC */ - gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON | - BACKLIGHT_ON; - if (LABC_control == 1) - gen_ctrl_val |= DISPLAY_DIMMING_ON | DISPLAY_BRIGHTNESS_AUTO - | GAMMA_AUTO; - - if (LABC_control == 1) - gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON; - - dev_priv->mipi_ctrl_display = gen_ctrl_val; - - mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val, - 1, true); - - mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true); -} - -void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level) -{ - struct mdfld_dsi_pkg_sender *sender; - struct drm_psb_private *dev_priv; - struct mdfld_dsi_config *dsi_config; - u32 gen_ctrl_val = 0; - int p_type = TMD_VID; - - if (!dev || (pipe != 0 && pipe != 2)) { - DRM_ERROR("Invalid parameter\n"); - return; - } - - p_type = mdfld_get_panel_type(dev, 0); - - dev_priv = dev->dev_private; - - if (pipe) - dsi_config = dev_priv->dsi_configs[1]; - else - dsi_config = dev_priv->dsi_configs[0]; - - sender = mdfld_dsi_get_pkg_sender(dsi_config); - - if (!sender) { - DRM_ERROR("No sender found\n"); - return; - } - - gen_ctrl_val = 
(level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff; - - dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n", - pipe, gen_ctrl_val); - - if (p_type == TMD_VID) { - /* Set display backlight value */ - mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness, - (u8)gen_ctrl_val, 1, true); - } else { - /* Set display backlight value */ - mdfld_dsi_send_mcs_short(sender, write_display_brightness, - (u8)gen_ctrl_val, 1, true); - - /* Enable backlight control */ - if (level == 0) - gen_ctrl_val = 0; - else - gen_ctrl_val = dev_priv->mipi_ctrl_display; - - mdfld_dsi_send_mcs_short(sender, write_ctrl_display, - (u8)gen_ctrl_val, 1, true); - } -} - -static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config, - u8 dcs, u32 *data, bool hs) -{ - struct mdfld_dsi_pkg_sender *sender - = mdfld_dsi_get_pkg_sender(dsi_config); - - if (!sender || !data) { - DRM_ERROR("Invalid parameter\n"); - return -EINVAL; - } - - return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs); -} - -int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode, - bool hs) -{ - if (!dsi_config || !mode) { - DRM_ERROR("Invalid parameter\n"); - return -EINVAL; - } - - return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs); -} - -/* - * NOTE: this function was used by OSPM. - * TODO: will be removed later, should work out display interfaces for OSPM - */ -void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe) -{ - if (!dsi_config || ((pipe != 0) && (pipe != 2))) { - DRM_ERROR("Invalid parameters\n"); - return; - } - - mdfld_dsi_dpi_controller_init(dsi_config, pipe); -} - -static void mdfld_dsi_connector_save(struct drm_connector *connector) -{ -} - -static void mdfld_dsi_connector_restore(struct drm_connector *connector) -{ -} - -/* FIXME: start using the force parameter */ -static enum drm_connector_status -mdfld_dsi_connector_detect(struct drm_connector *connector, bool force) -{ - struct mdfld_dsi_connector *dsi_connector - = mdfld_dsi_connector(connector); - - dsi_connector->status = connector_status_connected; - - return dsi_connector->status; -} - -static int mdfld_dsi_connector_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t value) -{ - struct drm_encoder *encoder = connector->encoder; - - if (!strcmp(property->name, "scaling mode") && encoder) { - struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc); - bool centerechange; - uint64_t val; - - if (!gma_crtc) - goto set_prop_error; - - switch (value) { - case DRM_MODE_SCALE_FULLSCREEN: - break; - case DRM_MODE_SCALE_NO_SCALE: - break; - case DRM_MODE_SCALE_ASPECT: - break; - default: - goto set_prop_error; - } - - if (drm_object_property_get_value(&connector->base, property, &val)) - goto set_prop_error; - - if (val == value) - goto set_prop_done; - - if (drm_object_property_set_value(&connector->base, - property, value)) - goto set_prop_error; - - centerechange = (val == DRM_MODE_SCALE_NO_SCALE) || - (value == DRM_MODE_SCALE_NO_SCALE); - - if (gma_crtc->saved_mode.hdisplay != 0 && - gma_crtc->saved_mode.vdisplay != 0) { - if (centerechange) { - if (!drm_crtc_helper_set_mode(encoder->crtc, - &gma_crtc->saved_mode, - encoder->crtc->x, - encoder->crtc->y, - encoder->crtc->primary->fb)) - goto set_prop_error; - } else { - const struct drm_encoder_helper_funcs *funcs = - encoder->helper_private; - funcs->mode_set(encoder, - &gma_crtc->saved_mode, - &gma_crtc->saved_adjusted_mode); - } - } - } else if (!strcmp(property->name, "backlight") && encoder) { - if 
(drm_object_property_set_value(&connector->base, property, - value)) - goto set_prop_error; - else - gma_backlight_set(encoder->dev, value); - } -set_prop_done: - return 0; -set_prop_error: - return -1; -} - -static void mdfld_dsi_connector_destroy(struct drm_connector *connector) -{ - struct mdfld_dsi_connector *dsi_connector = - mdfld_dsi_connector(connector); - struct mdfld_dsi_pkg_sender *sender; - - if (!dsi_connector) - return; - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - sender = dsi_connector->pkg_sender; - mdfld_dsi_pkg_sender_destroy(sender); - kfree(dsi_connector); -} - -static int mdfld_dsi_connector_get_modes(struct drm_connector *connector) -{ - struct mdfld_dsi_connector *dsi_connector = - mdfld_dsi_connector(connector); - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_get_config(dsi_connector); - struct drm_display_mode *fixed_mode = dsi_config->fixed_mode; - struct drm_display_mode *dup_mode = NULL; - struct drm_device *dev = connector->dev; - - if (fixed_mode) { - dev_dbg(dev->dev, "fixed_mode %dx%d\n", - fixed_mode->hdisplay, fixed_mode->vdisplay); - dup_mode = drm_mode_duplicate(dev, fixed_mode); - drm_mode_probed_add(connector, dup_mode); - return 1; - } - DRM_ERROR("Didn't get any modes!\n"); - return 0; -} - -static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct mdfld_dsi_connector *dsi_connector = - mdfld_dsi_connector(connector); - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_get_config(dsi_connector); - struct drm_display_mode *fixed_mode = dsi_config->fixed_mode; - - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - return MODE_NO_INTERLACE; - - /** - * FIXME: current DC has no fitting unit, reject any mode setting - * request - * Will figure out a way to do up-scaling(panel fitting) later. 
- **/ - if (fixed_mode) { - if (mode->hdisplay != fixed_mode->hdisplay) - return MODE_PANEL; - - if (mode->vdisplay != fixed_mode->vdisplay) - return MODE_PANEL; - } - - return MODE_OK; -} - -static struct drm_encoder *mdfld_dsi_connector_best_encoder( - struct drm_connector *connector) -{ - struct mdfld_dsi_connector *dsi_connector = - mdfld_dsi_connector(connector); - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_get_config(dsi_connector); - return &dsi_config->encoder->base.base; -} - -/*DSI connector funcs*/ -static const struct drm_connector_funcs mdfld_dsi_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .detect = mdfld_dsi_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .set_property = mdfld_dsi_connector_set_property, - .destroy = mdfld_dsi_connector_destroy, -}; - -/*DSI connector helper funcs*/ -static const struct drm_connector_helper_funcs - mdfld_dsi_connector_helper_funcs = { - .get_modes = mdfld_dsi_connector_get_modes, - .mode_valid = mdfld_dsi_connector_mode_valid, - .best_encoder = mdfld_dsi_connector_best_encoder, -}; - -static int mdfld_dsi_get_default_config(struct drm_device *dev, - struct mdfld_dsi_config *config, int pipe) -{ - if (!dev || !config) { - DRM_ERROR("Invalid parameters"); - return -EINVAL; - } - - config->bpp = 24; - if (mdfld_get_panel_type(dev, pipe) == TC35876X) - config->lane_count = 4; - else - config->lane_count = 2; - config->channel_num = 0; - - if (mdfld_get_panel_type(dev, pipe) == TMD_VID) - config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE; - else if (mdfld_get_panel_type(dev, pipe) == TC35876X) - config->video_mode = - MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS; - else - config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE; - - return 0; -} - -int mdfld_dsi_panel_reset(struct drm_device *ddev, int pipe) -{ - struct device *dev = ddev->dev; - struct gpio_desc *gpiod; - - /* - * Raise the GPIO reset line for the corresponding pipe to HIGH, - * this is probably because it is active low so this takes the - * respective pipe out of reset. (We have no code to put it back - * into reset in this driver.) - */ - switch (pipe) { - case 0: - gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_OUT_HIGH); - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - break; - case 2: - gpiod = gpiod_get(dev, "dsi-pipe2-reset", GPIOD_OUT_HIGH); - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - break; - default: - DRM_DEV_ERROR(dev, "Invalid output pipe\n"); - return -EINVAL; - } - gpiod_put(gpiod); - - /* Flush posted writes on the device */ - gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_ASIS); - if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); - gpiod_get_value(gpiod); - gpiod_put(gpiod); - - return 0; -} - -/* - * MIPI output init - * @dev drm device - * @pipe pipe number. 
0 or 2 - * @config - * - * Do the initialization of a MIPI output, including create DRM mode objects - * initialization of DSI output on @pipe - */ -void mdfld_dsi_output_init(struct drm_device *dev, - int pipe, - const struct panel_funcs *p_vid_funcs) -{ - struct mdfld_dsi_config *dsi_config; - struct mdfld_dsi_connector *dsi_connector; - struct drm_connector *connector; - struct mdfld_dsi_encoder *encoder; - struct drm_psb_private *dev_priv = dev->dev_private; - struct panel_info dsi_panel_info; - u32 width_mm, height_mm; - - dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe); - - if (pipe != 0 && pipe != 2) { - DRM_ERROR("Invalid parameter\n"); - return; - } - - /*create a new connector*/ - dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL); - if (!dsi_connector) { - DRM_ERROR("No memory"); - return; - } - - dsi_connector->pipe = pipe; - - dsi_config = kzalloc(sizeof(struct mdfld_dsi_config), - GFP_KERNEL); - if (!dsi_config) { - DRM_ERROR("cannot allocate memory for DSI config\n"); - goto dsi_init_err0; - } - mdfld_dsi_get_default_config(dev, dsi_config, pipe); - - dsi_connector->private = dsi_config; - - dsi_config->changed = 1; - dsi_config->dev = dev; - - dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev); - if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info)) - goto dsi_init_err0; - - width_mm = dsi_panel_info.width_mm; - height_mm = dsi_panel_info.height_mm; - - dsi_config->mode = dsi_config->fixed_mode; - dsi_config->connector = dsi_connector; - - if (!dsi_config->fixed_mode) { - DRM_ERROR("No panel fixed mode was found\n"); - goto dsi_init_err0; - } - - if (pipe && dev_priv->dsi_configs[0]) { - dsi_config->dvr_ic_inited = 0; - dev_priv->dsi_configs[1] = dsi_config; - } else if (pipe == 0) { - dsi_config->dvr_ic_inited = 1; - dev_priv->dsi_configs[0] = dsi_config; - } else { - DRM_ERROR("Trying to init MIPI1 before MIPI0\n"); - goto dsi_init_err0; - } - - - connector = &dsi_connector->base.base; - dsi_connector->base.save = mdfld_dsi_connector_save; - dsi_connector->base.restore = mdfld_dsi_connector_restore; - - drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); - - connector->display_info.subpixel_order = SubPixelHorizontalRGB; - connector->display_info.width_mm = width_mm; - connector->display_info.height_mm = height_mm; - connector->interlace_allowed = false; - connector->doublescan_allowed = false; - - /*attach properties*/ - drm_object_attach_property(&connector->base, - dev->mode_config.scaling_mode_property, - DRM_MODE_SCALE_FULLSCREEN); - drm_object_attach_property(&connector->base, - dev_priv->backlight_property, - MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); - - /*init DSI package sender on this output*/ - if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) { - DRM_ERROR("Package Sender initialization failed on pipe %d\n", - pipe); - goto dsi_init_err0; - } - - encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs); - if (!encoder) { - DRM_ERROR("Create DPI encoder failed\n"); - goto dsi_init_err1; - } - encoder->private = dsi_config; - dsi_config->encoder = encoder; - encoder->base.type = (pipe == 0) ? 
INTEL_OUTPUT_MIPI : - INTEL_OUTPUT_MIPI2; - drm_connector_register(connector); - return; - - /*TODO: add code to destroy outputs on error*/ -dsi_init_err1: - /*destroy sender*/ - mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender); - - drm_connector_cleanup(connector); - - kfree(dsi_config->fixed_mode); - kfree(dsi_config); -dsi_init_err0: - kfree(dsi_connector); -} diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h deleted file mode 100644 index 5c0db3c2903f..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h +++ /dev/null @@ -1,377 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Authors: - * jim liu <jim.liu@intel.com> - * Jackie Li<yaodong.li@intel.com> - */ - -#ifndef __MDFLD_DSI_OUTPUT_H__ -#define __MDFLD_DSI_OUTPUT_H__ - -#include <linux/backlight.h> - -#include <asm/intel-mid.h> - -#include <drm/drm.h> -#include <drm/drm_crtc.h> -#include <drm/drm_edid.h> - -#include "mdfld_output.h" -#include "psb_drv.h" -#include "psb_intel_drv.h" -#include "psb_intel_reg.h" - -#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) -#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) -#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end)) -#define FLD_MOD(orig, val, start, end) \ - (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) - -#define REG_FLD_MOD(reg, val, start, end) \ - REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end)) - -static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg, - u32 val, int start, int end) -{ - int t = 100000; - - while (FLD_GET(REG_READ(reg), start, end) != val) { - if (--t == 0) - return 1; - } - - return 0; -} - -#define REG_FLD_WAIT(reg, val, start, end) \ - REGISTER_FLD_WAIT(dev, reg, val, start, end) - -#define REG_BIT_WAIT(reg, val, bitnum) \ - REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum) - -#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100 - -#ifdef DEBUG -#define CHECK_PIPE(pipe) ({ \ - const typeof(pipe) __pipe = (pipe); \ - BUG_ON(__pipe != 0 && __pipe != 2); \ - __pipe; }) -#else -#define CHECK_PIPE(pipe) (pipe) -#endif - -/* - * Actual MIPIA->MIPIC reg offset is 0x800, value 0x400 is valid for 0 and 2 - */ -#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400) - -/* mdfld DSI controller registers */ -#define MIPI_DEVICE_READY_REG(pipe) (0xb000 + REG_OFFSET(pipe)) -#define MIPI_INTR_STAT_REG(pipe) (0xb004 + REG_OFFSET(pipe)) -#define MIPI_INTR_EN_REG(pipe) (0xb008 + REG_OFFSET(pipe)) -#define MIPI_DSI_FUNC_PRG_REG(pipe) (0xb00c + REG_OFFSET(pipe)) -#define MIPI_HS_TX_TIMEOUT_REG(pipe) (0xb010 + REG_OFFSET(pipe)) -#define MIPI_LP_RX_TIMEOUT_REG(pipe) (0xb014 + REG_OFFSET(pipe)) -#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe) (0xb018 + REG_OFFSET(pipe)) -#define MIPI_DEVICE_RESET_TIMER_REG(pipe) (0xb01c + REG_OFFSET(pipe)) -#define MIPI_DPI_RESOLUTION_REG(pipe) (0xb020 + REG_OFFSET(pipe)) -#define MIPI_DBI_FIFO_THROTTLE_REG(pipe) (0xb024 + REG_OFFSET(pipe)) -#define MIPI_HSYNC_COUNT_REG(pipe) (0xb028 + REG_OFFSET(pipe)) -#define MIPI_HBP_COUNT_REG(pipe) (0xb02c + REG_OFFSET(pipe)) -#define MIPI_HFP_COUNT_REG(pipe) (0xb030 + REG_OFFSET(pipe)) -#define MIPI_HACTIVE_COUNT_REG(pipe) (0xb034 + REG_OFFSET(pipe)) -#define MIPI_VSYNC_COUNT_REG(pipe) (0xb038 + REG_OFFSET(pipe)) -#define MIPI_VBP_COUNT_REG(pipe) (0xb03c + REG_OFFSET(pipe)) -#define MIPI_VFP_COUNT_REG(pipe) (0xb040 + REG_OFFSET(pipe)) -#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe) (0xb044 + REG_OFFSET(pipe)) -#define MIPI_DPI_CONTROL_REG(pipe) (0xb048 + REG_OFFSET(pipe)) -#define MIPI_DPI_DATA_REG(pipe) (0xb04c + REG_OFFSET(pipe)) -#define MIPI_INIT_COUNT_REG(pipe) (0xb050 + REG_OFFSET(pipe)) -#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe) (0xb054 + REG_OFFSET(pipe)) -#define MIPI_VIDEO_MODE_FORMAT_REG(pipe) (0xb058 + REG_OFFSET(pipe)) -#define MIPI_EOT_DISABLE_REG(pipe) (0xb05c + REG_OFFSET(pipe)) -#define MIPI_LP_BYTECLK_REG(pipe) (0xb060 + REG_OFFSET(pipe)) -#define MIPI_LP_GEN_DATA_REG(pipe) (0xb064 + REG_OFFSET(pipe)) -#define MIPI_HS_GEN_DATA_REG(pipe) (0xb068 + REG_OFFSET(pipe)) -#define MIPI_LP_GEN_CTRL_REG(pipe) (0xb06c + REG_OFFSET(pipe)) -#define MIPI_HS_GEN_CTRL_REG(pipe) (0xb070 + 
REG_OFFSET(pipe)) -#define MIPI_GEN_FIFO_STAT_REG(pipe) (0xb074 + REG_OFFSET(pipe)) -#define MIPI_HS_LS_DBI_ENABLE_REG(pipe) (0xb078 + REG_OFFSET(pipe)) -#define MIPI_DPHY_PARAM_REG(pipe) (0xb080 + REG_OFFSET(pipe)) -#define MIPI_DBI_BW_CTRL_REG(pipe) (0xb084 + REG_OFFSET(pipe)) -#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe) (0xb088 + REG_OFFSET(pipe)) - -#define MIPI_CTRL_REG(pipe) (0xb104 + REG_OFFSET(pipe)) -#define MIPI_DATA_ADD_REG(pipe) (0xb108 + REG_OFFSET(pipe)) -#define MIPI_DATA_LEN_REG(pipe) (0xb10c + REG_OFFSET(pipe)) -#define MIPI_CMD_ADD_REG(pipe) (0xb110 + REG_OFFSET(pipe)) -#define MIPI_CMD_LEN_REG(pipe) (0xb114 + REG_OFFSET(pipe)) - -/* non-uniform reg offset */ -#define MIPI_PORT_CONTROL(pipe) (CHECK_PIPE(pipe) ? MIPI_C : MIPI) - -#define DSI_DEVICE_READY (0x1) -#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1) -#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1) -#define DSI_POWER_STATE_ULPS_OFFSET (0x1) - - -#define DSI_ONE_DATA_LANE (0x1) -#define DSI_TWO_DATA_LANE (0x2) -#define DSI_THREE_DATA_LANE (0X3) -#define DSI_FOUR_DATA_LANE (0x4) -#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3) -#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5) -#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7) -#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7) -#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7) -#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7) -#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13) - -#define DSI_INTR_STATE_RXSOTERROR BIT(0) - -#define DSI_INTR_STATE_SPL_PKG_SENT BIT(30) -#define DSI_INTR_STATE_TE BIT(31) - -#define DSI_HS_TX_TIMEOUT_MASK (0xffffff) - -#define DSI_LP_RX_TIMEOUT_MASK (0xffffff) - -#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f) - -#define DSI_RESET_TIMER_MASK (0xffff) - -#define DSI_DBI_FIFO_WM_HALF (0x0) -#define DSI_DBI_FIFO_WM_QUARTER (0x1) -#define DSI_DBI_FIFO_WM_LOW (0x2) - -#define DSI_DPI_TIMING_MASK (0xffff) - -#define DSI_INIT_TIMER_MASK (0xffff) - -#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff) - -#define DSI_LP_BYTECLK_MASK (0x0ffff) - -#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03) -#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13) -#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23) -#define DSI_HS_CTRL_GEN_R0 (0x04) -#define DSI_HS_CTRL_GEN_R1 (0x14) -#define DSI_HS_CTRL_GEN_R2 (0x24) -#define DSI_HS_CTRL_GEN_LONG_W (0x29) -#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05) -#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15) -#define DSI_HS_CTRL_MCS_R0 (0x06) -#define DSI_HS_CTRL_MCS_LONG_W (0x39) -#define DSI_HS_CTRL_VC_OFFSET (0x06) -#define DSI_HS_CTRL_WC_OFFSET (0x08) - -#define DSI_FIFO_GEN_HS_DATA_FULL BIT(0) -#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY BIT(1) -#define DSI_FIFO_GEN_HS_DATA_EMPTY BIT(2) -#define DSI_FIFO_GEN_LP_DATA_FULL BIT(8) -#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY BIT(9) -#define DSI_FIFO_GEN_LP_DATA_EMPTY BIT(10) -#define DSI_FIFO_GEN_HS_CTRL_FULL BIT(16) -#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY BIT(17) -#define DSI_FIFO_GEN_HS_CTRL_EMPTY BIT(18) -#define DSI_FIFO_GEN_LP_CTRL_FULL BIT(24) -#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY BIT(25) -#define DSI_FIFO_GEN_LP_CTRL_EMPTY BIT(26) -#define DSI_FIFO_DBI_EMPTY BIT(27) -#define DSI_FIFO_DPI_EMPTY BIT(28) - -#define DSI_DBI_HS_LP_SWITCH_MASK (0x1) - -#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0) -#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16) - -#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001) -#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002) - -/*dsi power modes*/ -#define DSI_POWER_MODE_DISPLAY_ON BIT(2) -#define DSI_POWER_MODE_NORMAL_ON BIT(3) -#define DSI_POWER_MODE_SLEEP_OUT BIT(4) -#define DSI_POWER_MODE_PARTIAL_ON BIT(5) -#define 
DSI_POWER_MODE_IDLE_ON BIT(6) - -enum { - MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1, - MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2, - MDFLD_DSI_VIDEO_BURST_MODE = 3, -}; - -#define DSI_DPI_COMPLETE_LAST_LINE BIT(2) -#define DSI_DPI_DISABLE_BTA BIT(3) - -struct mdfld_dsi_connector { - struct gma_connector base; - - int pipe; - void *private; - void *pkg_sender; - - /* Connection status */ - enum drm_connector_status status; -}; - -struct mdfld_dsi_encoder { - struct gma_encoder base; - void *private; -}; - -/* - * DSI config, consists of one DSI connector, two DSI encoders. - * DRM will pick up on DSI encoder basing on differents configs. - */ -struct mdfld_dsi_config { - struct drm_device *dev; - struct drm_display_mode *fixed_mode; - struct drm_display_mode *mode; - - struct mdfld_dsi_connector *connector; - struct mdfld_dsi_encoder *encoder; - - int changed; - - int bpp; - int lane_count; - /*Virtual channel number for this encoder*/ - int channel_num; - /*video mode configure*/ - int video_mode; - - int dvr_ic_inited; -}; - -static inline struct mdfld_dsi_connector *mdfld_dsi_connector( - struct drm_connector *connector) -{ - struct gma_connector *gma_connector; - - gma_connector = to_gma_connector(connector); - - return container_of(gma_connector, struct mdfld_dsi_connector, base); -} - -static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder( - struct drm_encoder *encoder) -{ - struct gma_encoder *gma_encoder; - - gma_encoder = to_gma_encoder(encoder); - - return container_of(gma_encoder, struct mdfld_dsi_encoder, base); -} - -static inline struct mdfld_dsi_config * - mdfld_dsi_get_config(struct mdfld_dsi_connector *connector) -{ - if (!connector) - return NULL; - return (struct mdfld_dsi_config *)connector->private; -} - -static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config) -{ - struct mdfld_dsi_connector *dsi_connector; - - if (!config) - return NULL; - - dsi_connector = config->connector; - - if (!dsi_connector) - return NULL; - - return dsi_connector->pkg_sender; -} - -static inline struct mdfld_dsi_config * - mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder) -{ - if (!encoder) - return NULL; - return (struct mdfld_dsi_config *)encoder->private; -} - -static inline struct mdfld_dsi_connector * - mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder) -{ - struct mdfld_dsi_config *config; - - if (!encoder) - return NULL; - - config = mdfld_dsi_encoder_get_config(encoder); - if (!config) - return NULL; - - return config->connector; -} - -static inline void *mdfld_dsi_encoder_get_pkg_sender( - struct mdfld_dsi_encoder *encoder) -{ - struct mdfld_dsi_config *dsi_config; - - dsi_config = mdfld_dsi_encoder_get_config(encoder); - if (!dsi_config) - return NULL; - - return mdfld_dsi_get_pkg_sender(dsi_config); -} - -static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder) -{ - struct mdfld_dsi_connector *connector; - - if (!encoder) - return -1; - - connector = mdfld_dsi_encoder_get_connector(encoder); - if (!connector) - return -1; - return connector->pipe; -} - -/* Export functions */ -extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, - u32 gen_fifo_stat_reg, u32 fifo_stat); -extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, - int pipe); -extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, - int level); -extern void mdfld_dsi_output_init(struct drm_device *dev, - int pipe, - const struct panel_funcs *p_vid_funcs); -extern void 
mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, - int pipe); - -extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, - u32 *mode, bool hs); -extern int mdfld_dsi_panel_reset(struct drm_device *dev, int pipe); - -#endif /*__MDFLD_DSI_OUTPUT_H__*/ diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c deleted file mode 100644 index 6e0de83e9f7d..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c +++ /dev/null @@ -1,679 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * Jackie Li<yaodong.li@intel.com> - */ - -#include <linux/delay.h> -#include <linux/freezer.h> - -#include <video/mipi_display.h> - -#include "mdfld_dsi_dpi.h" -#include "mdfld_dsi_output.h" -#include "mdfld_dsi_pkg_sender.h" - -#define MDFLD_DSI_READ_MAX_COUNT 5000 - -enum { - MDFLD_DSI_PANEL_MODE_SLEEP = 0x1, -}; - -enum { - MDFLD_DSI_PKG_SENDER_FREE = 0x0, - MDFLD_DSI_PKG_SENDER_BUSY = 0x1, -}; - -static const char *const dsi_errors[] = { - "RX SOT Error", - "RX SOT Sync Error", - "RX EOT Sync Error", - "RX Escape Mode Entry Error", - "RX LP TX Sync Error", - "RX HS Receive Timeout Error", - "RX False Control Error", - "RX ECC Single Bit Error", - "RX ECC Multibit Error", - "RX Checksum Error", - "RX DSI Data Type Not Recognised", - "RX DSI VC ID Invalid", - "TX False Control Error", - "TX ECC Single Bit Error", - "TX ECC Multibit Error", - "TX Checksum Error", - "TX DSI Data Type Not Recognised", - "TX DSI VC ID invalid", - "High Contention", - "Low contention", - "DPI FIFO Under run", - "HS TX Timeout", - "LP RX Timeout", - "Turn Around ACK Timeout", - "ACK With No Error", - "RX Invalid TX Length", - "RX Prot Violation", - "HS Generic Write FIFO Full", - "LP Generic Write FIFO Full", - "Generic Read Data Avail", - "Special Packet Sent", - "Tearing Effect", -}; - -static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender, - u32 mask) -{ - struct drm_device *dev = sender->dev; - u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg; - int retry = 0xffff; - - while (retry--) { - if ((mask & REG_READ(gen_fifo_stat_reg)) == mask) - return 0; - udelay(100); - } - DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg)); - return -EIO; -} - -static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender) -{ - return wait_for_gen_fifo_empty(sender, (BIT(2) | 
BIT(10) | BIT(18) | - BIT(26) | BIT(27) | BIT(28))); -} - -static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender) -{ - return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26))); -} - -static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender) -{ - return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18))); -} - -static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask) -{ - u32 intr_stat_reg = sender->mipi_intr_stat_reg; - struct drm_device *dev = sender->dev; - - dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask); - - switch (mask) { - case BIT(0): - case BIT(1): - case BIT(2): - case BIT(3): - case BIT(4): - case BIT(5): - case BIT(6): - case BIT(7): - case BIT(8): - case BIT(9): - case BIT(10): - case BIT(11): - case BIT(12): - case BIT(13): - dev_dbg(sender->dev->dev, "No Action required\n"); - break; - case BIT(14): - /*wait for all fifo empty*/ - /*wait_for_all_fifos_empty(sender)*/ - break; - case BIT(15): - dev_dbg(sender->dev->dev, "No Action required\n"); - break; - case BIT(16): - break; - case BIT(17): - break; - case BIT(18): - case BIT(19): - dev_dbg(sender->dev->dev, "High/Low contention detected\n"); - /*wait for contention recovery time*/ - /*mdelay(10);*/ - /*wait for all fifo empty*/ - if (0) - wait_for_all_fifos_empty(sender); - break; - case BIT(20): - dev_dbg(sender->dev->dev, "No Action required\n"); - break; - case BIT(21): - /*wait for all fifo empty*/ - /*wait_for_all_fifos_empty(sender);*/ - break; - case BIT(22): - break; - case BIT(23): - case BIT(24): - case BIT(25): - case BIT(26): - case BIT(27): - dev_dbg(sender->dev->dev, "HS Gen fifo full\n"); - REG_WRITE(intr_stat_reg, mask); - wait_for_hs_fifos_empty(sender); - break; - case BIT(28): - dev_dbg(sender->dev->dev, "LP Gen fifo full\n"); - REG_WRITE(intr_stat_reg, mask); - wait_for_lp_fifos_empty(sender); - break; - case BIT(29): - case BIT(30): - case BIT(31): - dev_dbg(sender->dev->dev, "No Action required\n"); - break; - } - - if (mask & REG_READ(intr_stat_reg)) - dev_dbg(sender->dev->dev, - "Cannot clean interrupt 0x%08x\n", mask); - return 0; -} - -static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender) -{ - struct drm_device *dev = sender->dev; - u32 intr_stat_reg = sender->mipi_intr_stat_reg; - u32 mask; - u32 intr_stat; - int i; - int err = 0; - - intr_stat = REG_READ(intr_stat_reg); - - for (i = 0; i < 32; i++) { - mask = (0x00000001UL) << i; - if (intr_stat & mask) { - dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]); - err = handle_dsi_error(sender, mask); - if (err) - DRM_ERROR("Cannot handle error\n"); - } - } - return err; -} - -static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, - u8 cmd, u8 param, bool hs) -{ - struct drm_device *dev = sender->dev; - u32 ctrl_reg; - u32 val; - u8 virtual_channel = 0; - - if (hs) { - ctrl_reg = sender->mipi_hs_gen_ctrl_reg; - - /* FIXME: wait_for_hs_fifos_empty(sender); */ - } else { - ctrl_reg = sender->mipi_lp_gen_ctrl_reg; - - /* FIXME: wait_for_lp_fifos_empty(sender); */ - } - - val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) | - FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0); - - REG_WRITE(ctrl_reg, val); - - return 0; -} - -static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, - u8 *data, int len, bool hs) -{ - struct drm_device *dev = sender->dev; - u32 ctrl_reg; - u32 data_reg; - u32 val; - u8 *p; - u8 b1, b2, b3, b4; - u8 virtual_channel = 0; - int i; - - if (hs) { - ctrl_reg = sender->mipi_hs_gen_ctrl_reg; - data_reg 
= sender->mipi_hs_gen_data_reg; - - /* FIXME: wait_for_hs_fifos_empty(sender); */ - } else { - ctrl_reg = sender->mipi_lp_gen_ctrl_reg; - data_reg = sender->mipi_lp_gen_data_reg; - - /* FIXME: wait_for_lp_fifos_empty(sender); */ - } - - p = data; - for (i = 0; i < len / 4; i++) { - b1 = *p++; - b2 = *p++; - b3 = *p++; - b4 = *p++; - - REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1); - } - - i = len % 4; - if (i) { - b1 = 0; b2 = 0; b3 = 0; - - switch (i) { - case 3: - b1 = *p++; - b2 = *p++; - b3 = *p++; - break; - case 2: - b1 = *p++; - b2 = *p++; - break; - case 1: - b1 = *p++; - break; - } - - REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1); - } - - val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) | - FLD_VAL(data_type, 5, 0); - - REG_WRITE(ctrl_reg, val); - - return 0; -} - -static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type, - u8 *data, u16 len) -{ - u8 cmd; - - switch (data_type) { - case MIPI_DSI_DCS_SHORT_WRITE: - case MIPI_DSI_DCS_SHORT_WRITE_PARAM: - case MIPI_DSI_DCS_LONG_WRITE: - cmd = *data; - break; - default: - return 0; - } - - /*this prevents other package sending while doing msleep*/ - sender->status = MDFLD_DSI_PKG_SENDER_BUSY; - - /*wait for 120 milliseconds in case exit_sleep_mode just be sent*/ - if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) { - /*TODO: replace it with msleep later*/ - mdelay(120); - } - - if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) { - /*TODO: replace it with msleep later*/ - mdelay(120); - } - return 0; -} - -static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type, - u8 *data, u16 len) -{ - u8 cmd; - - switch (data_type) { - case MIPI_DSI_DCS_SHORT_WRITE: - case MIPI_DSI_DCS_SHORT_WRITE_PARAM: - case MIPI_DSI_DCS_LONG_WRITE: - cmd = *data; - break; - default: - return 0; - } - - /*update panel status*/ - if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) { - sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP; - /*TODO: replace it with msleep later*/ - mdelay(120); - } else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) { - sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP; - /*TODO: replace it with msleep later*/ - mdelay(120); - } else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) { - /*TODO: replace it with msleep later*/ - mdelay(5); - } - - sender->status = MDFLD_DSI_PKG_SENDER_FREE; - - return 0; -} - -static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type, - u8 *data, u16 len, bool hs) -{ - int ret; - - /*handle DSI error*/ - ret = dsi_error_handler(sender); - if (ret) { - DRM_ERROR("Error handling failed\n"); - return -EAGAIN; - } - - /* send pkg */ - if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) { - DRM_ERROR("sender is busy\n"); - return -EAGAIN; - } - - ret = send_pkg_prepare(sender, data_type, data, len); - if (ret) { - DRM_ERROR("send_pkg_prepare error\n"); - return ret; - } - - switch (data_type) { - case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: - case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: - case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: - case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: - case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: - case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: - case MIPI_DSI_DCS_SHORT_WRITE: - case MIPI_DSI_DCS_SHORT_WRITE_PARAM: - case MIPI_DSI_DCS_READ: - ret = send_short_pkg(sender, data_type, data[0], data[1], hs); - break; - case MIPI_DSI_GENERIC_LONG_WRITE: - case MIPI_DSI_DCS_LONG_WRITE: - ret = send_long_pkg(sender, data_type, data, len, hs); - break; - } - - send_pkg_done(sender, data_type, data, len); - - /*FIXME: should I query complete and fifo 
empty here?*/ - - return ret; -} - -int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, - u32 len, bool hs) -{ - unsigned long flags; - - if (!sender || !data || !len) { - DRM_ERROR("Invalid parameters\n"); - return -EINVAL; - } - - spin_lock_irqsave(&sender->lock, flags); - send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs); - spin_unlock_irqrestore(&sender->lock, flags); - - return 0; -} - -int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd, - u8 param, u8 param_num, bool hs) -{ - u8 data[2]; - unsigned long flags; - u8 data_type; - - if (!sender) { - DRM_ERROR("Invalid parameter\n"); - return -EINVAL; - } - - data[0] = cmd; - - if (param_num) { - data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM; - data[1] = param; - } else { - data_type = MIPI_DSI_DCS_SHORT_WRITE; - data[1] = 0; - } - - spin_lock_irqsave(&sender->lock, flags); - send_pkg(sender, data_type, data, sizeof(data), hs); - spin_unlock_irqrestore(&sender->lock, flags); - - return 0; -} - -int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0, - u8 param1, u8 param_num, bool hs) -{ - u8 data[2]; - unsigned long flags; - u8 data_type; - - if (!sender || param_num > 2) { - DRM_ERROR("Invalid parameter\n"); - return -EINVAL; - } - - switch (param_num) { - case 0: - data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM; - data[0] = 0; - data[1] = 0; - break; - case 1: - data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM; - data[0] = param0; - data[1] = 0; - break; - case 2: - data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM; - data[0] = param0; - data[1] = param1; - break; - } - - spin_lock_irqsave(&sender->lock, flags); - send_pkg(sender, data_type, data, sizeof(data), hs); - spin_unlock_irqrestore(&sender->lock, flags); - - return 0; -} - -int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, - u32 len, bool hs) -{ - unsigned long flags; - - if (!sender || !data || !len) { - DRM_ERROR("Invalid parameters\n"); - return -EINVAL; - } - - spin_lock_irqsave(&sender->lock, flags); - send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs); - spin_unlock_irqrestore(&sender->lock, flags); - - return 0; -} - -static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type, - u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs) -{ - unsigned long flags; - struct drm_device *dev; - int i; - u32 gen_data_reg; - int retry = MDFLD_DSI_READ_MAX_COUNT; - - if (!sender || !data_out || !len_out) { - DRM_ERROR("Invalid parameters\n"); - return -EINVAL; - } - - dev = sender->dev; - - /** - * do reading. 
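 * (In outline, the helper below takes sender->lock, clears the "read data
 * avail" bit, BIT(29), in the MIPI interrupt status register, sends the
 * read request packet, polls that bit up to MDFLD_DSI_READ_MAX_COUNT
 * times with udelay(100) between reads, and finally drains len_out dwords
 * from the HS or LP generic data FIFO into data_out.  Callers therefore
 * receive raw 32-bit FIFO words; a plausible single-byte DCS read, shown
 * here only as a sketch and not taken from this file, would be
 *   u32 val;
 *   mdfld_dsi_read_mcs(sender, MIPI_DCS_GET_POWER_MODE, &val, 1, false);
 * with the returned byte in the low bits of val.)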
- * 0) send out generic read request - * 1) polling read data avail interrupt - * 2) read data - */ - spin_lock_irqsave(&sender->lock, flags); - - REG_WRITE(sender->mipi_intr_stat_reg, BIT(29)); - - if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) - DRM_ERROR("Can NOT clean read data valid interrupt\n"); - - /*send out read request*/ - send_pkg(sender, data_type, data, len, hs); - - /*polling read data avail interrupt*/ - while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) { - udelay(100); - retry--; - } - - if (!retry) { - spin_unlock_irqrestore(&sender->lock, flags); - return -ETIMEDOUT; - } - - REG_WRITE(sender->mipi_intr_stat_reg, BIT(29)); - - /*read data*/ - if (hs) - gen_data_reg = sender->mipi_hs_gen_data_reg; - else - gen_data_reg = sender->mipi_lp_gen_data_reg; - - for (i = 0; i < len_out; i++) - *(data_out + i) = REG_READ(gen_data_reg); - - spin_unlock_irqrestore(&sender->lock, flags); - - return 0; -} - -int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd, - u32 *data, u16 len, bool hs) -{ - if (!sender || !data || !len) { - DRM_ERROR("Invalid parameters\n"); - return -EINVAL; - } - - return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1, - data, len, hs); -} - -int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector, - int pipe) -{ - struct mdfld_dsi_pkg_sender *pkg_sender; - struct mdfld_dsi_config *dsi_config = - mdfld_dsi_get_config(dsi_connector); - struct drm_device *dev = dsi_config->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - u32 mipi_val = 0; - - if (!dsi_connector) { - DRM_ERROR("Invalid parameter\n"); - return -EINVAL; - } - - pkg_sender = dsi_connector->pkg_sender; - - if (!pkg_sender || IS_ERR(pkg_sender)) { - pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender), - GFP_KERNEL); - if (!pkg_sender) { - DRM_ERROR("Create DSI pkg sender failed\n"); - return -ENOMEM; - } - dsi_connector->pkg_sender = (void *)pkg_sender; - } - - pkg_sender->dev = dev; - pkg_sender->dsi_connector = dsi_connector; - pkg_sender->pipe = pipe; - pkg_sender->pkg_num = 0; - pkg_sender->panel_mode = 0; - pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE; - - /*init regs*/ - /* FIXME: should just copy the regmap ptr ? */ - pkg_sender->dpll_reg = map->dpll; - pkg_sender->dspcntr_reg = map->cntr; - pkg_sender->pipeconf_reg = map->conf; - pkg_sender->dsplinoff_reg = map->linoff; - pkg_sender->dspsurf_reg = map->surf; - pkg_sender->pipestat_reg = map->status; - - pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe); - pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe); - pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe); - pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe); - pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe); - pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); - pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe); - pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe); - pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe); - pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe); - - /*init lock*/ - spin_lock_init(&pkg_sender->lock); - - if (mdfld_get_panel_type(dev, pipe) != TC35876X) { - /** - * For video mode, don't enable DPI timing output here, - * will init the DPI timing output during mode setting. 
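 * The block below only runs for panels other than TC35876X: it programs
 * MIPI_PORT_CONTROL with PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX (plus
 * bit 1 on pipe 0), reads the register back to post the write, and then
 * calls mdfld_dsi_controller_init() for the remaining controller setup.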
- */ - mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; - - if (pipe == 0) - mipi_val |= 0x2; - - REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val); - REG_READ(MIPI_PORT_CONTROL(pipe)); - - /* do dsi controller init */ - mdfld_dsi_controller_init(dsi_config, pipe); - } - - return 0; -} - -void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender) -{ - if (!sender || IS_ERR(sender)) - return; - - /*free*/ - kfree(sender); -} - - diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h deleted file mode 100644 index 0478a21c15d5..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Authors: - * Jackie Li<yaodong.li@intel.com> - */ -#ifndef __MDFLD_DSI_PKG_SENDER_H__ -#define __MDFLD_DSI_PKG_SENDER_H__ - -#include <linux/kthread.h> - -#define MDFLD_MAX_DCS_PARAM 8 - -struct mdfld_dsi_pkg_sender { - struct drm_device *dev; - struct mdfld_dsi_connector *dsi_connector; - u32 status; - u32 panel_mode; - - int pipe; - - spinlock_t lock; - - u32 pkg_num; - - /* Registers */ - u32 dpll_reg; - u32 dspcntr_reg; - u32 pipeconf_reg; - u32 pipestat_reg; - u32 dsplinoff_reg; - u32 dspsurf_reg; - - u32 mipi_intr_stat_reg; - u32 mipi_lp_gen_data_reg; - u32 mipi_hs_gen_data_reg; - u32 mipi_lp_gen_ctrl_reg; - u32 mipi_hs_gen_ctrl_reg; - u32 mipi_gen_fifo_stat_reg; - u32 mipi_data_addr_reg; - u32 mipi_data_len_reg; - u32 mipi_cmd_addr_reg; - u32 mipi_cmd_len_reg; -}; - -extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector, - int pipe); -extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender); -int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd, - u8 param, u8 param_num, bool hs); -int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, - u32 len, bool hs); -int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0, - u8 param1, u8 param_num, bool hs); -int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data, - u32 len, bool hs); -/* Read interfaces */ -int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd, - u32 *data, u16 len, bool hs); - -#endif diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c deleted file mode 100644 index 462aba8f7528..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ /dev/null @@ -1,966 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright © 2006-2007 Intel Corporation - * - * Authors: - * Eric Anholt <eric@anholt.net> - */ - -#include <linux/delay.h> -#include <linux/i2c.h> -#include <linux/pm_runtime.h> - -#include <drm/drm_crtc.h> -#include <drm/drm_fourcc.h> - -#include "framebuffer.h" -#include "gma_display.h" -#include "mdfld_dsi_output.h" -#include "mdfld_output.h" -#include "psb_intel_reg.h" - -/* Hardcoded currently */ -static int ksel = KSEL_CRYSTAL_19; - -struct psb_intel_range_t { - int min, max; -}; - -struct mrst_limit_t { - struct psb_intel_range_t dot, m, p1; -}; - -struct mrst_clock_t { - /* derived values */ - int dot; - int m; - int p1; -}; - -#define COUNT_MAX 0x10000000 - -void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - int count, temp; - - switch (pipe) { - case 0: - case 1: - case 2: - break; - default: - DRM_ERROR("Illegal Pipe Number.\n"); - return; - } - - /* FIXME JLIU7_PO */ - gma_wait_for_vblank(dev); - return; - - /* Wait for for the pipe disable to take effect. */ - for (count = 0; count < COUNT_MAX; count++) { - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_PIPE_STATE) == 0) - break; - } -} - -void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - int count, temp; - - switch (pipe) { - case 0: - case 1: - case 2: - break; - default: - DRM_ERROR("Illegal Pipe Number.\n"); - return; - } - - /* FIXME JLIU7_PO */ - gma_wait_for_vblank(dev); - return; - - /* Wait for for the pipe enable to take effect. 
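   The loop below polls PIPEACONF_PIPE_STATE for up to COUNT_MAX register
   reads, mirroring the disable path above.  As in mdfldWaitForPipeDisable(),
   the gma_wait_for_vblank() call and early return a few lines up (the
   FIXME JLIU7_PO workaround) mean the poll is never actually reached.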
*/ - for (count = 0; count < COUNT_MAX; count++) { - temp = REG_READ(map->conf); - if (temp & PIPEACONF_PIPE_STATE) - break; - } -} - -/* - * Return the pipe currently connected to the panel fitter, - * or -1 if the panel fitter is not present or not in use - */ -static int psb_intel_panel_fitter_pipe(struct drm_device *dev) -{ - u32 pfit_control; - - pfit_control = REG_READ(PFIT_CONTROL); - - /* See if the panel fitter is in use */ - if ((pfit_control & PFIT_ENABLE) == 0) - return -1; - - /* 965 can place panel fitter on either pipe */ - return (pfit_control >> 29) & 0x3; -} - -static int check_fb(struct drm_framebuffer *fb) -{ - if (!fb) - return 0; - - switch (fb->format->cpp[0] * 8) { - case 8: - case 16: - case 24: - case 32: - return 0; - default: - DRM_ERROR("Unknown color depth\n"); - return -EINVAL; - } -} - -static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->primary->fb; - struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - int pipe = gma_crtc->pipe; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - unsigned long start, offset; - u32 dspcntr; - int ret; - - dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe); - - /* no fb bound */ - if (!fb) { - dev_dbg(dev->dev, "No FB bound\n"); - return 0; - } - - ret = check_fb(fb); - if (ret) - return ret; - - if (pipe > 2) { - DRM_ERROR("Illegal Pipe Number.\n"); - return -EINVAL; - } - - if (!gma_power_begin(dev, true)) - return 0; - - start = to_gtt_range(fb->obj[0])->offset; - offset = y * fb->pitches[0] + x * fb->format->cpp[0]; - - REG_WRITE(map->stride, fb->pitches[0]); - dspcntr = REG_READ(map->cntr); - dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - - switch (fb->format->cpp[0] * 8) { - case 8: - dspcntr |= DISPPLANE_8BPP; - break; - case 16: - if (fb->format->depth == 15) - dspcntr |= DISPPLANE_15_16BPP; - else - dspcntr |= DISPPLANE_16BPP; - break; - case 24: - case 32: - dspcntr |= DISPPLANE_32BPP_NO_ALPHA; - break; - } - REG_WRITE(map->cntr, dspcntr); - - dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n", - start, offset, x, y); - REG_WRITE(map->linoff, offset); - REG_READ(map->linoff); - REG_WRITE(map->surf, start); - REG_READ(map->surf); - - gma_power_end(dev); - - return 0; -} - -/* - * Disable the pipe, plane and pll. - * - */ -void mdfld_disable_crtc(struct drm_device *dev, int pipe) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - u32 temp; - - dev_dbg(dev->dev, "pipe = %d\n", pipe); - - - if (pipe != 1) - mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe), - HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY); - - /* Disable display plane */ - temp = REG_READ(map->cntr); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - REG_WRITE(map->cntr, - temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(map->base, REG_READ(map->base)); - REG_READ(map->base); - } - - /* FIXME_JLIU7 MDFLD_PO revisit */ - - /* Next, disable display pipes */ - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_ENABLE) != 0) { - temp &= ~PIPEACONF_ENABLE; - temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; - REG_WRITE(map->conf, temp); - REG_READ(map->conf); - - /* Wait for for the pipe disable to take effect. 
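   Teardown order used here: the plane is disabled and flushed with a dummy
   write to the base register, the pipe is then turned off with
   PIPECONF_PLANE_OFF and PIPECONF_CURSOR_OFF set, this wait runs, and the
   DPLL further below is only stopped and power-gated once this is pipe 1
   or neither pipe A nor C is still enabled.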
*/ - mdfldWaitForPipeDisable(dev, pipe); - } - - temp = REG_READ(map->dpll); - if (temp & DPLL_VCO_ENABLE) { - if ((pipe != 1 && - !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) - & PIPEACONF_ENABLE)) || pipe == 1) { - temp &= ~(DPLL_VCO_ENABLE); - REG_WRITE(map->dpll, temp); - REG_READ(map->dpll); - /* Wait for the clocks to turn off. */ - /* FIXME_MDFLD PO may need more delay */ - udelay(500); - - if (!(temp & MDFLD_PWR_GATE_EN)) { - /* gating power of DPLL */ - REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(5000); - } - } - } - -} - -/* - * Sets the power management mode of the pipe and plane. - * - * This code should probably grow support for turning the cursor off and back - * on appropriately at the same time as we're turning the pipe off/on. - */ -static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - int pipe = gma_crtc->pipe; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - u32 pipeconf = dev_priv->pipeconf[pipe]; - u32 temp; - int timeout = 0; - - dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe); - - /* Note: Old code uses pipe a stat for pipe b but that appears - to be a bug */ - - if (!gma_power_begin(dev, true)) - return; - - /* XXX: When our outputs are all unaware of DPMS modes other than off - * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. - */ - switch (mode) { - case DRM_MODE_DPMS_ON: - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - /* Enable the DPLL */ - temp = REG_READ(map->dpll); - - if ((temp & DPLL_VCO_ENABLE) == 0) { - /* When ungating power of DPLL, needs to wait 0.5us - before enable the VCO */ - if (temp & MDFLD_PWR_GATE_EN) { - temp &= ~MDFLD_PWR_GATE_EN; - REG_WRITE(map->dpll, temp); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - } - - REG_WRITE(map->dpll, temp); - REG_READ(map->dpll); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - - REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); - REG_READ(map->dpll); - - /** - * wait for DSI PLL to lock - * NOTE: only need to poll status of pipe 0 and pipe 1, - * since both MIPI pipes share the same PLL. - */ - while ((pipe != 2) && (timeout < 20000) && - !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) { - udelay(150); - timeout++; - } - } - - /* Enable the plane */ - temp = REG_READ(map->cntr); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - REG_WRITE(map->cntr, - temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(map->base, REG_READ(map->base)); - } - - /* Enable the pipe */ - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_ENABLE) == 0) { - REG_WRITE(map->conf, pipeconf); - - /* Wait for for the pipe enable to take effect. 
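   Power-up order in this DPMS_ON path: the DPLL is ungated and its VCO
   enabled, with a poll for PIPECONF_DSIPLL_LOCK on pipes 0 and 1 (which
   share the DSI PLL), then the plane, then the pipe, then this wait.  The
   block that follows is the workaround for sighting 3741701 on pipe A or
   C: if no vblank is seen within 100 ms the controller is shut down via
   register 0xb048 and brought back up again.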
*/ - mdfldWaitForPipeEnable(dev, pipe); - } - - /*workaround for sighting 3741701 Random X blank display*/ - /*perform w/a in video mode only on pipe A or C*/ - if (pipe == 0 || pipe == 2) { - REG_WRITE(map->status, REG_READ(map->status)); - msleep(100); - if (PIPE_VBLANK_STATUS & REG_READ(map->status)) - dev_dbg(dev->dev, "OK"); - else { - dev_dbg(dev->dev, "STUCK!!!!"); - /*shutdown controller*/ - temp = REG_READ(map->cntr); - REG_WRITE(map->cntr, - temp & ~DISPLAY_PLANE_ENABLE); - REG_WRITE(map->base, REG_READ(map->base)); - /*mdfld_dsi_dpi_shut_down(dev, pipe);*/ - REG_WRITE(0xb048, 1); - msleep(100); - temp = REG_READ(map->conf); - temp &= ~PIPEACONF_ENABLE; - REG_WRITE(map->conf, temp); - msleep(100); /*wait for pipe disable*/ - REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0); - msleep(100); - REG_WRITE(0xb004, REG_READ(0xb004)); - /* try to bring the controller back up again*/ - REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1); - temp = REG_READ(map->cntr); - REG_WRITE(map->cntr, - temp | DISPLAY_PLANE_ENABLE); - REG_WRITE(map->base, REG_READ(map->base)); - /*mdfld_dsi_dpi_turn_on(dev, pipe);*/ - REG_WRITE(0xb048, 2); - msleep(100); - temp = REG_READ(map->conf); - temp |= PIPEACONF_ENABLE; - REG_WRITE(map->conf, temp); - } - } - - gma_crtc_load_lut(crtc); - - /* Give the overlay scaler a chance to enable - if it's on this pipe */ - /* psb_intel_crtc_dpms_video(crtc, true); TODO */ - - break; - case DRM_MODE_DPMS_OFF: - /* Give the overlay scaler a chance to disable - * if it's on this pipe */ - /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ - if (pipe != 1) - mdfld_dsi_gen_fifo_ready(dev, - MIPI_GEN_FIFO_STAT_REG(pipe), - HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY); - - /* Disable the VGA plane that we never use */ - REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); - - /* Disable display plane */ - temp = REG_READ(map->cntr); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - REG_WRITE(map->cntr, - temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - REG_WRITE(map->base, REG_READ(map->base)); - REG_READ(map->base); - } - - /* Next, disable display pipes */ - temp = REG_READ(map->conf); - if ((temp & PIPEACONF_ENABLE) != 0) { - temp &= ~PIPEACONF_ENABLE; - temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF; - REG_WRITE(map->conf, temp); - REG_READ(map->conf); - - /* Wait for for the pipe disable to take effect. */ - mdfldWaitForPipeDisable(dev, pipe); - } - - temp = REG_READ(map->dpll); - if (temp & DPLL_VCO_ENABLE) { - if ((pipe != 1 && !((REG_READ(PIPEACONF) - | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE)) - || pipe == 1) { - temp &= ~(DPLL_VCO_ENABLE); - REG_WRITE(map->dpll, temp); - REG_READ(map->dpll); - /* Wait for the clocks to turn off. 
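   As in mdfld_disable_crtc(), this branch only runs when the CRTC is pipe
   1 or when neither pipe A nor C is still enabled; the udelay(500) below
   is the placeholder settle time the FIXME refers to.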
*/ - /* FIXME_MDFLD PO may need more delay */ - udelay(500); - } - } - break; - } - gma_power_end(dev); -} - - -#define MDFLD_LIMT_DPLL_19 0 -#define MDFLD_LIMT_DPLL_25 1 -#define MDFLD_LIMT_DPLL_83 2 -#define MDFLD_LIMT_DPLL_100 3 -#define MDFLD_LIMT_DSIPLL_19 4 -#define MDFLD_LIMT_DSIPLL_25 5 -#define MDFLD_LIMT_DSIPLL_83 6 -#define MDFLD_LIMT_DSIPLL_100 7 - -#define MDFLD_DOT_MIN 19750 -#define MDFLD_DOT_MAX 120000 -#define MDFLD_DPLL_M_MIN_19 113 -#define MDFLD_DPLL_M_MAX_19 155 -#define MDFLD_DPLL_P1_MIN_19 2 -#define MDFLD_DPLL_P1_MAX_19 10 -#define MDFLD_DPLL_M_MIN_25 101 -#define MDFLD_DPLL_M_MAX_25 130 -#define MDFLD_DPLL_P1_MIN_25 2 -#define MDFLD_DPLL_P1_MAX_25 10 -#define MDFLD_DPLL_M_MIN_83 64 -#define MDFLD_DPLL_M_MAX_83 64 -#define MDFLD_DPLL_P1_MIN_83 2 -#define MDFLD_DPLL_P1_MAX_83 2 -#define MDFLD_DPLL_M_MIN_100 64 -#define MDFLD_DPLL_M_MAX_100 64 -#define MDFLD_DPLL_P1_MIN_100 2 -#define MDFLD_DPLL_P1_MAX_100 2 -#define MDFLD_DSIPLL_M_MIN_19 131 -#define MDFLD_DSIPLL_M_MAX_19 175 -#define MDFLD_DSIPLL_P1_MIN_19 3 -#define MDFLD_DSIPLL_P1_MAX_19 8 -#define MDFLD_DSIPLL_M_MIN_25 97 -#define MDFLD_DSIPLL_M_MAX_25 140 -#define MDFLD_DSIPLL_P1_MIN_25 3 -#define MDFLD_DSIPLL_P1_MAX_25 9 -#define MDFLD_DSIPLL_M_MIN_83 33 -#define MDFLD_DSIPLL_M_MAX_83 92 -#define MDFLD_DSIPLL_P1_MIN_83 2 -#define MDFLD_DSIPLL_P1_MAX_83 3 -#define MDFLD_DSIPLL_M_MIN_100 97 -#define MDFLD_DSIPLL_M_MAX_100 140 -#define MDFLD_DSIPLL_P1_MIN_100 3 -#define MDFLD_DSIPLL_P1_MAX_100 9 - -static const struct mrst_limit_t mdfld_limits[] = { - { /* MDFLD_LIMT_DPLL_19 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19}, - .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19}, - }, - { /* MDFLD_LIMT_DPLL_25 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25}, - .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25}, - }, - { /* MDFLD_LIMT_DPLL_83 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83}, - .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83}, - }, - { /* MDFLD_LIMT_DPLL_100 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100}, - .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100}, - }, - { /* MDFLD_LIMT_DSIPLL_19 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19}, - .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19}, - }, - { /* MDFLD_LIMT_DSIPLL_25 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25}, - .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25}, - }, - { /* MDFLD_LIMT_DSIPLL_83 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83}, - .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83}, - }, - { /* MDFLD_LIMT_DSIPLL_100 */ - .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX}, - .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100}, - .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100}, - }, -}; - -#define MDFLD_M_MIN 21 -#define MDFLD_M_MAX 180 -static const u32 mdfld_m_converts[] = { -/* M configuration table from 9-bit LFSR table */ - 224, 368, 440, 220, 366, 
439, 219, 365, 182, 347, /* 21 - 30 */ - 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */ - 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */ - 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */ - 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */ - 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ - 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */ - 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */ - 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */ - 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */ - 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */ - 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */ - 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */ - 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */ - 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */ - 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */ -}; - -static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc) -{ - const struct mrst_limit_t *limit = NULL; - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - - if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI) - || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) { - if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) - limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19]; - else if (ksel == KSEL_BYPASS_25) - limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25]; - else if ((ksel == KSEL_BYPASS_83_100) && - (dev_priv->core_freq == 166)) - limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83]; - else if ((ksel == KSEL_BYPASS_83_100) && - (dev_priv->core_freq == 100 || - dev_priv->core_freq == 200)) - limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100]; - } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { - if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) - limit = &mdfld_limits[MDFLD_LIMT_DPLL_19]; - else if (ksel == KSEL_BYPASS_25) - limit = &mdfld_limits[MDFLD_LIMT_DPLL_25]; - else if ((ksel == KSEL_BYPASS_83_100) && - (dev_priv->core_freq == 166)) - limit = &mdfld_limits[MDFLD_LIMT_DPLL_83]; - else if ((ksel == KSEL_BYPASS_83_100) && - (dev_priv->core_freq == 100 || - dev_priv->core_freq == 200)) - limit = &mdfld_limits[MDFLD_LIMT_DPLL_100]; - } else { - limit = NULL; - dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n"); - } - - return limit; -} - -/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ -static void mdfld_clock(int refclk, struct mrst_clock_t *clock) -{ - clock->dot = (refclk * clock->m) / clock->p1; -} - -/* - * Returns a set of divisors for the desired target clock with the given refclk, - * or FALSE. 
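 * (The search below is a brute-force scan over the m and p1 ranges
 * returned by mdfld_limit(), minimizing the error of
 * dot = refclk * m / p1 against the target.  As an illustrative example,
 * not taken from this file: with refclk = 19200 and a target of 798336,
 * the MDFLD_LIMT_DSIPLL_19 ranges (m 131..175, p1 3..8) settle on
 * m = 166, p1 = 4, since 19200 * 166 / 4 = 796800 is the closest
 * reachable value.)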
Divisor values are the actual divisors for - */ -static bool -mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk, - struct mrst_clock_t *best_clock) -{ - struct mrst_clock_t clock; - const struct mrst_limit_t *limit = mdfld_limit(crtc); - int err = target; - - memset(best_clock, 0, sizeof(*best_clock)); - - for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { - for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; - clock.p1++) { - int this_err; - - mdfld_clock(refclk, &clock); - - this_err = abs(clock.dot - target); - if (this_err < err) { - *best_clock = clock; - err = this_err; - } - } - } - return err != target; -} - -static int mdfld_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) -{ - struct drm_device *dev = crtc->dev; - struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - struct drm_psb_private *dev_priv = dev->dev_private; - int pipe = gma_crtc->pipe; - const struct psb_offset *map = &dev_priv->regmap[pipe]; - int refclk = 0; - int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, - clk_tmp = 0; - struct mrst_clock_t clock; - bool ok; - u32 dpll = 0, fp = 0; - bool is_mipi = false, is_mipi2 = false, is_hdmi = false; - struct drm_mode_config *mode_config = &dev->mode_config; - struct gma_encoder *gma_encoder = NULL; - uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; - struct drm_encoder *encoder; - struct drm_connector *connector; - int timeout = 0; - int ret; - - dev_dbg(dev->dev, "pipe = 0x%x\n", pipe); - - ret = check_fb(crtc->primary->fb); - if (ret) - return ret; - - dev_dbg(dev->dev, "adjusted_hdisplay = %d\n", - adjusted_mode->hdisplay); - dev_dbg(dev->dev, "adjusted_vdisplay = %d\n", - adjusted_mode->vdisplay); - dev_dbg(dev->dev, "adjusted_hsync_start = %d\n", - adjusted_mode->hsync_start); - dev_dbg(dev->dev, "adjusted_hsync_end = %d\n", - adjusted_mode->hsync_end); - dev_dbg(dev->dev, "adjusted_htotal = %d\n", - adjusted_mode->htotal); - dev_dbg(dev->dev, "adjusted_vsync_start = %d\n", - adjusted_mode->vsync_start); - dev_dbg(dev->dev, "adjusted_vsync_end = %d\n", - adjusted_mode->vsync_end); - dev_dbg(dev->dev, "adjusted_vtotal = %d\n", - adjusted_mode->vtotal); - dev_dbg(dev->dev, "adjusted_clock = %d\n", - adjusted_mode->clock); - dev_dbg(dev->dev, "hdisplay = %d\n", - mode->hdisplay); - dev_dbg(dev->dev, "vdisplay = %d\n", - mode->vdisplay); - - if (!gma_power_begin(dev, true)) - return 0; - - memcpy(&gma_crtc->saved_mode, mode, - sizeof(struct drm_display_mode)); - memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode, - sizeof(struct drm_display_mode)); - - list_for_each_entry(connector, &mode_config->connector_list, head) { - encoder = connector->encoder; - if (!encoder) - continue; - - if (encoder->crtc != crtc) - continue; - - gma_encoder = gma_attached_encoder(connector); - - switch (gma_encoder->type) { - case INTEL_OUTPUT_MIPI: - is_mipi = true; - break; - case INTEL_OUTPUT_MIPI2: - is_mipi2 = true; - break; - case INTEL_OUTPUT_HDMI: - is_hdmi = true; - break; - } - } - - /* Disable the VGA plane that we never use */ - REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); - - /* Disable the panel fitter if it was on our pipe */ - if (psb_intel_panel_fitter_pipe(dev) == pipe) - REG_WRITE(PFIT_CONTROL, 0); - - /* pipesrc and dspsize control the size that is scaled from, - * which should always be the user's requested size. 
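 * On pipe B (HDMI) the sprite size register is clamped to the smaller of
 * the user mode and the adjusted mode while pipesrc still uses the user
 * mode; pipes A and C program both from the user mode.  Centering for
 * DRM_MODE_SCALE_NO_SCALE is then emulated further down by offsetting the
 * blank and sync start/end values, since Medfield has no register-level
 * centering support in the panel fitter.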
- */ - if (pipe == 1) { - /* FIXME: To make HDMI display with 864x480 (TPO), 480x864 - * (PYR) or 480x854 (TMD), set the sprite width/height and - * souce image size registers with the adjusted mode for - * pipe B. - */ - - /* - * The defined sprite rectangle must always be completely - * contained within the displayable area of the screen image - * (frame buffer). - */ - REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16) - | (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1)); - /* Set the CRTC with encoder mode. */ - REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16) - | (mode->crtc_vdisplay - 1)); - } else { - REG_WRITE(map->size, - ((mode->crtc_vdisplay - 1) << 16) | - (mode->crtc_hdisplay - 1)); - REG_WRITE(map->src, - ((mode->crtc_hdisplay - 1) << 16) | - (mode->crtc_vdisplay - 1)); - } - - REG_WRITE(map->pos, 0); - - if (gma_encoder) - drm_object_property_get_value(&connector->base, - dev->mode_config.scaling_mode_property, &scalingType); - - if (scalingType == DRM_MODE_SCALE_NO_SCALE) { - /* Medfield doesn't have register support for centering so we - * need to mess with the h/vblank and h/vsync start and ends - * to get centering - */ - int offsetX = 0, offsetY = 0; - - offsetX = (adjusted_mode->crtc_hdisplay - - mode->crtc_hdisplay) / 2; - offsetY = (adjusted_mode->crtc_vdisplay - - mode->crtc_vdisplay) / 2; - - REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) | - ((adjusted_mode->crtc_htotal - 1) << 16)); - REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) | - ((adjusted_mode->crtc_vtotal - 1) << 16)); - REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - - offsetX - 1) | - ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16)); - REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - - offsetX - 1) | - ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16)); - REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - - offsetY - 1) | - ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16)); - REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - - offsetY - 1) | - ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16)); - } else { - REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | - ((adjusted_mode->crtc_htotal - 1) << 16)); - REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | - ((adjusted_mode->crtc_vtotal - 1) << 16)); - REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | - ((adjusted_mode->crtc_hblank_end - 1) << 16)); - REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | - ((adjusted_mode->crtc_hsync_end - 1) << 16)); - REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | - ((adjusted_mode->crtc_vblank_end - 1) << 16)); - REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | - ((adjusted_mode->crtc_vsync_end - 1) << 16)); - } - - /* Flush the plane changes */ - { - const struct drm_crtc_helper_funcs *crtc_funcs = - crtc->helper_private; - crtc_funcs->mode_set_base(crtc, x, y, old_fb); - } - - /* setup pipeconf */ - dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */ - - /* Set up the display plane register */ - dev_priv->dspcntr[pipe] = REG_READ(map->cntr); - dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS; - dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE; - - if (is_mipi2) - goto mrst_crtc_mode_set_exit; - clk = adjusted_mode->clock; - - if (is_hdmi) { - if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) { - refclk = 19200; - - if (is_mipi || is_mipi2) - clk_n = 1, clk_p2 = 8; - else if (is_hdmi) 
- clk_n = 1, clk_p2 = 10; - } else if (ksel == KSEL_BYPASS_25) { - refclk = 25000; - - if (is_mipi || is_mipi2) - clk_n = 1, clk_p2 = 8; - else if (is_hdmi) - clk_n = 1, clk_p2 = 10; - } else if ((ksel == KSEL_BYPASS_83_100) && - dev_priv->core_freq == 166) { - refclk = 83000; - - if (is_mipi || is_mipi2) - clk_n = 4, clk_p2 = 8; - else if (is_hdmi) - clk_n = 4, clk_p2 = 10; - } else if ((ksel == KSEL_BYPASS_83_100) && - (dev_priv->core_freq == 100 || - dev_priv->core_freq == 200)) { - refclk = 100000; - if (is_mipi || is_mipi2) - clk_n = 4, clk_p2 = 8; - else if (is_hdmi) - clk_n = 4, clk_p2 = 10; - } - - if (is_mipi) - clk_byte = dev_priv->bpp / 8; - else if (is_mipi2) - clk_byte = dev_priv->bpp2 / 8; - - clk_tmp = clk * clk_n * clk_p2 * clk_byte; - - dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n", - clk, clk_n, clk_p2); - dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n", - adjusted_mode->clock, clk_tmp); - - ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock); - - if (!ok) { - DRM_ERROR - ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n"); - } else { - m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)]; - - dev_dbg(dev->dev, "dot clock = %d," - "m = %d, p1 = %d, m_conv = %d.\n", - clock.dot, clock.m, - clock.p1, m_conv); - } - - dpll = REG_READ(map->dpll); - - if (dpll & DPLL_VCO_ENABLE) { - dpll &= ~DPLL_VCO_ENABLE; - REG_WRITE(map->dpll, dpll); - REG_READ(map->dpll); - - /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */ - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - - /* reset M1, N1 & P1 */ - REG_WRITE(map->fp0, 0); - dpll &= ~MDFLD_P1_MASK; - REG_WRITE(map->dpll, dpll); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - } - - /* When ungating power of DPLL, needs to wait 0.5us before - * enable the VCO */ - if (dpll & MDFLD_PWR_GATE_EN) { - dpll &= ~MDFLD_PWR_GATE_EN; - REG_WRITE(map->dpll, dpll); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - } - dpll = 0; - - if (is_hdmi) - dpll |= MDFLD_VCO_SEL; - - fp = (clk_n / 2) << 16; - fp |= m_conv; - - /* compute bitmask from p1 value */ - dpll |= (1 << (clock.p1 - 2)) << 17; - - } else { - dpll = 0x00800000; - fp = 0x000000c1; - } - - REG_WRITE(map->fp0, fp); - REG_WRITE(map->dpll, dpll); - /* FIXME_MDFLD PO - change 500 to 1 after PO */ - udelay(500); - - dpll |= DPLL_VCO_ENABLE; - REG_WRITE(map->dpll, dpll); - REG_READ(map->dpll); - - /* wait for DSI PLL to lock */ - while (timeout < 20000 && - !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) { - udelay(150); - timeout++; - } - - if (is_mipi) - goto mrst_crtc_mode_set_exit; - - dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi); - - REG_WRITE(map->conf, dev_priv->pipeconf[pipe]); - REG_READ(map->conf); - - /* Wait for for the pipe enable to take effect. 
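   Despite the wording here, no pipe-state poll follows: the pipeconf write
   just above enables the pipe, the plane control register is then written
   from the cached dspcntr value, and gma_wait_for_vblank() serves as the
   settle time before the exit label.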
*/ - REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]); - gma_wait_for_vblank(dev); - -mrst_crtc_mode_set_exit: - - gma_power_end(dev); - - return 0; -} - -const struct drm_crtc_helper_funcs mdfld_helper_funcs = { - .dpms = mdfld_crtc_dpms, - .mode_set = mdfld_crtc_mode_set, - .mode_set_base = mdfld__intel_pipe_set_base, - .prepare = gma_crtc_prepare, - .commit = gma_crtc_commit, -}; diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c deleted file mode 100644 index c95966bb0c96..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_output.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicensen - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Authors: - * Thomas Eaton <thomas.g.eaton@intel.com> - * Scott Rowe <scott.m.rowe@intel.com> -*/ - -#include "mdfld_output.h" -#include "mdfld_dsi_dpi.h" -#include "mdfld_dsi_output.h" - -#include "tc35876x-dsi-lvds.h" - -int mdfld_get_panel_type(struct drm_device *dev, int pipe) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - return dev_priv->mdfld_panel_id; -} - -static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe, - int p_type) -{ - switch (p_type) { - case TPO_VID: - mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs); - break; - case TC35876X: - tc35876x_init(dev); - mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs); - break; - case TMD_VID: - mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs); - break; - case HDMI: -/* if (dev_priv->mdfld_hdmi_present) - mdfld_hdmi_init(dev, &dev_priv->mode_dev); */ - break; - } -} - - -int mdfld_output_init(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - - /* FIXME: hardcoded for now */ - dev_priv->mdfld_panel_id = TC35876X; - /* MIPI panel 1 */ - mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id); - /* HDMI panel */ - mdfld_init_panel(dev, 1, HDMI); - return 0; -} - diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h deleted file mode 100644 index 37a516cc56be..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_output.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicensen - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Authors: - * Thomas Eaton <thomas.g.eaton@intel.com> - * Scott Rowe <scott.m.rowe@intel.com> -*/ - -#ifndef MDFLD_OUTPUT_H -#define MDFLD_OUTPUT_H - -#include "psb_drv.h" - -#define TPO_PANEL_WIDTH 84 -#define TPO_PANEL_HEIGHT 46 -#define TMD_PANEL_WIDTH 39 -#define TMD_PANEL_HEIGHT 71 - -struct mdfld_dsi_config; - -enum panel_type { - TPO_VID, - TMD_VID, - HDMI, - TC35876X, -}; - -struct panel_info { - u32 width_mm; - u32 height_mm; - /* Other info */ -}; - -struct panel_funcs { - const struct drm_encoder_helper_funcs *encoder_helper_funcs; - struct drm_display_mode * (*get_config_mode)(struct drm_device *); - int (*get_panel_info)(struct drm_device *, int, struct panel_info *); - int (*reset)(struct drm_device *, int); - void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe); -}; - -int mdfld_output_init(struct drm_device *dev); - -struct backlight_device *mdfld_get_backlight_device(void); -int mdfld_set_brightness(struct backlight_device *bd); - -int mdfld_get_panel_type(struct drm_device *dev, int pipe); - -extern const struct drm_crtc_helper_funcs mdfld_helper_funcs; - -extern const struct panel_funcs mdfld_tmd_vid_funcs; -extern const struct panel_funcs mdfld_tpo_vid_funcs; - -extern void mdfld_disable_crtc(struct drm_device *dev, int pipe); -extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe); -extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe); -#endif diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c deleted file mode 100644 index 25e897b98f86..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * Jim Liu <jim.liu@intel.com> - * Jackie Li<yaodong.li@intel.com> - * Gideon Eaton <eaton. 
- * Scott Rowe <scott.m.rowe@intel.com> - */ - -#include <linux/delay.h> - -#include "mdfld_dsi_dpi.h" -#include "mdfld_dsi_pkg_sender.h" - -static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev) -{ - struct drm_display_mode *mode; - struct drm_psb_private *dev_priv = dev->dev_private; - struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD; - bool use_gct = false; /*Disable GCT for now*/ - - mode = kzalloc(sizeof(*mode), GFP_KERNEL); - if (!mode) - return NULL; - - if (use_gct) { - mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo; - mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo; - mode->hsync_start = mode->hdisplay + \ - ((ti->hsync_offset_hi << 8) | \ - ti->hsync_offset_lo); - mode->hsync_end = mode->hsync_start + \ - ((ti->hsync_pulse_width_hi << 8) | \ - ti->hsync_pulse_width_lo); - mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \ - ti->hblank_lo); - mode->vsync_start = \ - mode->vdisplay + ((ti->vsync_offset_hi << 8) | \ - ti->vsync_offset_lo); - mode->vsync_end = \ - mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \ - ti->vsync_pulse_width_lo); - mode->vtotal = mode->vdisplay + \ - ((ti->vblank_hi << 8) | ti->vblank_lo); - mode->clock = ti->pixel_clock * 10; - - dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay); - dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay); - dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start); - dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end); - dev_dbg(dev->dev, "htotal is %d\n", mode->htotal); - dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start); - dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end); - dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal); - dev_dbg(dev->dev, "clock is %d\n", mode->clock); - } else { - mode->hdisplay = 480; - mode->vdisplay = 854; - mode->hsync_start = 487; - mode->hsync_end = 490; - mode->htotal = 499; - mode->vsync_start = 861; - mode->vsync_end = 865; - mode->vtotal = 873; - mode->clock = 33264; - } - - drm_mode_set_name(mode); - drm_mode_set_crtcinfo(mode, 0); - - mode->type |= DRM_MODE_TYPE_PREFERRED; - - return mode; -} - -static int tmd_vid_get_panel_info(struct drm_device *dev, - int pipe, - struct panel_info *pi) -{ - if (!dev || !pi) - return -EINVAL; - - pi->width_mm = TMD_PANEL_WIDTH; - pi->height_mm = TMD_PANEL_HEIGHT; - - return 0; -} - -/* ************************************************************************* *\ - * FUNCTION: mdfld_init_TMD_MIPI - * - * DESCRIPTION: This function is called only by mrst_dsi_mode_set and - * restore_display_registers. since this function does not - * acquire the mutex, it is important that the calling function - * does! -\* ************************************************************************* */ - -/* FIXME: make the below data u8 instead of u32; note byte order! 
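   On a little-endian host send_long_pkg() reads these words out of memory
   least-significant byte first, so 0x000101ef goes on the wire as
   ef 01 01 00.  A u8 form of the same command, sketched here as one way
   the FIXME could be resolved rather than anything present in this file,
   would be
     static const u8 tmd_cmd_enable_lane_switch[] = { 0xef, 0x01, 0x01, 0x00 };
   sent with
     mdfld_dsi_send_gen_long(sender, (u8 *)tmd_cmd_enable_lane_switch,
                             sizeof(tmd_cmd_enable_lane_switch), false);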
*/ -static u32 tmd_cmd_mcap_off[] = {0x000000b2}; -static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef}; -static u32 tmd_cmd_set_lane_num[] = {0x006360ef}; -static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef}; -static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef}; -static u32 tmd_cmd_set_mode[] = {0x000000b3}; -static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef}; -static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df}; -static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055}; -static u32 tmd_cmd_set_video_mode[] = {0x00000153}; -/*no auto_bl,need add in furture*/ -static u32 tmd_cmd_enable_backlight[] = {0x00005ab4}; -static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd}; - -static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config, - int pipe) -{ - struct mdfld_dsi_pkg_sender *sender - = mdfld_dsi_get_pkg_sender(dsi_config); - - DRM_INFO("Enter mdfld init TMD MIPI display.\n"); - - if (!sender) { - DRM_ERROR("Cannot get sender\n"); - return; - } - - if (dsi_config->dvr_ic_inited) - return; - - msleep(3); - - /* FIXME: make the below data u8 instead of u32; note byte order! */ - - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off, - sizeof(tmd_cmd_mcap_off), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch, - sizeof(tmd_cmd_enable_lane_switch), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num, - sizeof(tmd_cmd_set_lane_num), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0, - sizeof(tmd_cmd_pushing_clock0), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1, - sizeof(tmd_cmd_pushing_clock1), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode, - sizeof(tmd_cmd_set_mode), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode, - sizeof(tmd_cmd_set_sync_pulse_mode), false); - mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column, - sizeof(tmd_cmd_set_column), false); - mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page, - sizeof(tmd_cmd_set_page), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode, - sizeof(tmd_cmd_set_video_mode), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight, - sizeof(tmd_cmd_enable_backlight), false); - mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming, - sizeof(tmd_cmd_set_backlight_dimming), false); - - dsi_config->dvr_ic_inited = 1; -} - -/*TPO DPI encoder helper funcs*/ -static const struct drm_encoder_helper_funcs - mdfld_tpo_dpi_encoder_helper_funcs = { - .dpms = mdfld_dsi_dpi_dpms, - .mode_fixup = mdfld_dsi_dpi_mode_fixup, - .prepare = mdfld_dsi_dpi_prepare, - .mode_set = mdfld_dsi_dpi_mode_set, - .commit = mdfld_dsi_dpi_commit, -}; - -const struct panel_funcs mdfld_tmd_vid_funcs = { - .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, - .get_config_mode = &tmd_vid_get_config_mode, - .get_panel_info = tmd_vid_get_panel_info, - .reset = mdfld_dsi_panel_reset, - .drv_ic_init = mdfld_dsi_tmd_drv_ic_init, -}; diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c deleted file mode 100644 index 11845978fb0a..000000000000 --- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright © 2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to 
use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * jim liu <jim.liu@intel.com> - * Jackie Li<yaodong.li@intel.com> - */ - -#include "mdfld_dsi_dpi.h" - -static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev) -{ - struct drm_display_mode *mode; - - mode = kzalloc(sizeof(*mode), GFP_KERNEL); - if (!mode) - return NULL; - - mode->hdisplay = 864; - mode->vdisplay = 480; - mode->hsync_start = 873; - mode->hsync_end = 876; - mode->htotal = 887; - mode->vsync_start = 487; - mode->vsync_end = 490; - mode->vtotal = 499; - mode->clock = 33264; - - drm_mode_set_name(mode); - drm_mode_set_crtcinfo(mode, 0); - - mode->type |= DRM_MODE_TYPE_PREFERRED; - - return mode; -} - -static int tpo_vid_get_panel_info(struct drm_device *dev, - int pipe, - struct panel_info *pi) -{ - if (!dev || !pi) - return -EINVAL; - - pi->width_mm = TPO_PANEL_WIDTH; - pi->height_mm = TPO_PANEL_HEIGHT; - - return 0; -} - -/*TPO DPI encoder helper funcs*/ -static const struct drm_encoder_helper_funcs - mdfld_tpo_dpi_encoder_helper_funcs = { - .dpms = mdfld_dsi_dpi_dpms, - .mode_fixup = mdfld_dsi_dpi_mode_fixup, - .prepare = mdfld_dsi_dpi_prepare, - .mode_set = mdfld_dsi_dpi_mode_set, - .commit = mdfld_dsi_dpi_commit, -}; - -const struct panel_funcs mdfld_tpo_vid_funcs = { - .encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs, - .get_config_mode = &tpo_vid_get_config_mode, - .get_panel_info = tpo_vid_get_panel_info, -}; diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c index 13aff19aae9b..d856580b8111 100644 --- a/drivers/gpu/drm/gma500/mmu.c +++ b/drivers/gpu/drm/gma500/mmu.c @@ -48,7 +48,6 @@ static inline uint32_t psb_mmu_pd_index(uint32_t offset) return offset >> PSB_PDE_SHIFT; } -#if defined(CONFIG_X86) static inline void psb_clflush(void *addr) { __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); @@ -63,13 +62,6 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr) psb_clflush(addr); mb(); } -#else - -static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr) -{; -} - -#endif static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force) { @@ -293,7 +285,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) *ptes++ = pd->invalid_pte; -#if defined(CONFIG_X86) if (pd->driver->has_clflush && pd->hw_context != -1) { mb(); for (i = 0; i < clflush_count; ++i) { @@ -302,7 +293,6 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) } mb(); } -#endif kunmap_atomic(v); spin_unlock(lock); @@ -459,7 +449,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev, driver->has_clflush = 
0; -#if defined(CONFIG_X86) if (boot_cpu_has(X86_FEATURE_CLFLUSH)) { uint32_t tfms, misc, cap0, cap4, clflush_size; @@ -476,7 +465,6 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev, driver->clflush_mask = driver->clflush_add - 1; driver->clflush_mask = ~driver->clflush_mask; } -#endif up_write(&driver->sem); return driver; @@ -486,7 +474,6 @@ out_err1: return NULL; } -#if defined(CONFIG_X86) static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) @@ -534,14 +521,6 @@ static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, } mb(); } -#else -static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, - uint32_t num_pages, uint32_t desired_tile_stride, - uint32_t hw_tile_stride) -{ - drm_ttm_cache_flush(); -} -#endif void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages) diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c index 08cd5f73c868..454156fcbec7 100644 --- a/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/drivers/gpu/drm/gma500/oaktrail_device.c @@ -10,9 +10,6 @@ #include <linux/dmi.h> #include <linux/module.h> -#include <asm/intel-mid.h> -#include <asm/intel_scu_ipc.h> - #include <drm/drm.h> #include "intel_bios.h" @@ -548,7 +545,7 @@ const struct psb_ops oaktrail_chip_ops = { .chip_setup = oaktrail_chip_setup, .chip_teardown = oaktrail_teardown, .crtc_helper = &oaktrail_helper_funcs, - .crtc_funcs = &psb_intel_crtc_funcs, + .crtc_funcs = &gma_intel_crtc_funcs, .output_init = oaktrail_output_init, diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index 2d21f8ec595f..951725a0f7a3 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c @@ -329,7 +329,7 @@ const struct psb_ops psb_chip_ops = { .chip_teardown = psb_chip_teardown, .crtc_helper = &psb_intel_helper_funcs, - .crtc_funcs = &psb_intel_crtc_funcs, + .crtc_funcs = &gma_intel_crtc_funcs, .clock_funcs = &psb_clock_funcs, .output_init = psb_output_init, diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index a19f60d40394..650af221c916 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -12,6 +12,7 @@ #include <linux/notifier.h> #include <linux/pm_runtime.h> #include <linux/spinlock.h> +#include <linux/delay.h> #include <asm/set_memory.h> @@ -46,16 +47,15 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); * PowerVR SGX535 - Poulsbo - Intel GMA 500, Intel Atom Z5xx * PowerVR SGX535 - Moorestown - Intel GMA 600 * PowerVR SGX535 - Oaktrail - Intel GMA 600, Intel Atom Z6xx, E6xx - * PowerVR SGX540 - Medfield - Intel Atom Z2460 - * PowerVR SGX544MP2 - Medfield - * PowerVR SGX545 - Cedartrail - Intel GMA 3600, Intel Atom D2500, N2600 * PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700, * N2800 */ static const struct pci_device_id pciidlist[] = { + /* Poulsbo */ { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, -#if defined(CONFIG_DRM_GMA600) + /* Oak Trail */ { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops }, { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops }, { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops }, @@ -65,18 +65,7 @@ static const 
struct pci_device_id pciidlist[] = { { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops }, { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops }, { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops }, -#endif -#if defined(CONFIG_DRM_MEDFIELD) - { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, - { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, - { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, - { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, - { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, - { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, - { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, - { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops }, -#endif -#if defined(CONFIG_DRM_GMA3600) + /* Cedar Trail */ { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops }, { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops }, { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops }, @@ -93,7 +82,6 @@ static const struct pci_device_id pciidlist[] = { { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops }, { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops }, { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops }, -#endif { 0, } }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -104,6 +92,36 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_ioctl_desc psb_ioctls[] = { }; +/** + * psb_spank - reset the 2D engine + * @dev_priv: our PSB DRM device + * + * Soft reset the graphics engine and then reload the necessary registers. 
+ */ +void psb_spank(struct drm_psb_private *dev_priv) +{ + PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET | + _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET | + _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET | + _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET); + PSB_RSGX32(PSB_CR_SOFT_RESET); + + msleep(1); + + PSB_WSGX32(0, PSB_CR_SOFT_RESET); + wmb(); + PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT, + PSB_CR_BIF_CTRL); + wmb(); + (void) PSB_RSGX32(PSB_CR_BIF_CTRL); + + msleep(1); + PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT, + PSB_CR_BIF_CTRL); + (void) PSB_RSGX32(PSB_CR_BIF_CTRL); + PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); +} + static int psb_do_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index d303f8271f7e..49afa577d442 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -40,19 +40,16 @@ enum { CHIP_PSB_8108 = 0, /* Poulsbo */ CHIP_PSB_8109 = 1, /* Poulsbo */ CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */ - CHIP_MFLD_0130 = 3, /* Medfield */ }; #define IS_PSB(drm) ((to_pci_dev((drm)->dev)->device & 0xfffe) == 0x8108) #define IS_MRST(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x4100) -#define IS_MFLD(drm) ((to_pci_dev((drm)->dev)->device & 0xfff8) == 0x0130) #define IS_CDV(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x0be0) /* Hardware offsets */ #define PSB_VDC_OFFSET 0x00000000 #define PSB_VDC_SIZE 0x000080000 #define MRST_MMIO_SIZE 0x0000C0000 -#define MDFLD_MMIO_SIZE 0x000100000 #define PSB_SGX_SIZE 0x8000 #define PSB_SGX_OFFSET 0x00040000 #define MRST_SGX_OFFSET 0x00080000 @@ -109,8 +106,6 @@ enum { #define _PSB_DPST_PIPEA_FLAG (1<<6) #define _PSB_PIPEA_EVENT_FLAG (1<<6) #define _PSB_VSYNC_PIPEA_FLAG (1<<7) -#define _MDFLD_MIPIA_FLAG (1<<16) -#define _MDFLD_MIPIC_FLAG (1<<17) #define _PSB_IRQ_DISP_HOTSYNC (1<<17) #define _PSB_IRQ_SGX_FLAG (1<<18) #define _PSB_IRQ_MSVDX_FLAG (1<<19) @@ -119,13 +114,6 @@ enum { #define _PSB_PIPE_EVENT_FLAG (_PSB_VSYNC_PIPEA_FLAG | \ _PSB_VSYNC_PIPEB_FLAG) -/* This flag includes all the display IRQ bits excepts the vblank irqs. 
*/ -#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \ - _MDFLD_PIPEB_EVENT_FLAG | \ - _PSB_PIPEA_EVENT_FLAG | \ - _PSB_VSYNC_PIPEA_FLAG | \ - _MDFLD_MIPIA_FLAG | \ - _MDFLD_MIPIC_FLAG) #define PSB_INT_IDENTITY_R 0x20A4 #define PSB_INT_MASK_R 0x20A8 #define PSB_INT_ENABLE_R 0x20A0 @@ -191,25 +179,6 @@ enum { #define PSB_WATCHDOG_DELAY (HZ * 2) #define PSB_LID_DELAY (HZ / 10) -#define MDFLD_PNW_B0 0x04 -#define MDFLD_PNW_C0 0x08 - -#define MDFLD_DSR_2D_3D_0 (1 << 0) -#define MDFLD_DSR_2D_3D_2 (1 << 1) -#define MDFLD_DSR_CURSOR_0 (1 << 2) -#define MDFLD_DSR_CURSOR_2 (1 << 3) -#define MDFLD_DSR_OVERLAY_0 (1 << 4) -#define MDFLD_DSR_OVERLAY_2 (1 << 5) -#define MDFLD_DSR_MIPI_CONTROL (1 << 6) -#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4)) -#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5)) -#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2) - -#define MDFLD_DSR_RR 45 -#define MDFLD_DPU_ENABLE (1 << 31) -#define MDFLD_DSR_FULLSCREEN (1 << 30) -#define MDFLD_DSR_DELAY (HZ / MDFLD_DSR_RR) - #define PSB_PWR_STATE_ON 1 #define PSB_PWR_STATE_OFF 2 @@ -382,16 +351,6 @@ struct psb_state { uint32_t savePWM_CONTROL_LOGIC; }; -struct medfield_state { - uint32_t saveMIPI; - uint32_t saveMIPI_C; - - uint32_t savePFIT_CONTROL; - uint32_t savePFIT_PGM_RATIOS; - uint32_t saveHDMIPHYMISCCTL; - uint32_t saveHDMIB_CONTROL; -}; - struct cdv_state { uint32_t saveDSPCLK_GATE_D; uint32_t saveRAMCLK_GATE_D; @@ -417,7 +376,6 @@ struct psb_save_area { uint32_t saveVBT; union { struct psb_state psb; - struct medfield_state mdfld; struct cdv_state cdv; }; uint32_t saveBLC_PWM_CTL2; @@ -428,6 +386,8 @@ struct psb_ops; #define PSB_NUM_PIPE 3 +struct intel_scu_ipc_dev; + struct drm_psb_private { struct drm_device *dev; struct pci_dev *aux_pdev; /* Currently only used by mrst */ @@ -567,6 +527,7 @@ struct drm_psb_private { * Used for modifying backlight from * xrandr -- consider removing and using HAL instead */ + struct intel_scu_ipc_dev *scu; struct backlight_device *backlight_device; struct drm_property *backlight_property; bool backlight_enabled; @@ -590,8 +551,6 @@ struct drm_psb_private { u32 pipeconf[3]; u32 dspcntr[3]; - int mdfld_panel_id; - bool dplla_96mhz; /* DPLL data from the VBT */ struct { @@ -666,13 +625,9 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev) /* psb_irq.c */ extern irqreturn_t psb_irq_handler(int irq, void *arg); -extern int psb_irq_enable_dpst(struct drm_device *dev); -extern int psb_irq_disable_dpst(struct drm_device *dev); extern void psb_irq_preinstall(struct drm_device *dev); extern int psb_irq_postinstall(struct drm_device *dev); extern void psb_irq_uninstall(struct drm_device *dev); -extern void psb_irq_turn_on_dpst(struct drm_device *dev); -extern void psb_irq_turn_off_dpst(struct drm_device *dev); extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands); extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence); @@ -720,7 +675,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev, /* psb_intel_display.c */ extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs; -extern const struct drm_crtc_funcs psb_intel_crtc_funcs; +extern const struct drm_crtc_funcs gma_intel_crtc_funcs; /* psb_intel_lvds.c */ extern const struct drm_connector_helper_funcs @@ -737,9 +692,6 @@ extern const struct psb_ops psb_chip_ops; /* oaktrail_device.c */ extern const struct psb_ops oaktrail_chip_ops; -/* mdlfd_device.c */ -extern const struct psb_ops mdfld_chip_ops; - /* 
cdv_device.c */ extern const struct psb_ops cdv_chip_ops; @@ -779,25 +731,6 @@ static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset, pci_write_config_dword(pci_root, 0xD0, mcr); pci_dev_put(pci_root); } -static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset) -{ - int mcr = (0x10<<24) | (port << 16) | (offset << 8); - uint32_t ret_val = 0; - struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0); - pci_write_config_dword(pci_root, 0xD0, mcr); - pci_read_config_dword(pci_root, 0xD4, &ret_val); - pci_dev_put(pci_root); - return ret_val; -} -static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset, - u32 value) -{ - int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0; - struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0); - pci_write_config_dword(pci_root, 0xD4, value); - pci_write_config_dword(pci_root, 0xD0, mcr); - pci_dev_put(pci_root); -} static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg) { diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 9c3cb1b80bbd..359606429316 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -426,7 +426,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .disable = gma_crtc_disable, }; -const struct drm_crtc_funcs psb_intel_crtc_funcs = { +const struct drm_crtc_funcs gma_intel_crtc_funcs = { .cursor_set = gma_crtc_cursor_set, .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h index 835cc924c45a..ced7b433befb 100644 --- a/drivers/gpu/drm/gma500/psb_intel_reg.h +++ b/drivers/gpu/drm/gma500/psb_intel_reg.h @@ -550,38 +550,6 @@ #define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30) #define DPST_YUV_LUMA_MODE 0 -struct dpst_ie_histogram_control { - union { - uint32_t data; - struct { - uint32_t bin_reg_index:7; - uint32_t reserved:4; - uint32_t bin_reg_func_select:1; - uint32_t sync_to_phase_in:1; - uint32_t alt_enhancement_mode:2; - uint32_t reserved1:1; - uint32_t sync_to_phase_in_count:8; - uint32_t histogram_mode_select:1; - uint32_t reserved2:4; - uint32_t ie_pipe_assignment:1; - uint32_t ie_mode_table_enabled:1; - uint32_t ie_histogram_enable:1; - }; - }; -}; - -struct dpst_guardband { - union { - uint32_t data; - struct { - uint32_t guardband:22; - uint32_t guardband_interrupt_delay:8; - uint32_t interrupt_status:1; - uint32_t interrupt_enable:1; - }; - }; -}; - #define PIPEAFRAMEHIGH 0x70040 #define PIPEAFRAMEPIXEL 0x70044 #define PIPEBFRAMEHIGH 0x71040 @@ -595,7 +563,7 @@ struct dpst_guardband { #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 -#define FW_BLC_SELF 0x20e0 +#define FW_BLC_SELF 0x20e0 #define FW_BLC_SELF_EN (1<<15) #define DSPARB 0x70030 @@ -789,17 +757,9 @@ struct dpst_guardband { * MOORESTOWN delta registers */ #define MRST_DPLL_A 0x0f014 -#define MDFLD_DPLL_B 0x0f018 -#define MDFLD_INPUT_REF_SEL (1 << 14) -#define MDFLD_VCO_SEL (1 << 16) #define DPLLA_MODE_LVDS (2 << 26) /* mrst */ -#define MDFLD_PLL_LATCHEN (1 << 28) -#define MDFLD_PWR_GATE_EN (1 << 30) -#define MDFLD_P1_MASK (0x1FF << 17) #define MRST_FPA0 0x0f040 #define MRST_FPA1 0x0f044 -#define MDFLD_DPLL_DIV0 0x0f048 -#define MDFLD_DPLL_DIV1 0x0f04c #define MRST_PERF_MODE 0x020f4 /* @@ -848,7 +808,6 @@ struct dpst_guardband { #define MRST_DSPABASE 0x7019c #define MRST_DSPBBASE 0x7119c -#define MDFLD_DSPCBASE 0x7219c /* * Moorestown 
registers. @@ -930,7 +889,6 @@ struct dpst_guardband { #define DEVICE_RESET_REG 0xb01C #define DPI_RESOLUTION_REG 0xb020 #define RES_V_POS 0x10 -#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */ #define HORIZ_SYNC_PAD_COUNT_REG 0xb028 #define HORIZ_BACK_PORCH_COUNT_REG 0xb02C #define HORIZ_FRONT_PORCH_COUNT_REG 0xb030 diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 5cceea9be534..104009e78487 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -10,7 +10,6 @@ #include <drm/drm_vblank.h> -#include "mdfld_output.h" #include "power.h" #include "psb_drv.h" #include "psb_intel_reg.h" @@ -102,30 +101,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask) } } -static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe) -{ - if (gma_power_begin(dev_priv->dev, false)) { - u32 pipe_event = mid_pipe_event(pipe); - dev_priv->vdc_irq_mask |= pipe_event; - PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); - PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); - gma_power_end(dev_priv->dev); - } -} - -static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe) -{ - if (dev_priv->pipestat[pipe] == 0) { - if (gma_power_begin(dev_priv->dev, false)) { - u32 pipe_event = mid_pipe_event(pipe); - dev_priv->vdc_irq_mask &= ~pipe_event; - PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R); - PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); - gma_power_end(dev_priv->dev); - } - } -} - /* * Display controller interrupt handler for pipe event. */ @@ -164,8 +139,7 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe) "%s, can't clear status bits for pipe %d, its value = 0x%x.\n", __func__, pipe, PSB_RVDC32(pipe_stat_reg)); - if (pipe_stat_val & PIPE_VBLANK_STATUS || - (IS_MFLD(dev) && pipe_stat_val & PIPE_TE_STATUS)) { + if (pipe_stat_val & PIPE_VBLANK_STATUS) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); struct gma_crtc *gma_crtc = to_gma_crtc(crtc); unsigned long flags; @@ -263,11 +237,6 @@ irqreturn_t psb_irq_handler(int irq, void *arg) if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE)) dsp_int = 1; - /* FIXME: Handle Medfield - if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG) - dsp_int = 1; - */ - if (vdc_stat & _PSB_IRQ_SGX_FLAG) sgx_int = 1; if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC) @@ -325,13 +294,6 @@ void psb_irq_preinstall(struct drm_device *dev) if (dev->vblank[1].enabled) dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; - /* FIXME: Handle Medfield irq mask - if (dev->vblank[1].enabled) - dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG; - if (dev->vblank[2].enabled) - dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG; - */ - /* Revisit this area - want per device masks ? 
*/ if (dev_priv->ops->hotplug) dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC; @@ -406,92 +368,6 @@ void psb_irq_uninstall(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); } -void psb_irq_turn_on_dpst(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = - (struct drm_psb_private *) dev->dev_private; - u32 hist_reg; - u32 pwm_reg; - - if (gma_power_begin(dev, false)) { - PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL); - hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL); - PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL); - hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL); - - PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC); - pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); - PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE - | PWM_PHASEIN_INT_ENABLE, - PWM_CONTROL_LOGIC); - pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); - - psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE); - - hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL); - PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR, - HISTOGRAM_INT_CONTROL); - pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); - PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE, - PWM_CONTROL_LOGIC); - - gma_power_end(dev); - } -} - -int psb_irq_enable_dpst(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = - (struct drm_psb_private *) dev->dev_private; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); - - /* enable DPST */ - mid_enable_pipe_event(dev_priv, 0); - psb_irq_turn_on_dpst(dev); - - spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - return 0; -} - -void psb_irq_turn_off_dpst(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = - (struct drm_psb_private *) dev->dev_private; - u32 pwm_reg; - - if (gma_power_begin(dev, false)) { - PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL); - PSB_RVDC32(HISTOGRAM_INT_CONTROL); - - psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE); - - pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); - PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE, - PWM_CONTROL_LOGIC); - pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC); - - gma_power_end(dev); - } -} - -int psb_irq_disable_dpst(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = - (struct drm_psb_private *) dev->dev_private; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); - - mid_disable_pipe_event(dev_priv, 0); - psb_irq_turn_off_dpst(dev); - - spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - - return 0; -} - /* * It is used to enable VBLANK interrupt */ @@ -504,11 +380,6 @@ int psb_enable_vblank(struct drm_crtc *crtc) uint32_t reg_val = 0; uint32_t pipeconf_reg = mid_pipeconf(pipe); - /* Medfield is different - we should perhaps extract out vblank - and blacklight etc ops */ - if (IS_MFLD(dev)) - return mdfld_enable_te(dev, pipe); - if (gma_power_begin(dev, false)) { reg_val = REG_READ(pipeconf_reg); gma_power_end(dev); @@ -543,8 +414,6 @@ void psb_disable_vblank(struct drm_crtc *crtc) struct drm_psb_private *dev_priv = dev->dev_private; unsigned long irqflags; - if (IS_MFLD(dev)) - mdfld_disable_te(dev, pipe); spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); if (pipe == 0) @@ -559,55 +428,6 @@ void psb_disable_vblank(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); } -/* - * It is used to enable TE interrupt - */ -int mdfld_enable_te(struct drm_device *dev, int pipe) -{ - struct drm_psb_private *dev_priv = - (struct drm_psb_private *) dev->dev_private; - unsigned long irqflags; - uint32_t reg_val = 0; - uint32_t pipeconf_reg = 
mid_pipeconf(pipe); - - if (gma_power_begin(dev, false)) { - reg_val = REG_READ(pipeconf_reg); - gma_power_end(dev); - } - - if (!(reg_val & PIPEACONF_ENABLE)) - return -EINVAL; - - spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); - - mid_enable_pipe_event(dev_priv, pipe); - psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE); - - spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - - return 0; -} - -/* - * It is used to disable TE interrupt - */ -void mdfld_disable_te(struct drm_device *dev, int pipe) -{ - struct drm_psb_private *dev_priv = - (struct drm_psb_private *) dev->dev_private; - unsigned long irqflags; - - if (!dev_priv->dsr_enable) - return; - - spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); - - mid_disable_pipe_event(dev_priv, pipe); - psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE); - - spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); -} - /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h index 4f73998848d1..17c9b0b62471 100644 --- a/drivers/gpu/drm/gma500/psb_irq.h +++ b/drivers/gpu/drm/gma500/psb_irq.h @@ -23,14 +23,8 @@ int psb_irq_postinstall(struct drm_device *dev); void psb_irq_uninstall(struct drm_device *dev); irqreturn_t psb_irq_handler(int irq, void *arg); -int psb_irq_enable_dpst(struct drm_device *dev); -int psb_irq_disable_dpst(struct drm_device *dev); -void psb_irq_turn_on_dpst(struct drm_device *dev); -void psb_irq_turn_off_dpst(struct drm_device *dev); int psb_enable_vblank(struct drm_crtc *crtc); void psb_disable_vblank(struct drm_crtc *crtc); u32 psb_get_vblank_counter(struct drm_crtc *crtc); -int mdfld_enable_te(struct drm_device *dev, int pipe); -void mdfld_disable_te(struct drm_device *dev, int pipe); #endif /* _PSB_IRQ_H_ */ diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h index fb22bac5bb74..2a229a0ef36c 100644 --- a/drivers/gpu/drm/gma500/psb_reg.h +++ b/drivers/gpu/drm/gma500/psb_reg.h @@ -550,21 +550,7 @@ #define PSB_PM_SSC 0x20 #define PSB_PM_SSS 0x30 #define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/ -#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c -#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000 -#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000 -#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000 -#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */ /* Display SSS register bits are different in A0 vs. 
B0 */ #define PSB_PWRGT_GFX_MASK 0x3 -#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0 -#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300 -#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00 #define PSB_PWRGT_GFX_MASK_B0 0xc3 -#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c -#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000 -#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000 -#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000 -#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */ -#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */ #endif diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c deleted file mode 100644 index 99d2ffc2fed9..000000000000 --- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c +++ /dev/null @@ -1,805 +0,0 @@ -/* - * Copyright © 2011 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - */ - -#include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/gpio/consumer.h> - -#include <asm/intel_scu_ipc.h> - -#include "mdfld_dsi_dpi.h" -#include "mdfld_dsi_pkg_sender.h" -#include "mdfld_output.h" -#include "tc35876x-dsi-lvds.h" - -static struct i2c_client *tc35876x_client; -static struct i2c_client *cmi_lcd_i2c_client; -/* Panel GPIOs */ -static struct gpio_desc *bridge_reset; -static struct gpio_desc *bridge_bl_enable; -static struct gpio_desc *backlight_voltage; - - -#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) -#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) - -/* DSI D-PHY Layer Registers */ -#define D0W_DPHYCONTTX 0x0004 -#define CLW_DPHYCONTRX 0x0020 -#define D0W_DPHYCONTRX 0x0024 -#define D1W_DPHYCONTRX 0x0028 -#define D2W_DPHYCONTRX 0x002C -#define D3W_DPHYCONTRX 0x0030 -#define COM_DPHYCONTRX 0x0038 -#define CLW_CNTRL 0x0040 -#define D0W_CNTRL 0x0044 -#define D1W_CNTRL 0x0048 -#define D2W_CNTRL 0x004C -#define D3W_CNTRL 0x0050 -#define DFTMODE_CNTRL 0x0054 - -/* DSI PPI Layer Registers */ -#define PPI_STARTPPI 0x0104 -#define PPI_BUSYPPI 0x0108 -#define PPI_LINEINITCNT 0x0110 -#define PPI_LPTXTIMECNT 0x0114 -#define PPI_LANEENABLE 0x0134 -#define PPI_TX_RX_TA 0x013C -#define PPI_CLS_ATMR 0x0140 -#define PPI_D0S_ATMR 0x0144 -#define PPI_D1S_ATMR 0x0148 -#define PPI_D2S_ATMR 0x014C -#define PPI_D3S_ATMR 0x0150 -#define PPI_D0S_CLRSIPOCOUNT 0x0164 -#define PPI_D1S_CLRSIPOCOUNT 0x0168 -#define PPI_D2S_CLRSIPOCOUNT 0x016C -#define PPI_D3S_CLRSIPOCOUNT 0x0170 -#define CLS_PRE 0x0180 -#define D0S_PRE 0x0184 -#define D1S_PRE 0x0188 -#define D2S_PRE 0x018C -#define D3S_PRE 0x0190 -#define CLS_PREP 0x01A0 -#define D0S_PREP 0x01A4 -#define D1S_PREP 0x01A8 -#define D2S_PREP 0x01AC -#define D3S_PREP 0x01B0 -#define CLS_ZERO 0x01C0 -#define D0S_ZERO 0x01C4 -#define D1S_ZERO 0x01C8 -#define D2S_ZERO 0x01CC -#define D3S_ZERO 0x01D0 -#define PPI_CLRFLG 0x01E0 -#define PPI_CLRSIPO 0x01E4 -#define HSTIMEOUT 0x01F0 -#define HSTIMEOUTENABLE 0x01F4 - -/* DSI Protocol Layer Registers */ -#define DSI_STARTDSI 0x0204 -#define DSI_BUSYDSI 0x0208 -#define DSI_LANEENABLE 0x0210 -#define DSI_LANESTATUS0 0x0214 -#define DSI_LANESTATUS1 0x0218 -#define DSI_INTSTATUS 0x0220 -#define DSI_INTMASK 0x0224 -#define DSI_INTCLR 0x0228 -#define DSI_LPTXTO 0x0230 - -/* DSI General Registers */ -#define DSIERRCNT 0x0300 - -/* DSI Application Layer Registers */ -#define APLCTRL 0x0400 -#define RDPKTLN 0x0404 - -/* Video Path Registers */ -#define VPCTRL 0x0450 -#define HTIM1 0x0454 -#define HTIM2 0x0458 -#define VTIM1 0x045C -#define VTIM2 0x0460 -#define VFUEN 0x0464 - -/* LVDS Registers */ -#define LVMX0003 0x0480 -#define LVMX0407 0x0484 -#define LVMX0811 0x0488 -#define LVMX1215 0x048C -#define LVMX1619 0x0490 -#define LVMX2023 0x0494 -#define LVMX2427 0x0498 -#define LVCFG 0x049C -#define LVPHY0 0x04A0 -#define LVPHY1 0x04A4 - -/* System Registers */ -#define SYSSTAT 0x0500 -#define SYSRST 0x0504 - -/* GPIO Registers */ -/*#define GPIOC 0x0520*/ -#define GPIOO 0x0524 -#define GPIOI 0x0528 - -/* I2C Registers */ -#define I2CTIMCTRL 0x0540 -#define I2CMADDR 0x0544 -#define WDATAQ 0x0548 -#define RDATAQ 0x054C - -/* Chip/Rev Registers */ -#define IDREG 0x0580 - -/* Debug Registers */ -#define DEBUG00 0x05A0 -#define DEBUG01 0x05A4 - -/* Panel CABC registers */ -#define PANEL_PWM_CONTROL 0x90 -#define PANEL_FREQ_DIVIDER_HI 0x91 -#define PANEL_FREQ_DIVIDER_LO 0x92 -#define PANEL_DUTY_CONTROL 0x93 -#define 
PANEL_MODIFY_RGB 0x94 -#define PANEL_FRAMERATE_CONTROL 0x96 -#define PANEL_PWM_MIN 0x97 -#define PANEL_PWM_REF 0x98 -#define PANEL_PWM_MAX 0x99 -#define PANEL_ALLOW_DISTORT 0x9A -#define PANEL_BYPASS_PWMI 0x9B - -/* Panel color management registers */ -#define PANEL_CM_ENABLE 0x700 -#define PANEL_CM_HUE 0x701 -#define PANEL_CM_SATURATION 0x702 -#define PANEL_CM_INTENSITY 0x703 -#define PANEL_CM_BRIGHTNESS 0x704 -#define PANEL_CM_CE_ENABLE 0x705 -#define PANEL_CM_PEAK_EN 0x710 -#define PANEL_CM_GAIN 0x711 -#define PANEL_CM_HUETABLE_START 0x730 -#define PANEL_CM_HUETABLE_END 0x747 /* inclusive */ - -/* Input muxing for registers LVMX0003...LVMX2427 */ -enum { - INPUT_R0, /* 0 */ - INPUT_R1, - INPUT_R2, - INPUT_R3, - INPUT_R4, - INPUT_R5, - INPUT_R6, - INPUT_R7, - INPUT_G0, /* 8 */ - INPUT_G1, - INPUT_G2, - INPUT_G3, - INPUT_G4, - INPUT_G5, - INPUT_G6, - INPUT_G7, - INPUT_B0, /* 16 */ - INPUT_B1, - INPUT_B2, - INPUT_B3, - INPUT_B4, - INPUT_B5, - INPUT_B6, - INPUT_B7, - INPUT_HSYNC, /* 24 */ - INPUT_VSYNC, - INPUT_DE, - LOGIC_0, - /* 28...31 undefined */ -}; - -#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00) \ - (FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) | \ - FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0)) - -/** - * tc35876x_regw - Write DSI-LVDS bridge register using I2C - * @client: struct i2c_client to use - * @reg: register address - * @value: value to write - * - * Returns 0 on success, or a negative error value. - */ -static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value) -{ - int r; - u8 tx_data[] = { - /* NOTE: Register address big-endian, data little-endian. */ - (reg >> 8) & 0xff, - reg & 0xff, - value & 0xff, - (value >> 8) & 0xff, - (value >> 16) & 0xff, - (value >> 24) & 0xff, - }; - struct i2c_msg msgs[] = { - { - .addr = client->addr, - .flags = 0, - .buf = tx_data, - .len = ARRAY_SIZE(tx_data), - }, - }; - - r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (r < 0) { - dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n", - __func__, reg, value, r); - return r; - } - - if (r < ARRAY_SIZE(msgs)) { - dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n", - __func__, reg, value, r); - return -EAGAIN; - } - - dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n", - __func__, reg, value); - - return 0; -} - -/** - * tc35876x_regr - Read DSI-LVDS bridge register using I2C - * @client: struct i2c_client to use - * @reg: register address - * @value: pointer for storing the value - * - * Returns 0 on success, or a negative error value. 
- */ -static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value) -{ - int r; - u8 tx_data[] = { - (reg >> 8) & 0xff, - reg & 0xff, - }; - u8 rx_data[4]; - struct i2c_msg msgs[] = { - { - .addr = client->addr, - .flags = 0, - .buf = tx_data, - .len = ARRAY_SIZE(tx_data), - }, - { - .addr = client->addr, - .flags = I2C_M_RD, - .buf = rx_data, - .len = ARRAY_SIZE(rx_data), - }, - }; - - r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (r < 0) { - dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__, - reg, r); - return r; - } - - if (r < ARRAY_SIZE(msgs)) { - dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__, - reg, r); - return -EAGAIN; - } - - *value = rx_data[0] << 24 | rx_data[1] << 16 | - rx_data[2] << 8 | rx_data[3]; - - dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__, - reg, *value); - - return 0; -} - -void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state) -{ - if (WARN(!tc35876x_client, "%s called before probe", __func__)) - return; - - dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state); - - if (!bridge_reset) - return; - - if (state) { - gpiod_set_value_cansleep(bridge_reset, 0); - mdelay(10); - } else { - /* Pull MIPI Bridge reset pin to Low */ - gpiod_set_value_cansleep(bridge_reset, 0); - mdelay(20); - /* Pull MIPI Bridge reset pin to High */ - gpiod_set_value_cansleep(bridge_reset, 1); - mdelay(40); - } -} - -void tc35876x_configure_lvds_bridge(struct drm_device *dev) -{ - struct i2c_client *i2c = tc35876x_client; - u32 ppi_lptxtimecnt; - u32 txtagocnt; - u32 txtasurecnt; - u32 id; - - if (WARN(!tc35876x_client, "%s called before probe", __func__)) - return; - - dev_dbg(&tc35876x_client->dev, "%s\n", __func__); - - if (!tc35876x_regr(i2c, IDREG, &id)) - dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id); - else - dev_err(&tc35876x_client->dev, "Cannot read ID\n"); - - ppi_lptxtimecnt = 4; - txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4; - txtasurecnt = 3 * ppi_lptxtimecnt / 2; - tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) | - FLD_VAL(txtasurecnt, 10, 0)); - tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0)); - - tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); - tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); - tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); - tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0)); - - /* Enabling MIPI & PPI lanes, Enable 4 lanes */ - tc35876x_regw(i2c, PPI_LANEENABLE, - BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); - tc35876x_regw(i2c, DSI_LANEENABLE, - BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)); - tc35876x_regw(i2c, PPI_STARTPPI, BIT(0)); - tc35876x_regw(i2c, DSI_STARTDSI, BIT(0)); - - /* Setting LVDS output frequency */ - tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) | - FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */ - - /* Setting video panel control register,0x00000120 VTGen=ON ?!?!? */ - tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5)); - - /* Horizontal back porch and horizontal pulse width. 0x00280028 */ - tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0)); - - /* Horizontal front porch and horizontal active video size. 0x00500500*/ - tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0)); - - /* Vertical back porch and vertical sync pulse width. 0x000e000a */ - tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0)); - - /* Vertical front porch and vertical display size. 
0x000e0320 */ - tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0)); - - /* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */ - tc35876x_regw(i2c, VFUEN, BIT(0)); - - /* Soft reset LCD controller. */ - tc35876x_regw(i2c, SYSRST, BIT(2)); - - /* LVDS-TX input muxing */ - tc35876x_regw(i2c, LVMX0003, - INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2)); - tc35876x_regw(i2c, LVMX0407, - INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6)); - tc35876x_regw(i2c, LVMX0811, - INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3)); - tc35876x_regw(i2c, LVMX1215, - INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5)); - tc35876x_regw(i2c, LVMX1619, - INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0)); - tc35876x_regw(i2c, LVMX2023, - INPUT_MUX(LOGIC_0, INPUT_B7, INPUT_B6, INPUT_B5)); - tc35876x_regw(i2c, LVMX2427, - INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC)); - - /* Enable LVDS transmitter. */ - tc35876x_regw(i2c, LVCFG, BIT(0)); - - /* Clear notifications. Don't write reserved bits. Was write 0xffffffff - * to 0x0288, must be in error?! */ - tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0)); -} - -#define GPIOPWMCTRL 0x38F -#define PWM0CLKDIV0 0x62 /* low byte */ -#define PWM0CLKDIV1 0x61 /* high byte */ - -#define SYSTEMCLK 19200000UL /* 19.2 MHz */ -#define PWM_FREQUENCY 9600 /* Hz */ - -/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */ -static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f) -{ - return (baseclk - f) / f; -} - -static void tc35876x_brightness_init(struct drm_device *dev) -{ - int ret; - u8 pwmctrl; - u16 clkdiv; - - /* Make sure the PWM reference is the 19.2 MHz system clock. Read first - * instead of setting directly to catch potential conflicts between PWM - * users. */ - ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl); - if (ret || pwmctrl != 0x01) { - if (ret) - dev_err(dev->dev, "GPIOPWMCTRL read failed\n"); - else - dev_warn(dev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl); - - ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01); - if (ret) - dev_err(dev->dev, "GPIOPWMCTRL set failed\n"); - } - - clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY); - - ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff); - if (!ret) - ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff); - - if (ret) - dev_err(dev->dev, "PWM0CLKDIV set failed\n"); - else - dev_dbg(dev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n", - clkdiv, PWM_FREQUENCY); -} - -#define PWM0DUTYCYCLE 0x67 - -void tc35876x_brightness_control(struct drm_device *dev, int level) -{ - int ret; - u8 duty_val; - u8 panel_duty_val; - - level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); - - /* PWM duty cycle 0x00...0x63 corresponds to 0...99% */ - duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL; - - /* I won't pretend to understand this formula. The panel spec is quite - * bad engrish. 
- */ - panel_duty_val = (2 * level - 100) * 0xA9 / - MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56; - - ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val); - if (ret) - dev_err(&tc35876x_client->dev, "%s: ipc write fail\n", - __func__); - - if (cmi_lcd_i2c_client) { - ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, - PANEL_PWM_MAX, panel_duty_val); - if (ret < 0) - dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n", - __func__); - } -} - -void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev) -{ - if (WARN(!tc35876x_client, "%s called before probe", __func__)) - return; - - dev_dbg(&tc35876x_client->dev, "%s\n", __func__); - - if (bridge_bl_enable) - gpiod_set_value_cansleep(bridge_bl_enable, 0); - - if (backlight_voltage) - gpiod_set_value_cansleep(backlight_voltage, 0); -} - -void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = dev->dev_private; - - if (WARN(!tc35876x_client, "%s called before probe", __func__)) - return; - - dev_dbg(&tc35876x_client->dev, "%s\n", __func__); - - if (backlight_voltage) { - gpiod_set_value_cansleep(backlight_voltage, 1); - msleep(260); - } - - if (cmi_lcd_i2c_client) { - int ret; - dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n"); - /* Bit 4 is average_saving. Setting it to 1, the brightness is - * referenced to the average of the frame content. 0 means - * reference to the maximum of frame contents. Bits 3:0 are - * allow_distort. When set to a nonzero value, all color values - * between 255-allow_distort*2 and 255 are mapped to the - * 255-allow_distort*2 value. - */ - ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, - PANEL_ALLOW_DISTORT, 0x10); - if (ret < 0) - dev_err(&cmi_lcd_i2c_client->dev, - "i2c write failed (%d)\n", ret); - ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, - PANEL_BYPASS_PWMI, 0); - if (ret < 0) - dev_err(&cmi_lcd_i2c_client->dev, - "i2c write failed (%d)\n", ret); - /* Set minimum brightness value - this is tunable */ - ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client, - PANEL_PWM_MIN, 0x35); - if (ret < 0) - dev_err(&cmi_lcd_i2c_client->dev, - "i2c write failed (%d)\n", ret); - } - - if (bridge_bl_enable) - gpiod_set_value_cansleep(bridge_bl_enable, 1); - - tc35876x_brightness_control(dev, dev_priv->brightness_adjusted); -} - -static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev) -{ - struct drm_display_mode *mode; - - dev_dbg(dev->dev, "%s\n", __func__); - - mode = kzalloc(sizeof(*mode), GFP_KERNEL); - if (!mode) - return NULL; - - /* FIXME: do this properly. 
*/ - mode->hdisplay = 1280; - mode->vdisplay = 800; - mode->hsync_start = 1360; - mode->hsync_end = 1400; - mode->htotal = 1440; - mode->vsync_start = 814; - mode->vsync_end = 824; - mode->vtotal = 838; - mode->clock = 33324 << 1; - - dev_info(dev->dev, "hdisplay(w) = %d\n", mode->hdisplay); - dev_info(dev->dev, "vdisplay(h) = %d\n", mode->vdisplay); - dev_info(dev->dev, "HSS = %d\n", mode->hsync_start); - dev_info(dev->dev, "HSE = %d\n", mode->hsync_end); - dev_info(dev->dev, "htotal = %d\n", mode->htotal); - dev_info(dev->dev, "VSS = %d\n", mode->vsync_start); - dev_info(dev->dev, "VSE = %d\n", mode->vsync_end); - dev_info(dev->dev, "vtotal = %d\n", mode->vtotal); - dev_info(dev->dev, "clock = %d\n", mode->clock); - - drm_mode_set_name(mode); - drm_mode_set_crtcinfo(mode, 0); - - mode->type |= DRM_MODE_TYPE_PREFERRED; - - return mode; -} - -/* DV1 Active area 216.96 x 135.6 mm */ -#define DV1_PANEL_WIDTH 217 -#define DV1_PANEL_HEIGHT 136 - -static int tc35876x_get_panel_info(struct drm_device *dev, int pipe, - struct panel_info *pi) -{ - if (!dev || !pi) - return -EINVAL; - - pi->width_mm = DV1_PANEL_WIDTH; - pi->height_mm = DV1_PANEL_HEIGHT; - - return 0; -} - -static int tc35876x_bridge_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - dev_info(&client->dev, "%s\n", __func__); - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { - dev_err(&client->dev, "%s: i2c_check_functionality() failed\n", - __func__); - return -ENODEV; - } - - bridge_reset = devm_gpiod_get_optional(&client->dev, "bridge-reset", GPIOD_OUT_LOW); - if (IS_ERR(bridge_reset)) - return PTR_ERR(bridge_reset); - if (bridge_reset) - gpiod_set_consumer_name(bridge_reset, "tc35876x bridge reset"); - - bridge_bl_enable = devm_gpiod_get_optional(&client->dev, "bl-en", GPIOD_OUT_LOW); - if (IS_ERR(bridge_bl_enable)) - return PTR_ERR(bridge_bl_enable); - if (bridge_bl_enable) - gpiod_set_consumer_name(bridge_bl_enable, "tc35876x panel bl en"); - - backlight_voltage = devm_gpiod_get_optional(&client->dev, "vadd", GPIOD_OUT_LOW); - if (IS_ERR(backlight_voltage)) - return PTR_ERR(backlight_voltage); - if (backlight_voltage) - gpiod_set_consumer_name(backlight_voltage, "tc35876x panel vadd"); - - tc35876x_client = client; - - return 0; -} - -static int tc35876x_bridge_remove(struct i2c_client *client) -{ - dev_dbg(&client->dev, "%s\n", __func__); - - tc35876x_client = NULL; - - return 0; -} - -static const struct i2c_device_id tc35876x_bridge_id[] = { - { "i2c_disp_brig", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id); - -static struct i2c_driver tc35876x_bridge_i2c_driver = { - .driver = { - .name = "i2c_disp_brig", - }, - .id_table = tc35876x_bridge_id, - .probe = tc35876x_bridge_probe, - .remove = tc35876x_bridge_remove, -}; - -/* LCD panel I2C */ -static int cmi_lcd_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - dev_info(&client->dev, "%s\n", __func__); - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { - dev_err(&client->dev, "%s: i2c_check_functionality() failed\n", - __func__); - return -ENODEV; - } - - cmi_lcd_i2c_client = client; - - return 0; -} - -static int cmi_lcd_i2c_remove(struct i2c_client *client) -{ - dev_dbg(&client->dev, "%s\n", __func__); - - cmi_lcd_i2c_client = NULL; - - return 0; -} - -static const struct i2c_device_id cmi_lcd_i2c_id[] = { - { "cmi-lcd", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id); - -static struct i2c_driver cmi_lcd_i2c_driver = { - .driver = { - .name = "cmi-lcd", - }, - .id_table = 
cmi_lcd_i2c_id, - .probe = cmi_lcd_i2c_probe, - .remove = cmi_lcd_i2c_remove, -}; - -/* HACK to create I2C device while it's not created by platform code */ -#define CMI_LCD_I2C_ADAPTER 2 -#define CMI_LCD_I2C_ADDR 0x60 - -static int cmi_lcd_hack_create_device(void) -{ - struct i2c_adapter *adapter; - struct i2c_client *client; - struct i2c_board_info info = { - .type = "cmi-lcd", - .addr = CMI_LCD_I2C_ADDR, - }; - - pr_debug("%s\n", __func__); - - adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER); - if (!adapter) { - pr_err("%s: i2c_get_adapter(%d) failed\n", __func__, - CMI_LCD_I2C_ADAPTER); - return -EINVAL; - } - - client = i2c_new_client_device(adapter, &info); - if (IS_ERR(client)) { - pr_err("%s: creating I2C device failed\n", __func__); - i2c_put_adapter(adapter); - return PTR_ERR(client); - } - - return 0; -} - -static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = { - .dpms = mdfld_dsi_dpi_dpms, - .mode_fixup = mdfld_dsi_dpi_mode_fixup, - .prepare = mdfld_dsi_dpi_prepare, - .mode_set = mdfld_dsi_dpi_mode_set, - .commit = mdfld_dsi_dpi_commit, -}; - -const struct panel_funcs mdfld_tc35876x_funcs = { - .encoder_helper_funcs = &tc35876x_encoder_helper_funcs, - .get_config_mode = tc35876x_get_config_mode, - .get_panel_info = tc35876x_get_panel_info, -}; - -void tc35876x_init(struct drm_device *dev) -{ - int r; - - dev_dbg(dev->dev, "%s\n", __func__); - - cmi_lcd_hack_create_device(); - - r = i2c_add_driver(&cmi_lcd_i2c_driver); - if (r < 0) - dev_err(dev->dev, - "%s: i2c_add_driver() for %s failed (%d)\n", - __func__, cmi_lcd_i2c_driver.driver.name, r); - - r = i2c_add_driver(&tc35876x_bridge_i2c_driver); - if (r < 0) - dev_err(dev->dev, - "%s: i2c_add_driver() for %s failed (%d)\n", - __func__, tc35876x_bridge_i2c_driver.driver.name, r); - - tc35876x_brightness_init(dev); -} - -void tc35876x_exit(void) -{ - pr_debug("%s\n", __func__); - - i2c_del_driver(&tc35876x_bridge_i2c_driver); - - if (cmi_lcd_i2c_client) - i2c_del_driver(&cmi_lcd_i2c_driver); -} diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h deleted file mode 100644 index b14b7f9e7d1e..000000000000 --- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright © 2011 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__ -#define __MDFLD_DSI_LVDS_BRIDGE_H__ - -void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state); -void tc35876x_configure_lvds_bridge(struct drm_device *dev); -void tc35876x_brightness_control(struct drm_device *dev, int level); -void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev); -void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev); -void tc35876x_init(struct drm_device *dev); -void tc35876x_exit(void); - -extern const struct panel_funcs mdfld_tc35876x_funcs; - -#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/ diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index 096eea985b6f..fa8da0ef707e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -53,27 +53,29 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = { }; static int hibmc_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_framebuffer *fb = state->fb; - struct drm_crtc *crtc = state->crtc; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; + struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; - u32 src_w = state->src_w >> 16; - u32 src_h = state->src_h >> 16; + u32 src_w = new_plane_state->src_w >> 16; + u32 src_h = new_plane_state->src_h >> 16; if (!crtc || !fb) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - if (src_w != state->crtc_w || src_h != state->crtc_h) { + if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) { drm_dbg_atomic(plane->dev, "scale not support\n"); return -EINVAL; } - if (state->crtc_x < 0 || state->crtc_y < 0) { + if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) { drm_dbg_atomic(plane->dev, "crtc_x/y of drm_plane state is invalid\n"); return -EINVAL; } @@ -81,15 +83,15 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane, if (!crtc_state->enable) return 0; - if (state->crtc_x + state->crtc_w > + if (new_plane_state->crtc_x + new_plane_state->crtc_w > crtc_state->adjusted_mode.hdisplay || - state->crtc_y + state->crtc_h > + new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay) { drm_dbg_atomic(plane->dev, "visible portion of plane is invalid\n"); return -EINVAL; } - if (state->fb->pitches[0] % 128 != 0) { + if (new_plane_state->fb->pitches[0] % 128 != 0) { drm_dbg_atomic(plane->dev, "wrong stride with 128-byte aligned\n"); return -EINVAL; } @@ -97,19 +99,20 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane, } static void hibmc_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); u32 reg; s64 gpu_addr = 0; u32 line_l; struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev); struct drm_gem_vram_object *gbo; - if (!state->fb) + if (!new_state->fb) return; - gbo = drm_gem_vram_of_gem(state->fb->obj[0]); + gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]); gpu_addr = drm_gem_vram_offset(gbo); if (WARN_ON_ONCE(gpu_addr < 0)) @@ -117,9 +120,9 @@ static void 
hibmc_plane_atomic_update(struct drm_plane *plane, writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); - reg = state->fb->width * (state->fb->format->cpp[0]); + reg = new_state->fb->width * (new_state->fb->format->cpp[0]); - line_l = state->fb->pitches[0]; + line_l = new_state->fb->pitches[0]; writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) | HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l), priv->mmio + HIBMC_CRT_FB_WIDTH); @@ -128,7 +131,7 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL); reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK; reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT, - state->fb->format->cpp[0] * 8 / 16); + new_state->fb->format->cpp[0] * 8 / 16); writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL); } diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index aa6c53f88f7c..6dcf9ec05eec 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -549,16 +549,15 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb, u32 ch, u32 y, u32 in_h, u32 fmt) { struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0); - struct drm_format_name_buf format_name; u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en; u32 stride = fb->pitches[0]; u32 addr = (u32)obj->paddr + y * stride; DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n", ch + 1, y, in_h, stride, (u32)obj->paddr); - DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n", + DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%p4cc)\n", addr, fb->width, fb->height, fmt, - drm_get_format_name(fb->format->format, &format_name)); + &fb->format->format); /* get reg offset */ reg_ctrl = RD_CH_CTRL(ch); @@ -758,19 +757,21 @@ static void ade_disable_channel(struct kirin_plane *kplane) } static int ade_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_framebuffer *fb = state->fb; - struct drm_crtc *crtc = state->crtc; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; + struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; - u32 src_x = state->src_x >> 16; - u32 src_y = state->src_y >> 16; - u32 src_w = state->src_w >> 16; - u32 src_h = state->src_h >> 16; - int crtc_x = state->crtc_x; - int crtc_y = state->crtc_y; - u32 crtc_w = state->crtc_w; - u32 crtc_h = state->crtc_h; + u32 src_x = new_plane_state->src_x >> 16; + u32 src_y = new_plane_state->src_y >> 16; + u32 src_w = new_plane_state->src_w >> 16; + u32 src_h = new_plane_state->src_h >> 16; + int crtc_x = new_plane_state->crtc_x; + int crtc_y = new_plane_state->crtc_y; + u32 crtc_w = new_plane_state->crtc_w; + u32 crtc_h = new_plane_state->crtc_h; u32 fmt; if (!crtc || !fb) @@ -780,7 +781,7 @@ static int ade_plane_atomic_check(struct drm_plane *plane, if (fmt == ADE_FORMAT_UNSUPPORT) return -EINVAL; - crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ -803,19 +804,21 @@ static int ade_plane_atomic_check(struct drm_plane *plane, } static void ade_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = 
drm_atomic_get_new_plane_state(state, + plane); struct kirin_plane *kplane = to_kirin_plane(plane); - ade_update_channel(kplane, state->fb, state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - state->src_x >> 16, state->src_y >> 16, - state->src_w >> 16, state->src_h >> 16); + ade_update_channel(kplane, new_state->fb, new_state->crtc_x, + new_state->crtc_y, + new_state->crtc_w, new_state->crtc_h, + new_state->src_x >> 16, new_state->src_y >> 16, + new_state->src_w >> 16, new_state->src_h >> 16); } static void ade_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct kirin_plane *kplane = to_kirin_plane(plane); diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index be76054c01d8..72a38f28393f 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -19,6 +19,8 @@ config DRM_I915_WERROR config DRM_I915_DEBUG bool "Enable additional driver debugging" depends on DRM_I915 + depends on EXPERT # only for developers + depends on !COMPILE_TEST # never built by robots select DEBUG_FS select PREEMPT_COUNT select I2C_CHARDEV diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 32bd1fdffffe..921db06232c3 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -21,7 +21,6 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) subdir-ccflags-y += $(call cc-disable-warning, sign-compare) subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) -subdir-ccflags-y += $(call cc-disable-warning, uninitialized) subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror @@ -224,7 +223,9 @@ i915-y += \ display/intel_sprite.o \ display/intel_tc.o \ display/intel_vga.o \ - display/i9xx_plane.o + display/i9xx_plane.o \ + display/skl_scaler.o \ + display/skl_universal_plane.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o @@ -242,6 +243,7 @@ i915-y += \ display/icl_dsi.o \ display/intel_crt.o \ display/intel_ddi.o \ + display/intel_ddi_buf_trans.o \ display/intel_dp.o \ display/intel_dp_aux.o \ display/intel_dp_aux_backlight.o \ @@ -298,7 +300,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o no-header-test := \ display/intel_vbt_defs.h -extra-$(CONFIG_DRM_I915_WERROR) += \ +always-$(CONFIG_DRM_I915_WERROR) += \ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \ $(shell cd $(srctree)/$(src) && find * -name '*.h'))) diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index d30374df67f0..8a52beaed2da 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -256,6 +256,33 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) offset = 0; /* + * When using an X-tiled surface the plane starts to + * misbehave if the x offset + width exceeds the stride. + * hsw/bdw: underrun galore + * ilk/snb/ivb: wrap to the next tile row mid scanout + * i965/g4x: so far appear immune to this + * vlv/chv: TODO check + * + * Linear surfaces seem to work just fine, even on hsw/bdw + * despite them not using the linear offset anymore. 
+ */ + if (INTEL_GEN(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) { + u32 alignment = intel_surf_alignment(fb, 0); + int cpp = fb->format->cpp[0]; + + while ((src_x + src_w) * cpp > plane_state->color_plane[0].stride) { + if (offset == 0) { + drm_dbg_kms(&dev_priv->drm, + "Unable to find suitable display surface offset due to X-tiling\n"); + return -EINVAL; + } + + offset = intel_plane_adjust_aligned_offset(&src_x, &src_y, plane_state, 0, + offset, offset - alignment); + } + } + + /* * Put the final coordinates back so that the src * coordinate checks will see the right values. */ @@ -743,10 +770,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) int num_formats; int ret, zpos; - if (INTEL_GEN(dev_priv) >= 9) - return skl_universal_plane_create(dev_priv, pipe, - PLANE_PRIMARY); - plane = intel_plane_alloc(); if (IS_ERR(plane)) return plane; @@ -897,3 +920,122 @@ fail: return ERR_PTR(ret); } +static int i9xx_format_to_fourcc(int format) +{ + switch (format) { + case DISPPLANE_8BPP: + return DRM_FORMAT_C8; + case DISPPLANE_BGRA555: + return DRM_FORMAT_ARGB1555; + case DISPPLANE_BGRX555: + return DRM_FORMAT_XRGB1555; + case DISPPLANE_BGRX565: + return DRM_FORMAT_RGB565; + default: + case DISPPLANE_BGRX888: + return DRM_FORMAT_XRGB8888; + case DISPPLANE_RGBX888: + return DRM_FORMAT_XBGR8888; + case DISPPLANE_BGRA888: + return DRM_FORMAT_ARGB8888; + case DISPPLANE_RGBA888: + return DRM_FORMAT_ABGR8888; + case DISPPLANE_BGRX101010: + return DRM_FORMAT_XRGB2101010; + case DISPPLANE_RGBX101010: + return DRM_FORMAT_XBGR2101010; + case DISPPLANE_BGRA101010: + return DRM_FORMAT_ARGB2101010; + case DISPPLANE_RGBA101010: + return DRM_FORMAT_ABGR2101010; + case DISPPLANE_RGBX161616: + return DRM_FORMAT_XBGR16161616F; + } +} + +void +i9xx_get_initial_plane_config(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + enum pipe pipe; + u32 val, base, offset; + int fourcc, pixel_format; + unsigned int aligned_height; + struct drm_framebuffer *fb; + struct intel_framebuffer *intel_fb; + + if (!plane->get_hw_state(plane, &pipe)) + return; + + drm_WARN_ON(dev, pipe != crtc->pipe); + + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + if (!intel_fb) { + drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); + return; + } + + fb = &intel_fb->base; + + fb->dev = dev; + + val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); + + if (INTEL_GEN(dev_priv) >= 4) { + if (val & DISPPLANE_TILED) { + plane_config->tiling = I915_TILING_X; + fb->modifier = I915_FORMAT_MOD_X_TILED; + } + + if (val & DISPPLANE_ROTATE_180) + plane_config->rotation = DRM_MODE_ROTATE_180; + } + + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && + val & DISPPLANE_MIRROR) + plane_config->rotation |= DRM_MODE_REFLECT_X; + + pixel_format = val & DISPPLANE_PIXFORMAT_MASK; + fourcc = i9xx_format_to_fourcc(pixel_format); + fb->format = drm_format_info(fourcc); + + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); + base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; + } else if (INTEL_GEN(dev_priv) >= 4) { + if (plane_config->tiling) + offset = intel_de_read(dev_priv, + DSPTILEOFF(i9xx_plane)); + else + offset = intel_de_read(dev_priv, + DSPLINOFF(i9xx_plane)); + base = intel_de_read(dev_priv, 
DSPSURF(i9xx_plane)) & 0xfffff000; + } else { + base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); + } + plane_config->base = base; + + val = intel_de_read(dev_priv, PIPESRC(pipe)); + fb->width = ((val >> 16) & 0xfff) + 1; + fb->height = ((val >> 0) & 0xfff) + 1; + + val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); + fb->pitches[0] = val & 0xffffffc0; + + aligned_height = intel_fb_align_height(fb, 0, fb->height); + + plane_config->size = fb->pitches[0] * aligned_height; + + drm_dbg_kms(&dev_priv->drm, + "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", + crtc->base.name, plane->base.name, fb->width, fb->height, + fb->format->cpp[0] * 8, base, fb->pitches[0], + plane_config->size); + + plane_config->fb = intel_fb; +} + diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h index ca963c2a8457..027b66053984 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.h +++ b/drivers/gpu/drm/i915/display/i9xx_plane.h @@ -10,6 +10,8 @@ enum pipe; struct drm_i915_private; +struct intel_crtc; +struct intel_initial_plane_config; struct intel_plane; struct intel_plane_state; @@ -21,4 +23,6 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state); struct intel_plane * intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe); +void i9xx_get_initial_plane_config(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config); #endif diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 9d245a689323..7f2abc088a66 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -35,6 +35,8 @@ #include "intel_dsi.h" #include "intel_panel.h" #include "intel_vdsc.h" +#include "skl_scaler.h" +#include "skl_universal_plane.h" static int header_credits_available(struct drm_i915_private *dev_priv, enum transcoder dsi_trans) @@ -653,6 +655,24 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpll.lock); } +static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + bool clock_enabled = false; + enum phy phy; + u32 tmp; + + tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + + for_each_dsi_phy(phy, intel_dsi->phys) { + if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy))) + clock_enabled = true; + } + + return clock_enabled; +} + static void gen11_dsi_map_pll(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { @@ -1488,14 +1508,10 @@ static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi, static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? 
*/ - pipe_config->port_clock = intel_dpll_get_freq(i915, - pipe_config->shared_dpll, - &pipe_config->dpll_hw_state); + intel_ddi_get_clock(encoder, pipe_config, icl_ddi_combo_get_pll(encoder)); pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk; if (intel_dsi->dual_link) @@ -1940,6 +1956,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->pipe_mask = ~0; encoder->power_domain = POWER_DOMAIN_PORT_DSI; encoder->get_power_domains = gen11_dsi_get_power_domains; + encoder->disable_clock = gen11_dsi_gate_clocks; + encoder->is_clock_enabled = gen11_dsi_is_clock_enabled; /* register DSI connector with DRM subsystem */ drm_connector_init(dev, connector, &gen11_dsi_connector_funcs, diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index e00fdc47c0eb..27f7d7109ca3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -40,7 +40,7 @@ #include "intel_global_state.h" #include "intel_hdcp.h" #include "intel_psr.h" -#include "intel_sprite.h" +#include "skl_universal_plane.h" /** * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property. diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 987cf509337f..f3fa1441ce16 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1630,16 +1630,36 @@ static const u8 rkl_pch_tgp_ddc_pin_map[] = { [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP, }; +static const u8 adls_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, + [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, + [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, + [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, +}; + +static const u8 gen9bc_tgp_ddc_pin_map[] = { + [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP, + [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP, +}; + static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) { const u8 *ddc_pin_map; int n_entries; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) { + if (HAS_PCH_ADP(dev_priv)) { + ddc_pin_map = adls_ddc_pin_map; + n_entries = ARRAY_SIZE(adls_ddc_pin_map); + } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) { return vbt_pin; } else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) { ddc_pin_map = rkl_pch_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); + } else if (HAS_PCH_TGP(dev_priv) && IS_GEN9_BC(dev_priv)) { + ddc_pin_map = gen9bc_tgp_ddc_pin_map; + n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map); } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) { ddc_pin_map = icp_ddc_pin_map; n_entries = ARRAY_SIZE(icp_ddc_pin_map); @@ -1708,8 +1728,26 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv, [PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, [PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, }; + /* + * Alderlake S ports used in the driver are PORT_A, PORT_D, PORT_E, + * PORT_F and PORT_G, we need to map that to correct VBT sections. 
+ */ + static const int adls_port_mapping[][3] = { + [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, + [PORT_B] = { -1 }, + [PORT_C] = { -1 }, + [PORT_TC1] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, + [PORT_TC2] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, + [PORT_TC3] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, + [PORT_TC4] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 }, + }; - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) + if (IS_ALDERLAKE_S(dev_priv)) + return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping), + ARRAY_SIZE(adls_port_mapping[0]), + adls_port_mapping, + dvo_port); + else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping), ARRAY_SIZE(rkl_port_mapping[0]), rkl_port_mapping, @@ -1721,6 +1759,44 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv, dvo_port); } +static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate) +{ + switch (vbt_max_link_rate) { + default: + case BDB_230_VBT_DP_MAX_LINK_RATE_DEF: + return 0; + case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20: + return 2000000; + case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5: + return 1350000; + case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10: + return 1000000; + case BDB_230_VBT_DP_MAX_LINK_RATE_HBR3: + return 810000; + case BDB_230_VBT_DP_MAX_LINK_RATE_HBR2: + return 540000; + case BDB_230_VBT_DP_MAX_LINK_RATE_HBR: + return 270000; + case BDB_230_VBT_DP_MAX_LINK_RATE_LBR: + return 162000; + } +} + +static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) +{ + switch (vbt_max_link_rate) { + default: + case BDB_216_VBT_DP_MAX_LINK_RATE_HBR3: + return 810000; + case BDB_216_VBT_DP_MAX_LINK_RATE_HBR2: + return 540000; + case BDB_216_VBT_DP_MAX_LINK_RATE_HBR: + return 270000; + case BDB_216_VBT_DP_MAX_LINK_RATE_LBR: + return 162000; + } +} + static void parse_ddi_port(struct drm_i915_private *dev_priv, struct display_device_data *devdata, u8 bdb_version) @@ -1800,7 +1876,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, /* The VBT HDMI level shift values match the table we have. 
*/ u8 hdmi_level_shift = child->hdmi_level_shifter_value; drm_dbg_kms(&dev_priv->drm, - "VBT HDMI level shift for port %c: %d\n", + "Port %c VBT HDMI level shift: %d\n", port_name(port), hdmi_level_shift); info->hdmi_level_shift = hdmi_level_shift; @@ -1827,7 +1903,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, if (max_tmds_clock) drm_dbg_kms(&dev_priv->drm, - "VBT HDMI max TMDS clock for port %c: %d kHz\n", + "Port %c VBT HDMI max TMDS clock: %d kHz\n", port_name(port), max_tmds_clock); info->max_tmds_clock = max_tmds_clock; } @@ -1836,33 +1912,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, if (bdb_version >= 196 && child->iboost) { info->dp_boost_level = translate_iboost(child->dp_iboost_level); drm_dbg_kms(&dev_priv->drm, - "VBT (e)DP boost level for port %c: %d\n", + "Port %c VBT (e)DP boost level: %d\n", port_name(port), info->dp_boost_level); info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level); drm_dbg_kms(&dev_priv->drm, - "VBT HDMI boost level for port %c: %d\n", + "Port %c VBT HDMI boost level: %d\n", port_name(port), info->hdmi_boost_level); } /* DP max link rate for CNL+ */ if (bdb_version >= 216) { - switch (child->dp_max_link_rate) { - default: - case VBT_DP_MAX_LINK_RATE_HBR3: - info->dp_max_link_rate = 810000; - break; - case VBT_DP_MAX_LINK_RATE_HBR2: - info->dp_max_link_rate = 540000; - break; - case VBT_DP_MAX_LINK_RATE_HBR: - info->dp_max_link_rate = 270000; - break; - case VBT_DP_MAX_LINK_RATE_LBR: - info->dp_max_link_rate = 162000; - break; - } + if (bdb_version >= 230) + info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate); + else + info->dp_max_link_rate = parse_bdb_216_dp_max_link_rate(child->dp_max_link_rate); + drm_dbg_kms(&dev_priv->drm, - "VBT DP max link rate for port %c: %d\n", + "Port %c VBT DP max link rate: %d\n", port_name(port), info->dp_max_link_rate); } @@ -2098,7 +2164,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); void __iomem *p = NULL, *oprom; struct vbt_header *vbt; u16 vbt_size; @@ -2645,6 +2711,23 @@ intel_bios_is_lspcon_present(const struct drm_i915_private *i915, return HAS_LSPCON(i915) && child && child->lspcon; } +/** + * intel_bios_is_lane_reversal_needed - if lane reversal needed on port + * @i915: i915 device instance + * @port: port to check + * + * Return true if port requires lane reversal + */ +bool +intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, + enum port port) +{ + const struct child_device_config *child = + i915->vbt.ddi_port_info[port].child; + + return child && child->lane_reversal; +} + enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port) { @@ -2661,27 +2744,44 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, return aux_ch; } + /* + * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D + * map to DDI A,B,TC1,TC2 respectively. + * + * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E + * map to DDI A,TC1,TC2,TC3,TC4 respectively. + */ switch (info->alternate_aux_channel) { case DP_AUX_A: aux_ch = AUX_CH_A; break; case DP_AUX_B: - aux_ch = AUX_CH_B; + if (IS_ALDERLAKE_S(dev_priv)) + aux_ch = AUX_CH_USBC1; + else + aux_ch = AUX_CH_B; break; case DP_AUX_C: - /* - * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D - * map to DDI A,B,TC1,TC2 respectively. 
- */ - aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ? - AUX_CH_USBC1 : AUX_CH_C; + if (IS_ALDERLAKE_S(dev_priv)) + aux_ch = AUX_CH_USBC2; + else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) + aux_ch = AUX_CH_USBC1; + else + aux_ch = AUX_CH_C; break; case DP_AUX_D: - aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ? - AUX_CH_USBC2 : AUX_CH_D; + if (IS_ALDERLAKE_S(dev_priv)) + aux_ch = AUX_CH_USBC3; + else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) + aux_ch = AUX_CH_USBC2; + else + aux_ch = AUX_CH_D; break; case DP_AUX_E: - aux_ch = AUX_CH_E; + if (IS_ALDERLAKE_S(dev_priv)) + aux_ch = AUX_CH_USBC4; + else + aux_ch = AUX_CH_E; break; case DP_AUX_F: aux_ch = AUX_CH_F; diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index e29e79faa01b..f25190ecfe97 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -241,6 +241,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, enum port port); bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, enum port port); +bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, + enum port port); enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 4b5a30ac84bc..d122b9965532 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -78,7 +78,17 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->num_points = dram_info->num_qgv_points; if (IS_GEN(dev_priv, 12)) - qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16; + switch (dram_info->type) { + case INTEL_DRAM_DDR4: + qi->t_bl = 4; + break; + case INTEL_DRAM_DDR5: + qi->t_bl = 8; + break; + default: + qi->t_bl = 16; + break; + } else if (IS_GEN(dev_priv, 11)) qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 
4 : 8; @@ -142,6 +152,12 @@ static const struct intel_sa_info rkl_sa_info = { .displayrtids = 128, }; +static const struct intel_sa_info adls_sa_info = { + .deburst = 16, + .deprogbwlimit = 38, /* GB/s */ + .displayrtids = 256, +}; + static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; @@ -251,7 +267,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_ROCKETLAKE(dev_priv)) + if (IS_ALDERLAKE_S(dev_priv)) + icl_get_bw_info(dev_priv, &adls_sa_info); + else if (IS_ROCKETLAKE(dev_priv)) icl_get_bw_info(dev_priv, &rkl_sa_info); else if (IS_GEN(dev_priv, 12)) icl_get_bw_info(dev_priv, &tgl_sa_info); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 2e878cc274b7..a9019287f7d5 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -96,7 +96,7 @@ static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv, static void i85x_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 hpllcc = 0; /* @@ -138,7 +138,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv, static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -162,7 +162,7 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -256,7 +256,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) static void g33_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 }; static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 }; static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 }; @@ -305,7 +305,7 @@ fail: static void pnv_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u16 gcfgc = 0; pci_read_config_word(pdev, GCFGC, &gcfgc); @@ -339,7 +339,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); static const u8 div_3200[] = { 16, 10, 8 }; static const u8 div_4000[] = { 20, 12, 10 }; static const u8 div_5333[] = { 24, 16, 14 }; @@ -384,7 +384,7 @@ fail: static void gm45_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); unsigned int cdclk_sel; u16 tmp = 0; @@ -2145,10 +2145,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) if (IS_ERR(bw_state)) return 
PTR_ERR(bw_state); - if (cdclk_state->min_cdclk[i] == min_cdclk) + if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk) continue; - cdclk_state->min_cdclk[i] = min_cdclk; + cdclk_state->min_cdclk[crtc->pipe] = min_cdclk; ret = intel_atomic_lock_global_state(&cdclk_state->base); if (ret) @@ -2199,10 +2199,10 @@ static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state) else min_voltage_level = 0; - if (cdclk_state->min_voltage_level[i] == min_voltage_level) + if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level) continue; - cdclk_state->min_voltage_level[i] = min_voltage_level; + cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level; ret = intel_atomic_lock_global_state(&cdclk_state->base); if (ret) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 996ae0608a62..c55813c6194a 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -187,10 +187,16 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy) * Some platforms only expect PHY_MISC to be programmed for PHY-A and * PHY-B and may not even have instances of the register for the * other combo PHY's. + * + * ADL-S technically has three instances of PHY_MISC, but only requires + * that we program it for PHY A. */ - if (IS_JSL_EHL(i915) || - IS_ROCKETLAKE(i915) || - IS_DG1(i915)) + + if (IS_ALDERLAKE_S(i915)) + return phy == PHY_A; + else if (IS_JSL_EHL(i915) || + IS_ROCKETLAKE(i915) || + IS_DG1(i915)) return phy < PHY_C; return true; @@ -246,14 +252,21 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy) * RKL,DG1: * A(master) -> B(slave) * C(master) -> D(slave) + * ADL-S: + * A(master) -> B(slave), C(slave) + * D(master) -> E(slave) * * We must set the IREFGEN bit for any PHY acting as a master * to another PHY. 
*/ - if ((IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) && phy == PHY_C) + if (phy == PHY_A) return true; + else if (IS_ALDERLAKE_S(dev_priv)) + return phy == PHY_D; + else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) + return phy == PHY_C; - return phy == PHY_A; + return false; } static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 4934edd51cb0..7f3d11c5ce3e 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -38,6 +38,7 @@ #include "intel_crt.h" #include "intel_ddi.h" #include "intel_display_types.h" +#include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug.h" @@ -141,7 +142,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - intel_ddi_get_config(encoder, pipe_config); + hsw_ddi_get_config(encoder, pipe_config); pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC | @@ -1075,6 +1076,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv) crt->base.enable = hsw_enable_crt; crt->base.disable = hsw_disable_crt; crt->base.post_disable = hsw_post_disable_crt; + crt->base.enable_clock = hsw_ddi_enable_clock; + crt->base.disable_clock = hsw_ddi_disable_clock; + crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled; } else { if (HAS_PCH_SPLIT(dev_priv)) { crt->base.compute_config = pch_crt_compute_config; diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 57b0a3ebe908..3248f49999bb 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -10,6 +10,9 @@ #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> +#include "i915_trace.h" +#include "i915_vgpu.h" + #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_color.h" @@ -17,9 +20,13 @@ #include "intel_cursor.h" #include "intel_display_debugfs.h" #include "intel_display_types.h" +#include "intel_dsi.h" #include "intel_pipe_crc.h" +#include "intel_psr.h" #include "intel_sprite.h" +#include "intel_vrr.h" #include "i9xx_plane.h" +#include "skl_universal_plane.h" static void assert_vblank_disabled(struct drm_crtc *crtc) { @@ -32,6 +39,9 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)]; + if (!crtc->active) + return 0; + if (!vblank->max_vblank_count) return (u32)drm_crtc_accurate_vblank_count(&crtc->base); @@ -41,8 +51,6 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 mode_flags = crtc->mode_flags; /* * From Gen 11, In case of dsi cmd mode, frame counter wouldnt @@ -50,7 +58,8 @@ u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) * the hw counter, then we would find it updated in only * the next TE, hence switching to sw counter. 
*/ - if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1)) + if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | + I915_MODE_FLAG_DSI_USE_TE1)) return 0; /* @@ -77,12 +86,26 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) drm_crtc_set_max_vblank_count(&crtc->base, intel_crtc_max_vblank_count(crtc_state)); drm_crtc_vblank_on(&crtc->base); + + /* + * Should really happen exactly when we enable the pipe + * but we want the frame counters in the trace, and that + * requires vblank support on some platforms/outputs. + */ + trace_intel_pipe_enable(crtc); } void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + /* + * Should really happen exactly when we disable the pipe + * but we want the frame counters in the trace, and that + * requires vblank support on some platforms/outputs. + */ + trace_intel_pipe_disable(crtc); + drm_crtc_vblank_off(&crtc->base); assert_vblank_disabled(&crtc->base); } @@ -109,7 +132,6 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, crtc_state->cpu_transcoder = INVALID_TRANSCODER; crtc_state->master_transcoder = INVALID_TRANSCODER; crtc_state->hsw_workaround_pipe = INVALID_PIPE; - crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID; crtc_state->scaler_state.scaler_id = -1; crtc_state->mst_master_transcoder = INVALID_TRANSCODER; } @@ -243,7 +265,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) crtc->pipe = pipe; crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe]; - primary = intel_primary_plane_create(dev_priv, pipe); + if (INTEL_GEN(dev_priv) >= 9) + primary = skl_universal_plane_create(dev_priv, pipe, + PLANE_PRIMARY); + else + primary = intel_primary_plane_create(dev_priv, pipe); if (IS_ERR(primary)) { ret = PTR_ERR(primary); goto fail; @@ -253,7 +279,11 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) for_each_sprite(dev_priv, pipe, sprite) { struct intel_plane *plane; - plane = intel_sprite_plane_create(dev_priv, pipe, sprite); + if (INTEL_GEN(dev_priv) >= 9) + plane = skl_universal_plane_create(dev_priv, pipe, + PLANE_SPRITE0 + sprite); + else + plane = intel_sprite_plane_create(dev_priv, pipe, sprite); if (IS_ERR(plane)) { ret = PTR_ERR(plane); goto fail; @@ -323,3 +353,238 @@ fail: return ret; } + +int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + int usecs) +{ + /* paranoia */ + if (!adjusted_mode->crtc_htotal) + return 1; + + return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock, + 1000 * adjusted_mode->crtc_htotal); +} + +static int intel_mode_vblank_start(const struct drm_display_mode *mode) +{ + int vblank_start = mode->crtc_vblank_start; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vblank_start = DIV_ROUND_UP(vblank_start, 2); + + return vblank_start; +} + +/** + * intel_pipe_update_start() - start update of a set of display registers + * @new_crtc_state: the new crtc state + * + * Mark the start of an update to pipe registers that should be updated + * atomically regarding vblank. If the next vblank will happens within + * the next 100 us, this function waits until the vblank passes. + * + * After a successful call to this function, interrupts will be disabled + * until a subsequent call to intel_pipe_update_end(). That is done to + * avoid random delays. 
+ */ +void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode; + long timeout = msecs_to_jiffies_timeout(1); + int scanline, min, max, vblank_start; + wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); + bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); + DEFINE_WAIT(wait); + + if (new_crtc_state->uapi.async_flip) + return; + + if (new_crtc_state->vrr.enable) + vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); + else + vblank_start = intel_mode_vblank_start(adjusted_mode); + + /* FIXME needs to be calibrated sensibly */ + min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, + VBLANK_EVASION_TIME_US); + max = vblank_start - 1; + + if (min <= 0 || max <= 0) + goto irq_disable; + + if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base))) + goto irq_disable; + + /* + * Wait for psr to idle out after enabling the VBL interrupts + * VBL interrupts will start the PSR exit and prevent a PSR + * re-entry as well. + */ + intel_psr_wait_for_idle(new_crtc_state); + + local_irq_disable(); + + crtc->debug.min_vbl = min; + crtc->debug.max_vbl = max; + trace_intel_pipe_update_start(crtc); + + for (;;) { + /* + * prepare_to_wait() has a memory barrier, which guarantees + * other CPUs can see the task state update by the time we + * read the scanline. + */ + prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); + + scanline = intel_get_crtc_scanline(crtc); + if (scanline < min || scanline > max) + break; + + if (!timeout) { + drm_err(&dev_priv->drm, + "Potential atomic update failure on pipe %c\n", + pipe_name(crtc->pipe)); + break; + } + + local_irq_enable(); + + timeout = schedule_timeout(timeout); + + local_irq_disable(); + } + + finish_wait(wq, &wait); + + drm_crtc_vblank_put(&crtc->base); + + /* + * On VLV/CHV DSI the scanline counter would appear to + * increment approx. 1/3 of a scanline before start of vblank. + * The registers still get latched at start of vblank however. + * This means we must not write any registers on the first + * line of vblank (since not the whole line is actually in + * vblank). And unfortunately we can't use the interrupt to + * wait here since it will fire too soon. We could use the + * frame start interrupt instead since it will fire after the + * critical scanline, but that would require more changes + * in the interrupt code. So for now we'll just do the nasty + * thing and poll for the bad scanline to pass us by. 
+ * + * FIXME figure out if BXT+ DSI suffers from this as well + */ + while (need_vlv_dsi_wa && scanline == vblank_start) + scanline = intel_get_crtc_scanline(crtc); + + crtc->debug.scanline_start = scanline; + crtc->debug.start_vbl_time = ktime_get(); + crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); + + trace_intel_pipe_update_vblank_evaded(crtc); + return; + +irq_disable: + local_irq_disable(); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) +static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) +{ + u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time)); + unsigned int h; + + h = ilog2(delta >> 9); + if (h >= ARRAY_SIZE(crtc->debug.vbl.times)) + h = ARRAY_SIZE(crtc->debug.vbl.times) - 1; + crtc->debug.vbl.times[h]++; + + crtc->debug.vbl.sum += delta; + if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min) + crtc->debug.vbl.min = delta; + if (delta > crtc->debug.vbl.max) + crtc->debug.vbl.max = delta; + + if (delta > 1000 * VBLANK_EVASION_TIME_US) { + drm_dbg_kms(crtc->base.dev, + "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", + pipe_name(crtc->pipe), + div_u64(delta, 1000), + VBLANK_EVASION_TIME_US); + crtc->debug.vbl.over++; + } +} +#else +static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {} +#endif + +/** + * intel_pipe_update_end() - end update of a set of display registers + * @new_crtc_state: the new crtc state + * + * Mark the end of an update started with intel_pipe_update_start(). This + * re-enables interrupts and verifies the update was actually completed + * before a vblank. + */ +void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + enum pipe pipe = crtc->pipe; + int scanline_end = intel_get_crtc_scanline(crtc); + u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); + ktime_t end_vbl_time = ktime_get(); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + if (new_crtc_state->uapi.async_flip) + return; + + trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end); + + /* + * Incase of mipi dsi command mode, we need to set frame update + * request for every commit. + */ + if (INTEL_GEN(dev_priv) >= 11 && + intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) + icl_dsi_frame_update(new_crtc_state); + + /* We're still in the vblank-evade critical section, this can't race. + * Would be slightly nice to just grab the vblank count and arm the + * event outside of the critical section - the spinlock might spin for a + * while ... 
*/ + if (new_crtc_state->uapi.event) { + drm_WARN_ON(&dev_priv->drm, + drm_crtc_vblank_get(&crtc->base) != 0); + + spin_lock(&crtc->base.dev->event_lock); + drm_crtc_arm_vblank_event(&crtc->base, + new_crtc_state->uapi.event); + spin_unlock(&crtc->base.dev->event_lock); + + new_crtc_state->uapi.event = NULL; + } + + local_irq_enable(); + + /* Send VRR Push to terminate Vblank */ + intel_vrr_send_push(new_crtc_state); + + if (intel_vgpu_active(dev_priv)) + return; + + if (crtc->debug.start_vbl_count && + crtc->debug.start_vbl_count != end_vbl_count) { + drm_err(&dev_priv->drm, + "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n", + pipe_name(pipe), crtc->debug.start_vbl_count, + end_vbl_count, + ktime_us_delta(end_vbl_time, + crtc->debug.start_vbl_time), + crtc->debug.min_vbl, crtc->debug.max_vbl, + crtc->debug.scanline_start, scanline_end); + } + + dbg_vblank_evade(crtc, end_vbl_time); +} diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c index 67dc64df78a5..42005c1b5f0e 100644 --- a/drivers/gpu/drm/i915/display/intel_csr.c +++ b/drivers/gpu/drm/i915/display/intel_csr.c @@ -40,6 +40,10 @@ #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE +#define ADLS_CSR_PATH "i915/adls_dmc_ver2_01.bin" +#define ADLS_CSR_VERSION_REQUIRED CSR_VERSION(2, 1) +MODULE_FIRMWARE(ADLS_CSR_PATH); + #define DG1_CSR_PATH "i915/dg1_dmc_ver2_02.bin" #define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2) MODULE_FIRMWARE(DG1_CSR_PATH); @@ -640,7 +644,7 @@ static void csr_load_work_fn(struct work_struct *work) dev_priv = container_of(work, typeof(*dev_priv), csr.work); csr = &dev_priv->csr; - request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev); + request_firmware(&fw, dev_priv->csr.fw_path, dev_priv->drm.dev); parse_csr_fw(dev_priv, fw); if (dev_priv->csr.dmc_payload) { @@ -689,7 +693,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) */ intel_csr_runtime_pm_get(dev_priv); - if (IS_DG1(dev_priv)) { + if (IS_ALDERLAKE_S(dev_priv)) { + csr->fw_path = ADLS_CSR_PATH; + csr->required_version = ADLS_CSR_VERSION_REQUIRED; + csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; + } else if (IS_DG1(dev_priv)) { csr->fw_path = DG1_CSR_PATH; csr->required_version = DG1_CSR_VERSION_REQUIRED; csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index fbd857f2bdb2..64a952db8528 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -28,17 +28,18 @@ #include <drm/drm_scdc_helper.h> #include "i915_drv.h" -#include "i915_trace.h" #include "intel_audio.h" #include "intel_combo_phy.h" #include "intel_connector.h" #include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" #include "intel_display_types.h" #include "intel_dp.h" -#include "intel_dp_mst.h" #include "intel_dp_link_training.h" +#include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" +#include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hdcp.h" @@ -52,12 +53,8 @@ #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vrr.h" - -struct ddi_buf_trans { - u32 trans1; /* balance leg enable, de-emph level */ - u32 trans2; /* vref sel, vswing */ - u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */ -}; +#include "skl_scaler.h" +#include "skl_universal_plane.h" static const u8 index_to_dp_signal_levels[] = { [0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | 
DP_TRAIN_PRE_EMPH_LEVEL_0, @@ -72,1389 +69,15 @@ static const u8 index_to_dp_signal_levels[] = { [9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0, }; -/* HDMI/DVI modes ignore everything but the last 2 items. So we share - * them for both DP and FDI transports, allowing those ports to - * automatically adapt to HDMI connections as well - */ -static const struct ddi_buf_trans hsw_ddi_translations_dp[] = { - { 0x00FFFFFF, 0x0006000E, 0x0 }, - { 0x00D75FFF, 0x0005000A, 0x0 }, - { 0x00C30FFF, 0x00040006, 0x0 }, - { 0x80AAAFFF, 0x000B0000, 0x0 }, - { 0x00FFFFFF, 0x0005000A, 0x0 }, - { 0x00D75FFF, 0x000C0004, 0x0 }, - { 0x80C30FFF, 0x000B0000, 0x0 }, - { 0x00FFFFFF, 0x00040006, 0x0 }, - { 0x80D75FFF, 0x000B0000, 0x0 }, -}; - -static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = { - { 0x00FFFFFF, 0x0007000E, 0x0 }, - { 0x00D75FFF, 0x000F000A, 0x0 }, - { 0x00C30FFF, 0x00060006, 0x0 }, - { 0x00AAAFFF, 0x001E0000, 0x0 }, - { 0x00FFFFFF, 0x000F000A, 0x0 }, - { 0x00D75FFF, 0x00160004, 0x0 }, - { 0x00C30FFF, 0x001E0000, 0x0 }, - { 0x00FFFFFF, 0x00060006, 0x0 }, - { 0x00D75FFF, 0x001E0000, 0x0 }, -}; - -static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = { - /* Idx NT mV d T mV d db */ - { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */ - { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */ - { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */ - { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */ - { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */ - { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */ - { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */ - { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */ - { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */ - { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */ - { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */ - { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */ -}; - -static const struct ddi_buf_trans bdw_ddi_translations_edp[] = { - { 0x00FFFFFF, 0x00000012, 0x0 }, - { 0x00EBAFFF, 0x00020011, 0x0 }, - { 0x00C71FFF, 0x0006000F, 0x0 }, - { 0x00AAAFFF, 0x000E000A, 0x0 }, - { 0x00FFFFFF, 0x00020011, 0x0 }, - { 0x00DB6FFF, 0x0005000F, 0x0 }, - { 0x00BEEFFF, 0x000A000C, 0x0 }, - { 0x00FFFFFF, 0x0005000F, 0x0 }, - { 0x00DB6FFF, 0x000A000C, 0x0 }, -}; - -static const struct ddi_buf_trans bdw_ddi_translations_dp[] = { - { 0x00FFFFFF, 0x0007000E, 0x0 }, - { 0x00D75FFF, 0x000E000A, 0x0 }, - { 0x00BEFFFF, 0x00140006, 0x0 }, - { 0x80B2CFFF, 0x001B0002, 0x0 }, - { 0x00FFFFFF, 0x000E000A, 0x0 }, - { 0x00DB6FFF, 0x00160005, 0x0 }, - { 0x80C71FFF, 0x001A0002, 0x0 }, - { 0x00F7DFFF, 0x00180004, 0x0 }, - { 0x80D75FFF, 0x001B0002, 0x0 }, -}; - -static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = { - { 0x00FFFFFF, 0x0001000E, 0x0 }, - { 0x00D75FFF, 0x0004000A, 0x0 }, - { 0x00C30FFF, 0x00070006, 0x0 }, - { 0x00AAAFFF, 0x000C0000, 0x0 }, - { 0x00FFFFFF, 0x0004000A, 0x0 }, - { 0x00D75FFF, 0x00090004, 0x0 }, - { 0x00C30FFF, 0x000C0000, 0x0 }, - { 0x00FFFFFF, 0x00070006, 0x0 }, - { 0x00D75FFF, 0x000C0000, 0x0 }, -}; - -static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { - /* Idx NT mV d T mV df db */ - { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */ - { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */ - { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */ - { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */ - { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */ - { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */ - { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */ - { 0x00FFFFFF, 0x00140006, 0x0 },/* 
7: 800 800 0 */ - { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */ - { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */ -}; - -/* Skylake H and S */ -static const struct ddi_buf_trans skl_ddi_translations_dp[] = { - { 0x00002016, 0x000000A0, 0x0 }, - { 0x00005012, 0x0000009B, 0x0 }, - { 0x00007011, 0x00000088, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, - { 0x00002016, 0x0000009B, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000C0, 0x1 }, - { 0x00002016, 0x000000DF, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, -}; - -/* Skylake U */ -static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { - { 0x0000201B, 0x000000A2, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x1 }, - { 0x80009010, 0x000000C0, 0x1 }, - { 0x0000201B, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, - { 0x80007011, 0x000000C0, 0x1 }, - { 0x00002016, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, -}; - -/* Skylake Y */ -static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { - { 0x00000018, 0x000000A2, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x3 }, - { 0x80009010, 0x000000C0, 0x3 }, - { 0x00000018, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, - { 0x80007011, 0x000000C0, 0x3 }, - { 0x00000018, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, -}; - -/* Kabylake H and S */ -static const struct ddi_buf_trans kbl_ddi_translations_dp[] = { - { 0x00002016, 0x000000A0, 0x0 }, - { 0x00005012, 0x0000009B, 0x0 }, - { 0x00007011, 0x00000088, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, - { 0x00002016, 0x0000009B, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000C0, 0x1 }, - { 0x00002016, 0x00000097, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, -}; - -/* Kabylake U */ -static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = { - { 0x0000201B, 0x000000A1, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x3 }, - { 0x80009010, 0x000000C0, 0x3 }, - { 0x0000201B, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, - { 0x80007011, 0x000000C0, 0x3 }, - { 0x00002016, 0x0000004F, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, -}; - -/* Kabylake Y */ -static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = { - { 0x00001017, 0x000000A1, 0x0 }, - { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x3 }, - { 0x8000800F, 0x000000C0, 0x3 }, - { 0x00001017, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, - { 0x80007011, 0x000000C0, 0x3 }, - { 0x00001017, 0x0000004C, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, -}; - -/* - * Skylake/Kabylake H and S - * eDP 1.4 low vswing translation parameters - */ -static const struct ddi_buf_trans skl_ddi_translations_edp[] = { - { 0x00000018, 0x000000A8, 0x0 }, - { 0x00004013, 0x000000A9, 0x0 }, - { 0x00007011, 0x000000A2, 0x0 }, - { 0x00009010, 0x0000009C, 0x0 }, - { 0x00000018, 0x000000A9, 0x0 }, - { 0x00006013, 0x000000A2, 0x0 }, - { 0x00007011, 0x000000A6, 0x0 }, - { 0x00000018, 0x000000AB, 0x0 }, - { 0x00007013, 0x0000009F, 0x0 }, - { 0x00000018, 0x000000DF, 0x0 }, -}; - -/* - * Skylake/Kabylake U - * eDP 1.4 low vswing translation parameters - */ -static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = { - { 0x00000018, 0x000000A8, 0x0 }, - { 0x00004013, 0x000000A9, 0x0 }, - { 0x00007011, 0x000000A2, 0x0 }, - { 0x00009010, 0x0000009C, 0x0 }, - { 0x00000018, 0x000000A9, 0x0 }, - { 0x00006013, 0x000000A2, 0x0 }, - { 0x00007011, 0x000000A6, 0x0 }, - { 0x00002016, 0x000000AB, 0x0 }, - { 0x00005013, 0x0000009F, 0x0 }, - { 0x00000018, 
0x000000DF, 0x0 }, -}; - -/* - * Skylake/Kabylake Y - * eDP 1.4 low vswing translation parameters - */ -static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = { - { 0x00000018, 0x000000A8, 0x0 }, - { 0x00004013, 0x000000AB, 0x0 }, - { 0x00007011, 0x000000A4, 0x0 }, - { 0x00009010, 0x000000DF, 0x0 }, - { 0x00000018, 0x000000AA, 0x0 }, - { 0x00006013, 0x000000A4, 0x0 }, - { 0x00007011, 0x0000009D, 0x0 }, - { 0x00000018, 0x000000A0, 0x0 }, - { 0x00006012, 0x000000DF, 0x0 }, - { 0x00000018, 0x0000008A, 0x0 }, -}; - -/* Skylake/Kabylake U, H and S */ -static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { - { 0x00000018, 0x000000AC, 0x0 }, - { 0x00005012, 0x0000009D, 0x0 }, - { 0x00007011, 0x00000088, 0x0 }, - { 0x00000018, 0x000000A1, 0x0 }, - { 0x00000018, 0x00000098, 0x0 }, - { 0x00004013, 0x00000088, 0x0 }, - { 0x80006012, 0x000000CD, 0x1 }, - { 0x00000018, 0x000000DF, 0x0 }, - { 0x80003015, 0x000000CD, 0x1 }, /* Default */ - { 0x80003015, 0x000000C0, 0x1 }, - { 0x80000018, 0x000000C0, 0x1 }, -}; - -/* Skylake/Kabylake Y */ -static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { - { 0x00000018, 0x000000A1, 0x0 }, - { 0x00005012, 0x000000DF, 0x0 }, - { 0x80007011, 0x000000CB, 0x3 }, - { 0x00000018, 0x000000A4, 0x0 }, - { 0x00000018, 0x0000009D, 0x0 }, - { 0x00004013, 0x00000080, 0x0 }, - { 0x80006013, 0x000000C0, 0x3 }, - { 0x00000018, 0x0000008A, 0x0 }, - { 0x80003015, 0x000000C0, 0x3 }, /* Default */ - { 0x80003015, 0x000000C0, 0x3 }, - { 0x80000018, 0x000000C0, 0x3 }, -}; - -struct bxt_ddi_buf_trans { - u8 margin; /* swing value */ - u8 scale; /* scale value */ - u8 enable; /* scale enable */ - u8 deemphasis; -}; - -static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { - /* Idx NT mV diff db */ - { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ - { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */ - { 104, 0x9A, 0, 64, }, /* 2: 400 6 */ - { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */ - { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ - { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */ - { 154, 0x9A, 0, 64, }, /* 6: 600 6 */ - { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ - { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */ - { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ -}; - -static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = { - /* Idx NT mV diff db */ - { 26, 0, 0, 128, }, /* 0: 200 0 */ - { 38, 0, 0, 112, }, /* 1: 200 1.5 */ - { 48, 0, 0, 96, }, /* 2: 200 4 */ - { 54, 0, 0, 69, }, /* 3: 200 6 */ - { 32, 0, 0, 128, }, /* 4: 250 0 */ - { 48, 0, 0, 104, }, /* 5: 250 1.5 */ - { 54, 0, 0, 85, }, /* 6: 250 4 */ - { 43, 0, 0, 128, }, /* 7: 300 0 */ - { 54, 0, 0, 101, }, /* 8: 300 1.5 */ - { 48, 0, 0, 128, }, /* 9: 300 0 */ -}; - -/* BSpec has 2 recommended values - entries 0 and 8. - * Using the entry with higher vswing. 
- */ -static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { - /* Idx NT mV diff db */ - { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ - { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */ - { 52, 0x9A, 0, 64, }, /* 2: 400 6 */ - { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */ - { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ - { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */ - { 77, 0x9A, 0, 64, }, /* 6: 600 6 */ - { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ - { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */ - { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ -}; - -struct cnl_ddi_buf_trans { - u8 dw2_swing_sel; - u8 dw7_n_scalar; - u8 dw4_cursor_coeff; - u8 dw4_post_cursor_2; - u8 dw4_post_cursor_1; -}; - -/* Voltage Swing Programming for VccIO 0.85V for DP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ - { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ - { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ - { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ - { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ - { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.85V for HDMI */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ - { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ - { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.85V for eDP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ - { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ - { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ - { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */ - { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */ - { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.95V for DP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ - { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ - { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ - { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ - { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ - { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.95V for HDMI */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ - { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ - { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ - { 0xA, 
0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ - { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ - { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ - { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ - { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ - { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 0.95V for eDP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ - { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ - { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ - { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ - { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ - { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */ - { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 1.05V for DP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ - { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ - { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */ - { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ - { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */ - { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */ - { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 1.05V for HDMI */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ - { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ - { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ - { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ - { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ - { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ - { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ - { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ - { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ -}; - -/* Voltage Swing Programming for VccIO 1.05V for eDP */ -static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { - /* NT mV Trans mV db */ - { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ - { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ - { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ - { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ - { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ - { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ -}; - -/* icl_combo_phy_ddi_translations */ -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2B, 0x00, 0x14 
}, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { - /* NT mV Trans mV db */ - { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ - { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ - { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ - { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ - { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ - { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ -}; - -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { - /* NT mV Trans mV db */ - { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ - { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ - { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ - { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ -}; - -static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = { - /* NT mV Trans mV db */ - { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ - { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */ - { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ - { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = { - /* NT mV Trans mV db */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ - { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ - { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ - { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ - { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ - { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ - { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ -}; - -static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = { - /* NT mV Trans mV db */ - { 0x8, 0x7F, 0x3F, 0x00, 
0x00 }, /* 200 200 0.0 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */ - { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */ - { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */ - { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */ - { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */ - { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ - { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ -}; - -static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = { - /* NT mV Trans mV db */ - { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ - { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { - /* NT mV Trans mV db */ - { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ - { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ - { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -struct icl_mg_phy_ddi_buf_trans { - u32 cri_txdeemph_override_11_6; - u32 cri_txdeemph_override_5_0; - u32 cri_txdeemph_override_17_12; -}; - -static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = { - /* Voltage swing pre-emphasis */ - { 0x18, 0x00, 0x00 }, /* 0 0 */ - { 0x1D, 0x00, 0x05 }, /* 0 1 */ - { 0x24, 0x00, 0x0C }, /* 0 2 */ - { 0x2B, 0x00, 0x14 }, /* 0 3 */ - { 0x21, 0x00, 0x00 }, /* 1 0 */ - { 0x2B, 0x00, 0x08 }, /* 1 1 */ - { 0x30, 0x00, 0x0F }, /* 1 2 */ - { 0x31, 0x00, 0x03 }, /* 2 0 */ - { 0x34, 0x00, 0x0B }, /* 2 1 */ - { 0x3F, 0x00, 0x00 }, /* 3 0 */ -}; - -static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = { - /* Voltage swing pre-emphasis */ - { 0x18, 0x00, 0x00 }, /* 0 0 */ - { 0x1D, 0x00, 0x05 }, /* 0 1 */ - { 0x24, 0x00, 0x0C }, /* 0 2 */ - { 0x2B, 0x00, 0x14 }, /* 0 3 */ - { 0x26, 0x00, 0x00 }, /* 1 0 */ - { 0x2C, 0x00, 0x07 }, /* 1 1 */ - { 0x33, 0x00, 0x0C }, /* 1 2 */ - { 0x2E, 0x00, 0x00 }, /* 2 0 */ - { 0x36, 0x00, 0x09 }, /* 2 1 */ - { 0x3F, 0x00, 0x00 }, /* 3 0 */ -}; - -static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = { - /* HDMI Preset VS Pre-emph */ - { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */ - { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */ - { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */ - { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */ - { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */ - { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */ - { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */ - { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */ - { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */ - { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */ -}; - -struct tgl_dkl_phy_ddi_buf_trans { - u32 dkl_vswing_control; - u32 dkl_preshoot_control; - u32 dkl_de_emphasis_control; -}; - 
-static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = { - /* VS pre-emp Non-trans mV Pre-emph dB */ - { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ - { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ - { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ - { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */ - { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ - { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ - { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ - { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ - { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ - { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ -}; - -static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = { - /* VS pre-emp Non-trans mV Pre-emph dB */ - { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ - { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ - { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ - { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */ - { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ - { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ - { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ - { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ - { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ - { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ -}; - -static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = { - /* HDMI Preset VS Pre-emph */ - { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */ - { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */ - { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */ - { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */ - { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */ - { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */ - { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */ - { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */ - { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */ - { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */ -}; - -static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = { - /* NT mV Trans mV db */ - { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ - { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ - { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ - { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ - { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ - { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 
600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -/* - * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries - * that DisplayPort specification requires - */ -static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = { - /* VS pre-emp */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */ -}; - -static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = { - /* NT mV Trans mV db */ - { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ - { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ - { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */ - { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { - /* NT mV Trans mV db */ - { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ - { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ - { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */ - { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ - { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */ - { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ -}; - -static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table) -{ - return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl; -} - -static const struct ddi_buf_trans * -bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp); - return bdw_ddi_translations_edp; - } else { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - return bdw_ddi_translations_dp; - } -} - -static const struct ddi_buf_trans * -skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_SKL_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); - return skl_y_ddi_translations_dp; - } else if (IS_SKL_ULT(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); - return skl_u_ddi_translations_dp; - } else { - *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); - return skl_ddi_translations_dp; - } -} - -static const struct ddi_buf_trans * -kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv) || - IS_CML_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); - return kbl_y_ddi_translations_dp; - } else if (IS_KBL_ULT(dev_priv) || - IS_CFL_ULT(dev_priv) || - IS_CML_ULT(dev_priv)) { - *n_entries 
= ARRAY_SIZE(kbl_u_ddi_translations_dp); - return kbl_u_ddi_translations_dp; - } else { - *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp); - return kbl_ddi_translations_dp; - } -} - -static const struct ddi_buf_trans * -skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - if (IS_SKL_ULX(dev_priv) || - IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv) || - IS_CML_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); - return skl_y_ddi_translations_edp; - } else if (IS_SKL_ULT(dev_priv) || - IS_KBL_ULT(dev_priv) || - IS_CFL_ULT(dev_priv) || - IS_CML_ULT(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); - return skl_u_ddi_translations_edp; - } else { - *n_entries = ARRAY_SIZE(skl_ddi_translations_edp); - return skl_ddi_translations_edp; - } - } - - if (IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) - return kbl_get_buf_trans_dp(encoder, n_entries); - else - return skl_get_buf_trans_dp(encoder, n_entries); -} - -static const struct ddi_buf_trans * -skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) -{ - if (IS_SKL_ULX(dev_priv) || - IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv) || - IS_CML_ULX(dev_priv)) { - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); - return skl_y_ddi_translations_hdmi; - } else { - *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); - return skl_ddi_translations_hdmi; - } -} - -static int skl_buf_trans_num_entries(enum port port, int n_entries) -{ - /* Only DDIA and DDIE can select the 10th register with DP */ - if (port == PORT_A || port == PORT_E) - return min(n_entries, 10); - else - return min(n_entries, 9); -} - -static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) { - const struct ddi_buf_trans *ddi_translations = - kbl_get_buf_trans_dp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); - return ddi_translations; - } else if (IS_SKYLAKE(dev_priv)) { - const struct ddi_buf_trans *ddi_translations = - skl_get_buf_trans_dp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); - return ddi_translations; - } else if (IS_BROADWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - return bdw_ddi_translations_dp; - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); - return hsw_ddi_translations_dp; - } - - *n_entries = 0; - return NULL; -} - -static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_GEN9_BC(dev_priv)) { - const struct ddi_buf_trans *ddi_translations = - skl_get_buf_trans_edp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); - return ddi_translations; - } else if (IS_BROADWELL(dev_priv)) { - return bdw_get_buf_trans_edp(encoder, n_entries); - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); - return hsw_ddi_translations_dp; - } - - *n_entries = 0; - return NULL; -} - -static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, - int *n_entries) -{ - if 
(IS_BROADWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); - return bdw_ddi_translations_fdi; - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); - return hsw_ddi_translations_fdi; - } - - *n_entries = 0; - return NULL; -} - -static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder, - int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (IS_GEN9_BC(dev_priv)) { - return skl_get_buf_trans_hdmi(dev_priv, n_entries); - } else if (IS_BROADWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - return bdw_ddi_translations_hdmi; - } else if (IS_HASWELL(dev_priv)) { - *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); - return hsw_ddi_translations_hdmi; - } - - *n_entries = 0; - return NULL; -} - -static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) -{ - *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); - return bxt_ddi_translations_dp; -} - -static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); - return bxt_ddi_translations_edp; - } - - return bxt_get_buf_trans_dp(encoder, n_entries); -} - -static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) -{ - *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi); - return bxt_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; - - if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V); - return cnl_ddi_translations_hdmi_0_85V; - } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V); - return cnl_ddi_translations_hdmi_0_95V; - } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V); - return cnl_ddi_translations_hdmi_1_05V; - } else { - *n_entries = 1; /* shut up gcc */ - MISSING_CASE(voltage); - } - return NULL; -} - -static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; - - if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V); - return cnl_ddi_translations_dp_0_85V; - } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V); - return cnl_ddi_translations_dp_0_95V; - } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V); - return cnl_ddi_translations_dp_1_05V; - } else { - *n_entries = 1; /* shut up gcc */ - MISSING_CASE(voltage); - } - return NULL; -} - -static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; - - if (dev_priv->vbt.edp.low_vswing) { - if (voltage == VOLTAGE_INFO_0_85V) 
{ - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); - return cnl_ddi_translations_edp_0_85V; - } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); - return cnl_ddi_translations_edp_0_95V; - } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V); - return cnl_ddi_translations_edp_1_05V; - } else { - *n_entries = 1; /* shut up gcc */ - MISSING_CASE(voltage); - } - return NULL; - } else { - return cnl_get_buf_trans_dp(encoder, n_entries); - } -} - -static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); - return icl_combo_phy_ddi_translations_dp_hbr2; -} - -static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (crtc_state->port_clock > 540000) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); - return icl_combo_phy_ddi_translations_edp_hbr3; - } else if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); - return icl_combo_phy_ddi_translations_edp_hbr2; - } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3); - return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3; - } else if (IS_DG1(dev_priv)) { - *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr); - return dg1_combo_phy_ddi_translations_dp_rbr_hbr; - } - - return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); - else - return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct icl_mg_phy_ddi_buf_trans * -icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi); - return icl_mg_phy_ddi_translations_hdmi; -} - -static const struct icl_mg_phy_ddi_buf_trans * -icl_get_mg_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if (crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3); - return icl_mg_phy_ddi_translations_hbr2_hbr3; - } else { - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr); - return icl_mg_phy_ddi_translations_rbr_hbr; - } -} - -static const struct icl_mg_phy_ddi_buf_trans * -icl_get_mg_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries); - else - return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp); - return ehl_combo_phy_ddi_translations_dp; -} - -static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); - return icl_combo_phy_ddi_translations_edp_hbr2; - } - - return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); - else - return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * -jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); - return icl_combo_phy_ddi_translations_dp_hbr2; -} - -static const struct cnl_ddi_buf_trans * -jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (dev_priv->vbt.edp.low_vswing) { - if (crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2); - return jsl_combo_phy_ddi_translations_edp_hbr2; - } else { - *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr); - return jsl_combo_phy_ddi_translations_edp_hbr; - } - } - - return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -jsl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); - else - return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, 
- const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; -} - -static const struct cnl_ddi_buf_trans * -tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - if (crtc_state->port_clock > 270000) { - if (IS_ROCKETLAKE(dev_priv)) { - *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3); - return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3; - } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { - *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); - return tgl_uy_combo_phy_ddi_translations_dp_hbr2; - } else { - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); - return tgl_combo_phy_ddi_translations_dp_hbr2; - } - } else { - if (IS_ROCKETLAKE(dev_priv)) { - *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr); - return rkl_combo_phy_ddi_translations_dp_hbr; - } else { - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); - return tgl_combo_phy_ddi_translations_dp_hbr; - } - } -} - -static const struct cnl_ddi_buf_trans * -tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - if (crtc_state->port_clock > 540000) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); - return icl_combo_phy_ddi_translations_edp_hbr3; - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); - return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; - } else if (dev_priv->vbt.edp.low_vswing) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); - return icl_combo_phy_ddi_translations_edp_hbr2; - } - - return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct cnl_ddi_buf_trans * -tgl_get_combo_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); - else - return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); -} - -static const struct tgl_dkl_phy_ddi_buf_trans * -tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); - return tgl_dkl_phy_hdmi_ddi_trans; -} - -static const struct tgl_dkl_phy_ddi_buf_trans * -tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if (crtc_state->port_clock > 270000) { - *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2); - return tgl_dkl_phy_dp_ddi_trans_hbr2; - } else { - *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); - return tgl_dkl_phy_dp_ddi_trans; - } -} - -static const struct tgl_dkl_phy_ddi_buf_trans * -tgl_get_dkl_buf_trans(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *n_entries) -{ - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return 
tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries); - else - return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); -} - static int intel_ddi_hdmi_level(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int n_entries, level, default_entry; - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (INTEL_GEN(dev_priv) >= 12) { - if (intel_phy_is_combo(dev_priv, phy)) - tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries); - else - tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries); - default_entry = n_entries - 1; - } else if (INTEL_GEN(dev_priv) == 11) { - if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries); - else - icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries); - default_entry = n_entries - 1; - } else if (IS_CANNONLAKE(dev_priv)) { - cnl_get_buf_trans_hdmi(encoder, &n_entries); - default_entry = n_entries - 1; - } else if (IS_GEN9_LP(dev_priv)) { - bxt_get_buf_trans_hdmi(encoder, &n_entries); - default_entry = n_entries - 1; - } else if (IS_GEN9_BC(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - default_entry = 8; - } else if (IS_BROADWELL(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - default_entry = 7; - } else if (IS_HASWELL(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - default_entry = 6; - } else { - drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n"); + n_entries = intel_ddi_hdmi_num_entries(encoder, crtc_state, &default_entry); + if (n_entries == 0) return 0; - } - - if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0)) - return 0; - level = intel_bios_hdmi_level_shift(encoder); if (level < 0) level = default_entry; @@ -1470,8 +93,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder, * values in advance. This function programs the correct values for * DP/eDP/FDI use cases. */ -static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; @@ -1533,8 +156,8 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, ddi_translations[level].trans2); } -static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, - enum port port) +void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, + enum port port) { if (IS_BROXTON(dev_priv)) { udelay(16); @@ -1622,141 +245,6 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder, } } -/* Starting with Haswell, different DDI ports can work in FDI mode for - * connection to the PCH-located connectors. For this, it is necessary to train - * both the DDI port and PCH receiver for the desired DDI buffer settings. - * - * The recommended port to work in FDI mode is DDI E, which we use here. 
Also, - * please note that when FDI mode is active on DDI E, it shares 2 lines with - * DDI A (which is used for eDP) - */ - -void hsw_fdi_link_train(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 temp, i, rx_ctl_val, ddi_pll_sel; - - intel_prepare_dp_ddi_buffers(encoder, crtc_state); - - /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the - * mode set "sequence for CRT port" document: - * - TP1 to TP2 time with the default value - * - FDI delay to 90h - * - * WaFDIAutoLinkSetTimingOverrride:hsw - */ - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), - FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - - /* Enable the PCH Receiver FDI PLL */ - rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | - FDI_RX_PLL_ENABLE | - FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); - intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); - udelay(220); - - /* Switch from Rawclk to PCDclk */ - rx_ctl_val |= FDI_PCDCLK; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); - - /* Configure Port Clock Select */ - ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll); - intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel); - drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL); - - /* Start the training iterating through available voltages and emphasis, - * testing each value twice. */ - for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) { - /* Configure DP_TP_CTL with auto-training */ - intel_de_write(dev_priv, DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | - DP_TP_CTL_ENHANCED_FRAME_ENABLE | - DP_TP_CTL_LINK_TRAIN_PAT1 | - DP_TP_CTL_ENABLE); - - /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. - * DDI E does not support port reversal, the functionality is - * achieved on the PCH side in FDI_RX_CTL, so no need to set the - * port reversal bit */ - intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), - DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2)); - intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); - - udelay(600); - - /* Program PCH FDI Receiver TU */ - intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); - - /* Enable PCH FDI Receiver with auto-training */ - rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); - intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); - - /* Wait for FDI receiver lane calibration */ - udelay(30); - - /* Unset FDI_RX_MISC pwrdn lanes */ - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); - intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); - - /* Wait for FDI auto training time */ - udelay(5); - - temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E)); - if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { - drm_dbg_kms(&dev_priv->drm, - "FDI link training done on step %d\n", i); - break; - } - - /* - * Leave things enabled even if we failed to train FDI. - * Results in less fireworks from the state checker. 
- */ - if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) { - drm_err(&dev_priv->drm, "FDI link training failed!\n"); - break; - } - - rx_ctl_val &= ~FDI_RX_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); - intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); - - temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); - temp &= ~DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp); - intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); - - /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ - temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E)); - temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - temp |= DP_TP_CTL_LINK_TRAIN_PAT1; - intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp); - intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); - - intel_wait_ddi_buf_idle(dev_priv, PORT_E); - - /* Reset FDI_RX_MISC pwrdn lanes */ - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); - intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); - } - - /* Enable normal pixel sending for FDI */ - intel_de_write(dev_priv, DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | - DP_TP_CTL_LINK_TRAIN_NORMAL | - DP_TP_CTL_ENHANCED_FRAME_ENABLE | - DP_TP_CTL_ENABLE); -} - static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { @@ -1815,25 +303,6 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) pipe_config->hw.adjusted_mode.crtc_clock = dotclock; } -static void intel_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - - if (intel_phy_is_tc(dev_priv, phy) && - intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) == - DPLL_ID_ICL_TBTPLL) - pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv, - encoder->port); - else - pipe_config->port_clock = - intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll, - &pipe_config->dpll_hw_state); - - ddi_dotclock_get(pipe_config); -} - void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -2480,13 +949,7 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, enum port port = encoder->port; int n_entries; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries); - else - ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries); - + ddi_translations = bxt_get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) @@ -2523,15 +986,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, else icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); } else if (IS_CANNONLAKE(dev_priv)) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - cnl_get_buf_trans_edp(encoder, &n_entries); - else - cnl_get_buf_trans_dp(encoder, &n_entries); + cnl_get_buf_trans(encoder, crtc_state, &n_entries); } else if (IS_GEN9_LP(dev_priv)) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - bxt_get_buf_trans_edp(encoder, 
&n_entries); - else - bxt_get_buf_trans_dp(encoder, &n_entries); + bxt_get_buf_trans(encoder, crtc_state, &n_entries); } else { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) intel_ddi_get_buf_trans_edp(encoder, &n_entries); @@ -2569,12 +1026,7 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, int n_entries, ln; u32 val; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries); - else - ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries); + ddi_translations = cnl_get_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; @@ -2827,6 +1279,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int n_entries, ln; u32 val; + if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + return; + ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) @@ -2962,6 +1417,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, u32 val, dpcnt_mask, dpcnt_val; int n_entries, ln; + if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + return; + ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) @@ -3104,196 +1562,580 @@ hsw_set_signal_levels(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); } -static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, - enum phy phy) +static void _cnl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg, + u32 clk_sel_mask, u32 clk_sel, u32 clk_off) { - if (IS_ROCKETLAKE(dev_priv)) { - return RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - } else if (intel_phy_is_combo(dev_priv, phy)) { - return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - } else if (intel_phy_is_tc(dev_priv, phy)) { - enum tc_port tc_port = intel_port_to_tc(dev_priv, - (enum port)phy); + mutex_lock(&i915->dpll.lock); - return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port); - } + intel_de_rmw(i915, reg, clk_sel_mask, clk_sel); - return 0; + /* + * "This step and the step before must be + * done with separate register writes." 
+ */ + intel_de_rmw(i915, reg, clk_off, 0); + + mutex_unlock(&i915->dpll.lock); +} + +static void _cnl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg, + u32 clk_off) +{ + mutex_lock(&i915->dpll.lock); + + intel_de_rmw(i915, reg, 0, clk_off); + + mutex_unlock(&i915->dpll.lock); +} + +static bool _cnl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg, + u32 clk_off) +{ + return !(intel_de_read(i915, reg) & clk_off); +} + +static struct intel_shared_dpll * +_cnl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg, + u32 clk_sel_mask, u32 clk_sel_shift) +{ + enum intel_dpll_id id; + + id = (intel_de_read(i915, reg) & clk_sel_mask) >> clk_sel_shift; + + return intel_get_shared_dpll_by_id(i915, id); } -static void dg1_map_plls_to_ports(struct intel_encoder *encoder, +static void adls_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - u32 val; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (drm_WARN_ON(&i915->drm, !pll)) + return; + + _cnl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy), + ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy), + pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy), + ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} + +static void adls_ddi_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + _cnl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy), + ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} + +static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + return _cnl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy), + ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} + +static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + return _cnl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy), + ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy), + ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy)); +} + +static void rkl_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (drm_WARN_ON(&i915->drm, !pll)) + return; + + _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0, + RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), + RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), + RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} + +static void rkl_ddi_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, + RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} + +static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + return _cnl_ddi_is_clock_enabled(i915, 
ICL_DPCLKA_CFGCR0, + RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} + +static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0, + RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), + RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)); +} + +static void dg1_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (drm_WARN_ON(&i915->drm, !pll)) + return; /* * If we fail this, something went very wrong: first 2 PLLs should be * used by first 2 phys and last 2 PLLs by last phys */ - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(&i915->drm, (pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) || (pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C))) return; - mutex_lock(&dev_priv->dpll.lock); + _cnl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy), + DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), + DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), + DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} - val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)); - drm_WARN_ON(&dev_priv->drm, - (val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)) == 0); +static void dg1_ddi_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); - val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - val |= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); - intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val); - intel_de_posting_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)); + _cnl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy), + DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} - val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val); +static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); - mutex_unlock(&dev_priv->dpll.lock); + return _cnl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy), + DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } -static void icl_map_plls_to_ports(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - u32 val; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); - mutex_lock(&dev_priv->dpll.lock); + return _cnl_ddi_get_pll(i915, DG1_DPCLKA_CFGCR0(phy), + DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), + DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)); +} - val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); - drm_WARN_ON(&dev_priv->drm, - (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0); +static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum phy phy = intel_port_to_phy(i915, encoder->port); - if (intel_phy_is_combo(dev_priv, 
phy)) { - u32 mask, sel; + if (drm_WARN_ON(&i915->drm, !pll)) + return; - if (IS_ROCKETLAKE(dev_priv)) { - mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - sel = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); - } else { - mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - sel = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); - } + _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0, + ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), + ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy), + ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} - /* - * Even though this register references DDIs, note that we - * want to pass the PHY rather than the port (DDI). For - * ICL, port=phy in all cases so it doesn't matter, but for - * EHL the bspec notes the following: - * - * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA - * Clock Select chooses the PLL for both DDIA and DDID and - * drives port A in all cases." - */ - val &= ~mask; - val |= sel; - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); - intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); - } +static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); - val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0, + ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); +} + +static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); - mutex_unlock(&dev_priv->dpll.lock); + return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0, + ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); } -static void dg1_unmap_plls_to_ports(struct intel_encoder *encoder) +struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0, + ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy), + ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)); +} + +static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum port port = encoder->port; - mutex_lock(&dev_priv->dpll.lock); + if (drm_WARN_ON(&i915->drm, !pll)) + return; - intel_de_rmw(dev_priv, DG1_DPCLKA_CFGCR0(phy), 0, - DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); + /* + * "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port. + * MG does not exist, but the programming is required to ungate DDIC and DDID." 
+ */ + intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG); - mutex_unlock(&dev_priv->dpll.lock); + icl_ddi_combo_enable_clock(encoder, crtc_state); } -static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) +static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - u32 val; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + icl_ddi_combo_disable_clock(encoder); - mutex_lock(&dev_priv->dpll.lock); + intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); +} - val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); - val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); +static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 tmp; - mutex_unlock(&dev_priv->dpll.lock); + tmp = intel_de_read(i915, DDI_CLK_SEL(port)); + + if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE) + return false; + + return icl_ddi_combo_is_clock_enabled(encoder); } -static void dg1_sanitize_port_clk_off(struct drm_i915_private *dev_priv, - u32 port_mask, bool ddi_clk_needed) +static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - enum port port; - u32 val; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum port port = encoder->port; - for_each_port_masked(port, port_mask) { - enum phy phy = intel_port_to_phy(dev_priv, port); - bool ddi_clk_off; + if (drm_WARN_ON(&i915->drm, !pll)) + return; - val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)); - ddi_clk_off = val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + intel_de_write(i915, DDI_CLK_SEL(port), + icl_pll_to_ddi_clk_sel(encoder, crtc_state)); - if (ddi_clk_needed == !ddi_clk_off) - continue; + mutex_lock(&i915->dpll.lock); - /* - * Punt on the case now where clock is gated, but it would - * be needed by the port. Something else is really broken then. 
- */ - if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed)) - continue; + intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, + ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0); + + mutex_unlock(&i915->dpll.lock); +} + +static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum port port = encoder->port; + + mutex_lock(&i915->dpll.lock); + + intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, + 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); + + mutex_unlock(&i915->dpll.lock); + + intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); +} + +static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum port port = encoder->port; + u32 tmp; + + tmp = intel_de_read(i915, DDI_CLK_SEL(port)); - drm_notice(&dev_priv->drm, - "PHY %c is disabled with an ungated DDI clock, gate it\n", - phy_name(phy)); - val |= DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val); + if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE) + return false; + + tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0); + + return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); +} + +static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + enum port port = encoder->port; + enum intel_dpll_id id; + u32 tmp; + + tmp = intel_de_read(i915, DDI_CLK_SEL(port)); + + switch (tmp & DDI_CLK_SEL_MASK) { + case DDI_CLK_SEL_TBT_162: + case DDI_CLK_SEL_TBT_270: + case DDI_CLK_SEL_TBT_540: + case DDI_CLK_SEL_TBT_810: + id = DPLL_ID_ICL_TBTPLL; + break; + case DDI_CLK_SEL_MG: + id = icl_tc_port_to_pll_id(tc_port); + break; + default: + MISSING_CASE(tmp); + fallthrough; + case DDI_CLK_SEL_NONE: + return NULL; } + + return intel_get_shared_dpll_by_id(i915, id); } -static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, - u32 port_mask, bool ddi_clk_needed) +static void cnl_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - enum port port; - u32 val; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum port port = encoder->port; - val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); - for_each_port_masked(port, port_mask) { - enum phy phy = intel_port_to_phy(dev_priv, port); - bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv, - phy); + if (drm_WARN_ON(&i915->drm, !pll)) + return; - if (ddi_clk_needed == !ddi_clk_off) - continue; + _cnl_ddi_enable_clock(i915, DPCLKA_CFGCR0, + DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port), + DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port), + DPCLKA_CFGCR0_DDI_CLK_OFF(port)); +} - /* - * Punt on the case now where clock is gated, but it would - * be needed by the port. Something else is really broken then. 
- */ - if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed)) - continue; +static void cnl_ddi_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + _cnl_ddi_disable_clock(i915, DPCLKA_CFGCR0, + DPCLKA_CFGCR0_DDI_CLK_OFF(port)); +} - drm_notice(&dev_priv->drm, - "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", - phy_name(phy)); - val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); - intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); +static bool cnl_ddi_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + return _cnl_ddi_is_clock_enabled(i915, DPCLKA_CFGCR0, + DPCLKA_CFGCR0_DDI_CLK_OFF(port)); +} + +static struct intel_shared_dpll *cnl_ddi_get_pll(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + return _cnl_ddi_get_pll(i915, DPCLKA_CFGCR0, + DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port), + DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)); +} + +static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum intel_dpll_id id; + + switch (encoder->port) { + case PORT_A: + id = DPLL_ID_SKL_DPLL0; + break; + case PORT_B: + id = DPLL_ID_SKL_DPLL1; + break; + case PORT_C: + id = DPLL_ID_SKL_DPLL2; + break; + default: + MISSING_CASE(encoder->port); + return NULL; } + + return intel_get_shared_dpll_by_id(i915, id); } -void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) +static void skl_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum port port = encoder->port; + + if (drm_WARN_ON(&i915->drm, !pll)) + return; + + mutex_lock(&i915->dpll.lock); + + intel_de_rmw(i915, DPLL_CTRL2, + DPLL_CTRL2_DDI_CLK_OFF(port) | + DPLL_CTRL2_DDI_CLK_SEL_MASK(port), + DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | + DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); + + mutex_unlock(&i915->dpll.lock); +} + +static void skl_ddi_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + mutex_lock(&i915->dpll.lock); + + intel_de_rmw(i915, DPLL_CTRL2, + 0, DPLL_CTRL2_DDI_CLK_OFF(port)); + + mutex_unlock(&i915->dpll.lock); +} + +static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + /* + * FIXME Not sure if the override affects both + * the PLL selection and the CLK_OFF bit. + */ + return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)); +} + +static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + enum intel_dpll_id id; + u32 tmp; + + tmp = intel_de_read(i915, DPLL_CTRL2); + + /* + * FIXME Not sure if the override affects both + * the PLL selection and the CLK_OFF bit. 
+ */ + if ((tmp & DPLL_CTRL2_DDI_SEL_OVERRIDE(port)) == 0) + return NULL; + + id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >> + DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port); + + return intel_get_shared_dpll_by_id(i915, id); +} + +void hsw_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum port port = encoder->port; + + if (drm_WARN_ON(&i915->drm, !pll)) + return; + + intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); +} + +void hsw_ddi_disable_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); +} + +bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE; +} + +static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + enum intel_dpll_id id; + u32 tmp; + + tmp = intel_de_read(i915, PORT_CLK_SEL(port)); + + switch (tmp & PORT_CLK_SEL_MASK) { + case PORT_CLK_SEL_WRPLL1: + id = DPLL_ID_WRPLL1; + break; + case PORT_CLK_SEL_WRPLL2: + id = DPLL_ID_WRPLL2; + break; + case PORT_CLK_SEL_SPLL: + id = DPLL_ID_SPLL; + break; + case PORT_CLK_SEL_LCPLL_810: + id = DPLL_ID_LCPLL_810; + break; + case PORT_CLK_SEL_LCPLL_1350: + id = DPLL_ID_LCPLL_1350; + break; + case PORT_CLK_SEL_LCPLL_2700: + id = DPLL_ID_LCPLL_2700; + break; + default: + MISSING_CASE(tmp); + fallthrough; + case PORT_CLK_SEL_NONE: + return NULL; + } + + return intel_get_shared_dpll_by_id(i915, id); +} + +void intel_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + if (encoder->enable_clock) + encoder->enable_clock(encoder, crtc_state); +} + +static void intel_ddi_disable_clock(struct intel_encoder *encoder) +{ + if (encoder->disable_clock) + encoder->disable_clock(encoder); +} + +void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); u32 port_mask; bool ddi_clk_needed; @@ -3313,7 +2155,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) * In the unlikely case that BIOS enables DP in MST mode, just * warn since our MST HW readout is incomplete. */ - if (drm_WARN_ON(&dev_priv->drm, is_mst)) + if (drm_WARN_ON(&i915->drm, is_mst)) return; } @@ -3328,11 +2170,11 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) * Sanity check that we haven't incorrectly registered another * encoder using any of the ports of this DSI encoder. 
*/ - for_each_intel_encoder(&dev_priv->drm, other_encoder) { + for_each_intel_encoder(&i915->drm, other_encoder) { if (other_encoder == encoder) continue; - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(&i915->drm, port_mask & BIT(other_encoder->port))) return; } @@ -3343,92 +2185,15 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) ddi_clk_needed = false; } - if (IS_DG1(dev_priv)) - dg1_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed); - else - icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed); -} - -static void intel_ddi_clk_select(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; - enum phy phy = intel_port_to_phy(dev_priv, port); - u32 val; - const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - - if (drm_WARN_ON(&dev_priv->drm, !pll)) + if (ddi_clk_needed || !encoder->disable_clock || + !encoder->is_clock_enabled(encoder)) return; - mutex_lock(&dev_priv->dpll.lock); - - if (INTEL_GEN(dev_priv) >= 11) { - if (!intel_phy_is_combo(dev_priv, phy)) - intel_de_write(dev_priv, DDI_CLK_SEL(port), - icl_pll_to_ddi_clk_sel(encoder, crtc_state)); - else if (IS_JSL_EHL(dev_priv) && port >= PORT_C) - /* - * MG does not exist but the programming is required - * to ungate DDIC and DDID - */ - intel_de_write(dev_priv, DDI_CLK_SEL(port), - DDI_CLK_SEL_MG); - } else if (IS_CANNONLAKE(dev_priv)) { - /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ - val = intel_de_read(dev_priv, DPCLKA_CFGCR0); - val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); - intel_de_write(dev_priv, DPCLKA_CFGCR0, val); - - /* - * Configure DPCLKA_CFGCR0 to turn on the clock for the DDI. - * This step and the step before must be done with separate - * register writes. 
- */ - val = intel_de_read(dev_priv, DPCLKA_CFGCR0); - val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); - intel_de_write(dev_priv, DPCLKA_CFGCR0, val); - } else if (IS_GEN9_BC(dev_priv)) { - /* DDI -> PLL mapping */ - val = intel_de_read(dev_priv, DPLL_CTRL2); - - val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) | - DPLL_CTRL2_DDI_CLK_SEL_MASK(port)); - val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | - DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); + drm_notice(&i915->drm, + "[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n", + encoder->base.base.id, encoder->base.name); - intel_de_write(dev_priv, DPLL_CTRL2, val); - - } else if (INTEL_GEN(dev_priv) < 9) { - intel_de_write(dev_priv, PORT_CLK_SEL(port), - hsw_pll_to_ddi_pll_sel(pll)); - } - - mutex_unlock(&dev_priv->dpll.lock); -} - -static void intel_ddi_clk_disable(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; - enum phy phy = intel_port_to_phy(dev_priv, port); - - if (INTEL_GEN(dev_priv) >= 11) { - if (!intel_phy_is_combo(dev_priv, phy) || - (IS_JSL_EHL(dev_priv) && port >= PORT_C)) - intel_de_write(dev_priv, DDI_CLK_SEL(port), - DDI_CLK_SEL_NONE); - } else if (IS_CANNONLAKE(dev_priv)) { - intel_de_write(dev_priv, DPCLKA_CFGCR0, - intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port)); - } else if (IS_GEN9_BC(dev_priv)) { - intel_de_write(dev_priv, DPLL_CTRL2, - intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port)); - } else if (INTEL_GEN(dev_priv) < 9) { - intel_de_write(dev_priv, PORT_CLK_SEL(port), - PORT_CLK_SEL_NONE); - } + encoder->disable_clock(encoder); } static void @@ -3437,10 +2202,12 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); u32 ln0, ln1, pin_assignment; u8 width; - if (dig_port->tc_mode == TC_PORT_TBT_ALT) + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode == TC_PORT_TBT_ALT) return; if (INTEL_GEN(dev_priv) >= 12) { @@ -3615,6 +2382,90 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); } +static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_phy_is_combo(i915, phy)) { + bool lane_reversal = + dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + intel_combo_phy_power_up_lanes(i915, phy, false, + crtc_state->lane_count, + lane_reversal); + } +} + +static void intel_ddi_mso_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 dss1; + + if (!HAS_MSO(i915)) + return; + + dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe)); + + pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE; + if (!pipe_config->splitter.enable) + return; + + /* Splitter enable is supported for pipe A only. 
*/ + if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) { + pipe_config->splitter.enable = false; + return; + } + + switch (dss1 & SPLITTER_CONFIGURATION_MASK) { + default: + drm_WARN(&i915->drm, true, + "Invalid splitter configuration, dss1=0x%08x\n", dss1); + fallthrough; + case SPLITTER_CONFIGURATION_2_SEGMENT: + pipe_config->splitter.link_count = 2; + break; + case SPLITTER_CONFIGURATION_4_SEGMENT: + pipe_config->splitter.link_count = 4; + break; + } + + pipe_config->splitter.pixel_overlap = REG_FIELD_GET(OVERLAP_PIXELS_MASK, dss1); +} + +static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 dss1 = 0; + + if (!HAS_MSO(i915)) + return; + + if (crtc_state->splitter.enable) { + /* Splitter enable is supported for pipe A only. */ + if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) + return; + + dss1 |= SPLITTER_ENABLE; + dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap); + if (crtc_state->splitter.link_count == 2) + dss1 |= SPLITTER_CONFIGURATION_2_SEGMENT; + else + dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT; + } + + intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe), + SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK | + OVERLAP_PIXELS_MASK, dss1); +} + static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -3656,7 +2507,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only * configure the PLL to port mapping here. */ - intel_ddi_clk_select(encoder, crtc_state); + intel_ddi_enable_clock(encoder, crtc_state); /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */ if (!intel_phy_is_tc(dev_priv, phy) || @@ -3706,14 +2557,12 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up * the used lanes of the DDI. */ - if (intel_phy_is_combo(dev_priv, phy)) { - bool lane_reversal = - dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + intel_ddi_power_up_lanes(encoder, crtc_state); - intel_combo_phy_power_up_lanes(dev_priv, phy, false, - crtc_state->lane_count, - lane_reversal); - } + /* + * 7.g Program CoG/MSO configuration bits in DSS_CTL1 if selected. 
+ */ + intel_ddi_mso_configure(crtc_state); /* * 7.g Configure and enable DDI_BUF_CTL @@ -3784,7 +2633,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_pps_on(intel_dp); - intel_ddi_clk_select(encoder, crtc_state); + intel_ddi_enable_clock(encoder, crtc_state); if (!intel_phy_is_tc(dev_priv, phy) || dig_port->tc_mode != TC_PORT_TBT_ALT) { @@ -3804,14 +2653,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, else intel_prepare_dp_ddi_buffers(encoder, crtc_state); - if (intel_phy_is_combo(dev_priv, phy)) { - bool lane_reversal = - dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - - intel_combo_phy_power_up_lanes(dev_priv, phy, false, - crtc_state->lane_count, - lane_reversal); - } + intel_ddi_power_up_lanes(encoder, crtc_state); intel_ddi_init_dp_buf_reg(encoder, crtc_state); if (!is_mst) @@ -3864,10 +2706,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int level = intel_ddi_hdmi_level(encoder, crtc_state); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); - intel_ddi_clk_select(encoder, crtc_state); + intel_ddi_enable_clock(encoder, crtc_state); drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, @@ -3875,20 +2716,6 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, icl_program_mg_dp_mode(dig_port, crtc_state); - if (INTEL_GEN(dev_priv) >= 12) - tgl_ddi_vswing_sequence(encoder, crtc_state, level); - else if (INTEL_GEN(dev_priv) == 11) - icl_ddi_vswing_sequence(encoder, crtc_state, level); - else if (IS_CANNONLAKE(dev_priv)) - cnl_ddi_vswing_sequence(encoder, crtc_state, level); - else if (IS_GEN9_LP(dev_priv)) - bxt_ddi_vswing_sequence(encoder, crtc_state, level); - else - intel_prepare_hdmi_ddi_buffers(encoder, level); - - if (IS_GEN9_BC(dev_priv)) - skl_ddi_set_iboost(encoder, crtc_state, level); - intel_ddi_enable_pipe_clock(encoder, crtc_state); dig_port->set_infoframes(encoder, @@ -3920,11 +2747,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder); - if (IS_DG1(dev_priv)) - dg1_map_plls_to_ports(encoder, crtc_state); - else if (INTEL_GEN(dev_priv) >= 11) - icl_map_plls_to_ports(encoder, crtc_state); - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { @@ -4033,7 +2855,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, dig_port->ddi_io_power_domain, fetch_and_zero(&dig_port->ddi_io_wakeref)); - intel_ddi_clk_disable(encoder); + intel_ddi_disable_clock(encoder); } static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, @@ -4056,7 +2878,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, dig_port->ddi_io_power_domain, fetch_and_zero(&dig_port->ddi_io_wakeref)); - intel_ddi_clk_disable(encoder); + intel_ddi_disable_clock(encoder); intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } @@ -4097,7 +2919,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, intel_atomic_get_old_crtc_state(state, slave); intel_crtc_vblank_off(old_slave_crtc_state); - trace_intel_pipe_disable(slave); intel_dsc_disable(old_slave_crtc_state); skl_scaler_disable(old_slave_crtc_state); @@ -4123,11 +2944,6 @@ static void 
intel_ddi_post_disable(struct intel_atomic_state *state, intel_ddi_post_disable_dp(state, encoder, old_crtc_state, old_conn_state); - if (IS_DG1(dev_priv)) - dg1_unmap_plls_to_ports(encoder); - else if (INTEL_GEN(dev_priv) >= 11) - icl_unmap_plls_to_ports(encoder); - if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port) intel_display_power_put(dev_priv, intel_ddi_main_link_aux_domain(dig_port), @@ -4156,7 +2972,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); intel_disable_ddi_buf(encoder, old_crtc_state); - intel_ddi_clk_disable(encoder); + intel_ddi_disable_clock(encoder); val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); @@ -4264,6 +3080,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; + int level = intel_ddi_hdmi_level(encoder, crtc_state); enum port port = encoder->port; if (!intel_hdmi_handle_sink_scrambling(encoder, connector, @@ -4273,6 +3090,20 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", connector->base.id, connector->name); + if (INTEL_GEN(dev_priv) >= 12) + tgl_ddi_vswing_sequence(encoder, crtc_state, level); + else if (INTEL_GEN(dev_priv) == 11) + icl_ddi_vswing_sequence(encoder, crtc_state, level); + else if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_vswing_sequence(encoder, crtc_state, level); + else if (IS_GEN9_LP(dev_priv)) + bxt_ddi_vswing_sequence(encoder, crtc_state, level); + else + intel_prepare_hdmi_ddi_buffers(encoder, level); + + if (IS_GEN9_BC(dev_priv)) + skl_ddi_set_iboost(encoder, crtc_state, level); + /* Display WA #1143: skl,kbl,cfl */ if (IS_GEN9_BC(dev_priv)) { /* @@ -4308,6 +3139,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, intel_de_write(dev_priv, reg, val); } + intel_ddi_power_up_lanes(encoder, crtc_state); + /* In HDMI/DVI mode, the port width, and swing/emphasis values * are ignored so nothing special needs to be done besides * enabling the port. 
@@ -4794,8 +3627,8 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, } } -void intel_ddi_get_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) +static void intel_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; @@ -4817,6 +3650,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_ddi_read_func_ctl(encoder, pipe_config); } + intel_ddi_mso_get_config(encoder, pipe_config); + pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); @@ -4842,7 +3677,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, } if (!pipe_config->bigjoiner_slave) - intel_ddi_clock_get(encoder, pipe_config); + ddi_dotclock_get(pipe_config); if (IS_GEN9_LP(dev_priv)) pipe_config->lane_lat_optim_mask = @@ -4872,6 +3707,123 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC); } +void intel_ddi_get_clock(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct intel_shared_dpll *pll) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; + struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id]; + bool pll_active; + + if (drm_WARN_ON(&i915->drm, !pll)) + return; + + port_dpll->pll = pll; + pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state); + drm_WARN_ON(&i915->drm, !pll_active); + + icl_set_active_port_dpll(crtc_state, port_dpll_id); + + crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll, + &crtc_state->dpll_hw_state); +} + +static void adls_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, adls_ddi_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +static void rkl_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, rkl_ddi_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +static void dg1_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, dg1_ddi_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +static void icl_ddi_combo_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, icl_ddi_combo_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +static void icl_ddi_tc_get_clock(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct intel_shared_dpll *pll) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum icl_port_dpll_id port_dpll_id; + struct icl_port_dpll *port_dpll; + bool pll_active; + + if (drm_WARN_ON(&i915->drm, !pll)) + return; + + if (intel_get_shared_dpll_id(i915, pll) == DPLL_ID_ICL_TBTPLL) + port_dpll_id = ICL_PORT_DPLL_DEFAULT; + else + port_dpll_id = ICL_PORT_DPLL_MG_PHY; + + port_dpll = &crtc_state->icl_port_dplls[port_dpll_id]; + + port_dpll->pll = pll; + pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state); + drm_WARN_ON(&i915->drm, !pll_active); + + icl_set_active_port_dpll(crtc_state, port_dpll_id); + + if (intel_get_shared_dpll_id(i915, crtc_state->shared_dpll) == DPLL_ID_ICL_TBTPLL) + 
crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port); + else + crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll, + &crtc_state->dpll_hw_state); +} + +static void icl_ddi_tc_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + icl_ddi_tc_get_clock(encoder, crtc_state, icl_ddi_tc_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +static void cnl_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, cnl_ddi_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +static void bxt_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, bxt_ddi_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +static void skl_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, skl_ddi_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + +void hsw_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + intel_ddi_get_clock(encoder, crtc_state, hsw_ddi_get_pll(encoder)); + intel_ddi_get_config(encoder, crtc_state); +} + static void intel_ddi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { @@ -5449,6 +4401,24 @@ static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv, return HPD_PORT_A + port - PORT_A; } +static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port) +{ + if (HAS_PCH_TGP(dev_priv)) + return icl_hpd_pin(dev_priv, port); + + return HPD_PORT_A + port - PORT_A; +} + +static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port) +{ + if (INTEL_GEN(i915) >= 12) + return port >= PORT_TC1; + else if (INTEL_GEN(i915) >= 11) + return port >= PORT_C; + else + return false; +} + #define port_tc_name(port) ((port) - PORT_TC1 + '1') #define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1') @@ -5540,7 +4510,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->post_disable = intel_ddi_post_disable; encoder->update_pipe = intel_ddi_update_pipe; encoder->get_hw_state = intel_ddi_get_hw_state; - encoder->get_config = intel_ddi_get_config; encoder->sync_state = intel_ddi_sync_state; encoder->initial_fastset_check = intel_ddi_initial_fastset_check; encoder->suspend = intel_dp_encoder_suspend; @@ -5553,6 +4522,65 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->cloneable = 0; encoder->pipe_mask = ~0; + if (IS_ALDERLAKE_S(dev_priv)) { + encoder->enable_clock = adls_ddi_enable_clock; + encoder->disable_clock = adls_ddi_disable_clock; + encoder->is_clock_enabled = adls_ddi_is_clock_enabled; + encoder->get_config = adls_ddi_get_config; + } else if (IS_ROCKETLAKE(dev_priv)) { + encoder->enable_clock = rkl_ddi_enable_clock; + encoder->disable_clock = rkl_ddi_disable_clock; + encoder->is_clock_enabled = rkl_ddi_is_clock_enabled; + encoder->get_config = rkl_ddi_get_config; + } else if (IS_DG1(dev_priv)) { + encoder->enable_clock = dg1_ddi_enable_clock; + encoder->disable_clock = dg1_ddi_disable_clock; + encoder->is_clock_enabled = dg1_ddi_is_clock_enabled; + encoder->get_config = dg1_ddi_get_config; + } else if (IS_JSL_EHL(dev_priv)) { + if (intel_ddi_is_tc(dev_priv, port)) { + encoder->enable_clock = jsl_ddi_tc_enable_clock; + encoder->disable_clock = jsl_ddi_tc_disable_clock; + 
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled; + encoder->get_config = icl_ddi_combo_get_config; + } else { + encoder->enable_clock = icl_ddi_combo_enable_clock; + encoder->disable_clock = icl_ddi_combo_disable_clock; + encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled; + encoder->get_config = icl_ddi_combo_get_config; + } + } else if (INTEL_GEN(dev_priv) >= 11) { + if (intel_ddi_is_tc(dev_priv, port)) { + encoder->enable_clock = icl_ddi_tc_enable_clock; + encoder->disable_clock = icl_ddi_tc_disable_clock; + encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled; + encoder->get_config = icl_ddi_tc_get_config; + } else { + encoder->enable_clock = icl_ddi_combo_enable_clock; + encoder->disable_clock = icl_ddi_combo_disable_clock; + encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled; + encoder->get_config = icl_ddi_combo_get_config; + } + } else if (IS_CANNONLAKE(dev_priv)) { + encoder->enable_clock = cnl_ddi_enable_clock; + encoder->disable_clock = cnl_ddi_disable_clock; + encoder->is_clock_enabled = cnl_ddi_is_clock_enabled; + encoder->get_config = cnl_ddi_get_config; + } else if (IS_GEN9_LP(dev_priv)) { + /* BXT/GLK have fixed PLL->port mapping */ + encoder->get_config = bxt_ddi_get_config; + } else if (IS_GEN9_BC(dev_priv)) { + encoder->enable_clock = skl_ddi_enable_clock; + encoder->disable_clock = skl_ddi_disable_clock; + encoder->is_clock_enabled = skl_ddi_is_clock_enabled; + encoder->get_config = skl_ddi_get_config; + } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { + encoder->enable_clock = hsw_ddi_enable_clock; + encoder->disable_clock = hsw_ddi_disable_clock; + encoder->is_clock_enabled = hsw_ddi_is_clock_enabled; + encoder->get_config = hsw_ddi_get_config; + } + if (IS_DG1(dev_priv)) encoder->hpd_pin = dg1_hpd_pin(dev_priv, port); else if (IS_ROCKETLAKE(dev_priv)) @@ -5565,6 +4593,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->hpd_pin = icl_hpd_pin(dev_priv, port); else if (IS_GEN(dev_priv, 10)) encoder->hpd_pin = cnl_hpd_pin(dev_priv, port); + else if (IS_GEN(dev_priv, 9)) + encoder->hpd_pin = skl_hpd_pin(dev_priv, port); else encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); @@ -5577,6 +4607,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_de_read(dev_priv, DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); + if (intel_bios_is_lane_reversal_needed(dev_priv, port)) + dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL; + dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = intel_ddi_max_lanes(dig_port); dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); @@ -5601,6 +4634,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) goto err; dig_port->hpd_pulse = intel_dp_hpd_pulse; + + /* Splitter enable for eDP MSO is supported for pipe A only. 
*/ + if (dig_port->dp.mso_link_count) + encoder->pipe_mask = BIT(PIPE_A); } /* In theory we don't need the encoder->type check, but leave it just in diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index a4dd815c0000..59c6b01d4199 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -17,6 +17,7 @@ struct intel_crtc_state; struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; +struct intel_shared_dpll; enum transcoder; i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, @@ -27,8 +28,22 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, struct intel_encoder *intel_encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state); -void hsw_fdi_link_train(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void intel_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_ddi_get_clock(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct intel_shared_dpll *pll); +void hsw_ddi_enable_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void hsw_ddi_disable_clock(struct intel_encoder *encoder); +bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder); +void hsw_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); +struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder); +void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, + enum port port); void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, @@ -40,8 +55,6 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); -void intel_ddi_get_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config); void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, bool state); void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, @@ -53,6 +66,6 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp, int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, enum transcoder cpu_transcoder, bool enable, u32 hdcp_mask); -void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); +void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); #endif /* __INTEL_DDI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c new file mode 100644 index 000000000000..f65c2b35461c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -0,0 +1,1394 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" +#include "intel_display_types.h" + +/* HDMI/DVI modes ignore everything but the last 2 items. 
So we share + * them for both DP and FDI transports, allowing those ports to + * automatically adapt to HDMI connections as well + */ +static const struct ddi_buf_trans hsw_ddi_translations_dp[] = { + { 0x00FFFFFF, 0x0006000E, 0x0 }, + { 0x00D75FFF, 0x0005000A, 0x0 }, + { 0x00C30FFF, 0x00040006, 0x0 }, + { 0x80AAAFFF, 0x000B0000, 0x0 }, + { 0x00FFFFFF, 0x0005000A, 0x0 }, + { 0x00D75FFF, 0x000C0004, 0x0 }, + { 0x80C30FFF, 0x000B0000, 0x0 }, + { 0x00FFFFFF, 0x00040006, 0x0 }, + { 0x80D75FFF, 0x000B0000, 0x0 }, +}; + +static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = { + { 0x00FFFFFF, 0x0007000E, 0x0 }, + { 0x00D75FFF, 0x000F000A, 0x0 }, + { 0x00C30FFF, 0x00060006, 0x0 }, + { 0x00AAAFFF, 0x001E0000, 0x0 }, + { 0x00FFFFFF, 0x000F000A, 0x0 }, + { 0x00D75FFF, 0x00160004, 0x0 }, + { 0x00C30FFF, 0x001E0000, 0x0 }, + { 0x00FFFFFF, 0x00060006, 0x0 }, + { 0x00D75FFF, 0x001E0000, 0x0 }, +}; + +static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = { + /* Idx NT mV d T mV d db */ + { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */ + { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */ + { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */ + { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */ + { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */ + { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */ + { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */ + { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */ + { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */ + { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */ + { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */ + { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */ +}; + +static const struct ddi_buf_trans bdw_ddi_translations_edp[] = { + { 0x00FFFFFF, 0x00000012, 0x0 }, + { 0x00EBAFFF, 0x00020011, 0x0 }, + { 0x00C71FFF, 0x0006000F, 0x0 }, + { 0x00AAAFFF, 0x000E000A, 0x0 }, + { 0x00FFFFFF, 0x00020011, 0x0 }, + { 0x00DB6FFF, 0x0005000F, 0x0 }, + { 0x00BEEFFF, 0x000A000C, 0x0 }, + { 0x00FFFFFF, 0x0005000F, 0x0 }, + { 0x00DB6FFF, 0x000A000C, 0x0 }, +}; + +static const struct ddi_buf_trans bdw_ddi_translations_dp[] = { + { 0x00FFFFFF, 0x0007000E, 0x0 }, + { 0x00D75FFF, 0x000E000A, 0x0 }, + { 0x00BEFFFF, 0x00140006, 0x0 }, + { 0x80B2CFFF, 0x001B0002, 0x0 }, + { 0x00FFFFFF, 0x000E000A, 0x0 }, + { 0x00DB6FFF, 0x00160005, 0x0 }, + { 0x80C71FFF, 0x001A0002, 0x0 }, + { 0x00F7DFFF, 0x00180004, 0x0 }, + { 0x80D75FFF, 0x001B0002, 0x0 }, +}; + +static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = { + { 0x00FFFFFF, 0x0001000E, 0x0 }, + { 0x00D75FFF, 0x0004000A, 0x0 }, + { 0x00C30FFF, 0x00070006, 0x0 }, + { 0x00AAAFFF, 0x000C0000, 0x0 }, + { 0x00FFFFFF, 0x0004000A, 0x0 }, + { 0x00D75FFF, 0x00090004, 0x0 }, + { 0x00C30FFF, 0x000C0000, 0x0 }, + { 0x00FFFFFF, 0x00070006, 0x0 }, + { 0x00D75FFF, 0x000C0000, 0x0 }, +}; + +static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { + /* Idx NT mV d T mV df db */ + { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */ + { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */ + { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */ + { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */ + { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */ + { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */ + { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */ + { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */ + { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */ + { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */ +}; + +/* Skylake H and S */ +static const struct ddi_buf_trans skl_ddi_translations_dp[] = { + { 
0x00002016, 0x000000A0, 0x0 }, + { 0x00005012, 0x0000009B, 0x0 }, + { 0x00007011, 0x00000088, 0x0 }, + { 0x80009010, 0x000000C0, 0x1 }, + { 0x00002016, 0x0000009B, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000C0, 0x1 }, + { 0x00002016, 0x000000DF, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, +}; + +/* Skylake U */ +static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { + { 0x0000201B, 0x000000A2, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x1 }, + { 0x80009010, 0x000000C0, 0x1 }, + { 0x0000201B, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, + { 0x80007011, 0x000000C0, 0x1 }, + { 0x00002016, 0x00000088, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, +}; + +/* Skylake Y */ +static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { + { 0x00000018, 0x000000A2, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, + { 0x80009010, 0x000000C0, 0x3 }, + { 0x00000018, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, + { 0x00000018, 0x00000088, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, +}; + +/* Kabylake H and S */ +static const struct ddi_buf_trans kbl_ddi_translations_dp[] = { + { 0x00002016, 0x000000A0, 0x0 }, + { 0x00005012, 0x0000009B, 0x0 }, + { 0x00007011, 0x00000088, 0x0 }, + { 0x80009010, 0x000000C0, 0x1 }, + { 0x00002016, 0x0000009B, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000C0, 0x1 }, + { 0x00002016, 0x00000097, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, +}; + +/* Kabylake U */ +static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = { + { 0x0000201B, 0x000000A1, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, + { 0x80009010, 0x000000C0, 0x3 }, + { 0x0000201B, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, + { 0x00002016, 0x0000004F, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, +}; + +/* Kabylake Y */ +static const struct ddi_buf_trans kbl_y_ddi_translations_dp[] = { + { 0x00001017, 0x000000A1, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, + { 0x8000800F, 0x000000C0, 0x3 }, + { 0x00001017, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, + { 0x00001017, 0x0000004C, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, +}; + +/* + * Skylake/Kabylake H and S + * eDP 1.4 low vswing translation parameters + */ +static const struct ddi_buf_trans skl_ddi_translations_edp[] = { + { 0x00000018, 0x000000A8, 0x0 }, + { 0x00004013, 0x000000A9, 0x0 }, + { 0x00007011, 0x000000A2, 0x0 }, + { 0x00009010, 0x0000009C, 0x0 }, + { 0x00000018, 0x000000A9, 0x0 }, + { 0x00006013, 0x000000A2, 0x0 }, + { 0x00007011, 0x000000A6, 0x0 }, + { 0x00000018, 0x000000AB, 0x0 }, + { 0x00007013, 0x0000009F, 0x0 }, + { 0x00000018, 0x000000DF, 0x0 }, +}; + +/* + * Skylake/Kabylake U + * eDP 1.4 low vswing translation parameters + */ +static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = { + { 0x00000018, 0x000000A8, 0x0 }, + { 0x00004013, 0x000000A9, 0x0 }, + { 0x00007011, 0x000000A2, 0x0 }, + { 0x00009010, 0x0000009C, 0x0 }, + { 0x00000018, 0x000000A9, 0x0 }, + { 0x00006013, 0x000000A2, 0x0 }, + { 0x00007011, 0x000000A6, 0x0 }, + { 0x00002016, 0x000000AB, 0x0 }, + { 0x00005013, 0x0000009F, 0x0 }, + { 0x00000018, 0x000000DF, 0x0 }, +}; + +/* + * Skylake/Kabylake Y + * eDP 1.4 low vswing translation parameters + */ +static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = { + { 0x00000018, 0x000000A8, 0x0 }, + { 0x00004013, 
0x000000AB, 0x0 }, + { 0x00007011, 0x000000A4, 0x0 }, + { 0x00009010, 0x000000DF, 0x0 }, + { 0x00000018, 0x000000AA, 0x0 }, + { 0x00006013, 0x000000A4, 0x0 }, + { 0x00007011, 0x0000009D, 0x0 }, + { 0x00000018, 0x000000A0, 0x0 }, + { 0x00006012, 0x000000DF, 0x0 }, + { 0x00000018, 0x0000008A, 0x0 }, +}; + +/* Skylake/Kabylake U, H and S */ +static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { + { 0x00000018, 0x000000AC, 0x0 }, + { 0x00005012, 0x0000009D, 0x0 }, + { 0x00007011, 0x00000088, 0x0 }, + { 0x00000018, 0x000000A1, 0x0 }, + { 0x00000018, 0x00000098, 0x0 }, + { 0x00004013, 0x00000088, 0x0 }, + { 0x80006012, 0x000000CD, 0x1 }, + { 0x00000018, 0x000000DF, 0x0 }, + { 0x80003015, 0x000000CD, 0x1 }, /* Default */ + { 0x80003015, 0x000000C0, 0x1 }, + { 0x80000018, 0x000000C0, 0x1 }, +}; + +/* Skylake/Kabylake Y */ +static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { + { 0x00000018, 0x000000A1, 0x0 }, + { 0x00005012, 0x000000DF, 0x0 }, + { 0x80007011, 0x000000CB, 0x3 }, + { 0x00000018, 0x000000A4, 0x0 }, + { 0x00000018, 0x0000009D, 0x0 }, + { 0x00004013, 0x00000080, 0x0 }, + { 0x80006013, 0x000000C0, 0x3 }, + { 0x00000018, 0x0000008A, 0x0 }, + { 0x80003015, 0x000000C0, 0x3 }, /* Default */ + { 0x80003015, 0x000000C0, 0x3 }, + { 0x80000018, 0x000000C0, 0x3 }, +}; + + +static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { + /* Idx NT mV diff db */ + { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ + { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */ + { 104, 0x9A, 0, 64, }, /* 2: 400 6 */ + { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */ + { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ + { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */ + { 154, 0x9A, 0, 64, }, /* 6: 600 6 */ + { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ + { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */ + { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ +}; + +static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = { + /* Idx NT mV diff db */ + { 26, 0, 0, 128, }, /* 0: 200 0 */ + { 38, 0, 0, 112, }, /* 1: 200 1.5 */ + { 48, 0, 0, 96, }, /* 2: 200 4 */ + { 54, 0, 0, 69, }, /* 3: 200 6 */ + { 32, 0, 0, 128, }, /* 4: 250 0 */ + { 48, 0, 0, 104, }, /* 5: 250 1.5 */ + { 54, 0, 0, 85, }, /* 6: 250 4 */ + { 43, 0, 0, 128, }, /* 7: 300 0 */ + { 54, 0, 0, 101, }, /* 8: 300 1.5 */ + { 48, 0, 0, 128, }, /* 9: 300 0 */ +}; + +/* BSpec has 2 recommended values - entries 0 and 8. + * Using the entry with higher vswing. 
+ */ +static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { + /* Idx NT mV diff db */ + { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ + { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */ + { 52, 0x9A, 0, 64, }, /* 2: 400 6 */ + { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */ + { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ + { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */ + { 77, 0x9A, 0, 64, }, /* 6: 600 6 */ + { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ + { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */ + { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ +}; + +/* Voltage Swing Programming for VccIO 0.85V for DP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ + { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ + { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ + { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ + { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ + { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.85V for HDMI */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ + { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ + { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.85V for eDP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ + { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ + { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ + { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */ + { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */ + { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.95V for DP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ + { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ + { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ + { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ + { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ + { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.95V for HDMI */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ + { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ + { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ + { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ + { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 
3.0 */ + { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ + { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ + { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ + { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.95V for eDP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ + { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ + { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ + { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ + { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ + { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */ + { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 1.05V for DP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ + { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ + { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ + { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */ + { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ + { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */ + { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */ + { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 1.05V for HDMI */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ + { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ + { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ + { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ + { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ + { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ + { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ + { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ + { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 1.05V for eDP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ + { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ + { 0xA, 0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ + { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ + { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ + { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +}; + +/* icl_combo_phy_ddi_translations */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 
0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ + { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ + { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ +}; + +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { + /* NT mV Trans mV db */ + { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ + { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ + { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ +}; + +static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = { + /* NT mV Trans mV db */ + { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ + { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */ + { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ + { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = { + /* NT mV Trans mV db */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ + { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ + { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ +}; + +static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */ + { 0xA, 
0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */ + { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ +}; + +static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = { + /* NT mV Trans mV db */ + { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ + { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ + { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = { + /* Voltage swing pre-emphasis */ + { 0x18, 0x00, 0x00 }, /* 0 0 */ + { 0x1D, 0x00, 0x05 }, /* 0 1 */ + { 0x24, 0x00, 0x0C }, /* 0 2 */ + { 0x2B, 0x00, 0x14 }, /* 0 3 */ + { 0x21, 0x00, 0x00 }, /* 1 0 */ + { 0x2B, 0x00, 0x08 }, /* 1 1 */ + { 0x30, 0x00, 0x0F }, /* 1 2 */ + { 0x31, 0x00, 0x03 }, /* 2 0 */ + { 0x34, 0x00, 0x0B }, /* 2 1 */ + { 0x3F, 0x00, 0x00 }, /* 3 0 */ +}; + +static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = { + /* Voltage swing pre-emphasis */ + { 0x18, 0x00, 0x00 }, /* 0 0 */ + { 0x1D, 0x00, 0x05 }, /* 0 1 */ + { 0x24, 0x00, 0x0C }, /* 0 2 */ + { 0x2B, 0x00, 0x14 }, /* 0 3 */ + { 0x26, 0x00, 0x00 }, /* 1 0 */ + { 0x2C, 0x00, 0x07 }, /* 1 1 */ + { 0x33, 0x00, 0x0C }, /* 1 2 */ + { 0x2E, 0x00, 0x00 }, /* 2 0 */ + { 0x36, 0x00, 0x09 }, /* 2 1 */ + { 0x3F, 0x00, 0x00 }, /* 3 0 */ +}; + +static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = { + /* HDMI Preset VS Pre-emph */ + { 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */ + { 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */ + { 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */ + { 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */ + { 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */ + { 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */ + { 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */ + { 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */ + { 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */ + { 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */ +}; + +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ + { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ + { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ + { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */ + { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ + { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ + { 0x0, 0x0, 0x14 }, /* 
1 2 600mV 6 dB */ + { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ + { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ + { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ +}; + +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ + { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ + { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ + { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */ + { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ + { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ + { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ + { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ + { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ + { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ +}; + +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = { + /* HDMI Preset VS Pre-emph */ + { 0x7, 0x0, 0x0 }, /* 1 400mV 0dB */ + { 0x6, 0x0, 0x0 }, /* 2 500mV 0dB */ + { 0x4, 0x0, 0x0 }, /* 3 650mV 0dB */ + { 0x2, 0x0, 0x0 }, /* 4 800mV 0dB */ + { 0x0, 0x0, 0x0 }, /* 5 1000mV 0dB */ + { 0x0, 0x0, 0x5 }, /* 6 Full -1.5 dB */ + { 0x0, 0x0, 0x6 }, /* 7 Full -1.8 dB */ + { 0x0, 0x0, 0x7 }, /* 8 Full -2 dB */ + { 0x0, 0x0, 0x8 }, /* 9 Full -2.5 dB */ + { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */ +}; + +static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = { + /* NT mV Trans mV db */ + { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ + { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ + { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ + { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ + { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +/* + * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries + * that DisplayPort specification requires + */ +static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = { + /* VS pre-emp */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */ + { 0x6, 
0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */ +}; + +static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = { + /* NT mV Trans mV db */ + { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ + { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */ + { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table) +{ + return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl; +} + +static const struct ddi_buf_trans * +bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp); + return bdw_ddi_translations_edp; + } else { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); + return bdw_ddi_translations_dp; + } +} + +static const struct ddi_buf_trans * +skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (IS_SKL_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); + return skl_y_ddi_translations_dp; + } else if (IS_SKL_ULT(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); + return skl_u_ddi_translations_dp; + } else { + *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); + return skl_ddi_translations_dp; + } +} + +static const struct ddi_buf_trans * +kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv) || + IS_CML_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); + return kbl_y_ddi_translations_dp; + } else if (IS_KBL_ULT(dev_priv) || + IS_CFL_ULT(dev_priv) || + IS_CML_ULT(dev_priv)) { + *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp); + return kbl_u_ddi_translations_dp; + } else { + *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp); + return kbl_ddi_translations_dp; + } +} + +static const struct ddi_buf_trans * +skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dev_priv->vbt.edp.low_vswing) { + if 
(IS_SKL_ULX(dev_priv) || + IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv) || + IS_CML_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); + return skl_y_ddi_translations_edp; + } else if (IS_SKL_ULT(dev_priv) || + IS_KBL_ULT(dev_priv) || + IS_CFL_ULT(dev_priv) || + IS_CML_ULT(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); + return skl_u_ddi_translations_edp; + } else { + *n_entries = ARRAY_SIZE(skl_ddi_translations_edp); + return skl_ddi_translations_edp; + } + } + + if (IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv) || + IS_COMETLAKE(dev_priv)) + return kbl_get_buf_trans_dp(encoder, n_entries); + else + return skl_get_buf_trans_dp(encoder, n_entries); +} + +static const struct ddi_buf_trans * +skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +{ + if (IS_SKL_ULX(dev_priv) || + IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv) || + IS_CML_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); + return skl_y_ddi_translations_hdmi; + } else { + *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); + return skl_ddi_translations_hdmi; + } +} + +static int skl_buf_trans_num_entries(enum port port, int n_entries) +{ + /* Only DDIA and DDIE can select the 10th register with DP */ + if (port == PORT_A || port == PORT_E) + return min(n_entries, 10); + else + return min(n_entries, 9); +} + +const struct ddi_buf_trans * +intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv) || + IS_COMETLAKE(dev_priv)) { + const struct ddi_buf_trans *ddi_translations = + kbl_get_buf_trans_dp(encoder, n_entries); + *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); + return ddi_translations; + } else if (IS_SKYLAKE(dev_priv)) { + const struct ddi_buf_trans *ddi_translations = + skl_get_buf_trans_dp(encoder, n_entries); + *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); + return ddi_translations; + } else if (IS_BROADWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); + return bdw_ddi_translations_dp; + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); + return hsw_ddi_translations_dp; + } + + *n_entries = 0; + return NULL; +} + +const struct ddi_buf_trans * +intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (IS_GEN9_BC(dev_priv)) { + const struct ddi_buf_trans *ddi_translations = + skl_get_buf_trans_edp(encoder, n_entries); + *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); + return ddi_translations; + } else if (IS_BROADWELL(dev_priv)) { + return bdw_get_buf_trans_edp(encoder, n_entries); + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); + return hsw_ddi_translations_dp; + } + + *n_entries = 0; + return NULL; +} + +const struct ddi_buf_trans * +intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, + int *n_entries) +{ + if (IS_BROADWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); + return bdw_ddi_translations_fdi; + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); + return hsw_ddi_translations_fdi; + } + + *n_entries = 0; + return NULL; +} + +const struct ddi_buf_trans * +intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder, + int *n_entries) +{ + struct drm_i915_private 
*dev_priv = to_i915(encoder->base.dev); + + if (IS_GEN9_BC(dev_priv)) { + return skl_get_buf_trans_hdmi(dev_priv, n_entries); + } else if (IS_BROADWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + return bdw_ddi_translations_hdmi; + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); + return hsw_ddi_translations_hdmi; + } + + *n_entries = 0; + return NULL; +} + +static const struct bxt_ddi_buf_trans * +bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) +{ + *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); + return bxt_ddi_translations_dp; +} + +static const struct bxt_ddi_buf_trans * +bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); + return bxt_ddi_translations_edp; + } + + return bxt_get_buf_trans_dp(encoder, n_entries); +} + +static const struct bxt_ddi_buf_trans * +bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) +{ + *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi); + return bxt_ddi_translations_hdmi; +} + +const struct bxt_ddi_buf_trans * +bxt_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return bxt_get_buf_trans_hdmi(encoder, n_entries); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return bxt_get_buf_trans_edp(encoder, n_entries); + return bxt_get_buf_trans_dp(encoder, n_entries); +} + +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V); + return cnl_ddi_translations_hdmi_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V); + return cnl_ddi_translations_hdmi_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V); + return cnl_ddi_translations_hdmi_1_05V; + } else { + *n_entries = 1; /* shut up gcc */ + MISSING_CASE(voltage); + } + return NULL; +} + +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V); + return cnl_ddi_translations_dp_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V); + return cnl_ddi_translations_dp_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V); + return cnl_ddi_translations_dp_1_05V; + } else { + *n_entries = 1; /* shut up gcc */ + MISSING_CASE(voltage); + } + return NULL; +} + +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (dev_priv->vbt.edp.low_vswing) { + if (voltage == VOLTAGE_INFO_0_85V) { + 
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); + return cnl_ddi_translations_edp_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); + return cnl_ddi_translations_edp_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V); + return cnl_ddi_translations_edp_1_05V; + } else { + *n_entries = 1; /* shut up gcc */ + MISSING_CASE(voltage); + } + return NULL; + } else { + return cnl_get_buf_trans_dp(encoder, n_entries); + } +} + +const struct cnl_ddi_buf_trans * +cnl_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return cnl_get_buf_trans_hdmi(encoder, n_entries); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return cnl_get_buf_trans_edp(encoder, n_entries); + return cnl_get_buf_trans_dp(encoder, n_entries); +} + +static const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; +} + +static const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (crtc_state->port_clock > 540000) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; + } else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) { + *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3); + return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3; + } else if (IS_DG1(dev_priv)) { + *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr); + return dg1_combo_phy_ddi_translations_dp_rbr_hbr; + } + + return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct icl_mg_phy_ddi_buf_trans * +icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi); + return icl_mg_phy_ddi_translations_hdmi; +} + +static const struct icl_mg_phy_ddi_buf_trans * +icl_get_mg_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (crtc_state->port_clock > 270000) { + *n_entries = 
ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3); + return icl_mg_phy_ddi_translations_hbr2_hbr3; + } else { + *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr); + return icl_mg_phy_ddi_translations_rbr_hbr; + } +} + +const struct icl_mg_phy_ddi_buf_trans * +icl_get_mg_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries); + else + return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp); + return ehl_combo_phy_ddi_translations_dp; +} + +static const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; + } + + return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; +} + +static const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dev_priv->vbt.edp.low_vswing) { + if (crtc_state->port_clock > 270000) { + *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2); + return jsl_combo_phy_ddi_translations_edp_hbr2; + } else { + *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr); + return jsl_combo_phy_ddi_translations_edp_hbr; + } + } + + return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + 
return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (crtc_state->port_clock > 270000) { + if (IS_ROCKETLAKE(dev_priv)) { + *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3); + return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3; + } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { + *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); + return tgl_uy_combo_phy_ddi_translations_dp_hbr2; + } else { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); + return tgl_combo_phy_ddi_translations_dp_hbr2; + } + } else { + if (IS_ROCKETLAKE(dev_priv)) { + *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr); + return rkl_combo_phy_ddi_translations_dp_hbr; + } else { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); + return tgl_combo_phy_ddi_translations_dp_hbr; + } + } +} + +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (crtc_state->port_clock > 540000) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); + return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; + } else if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; + } + + return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct tgl_dkl_phy_ddi_buf_trans * +tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); + return tgl_dkl_phy_hdmi_ddi_trans; +} + +static const struct tgl_dkl_phy_ddi_buf_trans * +tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (crtc_state->port_clock > 270000) { + *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2); + return 
tgl_dkl_phy_dp_ddi_trans_hbr2; + } else { + *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); + return tgl_dkl_phy_dp_ddi_trans; + } +} + +const struct tgl_dkl_phy_ddi_buf_trans * +tgl_get_dkl_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries); + else + return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); +} + +int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *default_entry) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + int n_entries; + + if (INTEL_GEN(dev_priv) >= 12) { + if (intel_phy_is_combo(dev_priv, phy)) + tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries); + else + tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries); + *default_entry = n_entries - 1; + } else if (INTEL_GEN(dev_priv) == 11) { + if (intel_phy_is_combo(dev_priv, phy)) + icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries); + else + icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries); + *default_entry = n_entries - 1; + } else if (IS_CANNONLAKE(dev_priv)) { + cnl_get_buf_trans_hdmi(encoder, &n_entries); + *default_entry = n_entries - 1; + } else if (IS_GEN9_LP(dev_priv)) { + bxt_get_buf_trans_hdmi(encoder, &n_entries); + *default_entry = n_entries - 1; + } else if (IS_GEN9_BC(dev_priv)) { + intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); + *default_entry = 8; + } else if (IS_BROADWELL(dev_priv)) { + intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); + *default_entry = 7; + } else if (IS_HASWELL(dev_priv)) { + intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); + *default_entry = 6; + } else { + drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n"); + return 0; + } + + if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0)) + return 0; + + return n_entries; +} diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h new file mode 100644 index 000000000000..f8f0ef87e977 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef _INTEL_DDI_BUF_TRANS_H_ +#define _INTEL_DDI_BUF_TRANS_H_ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_encoder; +struct intel_crtc_state; + +struct ddi_buf_trans { + u32 trans1; /* balance leg enable, de-emph level */ + u32 trans2; /* vref sel, vswing */ + u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */ +}; + +struct bxt_ddi_buf_trans { + u8 margin; /* swing value */ + u8 scale; /* scale value */ + u8 enable; /* scale enable */ + u8 deemphasis; +}; + +struct cnl_ddi_buf_trans { + u8 dw2_swing_sel; + u8 dw7_n_scalar; + u8 dw4_cursor_coeff; + u8 dw4_post_cursor_2; + u8 dw4_post_cursor_1; +}; + +struct icl_mg_phy_ddi_buf_trans { + u32 cri_txdeemph_override_11_6; + u32 cri_txdeemph_override_5_0; + u32 cri_txdeemph_override_17_12; +}; + +struct tgl_dkl_phy_ddi_buf_trans { + u32 dkl_vswing_control; + u32 dkl_preshoot_control; + u32 dkl_de_emphasis_control; +}; + +bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table); + +int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *default_entry); + +const struct ddi_buf_trans * 
+intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries); +const struct ddi_buf_trans * +intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, + int *n_entries); +const struct ddi_buf_trans * +intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder, + int *n_entries); +const struct ddi_buf_trans * +intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries); + +const struct bxt_ddi_buf_trans * +bxt_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); + +const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); +const struct tgl_dkl_phy_ddi_buf_trans * +tgl_get_dkl_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); +const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); +const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); +const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); +const struct icl_mg_phy_ddi_buf_trans * +icl_get_mg_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); + +const struct cnl_ddi_buf_trans * +cnl_get_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 9843a0f202a0..e3f8d0034fcf 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -24,6 +24,7 @@ * Eric Anholt <eric@anholt.net> */ +#include <acpi/video.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/intel-iommu.h> @@ -43,6 +44,7 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> +#include "display/intel_audio.h" #include "display/intel_crt.h" #include "display/intel_ddi.h" #include "display/intel_display_debugfs.h" @@ -65,7 +67,6 @@ #include "gt/intel_rps.h" #include "i915_drv.h" -#include "i915_trace.h" #include "intel_acpi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" @@ -94,6 +95,8 @@ #include "intel_tc.h" #include "intel_vga.h" #include "i9xx_plane.h" +#include "skl_scaler.h" +#include "skl_universal_plane.h" static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); @@ -112,11 +115,6 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); -static void vlv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config); -static void chv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config); -static void skl_pfit_enable(const struct intel_crtc_state *crtc_state); static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); static void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); @@ -569,224 +567,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); } -static void _vlv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); - intel_de_posting_read(dev_priv, DPLL(pipe)); - udelay(150); - - if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) - drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe); -} - -static void vlv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); - - /* PLL is protected by panel, make sure we can write it */ - assert_panel_unlocked(dev_priv, pipe); - - if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) - _vlv_enable_pll(crtc, pipe_config); - - intel_de_write(dev_priv, DPLL_MD(pipe), - pipe_config->dpll_hw_state.dpll_md); - intel_de_posting_read(dev_priv, DPLL_MD(pipe)); -} - - -static void _chv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - enum dpio_channel port = vlv_pipe_to_channel(pipe); - u32 tmp; - - vlv_dpio_get(dev_priv); - - /* Enable back the 10bit clock to display controller */ - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); - tmp |= DPIO_DCLKP_EN; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); - - vlv_dpio_put(dev_priv); - - /* - * Need to wait > 100ns between dclkp clock enable bit and PLL enable. - */ - udelay(1); - - /* Enable PLL */ - intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); - - /* Check PLL is locked */ - if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) - drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); -} - -static void chv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); - - /* PLL is protected by panel, make sure we can write it */ - assert_panel_unlocked(dev_priv, pipe); - - if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) - _chv_enable_pll(crtc, pipe_config); - - if (pipe != PIPE_A) { - /* - * WaPixelRepeatModeFixForC0:chv - * - * DPLLCMD is AWOL. Use chicken bits to propagate - * the value from DPLLBMD to either pipe B or C. - */ - intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); - intel_de_write(dev_priv, DPLL_MD(PIPE_B), - pipe_config->dpll_hw_state.dpll_md); - intel_de_write(dev_priv, CBR4_VLV, 0); - dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; - - /* - * DPLLB VGA mode also seems to cause problems. - * We should always have it disabled. 
- */ - drm_WARN_ON(&dev_priv->drm, - (intel_de_read(dev_priv, DPLL(PIPE_B)) & - DPLL_VGA_MODE_DIS) == 0); - } else { - intel_de_write(dev_priv, DPLL_MD(pipe), - pipe_config->dpll_hw_state.dpll_md); - intel_de_posting_read(dev_priv, DPLL_MD(pipe)); - } -} - -static bool i9xx_has_pps(struct drm_i915_private *dev_priv) -{ - if (IS_I830(dev_priv)) - return false; - - return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); -} - -static void i9xx_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg = DPLL(crtc->pipe); - u32 dpll = crtc_state->dpll_hw_state.dpll; - int i; - - assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); - - /* PLL is protected by panel, make sure we can write it */ - if (i9xx_has_pps(dev_priv)) - assert_panel_unlocked(dev_priv, crtc->pipe); - - /* - * Apparently we need to have VGA mode enabled prior to changing - * the P1/P2 dividers. Otherwise the DPLL will keep using the old - * dividers, even though the register value does change. - */ - intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS); - intel_de_write(dev_priv, reg, dpll); - - /* Wait for the clocks to stabilize. */ - intel_de_posting_read(dev_priv, reg); - udelay(150); - - if (INTEL_GEN(dev_priv) >= 4) { - intel_de_write(dev_priv, DPLL_MD(crtc->pipe), - crtc_state->dpll_hw_state.dpll_md); - } else { - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - intel_de_write(dev_priv, reg, dpll); - } - - /* We do this three times for luck */ - for (i = 0; i < 3; i++) { - intel_de_write(dev_priv, reg, dpll); - intel_de_posting_read(dev_priv, reg); - udelay(150); /* wait for warmup */ - } -} - -static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - /* Don't disable pipe or pipe PLLs if needed */ - if (IS_I830(dev_priv)) - return; - - /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); - - intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); - intel_de_posting_read(dev_priv, DPLL(pipe)); -} - -static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - u32 val; - - /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, (enum transcoder)pipe); - - val = DPLL_INTEGRATED_REF_CLK_VLV | - DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; - if (pipe != PIPE_A) - val |= DPLL_INTEGRATED_CRI_CLK_VLV; - - intel_de_write(dev_priv, DPLL(pipe), val); - intel_de_posting_read(dev_priv, DPLL(pipe)); -} - -static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - enum dpio_channel port = vlv_pipe_to_channel(pipe); - u32 val; - - /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, (enum transcoder)pipe); - - val = DPLL_SSC_REF_CLK_CHV | - DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; - if (pipe != PIPE_A) - val |= DPLL_INTEGRATED_CRI_CLK_VLV; - - intel_de_write(dev_priv, DPLL(pipe), val); - intel_de_posting_read(dev_priv, DPLL(pipe)); - - vlv_dpio_get(dev_priv); - - /* Disable 10bit clock to display controller */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); - val &= ~DPIO_DCLKP_EN; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); - - vlv_dpio_put(dev_priv); -} - void 
vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dig_port, unsigned int expected_mask) @@ -1013,8 +793,6 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) /* FIXME: assert CPU port conditions for SNB+ */ } - trace_intel_pipe_enable(crtc); - reg = PIPECONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); if (val & PIPECONF_ENABLE) { @@ -1054,8 +832,6 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - trace_intel_pipe_disable(crtc); - reg = PIPECONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); if ((val & PIPECONF_ENABLE) == 0) @@ -1082,32 +858,6 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) return IS_GEN(dev_priv, 2) ? 2048 : 4096; } -static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) -{ - if (!is_ccs_modifier(fb->modifier)) - return false; - - return plane >= fb->format->num_planes / 2; -} - -static bool is_gen12_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; -} - -static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) -{ - return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); -} - -static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane) -{ - return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC && - plane == 2; -} - static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) { if (is_ccs_modifier(fb->modifier)) @@ -1116,38 +866,6 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane) return plane == 1; } -static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) -{ - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || - (main_plane && main_plane >= fb->format->num_planes / 2)); - - return fb->format->num_planes / 2 + main_plane; -} - -static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) -{ - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || - ccs_plane < fb->format->num_planes / 2); - - if (is_gen12_ccs_cc_plane(fb, ccs_plane)) - return 0; - - return ccs_plane - fb->format->num_planes / 2; -} - -int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) -{ - struct drm_i915_private *i915 = to_i915(fb->dev); - - if (is_ccs_modifier(fb->modifier)) - return main_to_ccs_plane(fb, main_plane); - else if (INTEL_GEN(i915) < 11 && - intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) - return 1; - else - return 0; -} - bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, u64 modifier) @@ -1163,7 +881,7 @@ static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, color_plane == 1; } -static unsigned int +unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) { struct drm_i915_private *dev_priv = to_i915(fb->dev); @@ -1217,7 +935,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) } } -static unsigned int +unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane) { if (is_gen12_ccs_plane(fb, color_plane)) @@ -1322,8 +1040,8 @@ static bool has_async_flips(struct drm_i915_private *i915) return INTEL_GEN(i915) >= 5; } -static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, - int color_plane) +unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, + 
int color_plane) { struct drm_i915_private *dev_priv = to_i915(fb->dev); @@ -1590,10 +1308,10 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, * Adjust the tile offset by moving the difference into * the x/y offsets. */ -static u32 intel_plane_adjust_aligned_offset(int *x, int *y, - const struct intel_plane_state *state, - int color_plane, - u32 old_offset, u32 new_offset) +u32 intel_plane_adjust_aligned_offset(int *x, int *y, + const struct intel_plane_state *state, + int color_plane, + u32 old_offset, u32 new_offset) { return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, state->hw.rotation, @@ -1881,18 +1599,9 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) } } -bool is_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; -} - static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane) { - return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)], + return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)], 512) * 64; } @@ -2050,7 +1759,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) return stride > max_stride; } -static void +void intel_fb_plane_get_subsampling(int *hsub, int *vsub, const struct drm_framebuffer *fb, int color_plane) @@ -2075,7 +1784,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub, return; } - main_plane = ccs_to_main_plane(fb, color_plane); + main_plane = skl_ccs_to_main_plane(fb, color_plane); *hsub = drm_format_info_block_width(fb->format, color_plane) / drm_format_info_block_width(fb->format, main_plane); @@ -2115,7 +1824,7 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y) ccs_x = (x * hsub) % tile_width; ccs_y = (y * vsub) % tile_height; - main_plane = ccs_to_main_plane(fb, ccs_plane); + main_plane = skl_ccs_to_main_plane(fb, ccs_plane); main_x = intel_fb->normal[main_plane].x % tile_width; main_y = intel_fb->normal[main_plane].y % tile_height; @@ -2141,7 +1850,7 @@ static void intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane) { int main_plane = is_ccs_plane(fb, color_plane) ? 
- ccs_to_main_plane(fb, color_plane) : 0; + skl_ccs_to_main_plane(fb, color_plane) : 0; int main_hsub, main_vsub; int hsub, vsub; @@ -2495,106 +2204,6 @@ intel_plane_compute_gtt(struct intel_plane_state *plane_state) return intel_plane_check_stride(plane_state); } -static int i9xx_format_to_fourcc(int format) -{ - switch (format) { - case DISPPLANE_8BPP: - return DRM_FORMAT_C8; - case DISPPLANE_BGRA555: - return DRM_FORMAT_ARGB1555; - case DISPPLANE_BGRX555: - return DRM_FORMAT_XRGB1555; - case DISPPLANE_BGRX565: - return DRM_FORMAT_RGB565; - default: - case DISPPLANE_BGRX888: - return DRM_FORMAT_XRGB8888; - case DISPPLANE_RGBX888: - return DRM_FORMAT_XBGR8888; - case DISPPLANE_BGRA888: - return DRM_FORMAT_ARGB8888; - case DISPPLANE_RGBA888: - return DRM_FORMAT_ABGR8888; - case DISPPLANE_BGRX101010: - return DRM_FORMAT_XRGB2101010; - case DISPPLANE_RGBX101010: - return DRM_FORMAT_XBGR2101010; - case DISPPLANE_BGRA101010: - return DRM_FORMAT_ARGB2101010; - case DISPPLANE_RGBA101010: - return DRM_FORMAT_ABGR2101010; - case DISPPLANE_RGBX161616: - return DRM_FORMAT_XBGR16161616F; - } -} - -int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) -{ - switch (format) { - case PLANE_CTL_FORMAT_RGB_565: - return DRM_FORMAT_RGB565; - case PLANE_CTL_FORMAT_NV12: - return DRM_FORMAT_NV12; - case PLANE_CTL_FORMAT_XYUV: - return DRM_FORMAT_XYUV8888; - case PLANE_CTL_FORMAT_P010: - return DRM_FORMAT_P010; - case PLANE_CTL_FORMAT_P012: - return DRM_FORMAT_P012; - case PLANE_CTL_FORMAT_P016: - return DRM_FORMAT_P016; - case PLANE_CTL_FORMAT_Y210: - return DRM_FORMAT_Y210; - case PLANE_CTL_FORMAT_Y212: - return DRM_FORMAT_Y212; - case PLANE_CTL_FORMAT_Y216: - return DRM_FORMAT_Y216; - case PLANE_CTL_FORMAT_Y410: - return DRM_FORMAT_XVYU2101010; - case PLANE_CTL_FORMAT_Y412: - return DRM_FORMAT_XVYU12_16161616; - case PLANE_CTL_FORMAT_Y416: - return DRM_FORMAT_XVYU16161616; - default: - case PLANE_CTL_FORMAT_XRGB_8888: - if (rgb_order) { - if (alpha) - return DRM_FORMAT_ABGR8888; - else - return DRM_FORMAT_XBGR8888; - } else { - if (alpha) - return DRM_FORMAT_ARGB8888; - else - return DRM_FORMAT_XRGB8888; - } - case PLANE_CTL_FORMAT_XRGB_2101010: - if (rgb_order) { - if (alpha) - return DRM_FORMAT_ABGR2101010; - else - return DRM_FORMAT_XBGR2101010; - } else { - if (alpha) - return DRM_FORMAT_ARGB2101010; - else - return DRM_FORMAT_XRGB2101010; - } - case PLANE_CTL_FORMAT_XRGB_16161616F: - if (rgb_order) { - if (alpha) - return DRM_FORMAT_ABGR16161616F; - else - return DRM_FORMAT_XBGR16161616F; - } else { - if (alpha) - return DRM_FORMAT_ARGB16161616F; - else - return DRM_FORMAT_XRGB16161616F; - } - } -} - static struct i915_vma * initial_plane_vma(struct drm_i915_private *i915, struct intel_initial_plane_config *plane_config) @@ -2789,6 +2398,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); intel_disable_plane(plane, crtc_state); + intel_wait_for_vblank(dev_priv, crtc->pipe); } static void @@ -2899,52 +2509,6 @@ valid_fb: &to_intel_frontbuffer(fb)->bits); } - -static bool -skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, - int main_x, int main_y, u32 main_offset, - int ccs_plane) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - int aux_x = plane_state->color_plane[ccs_plane].x; - int aux_y = plane_state->color_plane[ccs_plane].y; - u32 aux_offset = plane_state->color_plane[ccs_plane].offset; - u32 alignment = intel_surf_alignment(fb, ccs_plane); - int hsub; - int vsub; - - 
intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); - while (aux_offset >= main_offset && aux_y <= main_y) { - int x, y; - - if (aux_x == main_x && aux_y == main_y) - break; - - if (aux_offset == 0) - break; - - x = aux_x / hsub; - y = aux_y / vsub; - aux_offset = intel_plane_adjust_aligned_offset(&x, &y, - plane_state, - ccs_plane, - aux_offset, - aux_offset - - alignment); - aux_x = x * hsub + aux_x % hsub; - aux_y = y * vsub + aux_y % vsub; - } - - if (aux_x != main_x || aux_y != main_y) - return false; - - plane_state->color_plane[ccs_plane].offset = aux_offset; - plane_state->color_plane[ccs_plane].x = aux_x; - plane_state->color_plane[ccs_plane].y = aux_y; - - return true; -} - unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) { @@ -2956,643 +2520,6 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) return y; } -static int intel_plane_min_width(struct intel_plane *plane, - const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - if (plane->min_width) - return plane->min_width(fb, color_plane, rotation); - else - return 1; -} - -static int intel_plane_max_width(struct intel_plane *plane, - const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - if (plane->max_width) - return plane->max_width(fb, color_plane, rotation); - else - return INT_MAX; -} - -static int intel_plane_max_height(struct intel_plane *plane, - const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - if (plane->max_height) - return plane->max_height(fb, color_plane, rotation); - else - return INT_MAX; -} - -int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, - int *x, int *y, u32 *offset) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - const int aux_plane = intel_main_to_aux_plane(fb, 0); - const u32 aux_offset = plane_state->color_plane[aux_plane].offset; - const u32 alignment = intel_surf_alignment(fb, 0); - const int w = drm_rect_width(&plane_state->uapi.src) >> 16; - - intel_add_fb_offsets(x, y, plane_state, 0); - *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0); - if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment))) - return -EINVAL; - - /* - * AUX surface offset is specified as the distance from the - * main surface offset, and it must be non-negative. Make - * sure that is what we will get. - */ - if (aux_plane && *offset > aux_offset) - *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, - *offset, - aux_offset & ~(alignment - 1)); - - /* - * When using an X-tiled surface, the plane blows up - * if the x offset + width exceed the stride. 
- * - * TODO: linear and Y-tiled seem fine, Yf untested, - */ - if (fb->modifier == I915_FORMAT_MOD_X_TILED) { - int cpp = fb->format->cpp[0]; - - while ((*x + w) * cpp > plane_state->color_plane[0].stride) { - if (*offset == 0) { - drm_dbg_kms(&dev_priv->drm, - "Unable to find suitable display surface offset due to X-tiling\n"); - return -EINVAL; - } - - *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, - *offset, - *offset - alignment); - } - } - - return 0; -} - -static int skl_check_main_surface(struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - const unsigned int rotation = plane_state->hw.rotation; - int x = plane_state->uapi.src.x1 >> 16; - int y = plane_state->uapi.src.y1 >> 16; - const int w = drm_rect_width(&plane_state->uapi.src) >> 16; - const int h = drm_rect_height(&plane_state->uapi.src) >> 16; - const int min_width = intel_plane_min_width(plane, fb, 0, rotation); - const int max_width = intel_plane_max_width(plane, fb, 0, rotation); - const int max_height = intel_plane_max_height(plane, fb, 0, rotation); - const int aux_plane = intel_main_to_aux_plane(fb, 0); - const u32 alignment = intel_surf_alignment(fb, 0); - u32 offset; - int ret; - - if (w > max_width || w < min_width || h > max_height) { - drm_dbg_kms(&dev_priv->drm, - "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", - w, h, min_width, max_width, max_height); - return -EINVAL; - } - - ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); - if (ret) - return ret; - - /* - * CCS AUX surface doesn't have its own x/y offsets, we must make sure - * they match with the main surface x/y offsets. - */ - if (is_ccs_modifier(fb->modifier)) { - while (!skl_check_main_ccs_coordinates(plane_state, x, y, - offset, aux_plane)) { - if (offset == 0) - break; - - offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, - offset, offset - alignment); - } - - if (x != plane_state->color_plane[aux_plane].x || - y != plane_state->color_plane[aux_plane].y) { - drm_dbg_kms(&dev_priv->drm, - "Unable to find suitable display surface offset due to CCS\n"); - return -EINVAL; - } - } - - drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191); - - plane_state->color_plane[0].offset = offset; - plane_state->color_plane[0].x = x; - plane_state->color_plane[0].y = y; - - /* - * Put the final coordinates back so that the src - * coordinate checks will see the right values. 
- */ - drm_rect_translate_to(&plane_state->uapi.src, - x << 16, y << 16); - - return 0; -} - -static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *i915 = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int rotation = plane_state->hw.rotation; - int uv_plane = 1; - int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation); - int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation); - int x = plane_state->uapi.src.x1 >> 17; - int y = plane_state->uapi.src.y1 >> 17; - int w = drm_rect_width(&plane_state->uapi.src) >> 17; - int h = drm_rect_height(&plane_state->uapi.src) >> 17; - u32 offset; - - /* FIXME not quite sure how/if these apply to the chroma plane */ - if (w > max_width || h > max_height) { - drm_dbg_kms(&i915->drm, - "CbCr source size %dx%d too big (limit %dx%d)\n", - w, h, max_width, max_height); - return -EINVAL; - } - - intel_add_fb_offsets(&x, &y, plane_state, uv_plane); - offset = intel_plane_compute_aligned_offset(&x, &y, - plane_state, uv_plane); - - if (is_ccs_modifier(fb->modifier)) { - int ccs_plane = main_to_ccs_plane(fb, uv_plane); - u32 aux_offset = plane_state->color_plane[ccs_plane].offset; - u32 alignment = intel_surf_alignment(fb, uv_plane); - - if (offset > aux_offset) - offset = intel_plane_adjust_aligned_offset(&x, &y, - plane_state, - uv_plane, - offset, - aux_offset & ~(alignment - 1)); - - while (!skl_check_main_ccs_coordinates(plane_state, x, y, - offset, ccs_plane)) { - if (offset == 0) - break; - - offset = intel_plane_adjust_aligned_offset(&x, &y, - plane_state, - uv_plane, - offset, offset - alignment); - } - - if (x != plane_state->color_plane[ccs_plane].x || - y != plane_state->color_plane[ccs_plane].y) { - drm_dbg_kms(&i915->drm, - "Unable to find suitable display surface offset due to CCS\n"); - return -EINVAL; - } - } - - drm_WARN_ON(&i915->drm, x > 8191 || y > 8191); - - plane_state->color_plane[uv_plane].offset = offset; - plane_state->color_plane[uv_plane].x = x; - plane_state->color_plane[uv_plane].y = y; - - return 0; -} - -static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - int src_x = plane_state->uapi.src.x1 >> 16; - int src_y = plane_state->uapi.src.y1 >> 16; - u32 offset; - int ccs_plane; - - for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { - int main_hsub, main_vsub; - int hsub, vsub; - int x, y; - - if (!is_ccs_plane(fb, ccs_plane) || - is_gen12_ccs_cc_plane(fb, ccs_plane)) - continue; - - intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, - ccs_to_main_plane(fb, ccs_plane)); - intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); - - hsub *= main_hsub; - vsub *= main_vsub; - x = src_x / hsub; - y = src_y / vsub; - - intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); - - offset = intel_plane_compute_aligned_offset(&x, &y, - plane_state, - ccs_plane); - - plane_state->color_plane[ccs_plane].offset = offset; - plane_state->color_plane[ccs_plane].x = (x * hsub + - src_x % hsub) / - main_hsub; - plane_state->color_plane[ccs_plane].y = (y * vsub + - src_y % vsub) / - main_vsub; - } - - return 0; -} - -int skl_check_plane_surface(struct intel_plane_state *plane_state) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - int ret, i; - - ret = intel_plane_compute_gtt(plane_state); - if (ret) - return ret; - - if 
(!plane_state->uapi.visible) - return 0; - - /* - * Handle the AUX surface first since the main surface setup depends on - * it. - */ - if (is_ccs_modifier(fb->modifier)) { - ret = skl_check_ccs_aux_surface(plane_state); - if (ret) - return ret; - } - - if (intel_format_info_is_yuv_semiplanar(fb->format, - fb->modifier)) { - ret = skl_check_nv12_aux_surface(plane_state); - if (ret) - return ret; - } - - for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) { - plane_state->color_plane[i].offset = 0; - plane_state->color_plane[i].x = 0; - plane_state->color_plane[i].y = 0; - } - - ret = skl_check_main_surface(plane_state); - if (ret) - return ret; - - return 0; -} - -static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0); - intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); - intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -/* - * This function detaches (aka. unbinds) unused scalers in hardware - */ -static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - const struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - int i; - - /* loop through and disable scalers that aren't in use */ - for (i = 0; i < intel_crtc->num_scalers; i++) { - if (!scaler_state->scalers[i].in_use) - skl_detach_scaler(intel_crtc, i); - } -} - -static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, - int color_plane, unsigned int rotation) -{ - /* - * The stride is either expressed as a multiple of 64 bytes chunks for - * linear buffers or in number of tiles for tiled buffers. 
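
For illustration only (not driver code): the stride-unit rule described above, with hypothetical names; the 512-byte X-tile width used in the example is the conventional value for this hardware and is an assumption here.

	/* Convert a byte pitch into the units the comment above describes. */
	static unsigned int example_stride_units(unsigned int byte_stride,
						 bool linear,
						 unsigned int tile_width_bytes)
	{
		if (linear)
			return byte_stride / 64;		/* 64-byte chunks */

		return byte_stride / tile_width_bytes;		/* whole tiles */
	}

	/* e.g. a 7680-byte pitch: linear -> 7680 / 64 = 120 chunks;
	 * X-tiled (512-byte-wide tiles assumed) -> 7680 / 512 = 15 tiles. */
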
- */ - if (is_surface_linear(fb, color_plane)) - return 64; - else if (drm_rotation_90_or_270(rotation)) - return intel_tile_height(fb, color_plane); - else - return intel_tile_width_bytes(fb, color_plane); -} - -u32 skl_plane_stride(const struct intel_plane_state *plane_state, - int color_plane) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int rotation = plane_state->hw.rotation; - u32 stride = plane_state->color_plane[color_plane].stride; - - if (color_plane >= fb->format->num_planes) - return 0; - - return stride / skl_plane_stride_mult(fb, color_plane, rotation); -} - -static u32 skl_plane_ctl_format(u32 pixel_format) -{ - switch (pixel_format) { - case DRM_FORMAT_C8: - return PLANE_CTL_FORMAT_INDEXED; - case DRM_FORMAT_RGB565: - return PLANE_CTL_FORMAT_RGB_565; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - return PLANE_CTL_FORMAT_XRGB_8888; - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ABGR2101010: - return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_ARGB2101010: - return PLANE_CTL_FORMAT_XRGB_2101010; - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: - return PLANE_CTL_FORMAT_XRGB_16161616F; - case DRM_FORMAT_XYUV8888: - return PLANE_CTL_FORMAT_XYUV; - case DRM_FORMAT_YUYV: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; - case DRM_FORMAT_YVYU: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; - case DRM_FORMAT_UYVY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; - case DRM_FORMAT_VYUY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; - case DRM_FORMAT_NV12: - return PLANE_CTL_FORMAT_NV12; - case DRM_FORMAT_P010: - return PLANE_CTL_FORMAT_P010; - case DRM_FORMAT_P012: - return PLANE_CTL_FORMAT_P012; - case DRM_FORMAT_P016: - return PLANE_CTL_FORMAT_P016; - case DRM_FORMAT_Y210: - return PLANE_CTL_FORMAT_Y210; - case DRM_FORMAT_Y212: - return PLANE_CTL_FORMAT_Y212; - case DRM_FORMAT_Y216: - return PLANE_CTL_FORMAT_Y216; - case DRM_FORMAT_XVYU2101010: - return PLANE_CTL_FORMAT_Y410; - case DRM_FORMAT_XVYU12_16161616: - return PLANE_CTL_FORMAT_Y412; - case DRM_FORMAT_XVYU16161616: - return PLANE_CTL_FORMAT_Y416; - default: - MISSING_CASE(pixel_format); - } - - return 0; -} - -static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) -{ - if (!plane_state->hw.fb->format->has_alpha) - return PLANE_CTL_ALPHA_DISABLE; - - switch (plane_state->hw.pixel_blend_mode) { - case DRM_MODE_BLEND_PIXEL_NONE: - return PLANE_CTL_ALPHA_DISABLE; - case DRM_MODE_BLEND_PREMULTI: - return PLANE_CTL_ALPHA_SW_PREMULTIPLY; - case DRM_MODE_BLEND_COVERAGE: - return PLANE_CTL_ALPHA_HW_PREMULTIPLY; - default: - MISSING_CASE(plane_state->hw.pixel_blend_mode); - return PLANE_CTL_ALPHA_DISABLE; - } -} - -static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) -{ - if (!plane_state->hw.fb->format->has_alpha) - return PLANE_COLOR_ALPHA_DISABLE; - - switch (plane_state->hw.pixel_blend_mode) { - case DRM_MODE_BLEND_PIXEL_NONE: - return PLANE_COLOR_ALPHA_DISABLE; - case DRM_MODE_BLEND_PREMULTI: - return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; - case DRM_MODE_BLEND_COVERAGE: - return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; - default: - MISSING_CASE(plane_state->hw.pixel_blend_mode); - return 
PLANE_COLOR_ALPHA_DISABLE; - } -} - -static u32 skl_plane_ctl_tiling(u64 fb_modifier) -{ - switch (fb_modifier) { - case DRM_FORMAT_MOD_LINEAR: - break; - case I915_FORMAT_MOD_X_TILED: - return PLANE_CTL_TILED_X; - case I915_FORMAT_MOD_Y_TILED: - return PLANE_CTL_TILED_Y; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - return PLANE_CTL_TILED_Y | - PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | - PLANE_CTL_CLEAR_COLOR_DISABLE; - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; - case I915_FORMAT_MOD_Yf_TILED: - return PLANE_CTL_TILED_YF; - case I915_FORMAT_MOD_Yf_TILED_CCS: - return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; - default: - MISSING_CASE(fb_modifier); - } - - return 0; -} - -static u32 skl_plane_ctl_rotate(unsigned int rotate) -{ - switch (rotate) { - case DRM_MODE_ROTATE_0: - break; - /* - * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr - * while i915 HW rotation is clockwise, thats why this swapping. - */ - case DRM_MODE_ROTATE_90: - return PLANE_CTL_ROTATE_270; - case DRM_MODE_ROTATE_180: - return PLANE_CTL_ROTATE_180; - case DRM_MODE_ROTATE_270: - return PLANE_CTL_ROTATE_90; - default: - MISSING_CASE(rotate); - } - - return 0; -} - -static u32 cnl_plane_ctl_flip(unsigned int reflect) -{ - switch (reflect) { - case 0: - break; - case DRM_MODE_REFLECT_X: - return PLANE_CTL_FLIP_HORIZONTAL; - case DRM_MODE_REFLECT_Y: - default: - MISSING_CASE(reflect); - } - - return 0; -} - -u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u32 plane_ctl = 0; - - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - return plane_ctl; - - if (crtc_state->gamma_enable) - plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; - - if (crtc_state->csc_enable) - plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; - - return plane_ctl; -} - -u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int rotation = plane_state->hw.rotation; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 plane_ctl; - - plane_ctl = PLANE_CTL_ENABLE; - - if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { - plane_ctl |= skl_plane_ctl_alpha(plane_state); - plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; - - if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) - plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; - - if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; - } - - plane_ctl |= skl_plane_ctl_format(fb->format->format); - plane_ctl |= skl_plane_ctl_tiling(fb->modifier); - plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); - - if (INTEL_GEN(dev_priv) >= 10) - plane_ctl |= cnl_plane_ctl_flip(rotation & - DRM_MODE_REFLECT_MASK); - - if (key->flags & I915_SET_COLORKEY_DESTINATION) - plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; - else if (key->flags & I915_SET_COLORKEY_SOURCE) - plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; - - return plane_ctl; -} - -u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = 
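
The 90/270 swap in the rotate helper above follows from DRM rotations being counter-clockwise (the Xrandr convention) while the plane hardware rotates clockwise; a rough sketch of that mapping, with a hypothetical helper name:

	static unsigned int example_ccw_to_cw(unsigned int drm_degrees)
	{
		/* 0 -> 0, 90 -> 270, 180 -> 180, 270 -> 90 */
		return (360 - drm_degrees) % 360;
	}
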
to_i915(crtc_state->uapi.crtc->dev); - u32 plane_color_ctl = 0; - - if (INTEL_GEN(dev_priv) >= 11) - return plane_color_ctl; - - if (crtc_state->gamma_enable) - plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; - - if (crtc_state->csc_enable) - plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; - - return plane_color_ctl; -} - -u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - u32 plane_color_ctl = 0; - - plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; - plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); - - if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { - switch (plane_state->hw.color_encoding) { - case DRM_COLOR_YCBCR_BT709: - plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; - break; - case DRM_COLOR_YCBCR_BT2020: - plane_color_ctl |= - PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020; - break; - default: - plane_color_ctl |= - PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601; - } - if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; - } else if (fb->format->is_yuv) { - plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; - if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; - } - - return plane_color_ctl; -} - static int __intel_display_resume(struct drm_device *dev, struct drm_atomic_state *state, @@ -4157,461 +3084,6 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv, } } -/* - * The hardware phase 0.0 refers to the center of the pixel. - * We want to start from the top/left edge which is phase - * -0.5. That matches how the hardware calculates the scaling - * factors (from top-left of the first pixel to bottom-right - * of the last pixel, as opposed to the pixel centers). - * - * For 4:2:0 subsampled chroma planes we obviously have to - * adjust that so that the chroma sample position lands in - * the right spot. - * - * Note that for packed YCbCr 4:2:2 formats there is no way to - * control chroma siting. The hardware simply replicates the - * chroma samples for both of the luma samples, and thus we don't - * actually get the expected MPEG2 chroma siting convention :( - * The same behaviour is observed on pre-SKL platforms as well. - * - * Theory behind the formula (note that we ignore sub-pixel - * source coordinates): - * s = source sample position - * d = destination sample position - * - * Downscaling 4:1: - * -0.5 - * | 0.0 - * | | 1.5 (initial phase) - * | | | - * v v v - * | s | s | s | s | - * | d | - * - * Upscaling 1:4: - * -0.5 - * | -0.375 (initial phase) - * | | 0.0 - * | | | - * v v v - * | s | - * | d | d | d | d | - */ -u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) -{ - int phase = -0x8000; - u16 trip = 0; - - if (chroma_cosited) - phase += (sub - 1) * 0x8000 / sub; - - phase += scale / (2 * sub); - - /* - * Hardware initial phase limited to [-0.5:1.5]. - * Since the max hardware scale factor is 3.0, we - * should never actually excdeed 1.0 here. 
- */ - WARN_ON(phase < -0x8000 || phase > 0x18000); - - if (phase < 0) - phase = 0x10000 + phase; - else - trip = PS_PHASE_TRIP; - - return ((phase >> 2) & PS_PHASE_MASK) | trip; -} - -#define SKL_MIN_SRC_W 8 -#define SKL_MAX_SRC_W 4096 -#define SKL_MIN_SRC_H 8 -#define SKL_MAX_SRC_H 4096 -#define SKL_MIN_DST_W 8 -#define SKL_MAX_DST_W 4096 -#define SKL_MIN_DST_H 8 -#define SKL_MAX_DST_H 4096 -#define ICL_MAX_SRC_W 5120 -#define ICL_MAX_SRC_H 4096 -#define ICL_MAX_DST_W 5120 -#define ICL_MAX_DST_H 4096 -#define SKL_MIN_YUV_420_SRC_W 16 -#define SKL_MIN_YUV_420_SRC_H 16 - -static int -skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, - unsigned int scaler_user, int *scaler_id, - int src_w, int src_h, int dst_w, int dst_h, - const struct drm_format_info *format, - u64 modifier, bool need_scaler) -{ - struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - struct intel_crtc *intel_crtc = - to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - if (src_w != dst_w || src_h != dst_h) - need_scaler = true; - - /* - * Scaling/fitting not supported in IF-ID mode in GEN9+ - * TODO: Interlace fetch mode doesn't support YUV420 planar formats. - * Once NV12 is enabled, handle it here while allocating scaler - * for NV12. - */ - if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable && - need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { - drm_dbg_kms(&dev_priv->drm, - "Pipe/Plane scaling not supported with IF-ID mode\n"); - return -EINVAL; - } - - /* - * if plane is being disabled or scaler is no more required or force detach - * - free scaler binded to this plane/crtc - * - in order to do this, update crtc->scaler_usage - * - * Here scaler state in crtc_state is set free so that - * scaler can be assigned to other user. Actual register - * update to free the scaler is done in plane/panel-fit programming. - * For this purpose crtc/plane_state->scaler_id isn't reset here. 
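
A standalone restatement of the initial-phase arithmetic removed above, kept in .16 fixed point; the helper name is hypothetical and only the arithmetic is shown, not the register packing:

	static int example_scaler_phase(int sub, int scale, bool chroma_cosited)
	{
		int phase = -0x8000;			/* -0.5 in .16 fixed point */

		if (chroma_cosited)
			phase += (sub - 1) * 0x8000 / sub;

		phase += scale / (2 * sub);

		/* the removed code wraps negative results to 0x10000 + phase,
		 * sets PS_PHASE_TRIP for non-negative ones, and returns
		 * ((phase >> 2) & PS_PHASE_MASK) | trip */
		return phase;
	}

	/* Worked values at 1:1 scale (scale = 0x10000):
	 *   luma (sub = 1):                   -0x8000 + 0x8000          =  0
	 *   4:2:0 chroma, cosited (sub = 2):  -0x8000 + 0x4000 + 0x4000 =  0
	 *   4:2:0 chroma, not cosited:        -0x8000 + 0x4000          = -0x4000 (-0.25)
	 */
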
- */ - if (force_detach || !need_scaler) { - if (*scaler_id >= 0) { - scaler_state->scaler_users &= ~(1 << scaler_user); - scaler_state->scalers[*scaler_id].in_use = 0; - - drm_dbg_kms(&dev_priv->drm, - "scaler_user index %u.%u: " - "Staged freeing scaler id %d scaler_users = 0x%x\n", - intel_crtc->pipe, scaler_user, *scaler_id, - scaler_state->scaler_users); - *scaler_id = -1; - } - return 0; - } - - if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && - (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { - drm_dbg_kms(&dev_priv->drm, - "Planar YUV: src dimensions not met\n"); - return -EINVAL; - } - - /* range checks */ - if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || - dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || - (INTEL_GEN(dev_priv) >= 11 && - (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || - dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || - (INTEL_GEN(dev_priv) < 11 && - (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || - dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { - drm_dbg_kms(&dev_priv->drm, - "scaler_user index %u.%u: src %ux%u dst %ux%u " - "size is out of scaler range\n", - intel_crtc->pipe, scaler_user, src_w, src_h, - dst_w, dst_h); - return -EINVAL; - } - - /* mark this plane as a scaler user in crtc_state */ - scaler_state->scaler_users |= (1 << scaler_user); - drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " - "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", - intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, - scaler_state->scaler_users); - - return 0; -} - -static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) -{ - const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - int width, height; - - if (crtc_state->pch_pfit.enabled) { - width = drm_rect_width(&crtc_state->pch_pfit.dst); - height = drm_rect_height(&crtc_state->pch_pfit.dst); - } else { - width = pipe_mode->crtc_hdisplay; - height = pipe_mode->crtc_vdisplay; - } - return skl_update_scaler(crtc_state, !crtc_state->hw.active, - SKL_CRTC_INDEX, - &crtc_state->scaler_state.scaler_id, - crtc_state->pipe_src_w, crtc_state->pipe_src_h, - width, height, NULL, 0, - crtc_state->pch_pfit.enabled); -} - -/** - * skl_update_scaler_plane - Stages update to scaler state for a given plane. - * @crtc_state: crtc's scaler state - * @plane_state: atomic plane state to update - * - * Return - * 0 - scaler_usage updated successfully - * error - requested scaling cannot be supported or other error condition - */ -static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) -{ - struct intel_plane *intel_plane = - to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); - struct drm_framebuffer *fb = plane_state->hw.fb; - int ret; - bool force_detach = !fb || !plane_state->uapi.visible; - bool need_scaler = false; - - /* Pre-gen11 and SDR planes always need a scaler for planar formats. */ - if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && - fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) - need_scaler = true; - - ret = skl_update_scaler(crtc_state, force_detach, - drm_plane_index(&intel_plane->base), - &plane_state->scaler_id, - drm_rect_width(&plane_state->uapi.src) >> 16, - drm_rect_height(&plane_state->uapi.src) >> 16, - drm_rect_width(&plane_state->uapi.dst), - drm_rect_height(&plane_state->uapi.dst), - fb ? fb->format : NULL, - fb ? 
fb->modifier : 0, - need_scaler); - - if (ret || plane_state->scaler_id < 0) - return ret; - - /* check colorkey */ - if (plane_state->ckey.flags) { - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] scaling with color key not allowed", - intel_plane->base.base.id, - intel_plane->base.name); - return -EINVAL; - } - - /* Check src format */ - switch (fb->format->format) { - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_XYUV8888: - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - case DRM_FORMAT_Y210: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - break; - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: - if (INTEL_GEN(dev_priv) >= 11) - break; - fallthrough; - default: - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, intel_plane->base.name, - fb->base.id, fb->format->format); - return -EINVAL; - } - - return 0; -} - -void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); - int i; - - for (i = 0; i < crtc->num_scalers; i++) - skl_detach_scaler(crtc, i); -} - -static int cnl_coef_tap(int i) -{ - return i % 7; -} - -static u16 cnl_nearest_filter_coef(int t) -{ - return t == 3 ? 0x0800 : 0x3000; -} - -/* - * Theory behind setting nearest-neighbor integer scaling: - * - * 17 phase of 7 taps requires 119 coefficients in 60 dwords per set. - * The letter represents the filter tap (D is the center tap) and the number - * represents the coefficient set for a phase (0-16). - * - * +------------+------------------------+------------------------+ - * |Index value | Data value coeffient 1 | Data value coeffient 2 | - * +------------+------------------------+------------------------+ - * | 00h | B0 | A0 | - * +------------+------------------------+------------------------+ - * | 01h | D0 | C0 | - * +------------+------------------------+------------------------+ - * | 02h | F0 | E0 | - * +------------+------------------------+------------------------+ - * | 03h | A1 | G0 | - * +------------+------------------------+------------------------+ - * | 04h | C1 | B1 | - * +------------+------------------------+------------------------+ - * | ... | ... | ... 
| - * +------------+------------------------+------------------------+ - * | 38h | B16 | A16 | - * +------------+------------------------+------------------------+ - * | 39h | D16 | C16 | - * +------------+------------------------+------------------------+ - * | 3Ah | F16 | C16 | - * +------------+------------------------+------------------------+ - * | 3Bh | Reserved | G16 | - * +------------+------------------------+------------------------+ - * - * To enable nearest-neighbor scaling: program scaler coefficents with - * the center tap (Dxx) values set to 1 and all other values set to 0 as per - * SCALER_COEFFICIENT_FORMAT - * - */ - -static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv, - enum pipe pipe, int id, int set) -{ - int i; - - intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), - PS_COEE_INDEX_AUTO_INC); - - for (i = 0; i < 17 * 7; i += 2) { - u32 tmp; - int t; - - t = cnl_coef_tap(i); - tmp = cnl_nearest_filter_coef(t); - - t = cnl_coef_tap(i + 1); - tmp |= cnl_nearest_filter_coef(t) << 16; - - intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set), - tmp); - } - - intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0); -} - -u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set) -{ - if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) { - return (PS_FILTER_PROGRAMMED | - PS_Y_VERT_FILTER_SELECT(set) | - PS_Y_HORZ_FILTER_SELECT(set) | - PS_UV_VERT_FILTER_SELECT(set) | - PS_UV_HORZ_FILTER_SELECT(set)); - } - - return PS_FILTER_MEDIUM; -} - -void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe, - int id, int set, enum drm_scaling_filter filter) -{ - switch (filter) { - case DRM_SCALING_FILTER_DEFAULT: - break; - case DRM_SCALING_FILTER_NEAREST_NEIGHBOR: - cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set); - break; - default: - MISSING_CASE(filter); - } -} - -static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - struct drm_rect src = { - .x2 = crtc_state->pipe_src_w << 16, - .y2 = crtc_state->pipe_src_h << 16, - }; - const struct drm_rect *dst = &crtc_state->pch_pfit.dst; - u16 uv_rgb_hphase, uv_rgb_vphase; - enum pipe pipe = crtc->pipe; - int width = drm_rect_width(dst); - int height = drm_rect_height(dst); - int x = dst->x1; - int y = dst->y1; - int hscale, vscale; - unsigned long irqflags; - int id; - u32 ps_ctrl; - - if (!crtc_state->pch_pfit.enabled) - return; - - if (drm_WARN_ON(&dev_priv->drm, - crtc_state->scaler_state.scaler_id < 0)) - return; - - hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX); - vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX); - - uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); - uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); - - id = scaler_state->scaler_id; - - ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0); - ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode; - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - skl_scaler_setup_filter(dev_priv, pipe, id, 0, - crtc_state->hw.scaling_filter); - - intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl); - - intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), - PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); - intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), - PS_Y_PHASE(0) | 
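
A compact sketch of the coefficient pattern programmed by the nearest-neighbor scaler helpers removed above: 17 phases x 7 taps, two coefficients packed per dword, with only the center tap ("D", tap index 3) set to unity. The array and function names are illustrative; 0x0800 and 0x3000 are the "1.0" and "0" encodings used by the removed helper.

	static void example_fill_nn_coefs(u32 dwords[60])
	{
		int i;

		for (i = 0; i < 17 * 7; i += 2) {
			u32 lo = (i % 7) == 3 ? 0x0800 : 0x3000;	/* coefficient i */
			u32 hi = ((i + 1) % 7) == 3 ? 0x0800 : 0x3000;	/* coefficient i + 1 */

			dwords[i / 2] = lo | (hi << 16);
		}
		/* the 120th (reserved) half-dword ends up holding the "0" encoding,
		 * just as in the original loop */
	}
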
PS_UV_RGB_PHASE(uv_rgb_hphase)); - intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), - x << 16 | y); - intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), - width << 16 | height); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -5546,10 +4018,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (INTEL_GEN(dev_priv) >= 11) icl_pipe_mbus_enable(crtc); - if (new_crtc_state->bigjoiner_slave) { - trace_intel_pipe_enable(crtc); + if (new_crtc_state->bigjoiner_slave) intel_crtc_vblank_on(new_crtc_state); - } intel_encoders_enable(state, crtc); @@ -5680,6 +4150,8 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) return false; + else if (IS_ALDERLAKE_S(dev_priv)) + return phy <= PHY_E; else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return phy <= PHY_D; else if (IS_JSL_EHL(dev_priv)) @@ -5692,11 +4164,9 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) - return false; - else if (INTEL_GEN(dev_priv) >= 12) + if (IS_TIGERLAKE(dev_priv)) return phy >= PHY_D && phy <= PHY_I; - else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv)) + else if (IS_ICELAKE(dev_priv)) return phy >= PHY_C && phy <= PHY_F; else return false; @@ -5704,7 +4174,9 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) { - if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) + if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) + return PHY_B + port - PORT_TC1; + else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) return PHY_C + port - PORT_TC1; else if (IS_JSL_EHL(i915) && port == PORT_D) return PHY_A; @@ -6380,8 +4852,30 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state pipe_mode->crtc_clock /= 2; } - intel_mode_from_crtc_timings(pipe_mode, pipe_mode); - intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode); + if (crtc_state->splitter.enable) { + int n = crtc_state->splitter.link_count; + int overlap = crtc_state->splitter.pixel_overlap; + + /* + * eDP MSO uses segment timings from EDID for transcoder + * timings, but full mode for everything else. 
+ * + * h_full = (h_segment - pixel_overlap) * link_count + */ + pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n; + pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n; + pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n; + pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n; + pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n; + pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n; + pipe_mode->crtc_clock *= n; + + intel_mode_from_crtc_timings(pipe_mode, pipe_mode); + intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); + } else { + intel_mode_from_crtc_timings(pipe_mode, pipe_mode); + intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode); + } intel_crtc_compute_pixel_rate(crtc_state); @@ -6419,6 +4913,19 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, pipe_config->pipe_src_w /= 2; } + if (pipe_config->splitter.enable) { + int n = pipe_config->splitter.link_count; + int overlap = pipe_config->splitter.pixel_overlap; + + pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n; + pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n; + pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n; + pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n; + pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n; + pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n; + pipe_mode->crtc_clock *= n; + } + intel_mode_from_crtc_timings(pipe_mode, pipe_mode); if (INTEL_GEN(dev_priv) < 4) { @@ -6554,35 +5061,6 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) } } -static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe - pipe) -{ - u32 reg_val; - - /* - * PLLB opamp always calibrates to max value of 0x3f, force enable it - * and set it to a reasonable value instead. 
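
A worked example of the splitter scaling added in the two hunks above; the panel numbers are invented for illustration:

	/*
	 * 2-link eDP MSO panel, pixel_overlap = 0, per-segment timings of
	 * 1920 hdisplay / 2080 htotal at a 150000 kHz segment clock:
	 *
	 *   full hdisplay = (1920 - 0) * 2 = 3840
	 *   full htotal   = (2080 - 0) * 2 = 4160
	 *   full clock    = 150000 * 2     = 300000 kHz
	 *
	 * so the pipe-level mode covers the full panel width while the
	 * transcoder keeps the per-segment timings, as the comment above says.
	 */
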
- */ - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); - reg_val &= 0xffffff00; - reg_val |= 0x00000030; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); - - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); - reg_val &= 0x00ffffff; - reg_val |= 0x8c000000; - vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); - - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); - reg_val &= 0xffffff00; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); - - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); - reg_val &= 0x00ffffff; - reg_val |= 0xb0000000; - vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); -} - static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, const struct intel_link_m_n *m_n) { @@ -6678,267 +5156,6 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); } -static void vlv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - u32 mdiv; - u32 bestn, bestm1, bestm2, bestp1, bestp2; - u32 coreclk, reg_val; - - /* Enable Refclk */ - intel_de_write(dev_priv, DPLL(pipe), - pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); - - /* No need to actually set up the DPLL with DSI */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) - return; - - vlv_dpio_get(dev_priv); - - bestn = pipe_config->dpll.n; - bestm1 = pipe_config->dpll.m1; - bestm2 = pipe_config->dpll.m2; - bestp1 = pipe_config->dpll.p1; - bestp2 = pipe_config->dpll.p2; - - /* See eDP HDMI DPIO driver vbios notes doc */ - - /* PLL B needs special handling */ - if (pipe == PIPE_B) - vlv_pllb_recal_opamp(dev_priv, pipe); - - /* Set up Tx target for periodic Rcomp update */ - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); - - /* Disable target IRef on PLL */ - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); - reg_val &= 0x00ffffff; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); - - /* Disable fast lock */ - vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); - - /* Set idtafcrecal before PLL is enabled */ - mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); - mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); - mdiv |= ((bestn << DPIO_N_SHIFT)); - mdiv |= (1 << DPIO_K_SHIFT); - - /* - * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, - * but we don't support that). - * Note: don't use the DAC post divider as it seems unstable. 
- */ - mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); - - mdiv |= DPIO_ENABLE_CALIBRATION; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); - - /* Set HBR and RBR LPF coefficients */ - if (pipe_config->port_clock == 162000 || - intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || - intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), - 0x009f0003); - else - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), - 0x00d0000f); - - if (intel_crtc_has_dp_encoder(pipe_config)) { - /* Use SSC source */ - if (pipe == PIPE_A) - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), - 0x0df40000); - else - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), - 0x0df70000); - } else { /* HDMI or VGA */ - /* Use bend source */ - if (pipe == PIPE_A) - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), - 0x0df70000); - else - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), - 0x0df40000); - } - - coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); - coreclk = (coreclk & 0x0000ff00) | 0x01c00000; - if (intel_crtc_has_dp_encoder(pipe_config)) - coreclk |= 0x01000000; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); - - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); - - vlv_dpio_put(dev_priv); -} - -static void chv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - enum dpio_channel port = vlv_pipe_to_channel(pipe); - u32 loopfilter, tribuf_calcntr; - u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; - u32 dpio_val; - int vco; - - /* Enable Refclk and SSC */ - intel_de_write(dev_priv, DPLL(pipe), - pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); - - /* No need to actually set up the DPLL with DSI */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) - return; - - bestn = pipe_config->dpll.n; - bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; - bestm1 = pipe_config->dpll.m1; - bestm2 = pipe_config->dpll.m2 >> 22; - bestp1 = pipe_config->dpll.p1; - bestp2 = pipe_config->dpll.p2; - vco = pipe_config->dpll.vco; - dpio_val = 0; - loopfilter = 0; - - vlv_dpio_get(dev_priv); - - /* p1 and p2 divider */ - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), - 5 << DPIO_CHV_S1_DIV_SHIFT | - bestp1 << DPIO_CHV_P1_DIV_SHIFT | - bestp2 << DPIO_CHV_P2_DIV_SHIFT | - 1 << DPIO_CHV_K_DIV_SHIFT); - - /* Feedback post-divider - m2 */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); - - /* Feedback refclk divider - n and m1 */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), - DPIO_CHV_M1_DIV_BY_2 | - 1 << DPIO_CHV_N_DIV_SHIFT); - - /* M2 fraction division */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); - - /* M2 fraction division enable */ - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); - dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); - dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); - if (bestm2_frac) - dpio_val |= DPIO_CHV_FRAC_DIV_EN; - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); - - /* Program digital lock detect threshold */ - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); - dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | - DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); - dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); - if (!bestm2_frac) - dpio_val |= 
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); - - /* Loop filter */ - if (vco == 5400000) { - loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); - loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); - tribuf_calcntr = 0x9; - } else if (vco <= 6200000) { - loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); - loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); - tribuf_calcntr = 0x9; - } else if (vco <= 6480000) { - loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); - loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); - tribuf_calcntr = 0x8; - } else { - /* Not supported. Apply the same limits as in the max case */ - loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); - loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); - loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); - tribuf_calcntr = 0; - } - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); - - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); - dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; - dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); - - /* AFC Recal */ - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), - vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | - DPIO_AFC_RECAL); - - vlv_dpio_put(dev_priv); -} - -/** - * vlv_force_pll_on - forcibly enable just the PLL - * @dev_priv: i915 private structure - * @pipe: pipe PLL to enable - * @dpll: PLL configuration - * - * Enable the PLL for @pipe using the supplied @dpll config. To be used - * in cases where we need the PLL enabled even when @pipe is not going to - * be enabled. - */ -int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, - const struct dpll *dpll) -{ - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - struct intel_crtc_state *pipe_config; - - pipe_config = intel_crtc_state_alloc(crtc); - if (!pipe_config) - return -ENOMEM; - - pipe_config->cpu_transcoder = (enum transcoder)pipe; - pipe_config->pixel_multiplier = 1; - pipe_config->dpll = *dpll; - - if (IS_CHERRYVIEW(dev_priv)) { - chv_compute_dpll(crtc, pipe_config); - chv_prepare_pll(crtc, pipe_config); - chv_enable_pll(crtc, pipe_config); - } else { - vlv_compute_dpll(crtc, pipe_config); - vlv_prepare_pll(crtc, pipe_config); - vlv_enable_pll(crtc, pipe_config); - } - - kfree(pipe_config); - - return 0; -} - -/** - * vlv_force_pll_off - forcibly disable just the PLL - * @dev_priv: i915 private structure - * @pipe: pipe PLL to disable - * - * Disable the PLL for @pipe. To be used in cases where we need - * the PLL enabled even when @pipe is not going to be enabled. 
- */ -void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - if (IS_CHERRYVIEW(dev_priv)) - chv_disable_pll(dev_priv, pipe); - else - vlv_disable_pll(dev_priv, pipe); -} - - - static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -7206,92 +5423,6 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); } -static void -i9xx_get_initial_plane_config(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - enum pipe pipe; - u32 val, base, offset; - int fourcc, pixel_format; - unsigned int aligned_height; - struct drm_framebuffer *fb; - struct intel_framebuffer *intel_fb; - - if (!plane->get_hw_state(plane, &pipe)) - return; - - drm_WARN_ON(dev, pipe != crtc->pipe); - - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) { - drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); - return; - } - - fb = &intel_fb->base; - - fb->dev = dev; - - val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); - - if (INTEL_GEN(dev_priv) >= 4) { - if (val & DISPPLANE_TILED) { - plane_config->tiling = I915_TILING_X; - fb->modifier = I915_FORMAT_MOD_X_TILED; - } - - if (val & DISPPLANE_ROTATE_180) - plane_config->rotation = DRM_MODE_ROTATE_180; - } - - if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && - val & DISPPLANE_MIRROR) - plane_config->rotation |= DRM_MODE_REFLECT_X; - - pixel_format = val & DISPPLANE_PIXFORMAT_MASK; - fourcc = i9xx_format_to_fourcc(pixel_format); - fb->format = drm_format_info(fourcc); - - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); - base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; - } else if (INTEL_GEN(dev_priv) >= 4) { - if (plane_config->tiling) - offset = intel_de_read(dev_priv, - DSPTILEOFF(i9xx_plane)); - else - offset = intel_de_read(dev_priv, - DSPLINOFF(i9xx_plane)); - base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; - } else { - base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); - } - plane_config->base = base; - - val = intel_de_read(dev_priv, PIPESRC(pipe)); - fb->width = ((val >> 16) & 0xfff) + 1; - fb->height = ((val >> 0) & 0xfff) + 1; - - val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); - fb->pitches[0] = val & 0xffffffc0; - - aligned_height = intel_fb_align_height(fb, 0, fb->height); - - plane_config->size = fb->pitches[0] * aligned_height; - - drm_dbg_kms(&dev_priv->drm, - "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", - crtc->base.name, plane->base.name, fb->width, fb->height, - fb->format->cpp[0] * 8, base, fb->pitches[0], - plane_config->size); - - plane_config->fb = intel_fb; -} - static void chv_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -8274,150 +6405,6 @@ static void skl_get_pfit_config(struct intel_crtc_state *crtc_state) scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); } -static void -skl_get_initial_plane_config(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) -{ - struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = 
to_i915(dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - enum plane_id plane_id = plane->id; - enum pipe pipe; - u32 val, base, offset, stride_mult, tiling, alpha; - int fourcc, pixel_format; - unsigned int aligned_height; - struct drm_framebuffer *fb; - struct intel_framebuffer *intel_fb; - - if (!plane->get_hw_state(plane, &pipe)) - return; - - drm_WARN_ON(dev, pipe != crtc->pipe); - - if (crtc_state->bigjoiner) { - drm_dbg_kms(&dev_priv->drm, - "Unsupported bigjoiner configuration for initial FB\n"); - return; - } - - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) { - drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); - return; - } - - fb = &intel_fb->base; - - fb->dev = dev; - - val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id)); - - if (INTEL_GEN(dev_priv) >= 11) - pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; - else - pixel_format = val & PLANE_CTL_FORMAT_MASK; - - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - alpha = intel_de_read(dev_priv, - PLANE_COLOR_CTL(pipe, plane_id)); - alpha &= PLANE_COLOR_ALPHA_MASK; - } else { - alpha = val & PLANE_CTL_ALPHA_MASK; - } - - fourcc = skl_format_to_fourcc(pixel_format, - val & PLANE_CTL_ORDER_RGBX, alpha); - fb->format = drm_format_info(fourcc); - - tiling = val & PLANE_CTL_TILED_MASK; - switch (tiling) { - case PLANE_CTL_TILED_LINEAR: - fb->modifier = DRM_FORMAT_MOD_LINEAR; - break; - case PLANE_CTL_TILED_X: - plane_config->tiling = I915_TILING_X; - fb->modifier = I915_FORMAT_MOD_X_TILED; - break; - case PLANE_CTL_TILED_Y: - plane_config->tiling = I915_TILING_Y; - if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) - fb->modifier = INTEL_GEN(dev_priv) >= 12 ? - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : - I915_FORMAT_MOD_Y_TILED_CCS; - else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) - fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; - else - fb->modifier = I915_FORMAT_MOD_Y_TILED; - break; - case PLANE_CTL_TILED_YF: - if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) - fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; - else - fb->modifier = I915_FORMAT_MOD_Yf_TILED; - break; - default: - MISSING_CASE(tiling); - goto error; - } - - /* - * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr - * while i915 HW rotation is clockwise, thats why this swapping. 
- */ - switch (val & PLANE_CTL_ROTATE_MASK) { - case PLANE_CTL_ROTATE_0: - plane_config->rotation = DRM_MODE_ROTATE_0; - break; - case PLANE_CTL_ROTATE_90: - plane_config->rotation = DRM_MODE_ROTATE_270; - break; - case PLANE_CTL_ROTATE_180: - plane_config->rotation = DRM_MODE_ROTATE_180; - break; - case PLANE_CTL_ROTATE_270: - plane_config->rotation = DRM_MODE_ROTATE_90; - break; - } - - if (INTEL_GEN(dev_priv) >= 10 && - val & PLANE_CTL_FLIP_HORIZONTAL) - plane_config->rotation |= DRM_MODE_REFLECT_X; - - /* 90/270 degree rotation would require extra work */ - if (drm_rotation_90_or_270(plane_config->rotation)) - goto error; - - base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000; - plane_config->base = base; - - offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); - - val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); - fb->height = ((val >> 16) & 0xffff) + 1; - fb->width = ((val >> 0) & 0xffff) + 1; - - val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); - stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); - fb->pitches[0] = (val & 0x3ff) * stride_mult; - - aligned_height = intel_fb_align_height(fb, 0, fb->height); - - plane_config->size = fb->pitches[0] * aligned_height; - - drm_dbg_kms(&dev_priv->drm, - "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", - crtc->base.name, plane->base.name, fb->width, fb->height, - fb->format->cpp[0] * 8, base, fb->pitches[0], - plane_config->size); - - plane_config->fb = intel_fb; - return; - -error: - kfree(intel_fb); -} - static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -8564,205 +6551,6 @@ out: return ret; } -static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, - struct intel_crtc_state *pipe_config) -{ - enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; - enum phy phy = intel_port_to_phy(dev_priv, port); - struct icl_port_dpll *port_dpll; - struct intel_shared_dpll *pll; - enum intel_dpll_id id; - bool pll_active; - u32 clk_sel; - - clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy); - - if (WARN_ON(id > DPLL_ID_DG1_DPLL3)) - return; - - pll = intel_get_shared_dpll_by_id(dev_priv, id); - port_dpll = &pipe_config->icl_port_dplls[port_dpll_id]; - - port_dpll->pll = pll; - pll_active = intel_dpll_get_hw_state(dev_priv, pll, - &port_dpll->hw_state); - drm_WARN_ON(&dev_priv->drm, !pll_active); - - icl_set_active_port_dpll(pipe_config, port_dpll_id); -} - -static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, - struct intel_crtc_state *pipe_config) -{ - enum phy phy = intel_port_to_phy(dev_priv, port); - enum icl_port_dpll_id port_dpll_id; - struct icl_port_dpll *port_dpll; - struct intel_shared_dpll *pll; - enum intel_dpll_id id; - bool pll_active; - u32 temp; - - if (intel_phy_is_combo(dev_priv, phy)) { - u32 mask, shift; - - if (IS_ROCKETLAKE(dev_priv)) { - mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); - } else { - mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); - shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); - } - - temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask; - id = temp >> shift; - port_dpll_id = ICL_PORT_DPLL_DEFAULT; - } else if (intel_phy_is_tc(dev_priv, phy)) { - u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; - - if 
(clk_sel == DDI_CLK_SEL_MG) { - id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, - port)); - port_dpll_id = ICL_PORT_DPLL_MG_PHY; - } else { - drm_WARN_ON(&dev_priv->drm, - clk_sel < DDI_CLK_SEL_TBT_162); - id = DPLL_ID_ICL_TBTPLL; - port_dpll_id = ICL_PORT_DPLL_DEFAULT; - } - } else { - drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port); - return; - } - - pll = intel_get_shared_dpll_by_id(dev_priv, id); - port_dpll = &pipe_config->icl_port_dplls[port_dpll_id]; - - port_dpll->pll = pll; - pll_active = intel_dpll_get_hw_state(dev_priv, pll, - &port_dpll->hw_state); - drm_WARN_ON(&dev_priv->drm, !pll_active); - - icl_set_active_port_dpll(pipe_config, port_dpll_id); -} - -static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, - struct intel_crtc_state *pipe_config) -{ - struct intel_shared_dpll *pll; - enum intel_dpll_id id; - bool pll_active; - u32 temp; - - temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); - - if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2)) - return; - - pll = intel_get_shared_dpll_by_id(dev_priv, id); - - pipe_config->shared_dpll = pll; - pll_active = intel_dpll_get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state); - drm_WARN_ON(&dev_priv->drm, !pll_active); -} - -static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, - enum port port, - struct intel_crtc_state *pipe_config) -{ - struct intel_shared_dpll *pll; - enum intel_dpll_id id; - bool pll_active; - - switch (port) { - case PORT_A: - id = DPLL_ID_SKL_DPLL0; - break; - case PORT_B: - id = DPLL_ID_SKL_DPLL1; - break; - case PORT_C: - id = DPLL_ID_SKL_DPLL2; - break; - default: - drm_err(&dev_priv->drm, "Incorrect port type\n"); - return; - } - - pll = intel_get_shared_dpll_by_id(dev_priv, id); - - pipe_config->shared_dpll = pll; - pll_active = intel_dpll_get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state); - drm_WARN_ON(&dev_priv->drm, !pll_active); -} - -static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, - struct intel_crtc_state *pipe_config) -{ - struct intel_shared_dpll *pll; - enum intel_dpll_id id; - bool pll_active; - u32 temp; - - temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); - id = temp >> (port * 3 + 1); - - if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3)) - return; - - pll = intel_get_shared_dpll_by_id(dev_priv, id); - - pipe_config->shared_dpll = pll; - pll_active = intel_dpll_get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state); - drm_WARN_ON(&dev_priv->drm, !pll_active); -} - -static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, - struct intel_crtc_state *pipe_config) -{ - struct intel_shared_dpll *pll; - enum intel_dpll_id id; - u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port)); - bool pll_active; - - switch (ddi_pll_sel) { - case PORT_CLK_SEL_WRPLL1: - id = DPLL_ID_WRPLL1; - break; - case PORT_CLK_SEL_WRPLL2: - id = DPLL_ID_WRPLL2; - break; - case PORT_CLK_SEL_SPLL: - id = DPLL_ID_SPLL; - break; - case PORT_CLK_SEL_LCPLL_810: - id = DPLL_ID_LCPLL_810; - break; - case PORT_CLK_SEL_LCPLL_1350: - id = DPLL_ID_LCPLL_1350; - break; - case PORT_CLK_SEL_LCPLL_2700: - id = DPLL_ID_LCPLL_2700; - break; - default: - MISSING_CASE(ddi_pll_sel); - fallthrough; - case PORT_CLK_SEL_NONE: - return; - } - - pll = intel_get_shared_dpll_by_id(dev_priv, id); - - pipe_config->shared_dpll = pll; - pll_active = intel_dpll_get_hw_state(dev_priv, 
pll, - &pipe_config->dpll_hw_state); - drm_WARN_ON(&dev_priv->drm, !pll_active); -} - static bool hsw_get_transcoder_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, struct intel_display_power_domain_set *power_domain_set) @@ -8919,19 +6707,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc, port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); } - if (IS_DG1(dev_priv)) - dg1_get_ddi_pll(dev_priv, port, pipe_config); - else if (INTEL_GEN(dev_priv) >= 11) - icl_get_ddi_pll(dev_priv, port, pipe_config); - else if (IS_CANNONLAKE(dev_priv)) - cnl_get_ddi_pll(dev_priv, port, pipe_config); - else if (IS_GEN9_LP(dev_priv)) - bxt_get_ddi_pll(dev_priv, port, pipe_config); - else if (IS_GEN9_BC(dev_priv)) - skl_get_ddi_pll(dev_priv, port, pipe_config); - else - hsw_get_ddi_pll(dev_priv, port, pipe_config); - /* * Haswell has only FDI/PCH transcoder A. It is which is connected to * DDI E. So just check whether this pipe is wired to DDI E and whether @@ -10022,19 +7797,27 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { - if (connector->base.state->crtc) + struct drm_connector_state *conn_state = connector->base.state; + struct intel_encoder *encoder = + to_intel_encoder(connector->base.encoder); + + if (conn_state->crtc) drm_connector_put(&connector->base); - if (connector->base.encoder) { - connector->base.state->best_encoder = - connector->base.encoder; - connector->base.state->crtc = - connector->base.encoder->crtc; + if (encoder) { + struct intel_crtc *crtc = + to_intel_crtc(encoder->base.crtc); + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + conn_state->best_encoder = &encoder->base; + conn_state->crtc = &crtc->base; + conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3; drm_connector_get(&connector->base); } else { - connector->base.state->best_encoder = NULL; - connector->base.state->crtc = NULL; + conn_state->best_encoder = NULL; + conn_state->crtc = NULL; } } drm_connector_list_iter_end(&conn_iter); @@ -10211,7 +7994,6 @@ static void snprintf_output_types(char *buf, size_t len, } static const char * const output_format_str[] = { - [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", [INTEL_OUTPUT_FORMAT_RGB] = "RGB", [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", @@ -10220,7 +8002,7 @@ static const char * const output_format_str[] = { static const char *output_formats(enum intel_output_format format) { if (format >= ARRAY_SIZE(output_format_str)) - format = INTEL_OUTPUT_FORMAT_INVALID; + return "invalid"; return output_format_str[format]; } @@ -10229,7 +8011,6 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state) struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_format_name_buf format_name; if (!fb) { drm_dbg_kms(&i915->drm, @@ -10240,10 +8021,9 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state) } drm_dbg_kms(&i915->drm, - "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n", + "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n", plane->base.base.id, plane->base.name, - fb->base.id, fb->width, fb->height, - drm_get_format_name(fb->format->format, &format_name), + fb->base.id, fb->width, 
fb->height, &fb->format->format, fb->modifier, yesno(plane_state->uapi.visible)); drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", plane_state->hw.rotation, plane_state->scaler_id); @@ -10296,6 +8076,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, pipe_config->bigjoiner_slave ? "slave" : pipe_config->bigjoiner ? "master" : "no"); + drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n", + enableddisabled(pipe_config->splitter.enable), + pipe_config->splitter.link_count, + pipe_config->splitter.pixel_overlap); + if (pipe_config->has_pch_encoder) intel_dump_m_n_config(pipe_config, "fdi", pipe_config->fdi_lanes, @@ -11336,6 +9121,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(dsc.dsc_split); PIPE_CONF_CHECK_I(dsc.compressed_bpp); + PIPE_CONF_CHECK_BOOL(splitter.enable); + PIPE_CONF_CHECK_I(splitter.link_count); + PIPE_CONF_CHECK_I(splitter.pixel_overlap); + PIPE_CONF_CHECK_I(mst_master_transcoder); PIPE_CONF_CHECK_BOOL(vrr.enable); @@ -11385,11 +9174,10 @@ static void verify_wm_state(struct intel_crtc *crtc, struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; struct skl_pipe_wm wm; } *hw; - struct skl_pipe_wm *sw_wm; - struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; + const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; + int level, max_level = ilk_wm_max_level(dev_priv); + struct intel_plane *plane; u8 hw_enabled_slices; - const enum pipe pipe = crtc->pipe; - int plane, level, max_level = ilk_wm_max_level(dev_priv); if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active) return; @@ -11399,7 +9187,6 @@ static void verify_wm_state(struct intel_crtc *crtc, return; skl_pipe_wm_get_hw_state(crtc, &hw->wm); - sw_wm = &new_crtc_state->wm.skl.optimal; skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); @@ -11412,110 +9199,52 @@ static void verify_wm_state(struct intel_crtc *crtc, dev_priv->dbuf.enabled_slices, hw_enabled_slices); - /* planes */ - for_each_universal_plane(dev_priv, pipe, plane) { - struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; - - hw_plane_wm = &hw->wm.planes[plane]; - sw_plane_wm = &sw_wm->planes[plane]; + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; + const struct skl_wm_level *hw_wm_level, *sw_wm_level; /* Watermarks */ for (level = 0; level <= max_level; level++) { - if (skl_wm_level_equals(&hw_plane_wm->wm[level], - &sw_plane_wm->wm[level]) || - (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level], - &sw_plane_wm->sagv_wm0))) - continue; + hw_wm_level = &hw->wm.planes[plane->id].wm[level]; + sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); - drm_err(&dev_priv->drm, - "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), plane + 1, level, - sw_plane_wm->wm[level].plane_en, - sw_plane_wm->wm[level].plane_res_b, - sw_plane_wm->wm[level].plane_res_l, - hw_plane_wm->wm[level].plane_en, - hw_plane_wm->wm[level].plane_res_b, - hw_plane_wm->wm[level].plane_res_l); - } - - if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, - &sw_plane_wm->trans_wm)) { - drm_err(&dev_priv->drm, - "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), plane + 1, - sw_plane_wm->trans_wm.plane_en, - sw_plane_wm->trans_wm.plane_res_b, - sw_plane_wm->trans_wm.plane_res_l, - hw_plane_wm->trans_wm.plane_en, - hw_plane_wm->trans_wm.plane_res_b, - hw_plane_wm->trans_wm.plane_res_l); - } - 
- /* DDB */ - hw_ddb_entry = &hw->ddb_y[plane]; - sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; - - if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { - drm_err(&dev_priv->drm, - "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", - pipe_name(pipe), plane + 1, - sw_ddb_entry->start, sw_ddb_entry->end, - hw_ddb_entry->start, hw_ddb_entry->end); - } - } - - /* - * cursor - * If the cursor plane isn't active, we may not have updated it's ddb - * allocation. In that case since the ddb allocation will be updated - * once the plane becomes visible, we can skip this check - */ - if (1) { - struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; - - hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; - sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; - - /* Watermarks */ - for (level = 0; level <= max_level; level++) { - if (skl_wm_level_equals(&hw_plane_wm->wm[level], - &sw_plane_wm->wm[level]) || - (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level], - &sw_plane_wm->sagv_wm0))) + if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) continue; drm_err(&dev_priv->drm, - "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), level, - sw_plane_wm->wm[level].plane_en, - sw_plane_wm->wm[level].plane_res_b, - sw_plane_wm->wm[level].plane_res_l, - hw_plane_wm->wm[level].plane_en, - hw_plane_wm->wm[level].plane_res_b, - hw_plane_wm->wm[level].plane_res_l); + "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, level, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); } - if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, - &sw_plane_wm->trans_wm)) { + hw_wm_level = &hw->wm.planes[plane->id].trans_wm; + sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); + + if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { drm_err(&dev_priv->drm, - "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), - sw_plane_wm->trans_wm.plane_en, - sw_plane_wm->trans_wm.plane_res_b, - sw_plane_wm->trans_wm.plane_res_l, - hw_plane_wm->trans_wm.plane_en, - hw_plane_wm->trans_wm.plane_res_b, - hw_plane_wm->trans_wm.plane_res_l); + "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); } /* DDB */ - hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; - sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; + hw_ddb_entry = &hw->ddb_y[plane->id]; + sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { drm_err(&dev_priv->drm, - "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", - pipe_name(pipe), + "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", + plane->base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry->end, hw_ddb_entry->start, hw_ddb_entry->end); } @@ -11690,7 +9419,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, struct intel_crtc_state *new_crtc_state) { struct intel_dpll_hw_state dpll_hw_state; - unsigned int crtc_mask; + u8 pipe_mask; bool active; memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); @@ -11703,34 +9432,34 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, 
I915_STATE_WARN(!pll->on && pll->active_mask, "pll in active use but not on in sw tracking\n"); I915_STATE_WARN(pll->on && !pll->active_mask, - "pll is on but not used by any active crtc\n"); + "pll is on but not used by any active pipe\n"); I915_STATE_WARN(pll->on != active, "pll on state mismatch (expected %i, found %i)\n", pll->on, active); } if (!crtc) { - I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, - "more active pll users than references: %x vs %x\n", - pll->active_mask, pll->state.crtc_mask); + I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask, + "more active pll users than references: 0x%x vs 0x%x\n", + pll->active_mask, pll->state.pipe_mask); return; } - crtc_mask = drm_crtc_mask(&crtc->base); + pipe_mask = BIT(crtc->pipe); if (new_crtc_state->hw.active) - I915_STATE_WARN(!(pll->active_mask & crtc_mask), - "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", + I915_STATE_WARN(!(pll->active_mask & pipe_mask), + "pll active mismatch (expected pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); else - I915_STATE_WARN(pll->active_mask & crtc_mask, - "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", + I915_STATE_WARN(pll->active_mask & pipe_mask, + "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n", pipe_name(crtc->pipe), pll->active_mask); - I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), - "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", - crtc_mask, pll->state.crtc_mask); + I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask), + "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n", + pipe_mask, pll->state.pipe_mask); I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state, @@ -11750,15 +9479,15 @@ verify_shared_dpll_state(struct intel_crtc *crtc, if (old_crtc_state->shared_dpll && old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { - unsigned int crtc_mask = drm_crtc_mask(&crtc->base); + u8 pipe_mask = BIT(crtc->pipe); struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; - I915_STATE_WARN(pll->active_mask & crtc_mask, - "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(crtc->pipe)); - I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, - "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(crtc->pipe)); + I915_STATE_WARN(pll->active_mask & pipe_mask, + "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n", + pipe_name(crtc->pipe), pll->active_mask); + I915_STATE_WARN(pll->state.pipe_mask & pipe_mask, + "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n", + pipe_name(crtc->pipe), pll->state.pipe_mask); } } @@ -13911,7 +11640,13 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) { + if (IS_ALDERLAKE_S(dev_priv)) { + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_TC1); + intel_ddi_init(dev_priv, PORT_TC2); + intel_ddi_init(dev_priv, PORT_TC3); + intel_ddi_init(dev_priv, PORT_TC4); + } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_TC1); @@ -13967,8 +11702,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) /* * Haswell uses DDI functions to detect digital outputs. - * On SKL pre-D0 the strap isn't connected, so we assume - * it's there. + * On SKL pre-D0 the strap isn't connected. 
Later SKUs may or + * may not have it - it was supposed to be fixed by the same + * time we stopped using straps. Assume it's there. */ found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; /* WaIgnoreDDIAStrap: skl */ @@ -13977,7 +11713,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP * register */ - found = intel_de_read(dev_priv, SFUSE_STRAP); + if (HAS_PCH_TGP(dev_priv)) { + /* W/A due to lack of STRAP config on TGP PCH*/ + found = (SFUSE_STRAP_DDIB_DETECTED | + SFUSE_STRAP_DDIC_DETECTED | + SFUSE_STRAP_DDID_DETECTED); + } else { + found = intel_de_read(dev_priv, SFUSE_STRAP); + } if (found & SFUSE_STRAP_DDIB_DETECTED) intel_ddi_init(dev_priv, PORT_B); @@ -14133,8 +11876,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_dvo_init(dev_priv); } - intel_psr_init(dev_priv); - for_each_intel_encoder(&dev_priv->drm, encoder) { encoder->base.possible_crtcs = intel_encoder_possible_crtcs(encoder); @@ -14237,13 +11978,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, if (!drm_any_plane_has_format(&dev_priv->drm, mode_cmd->pixel_format, mode_cmd->modifier[0])) { - struct drm_format_name_buf format_name; - drm_dbg_kms(&dev_priv->drm, - "unsupported pixel format %s / modifier 0x%llx\n", - drm_get_format_name(mode_cmd->pixel_format, - &format_name), - mode_cmd->modifier[0]); + "unsupported pixel format %p4cc / modifier 0x%llx\n", + &mode_cmd->pixel_format, mode_cmd->modifier[0]); goto err; } @@ -14504,6 +12241,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { void intel_init_display_hooks(struct drm_i915_private *dev_priv) { intel_init_cdclk_hooks(dev_priv); + intel_init_audio_hooks(dev_priv); intel_dpll_init_clock_hook(dev_priv); @@ -14939,6 +12677,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) intel_update_czclk(i915); intel_modeset_init_hw(i915); + intel_dpll_update_ref_clks(i915); intel_hdcp_component_init(i915); @@ -15394,8 +13133,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) /* notify opregion of the sanitized encoder state */ intel_opregion_notify_encoder(encoder, connector && has_active_crtc); - if (INTEL_GEN(dev_priv) >= 11) - icl_sanitize_encoder_pll_mapping(encoder); + if (HAS_DDI(dev_priv)) + intel_ddi_sanitize_encoder_pll_mapping(encoder); } /* FIXME read out full plane state for all planes */ @@ -15475,8 +13214,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) readout_plane_state(dev_priv); - intel_dpll_readout_hw_state(dev_priv); - for_each_intel_encoder(dev, encoder) { pipe = 0; @@ -15511,6 +13248,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe_name(pipe)); } + intel_dpll_readout_hw_state(dev_priv); + drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->get_hw_state(connector)) { @@ -15972,6 +13711,57 @@ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) intel_bios_driver_remove(i915); } +void intel_display_driver_register(struct drm_i915_private *i915) +{ + if (!HAS_DISPLAY(i915)) + return; + + intel_display_debugfs_register(i915); + + /* Must be done after probing outputs */ + intel_opregion_register(i915); + acpi_video_register(); + + intel_audio_init(i915); + + /* + * Some ports require correctly set-up hpd registers for + * detection to work properly (leading to ghost connected + * connector status), e.g. VGA on gm45. 
Hence we can only set + * up the initial fbdev config after hpd irqs are fully + * enabled. We do it last so that the async config cannot run + * before the connectors are registered. + */ + intel_fbdev_initial_config_async(&i915->drm); + + /* + * We need to coordinate the hotplugs with the asynchronous + * fbdev configuration, for which we use the + * fbdev->async_cookie. + */ + drm_kms_helper_poll_init(&i915->drm); +} + +void intel_display_driver_unregister(struct drm_i915_private *i915) +{ + if (!HAS_DISPLAY(i915)) + return; + + intel_fbdev_unregister(i915); + intel_audio_deinit(i915); + + /* + * After flushing the fbdev (incl. a late async config which + * will have delayed queuing of a hotplug event), then flush + * the hotplug events. + */ + drm_kms_helper_poll_fini(&i915->drm); + drm_atomic_helper_shutdown(&i915->drm); + + acpi_video_unregister(); + intel_opregion_unregister(i915); +} + #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct intel_display_error_state { diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 64ffa34544a7..431770eeadb4 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -52,6 +52,7 @@ struct intel_crtc_state; struct intel_digital_port; struct intel_dp; struct intel_encoder; +struct intel_initial_plane_config; struct intel_load_detect_pipe; struct intel_plane; struct intel_plane_state; @@ -352,11 +353,6 @@ enum phy_fia { for_each_cpu_transcoder(__dev_priv, __t) \ for_each_if ((__mask) & BIT(__t)) -#define for_each_universal_plane(__dev_priv, __pipe, __p) \ - for ((__p) = 0; \ - (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ - (__p)++) - #define for_each_sprite(__dev_priv, __p, __s) \ for ((__s) = 0; \ (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \ @@ -417,10 +413,19 @@ enum phy_fia { for_each_if((encoder_mask) & \ drm_encoder_mask(&intel_encoder->base)) +#define for_each_intel_encoder_mask_with_psr(dev, intel_encoder, encoder_mask) \ + list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ + for_each_if(((encoder_mask) & drm_encoder_mask(&(intel_encoder)->base)) && \ + intel_encoder_can_psr(intel_encoder)) + #define for_each_intel_dp(dev, intel_encoder) \ for_each_intel_encoder(dev, intel_encoder) \ for_each_if(intel_encoder_is_dp(intel_encoder)) +#define for_each_intel_encoder_with_psr(dev, intel_encoder) \ + for_each_intel_encoder((dev), (intel_encoder)) \ + for_each_if(intel_encoder_can_psr(intel_encoder)) + #define for_each_intel_connector_iter(intel_connector, iter) \ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter)))) @@ -507,8 +512,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, bool constant_n, bool fec_enable); -bool is_ccs_modifier(u64 modifier); -int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane); void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); @@ -586,9 +589,6 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, enum pipe pipe); -int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, - const struct dpll *dpll); -void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); int lpt_get_iclkip(struct drm_i915_private *dev_priv); bool 
intel_fuzzy_clock_check(int clock1, int clock2); @@ -613,25 +613,8 @@ enum intel_display_power_domain intel_legacy_aux_to_power_domain(enum aux_ch aux_ch); void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); - -u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); -void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); -u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set); -void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe, - int id, int set, enum drm_scaling_filter filter); void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); -u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); -u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state); -u32 skl_plane_stride(const struct intel_plane_state *plane_state, - int plane); -int skl_check_plane_surface(struct intel_plane_state *plane_state); -int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, - int *x, int *y, u32 *offset); -int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); + int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); @@ -654,6 +637,21 @@ struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state); +unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, + int color_plane); +void intel_fb_plane_get_subsampling(int *hsub, int *vsub, + const struct drm_framebuffer *fb, + int color_plane); +u32 intel_plane_adjust_aligned_offset(int *x, int *y, + const struct intel_plane_state *state, + int color_plane, + u32 old_offset, u32 new_offset); +unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane); +unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane); + +void intel_display_driver_register(struct drm_i915_private *i915); +void intel_display_driver_unregister(struct drm_i915_private *i915); + /* modesetting */ void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init_noirq(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index d62b18d5ecd8..20194ccfec05 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -249,12 +249,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) "sink internal error", }; struct drm_connector *connector = m->private; - struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); int ret; - if (!CAN_PSR(dev_priv)) { + if (!CAN_PSR(intel_dp)) { seq_puts(m, "PSR Unsupported\n"); return -ENODEV; } @@ -280,12 +279,13 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); static void -psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) +psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) { - u32 val, status_val; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const 
char *status = "unknown"; + u32 val, status_val; - if (dev_priv->psr.psr2_enabled) { + if (intel_dp->psr.psr2_enabled) { static const char * const live_status[] = { "IDLE", "CAPTURE", @@ -300,7 +300,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "TG_ON" }; val = intel_de_read(dev_priv, - EDP_PSR2_STATUS(dev_priv->psr.transcoder)); + EDP_PSR2_STATUS(intel_dp->psr.transcoder)); status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; if (status_val < ARRAY_SIZE(live_status)) @@ -317,7 +317,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "SRDENT_ON", }; val = intel_de_read(dev_priv, - EDP_PSR_STATUS(dev_priv->psr.transcoder)); + EDP_PSR_STATUS(intel_dp->psr.transcoder)); status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> EDP_PSR_STATUS_STATE_SHIFT; if (status_val < ARRAY_SIZE(live_status)) @@ -327,21 +327,18 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); } -static int i915_edp_psr_status(struct seq_file *m, void *data) +static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_psr *psr = &dev_priv->psr; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_psr *psr = &intel_dp->psr; intel_wakeref_t wakeref; const char *status; bool enabled; u32 val; - if (!HAS_PSR(dev_priv)) - return -ENODEV; - seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); - if (psr->dp) - seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]); + if (psr->sink_support) + seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); seq_puts(m, "\n"); if (!psr->sink_support) @@ -365,16 +362,16 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) if (psr->psr2_enabled) { val = intel_de_read(dev_priv, - EDP_PSR2_CTL(dev_priv->psr.transcoder)); + EDP_PSR2_CTL(intel_dp->psr.transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { val = intel_de_read(dev_priv, - EDP_PSR_CTL(dev_priv->psr.transcoder)); + EDP_PSR_CTL(intel_dp->psr.transcoder)); enabled = val & EDP_PSR_ENABLE; } seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", enableddisabled(enabled), val); - psr_source_status(dev_priv, m); + psr_source_status(intel_dp, m); seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", psr->busy_frontbuffer_bits); @@ -383,7 +380,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { val = intel_de_read(dev_priv, - EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); + EDP_PSR_PERF_CNT(intel_dp->psr.transcoder)); val &= EDP_PSR_PERF_CNT_MASK; seq_printf(m, "Performance counter: %u\n", val); } @@ -404,7 +401,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) */ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { val = intel_de_read(dev_priv, - PSR2_SU_STATUS(dev_priv->psr.transcoder, frame)); + PSR2_SU_STATUS(intel_dp->psr.transcoder, frame)); su_frames_val[frame / 3] = val; } @@ -430,23 +427,50 @@ unlock: return 0; } +static int i915_edp_psr_status(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_dp *intel_dp = NULL; + struct intel_encoder *encoder; + + if (!HAS_PSR(dev_priv)) + return -ENODEV; + + /* Find the first EDP which supports PSR */ + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + intel_dp = enc_to_intel_dp(encoder); + break; + } + + if (!intel_dp) + return -ENODEV; + + return 
intel_psr_status(m, intel_dp); +} + static int i915_edp_psr_debug_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; + struct intel_encoder *encoder; intel_wakeref_t wakeref; - int ret; + int ret = -ENODEV; - if (!CAN_PSR(dev_priv)) - return -ENODEV; + if (!HAS_PSR(dev_priv)) + return ret; - drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); - ret = intel_psr_debug_set(dev_priv, val); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + // TODO: split to each transcoder's PSR debug state + ret = intel_psr_debug_set(intel_dp, val); + + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + } return ret; } @@ -455,12 +479,20 @@ static int i915_edp_psr_debug_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; + struct intel_encoder *encoder; - if (!CAN_PSR(dev_priv)) + if (!HAS_PSR(dev_priv)) return -ENODEV; - *val = READ_ONCE(dev_priv->psr.debug); - return 0; + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + // TODO: split to each transcoder's PSR debug state + *val = READ_ONCE(intel_dp->psr.debug); + return 0; + } + + return -ENODEV; } DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, @@ -772,27 +804,25 @@ static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); const struct drm_framebuffer *fb = plane_state->uapi.fb; - struct drm_format_name_buf format_name; struct drm_rect src, dst; char rot_str[48]; src = drm_plane_state_src(&plane_state->uapi); dst = drm_plane_state_dest(&plane_state->uapi); - if (fb) - drm_get_format_name(fb->format->format, &format_name); - plane_rotation(rot_str, sizeof(rot_str), plane_state->uapi.rotation); - seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", - fb ? fb->base.id : 0, fb ? format_name.str : "n/a", - fb ? fb->modifier : 0, - fb ? fb->width : 0, fb ? 
fb->height : 0, - plane_visibility(plane_state), - DRM_RECT_FP_ARG(&src), - DRM_RECT_ARG(&dst), - rot_str); + seq_puts(m, "\t\tuapi: [FB:"); + if (fb) + seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id, + &fb->format->format, fb->modifier, fb->width, + fb->height); + else + seq_puts(m, "0] n/a,0x0,0x0,"); + seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT + ", rotation=%s\n", plane_visibility(plane_state), + DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str); if (plane_state->planar_linked_plane) seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n", @@ -805,19 +835,17 @@ static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_format_name_buf format_name; char rot_str[48]; if (!fb) return; - drm_get_format_name(fb->format->format, &format_name); - plane_rotation(rot_str, sizeof(rot_str), plane_state->hw.rotation); - seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", - fb->base.id, format_name.str, + seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src=" + DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", + fb->base.id, &fb->format->format, fb->modifier, fb->width, fb->height, yesno(plane_state->uapi.visible), DRM_RECT_FP_ARG(&plane_state->uapi.src), @@ -1066,8 +1094,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, pll->info->id); - seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", - pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); + seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n", + pll->state.pipe_mask, pll->active_mask, yesno(pll->on)); seq_printf(m, " tracked hardware state:\n"); seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); seq_printf(m, " dpll_md: 0x%08x\n", @@ -1233,9 +1261,6 @@ static void drrs_status_per_crtc(struct seq_file *m, /* disable_drrs() will make drrs->dp NULL */ if (!drrs->dp) { seq_puts(m, "Idleness DRRS: Disabled\n"); - if (dev_priv->psr.enabled) - seq_puts(m, - "\tAs PSR is enabled, DRRS is not enabled\n"); mutex_unlock(&drrs->mutex); return; } @@ -2169,19 +2194,40 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel); static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); + int ret; - if (connector->status != connector_status_connected) - return -ENODEV; + ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + if (ret) + return ret; + + if (!connector->encoder || connector->status != connector_status_connected) { + ret = -ENODEV; + goto out; + } seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); intel_hdcp_info(m, intel_connector); - return 0; +out: + drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + + return ret; } DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); +static int i915_psr_status_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct intel_dp *intel_dp = + intel_attached_dp(to_intel_connector(connector)); + + return intel_psr_status(m, intel_dp); +} +DEFINE_SHOW_ATTRIBUTE(i915_psr_status); + #define LPSP_CAPABLE(COND) (COND ? 
seq_puts(m, "LPSP: capable\n") : \ seq_puts(m, "LPSP: incapable\n")) @@ -2357,6 +2403,12 @@ int intel_connector_debugfs_add(struct drm_connector *connector) connector, &i915_psr_sink_status_fops); } + if (HAS_PSR(dev_priv) && + connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + debugfs_create_file("i915_psr_status", 0444, root, + connector, &i915_psr_status_fops); + } + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c11c37c65d86..7e0eaa872350 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -2886,24 +2886,24 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_E) | \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_G) | \ - BIT_ULL(POWER_DOMAIN_AUX_H) | \ - BIT_ULL(POWER_DOMAIN_AUX_I) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -2921,18 +2921,12 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) -#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) -#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) -#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) -#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO)) -#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO)) -#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO)) +#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) +#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) +#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) +#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) +#define TGL_DDI_IO_TC5_POWER_DOMAINS 
BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) +#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) #define TGL_AUX_A_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ @@ -2941,44 +2935,34 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, BIT_ULL(POWER_DOMAIN_AUX_B)) #define TGL_AUX_C_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_C)) -#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D)) -#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_E)) -#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F)) -#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_G)) -#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_H)) -#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_I)) -#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) -#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) -#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) -#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_G_TBT)) -#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_H_TBT)) -#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) + +#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) +#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) +#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) +#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) +#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) +#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) + +#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) +#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) +#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) +#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) +#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) +#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_E) | \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_G) | \ - BIT_ULL(POWER_DOMAIN_AUX_H) | \ - BIT_ULL(POWER_DOMAIN_AUX_I) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) #define RKL_PW_4_POWER_DOMAINS ( \ @@ -2994,10 +2978,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, BIT_ULL(POWER_DOMAIN_AUDIO) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ + 
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ BIT_ULL(POWER_DOMAIN_INIT)) /* @@ -4145,8 +4129,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { } }, { - .name = "DDI D TC1 IO", - .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, + .name = "DDI IO TC1", + .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4155,8 +4139,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI E TC2 IO", - .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, + .name = "DDI IO TC2", + .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4165,8 +4149,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI F TC3 IO", - .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, + .name = "DDI IO TC3", + .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4175,8 +4159,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI G TC4 IO", - .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, + .name = "DDI IO TC4", + .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4185,8 +4169,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI H TC5 IO", - .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, + .name = "DDI IO TC5", + .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4195,8 +4179,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI I TC6 IO", - .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, + .name = "DDI IO TC6", + .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4241,8 +4225,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX D TC1", - .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, + .name = "AUX USBC1", + .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4252,8 +4236,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX E TC2", - .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, + .name = "AUX USBC2", + .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4263,8 +4247,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX F TC3", - .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, + .name = "AUX USBC3", + .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4274,8 +4258,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX G TC4", - .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, + .name = "AUX USBC4", + .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4285,8 +4269,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX H TC5", - .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, + .name = "AUX USBC5", + .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4296,8 +4280,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX I TC6", - .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, + .name = "AUX USBC6", + .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = 
DISP_PW_ID_NONE, { @@ -4307,8 +4291,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX D TBT1", - .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, + .name = "AUX TBT1", + .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4318,8 +4302,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX E TBT2", - .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, + .name = "AUX TBT2", + .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4329,8 +4313,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX F TBT3", - .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, + .name = "AUX TBT3", + .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4340,8 +4324,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX G TBT4", - .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, + .name = "AUX TBT4", + .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4351,8 +4335,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX H TBT5", - .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, + .name = "AUX TBT5", + .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4362,8 +4346,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX I TBT6", - .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, + .name = "AUX TBT6", + .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4471,8 +4455,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = { } }, { - .name = "DDI D TC1 IO", - .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, + .name = "DDI IO TC1", + .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4481,8 +4465,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { - .name = "DDI E TC2 IO", - .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, + .name = "DDI IO TC2", + .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4511,8 +4495,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { - .name = "AUX D TC1", - .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, + .name = "AUX USBC1", + .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4521,8 +4505,8 @@ static const struct i915_power_well_desc rkl_power_wells[] = { }, }, { - .name = "AUX E TC2", - .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, + .name = "AUX USBC2", + .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -4689,7 +4673,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. 
*/ - if (IS_DG1(dev_priv)) { + if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) { err = set_power_wells_mask(power_domains, tgl_power_wells, BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); } else if (IS_ROCKETLAKE(dev_priv)) { @@ -5317,17 +5301,25 @@ struct buddy_page_mask { static const struct buddy_page_mask tgl_buddy_page_masks[] = { { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, + { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, + { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C }, { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, + { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E }, { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, + { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 }, {} }; static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = { { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 }, { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 }, + { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 }, + { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 }, { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 }, + { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 }, + { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 }, {} }; @@ -5339,9 +5331,10 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; int config, i; - if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) || - IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) - /* Wa_1409767108:tgl,dg1 */ + if (IS_ALDERLAKE_S(dev_priv) || + IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) || + IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_B0)) + /* Wa_1409767108:tgl,dg1,adl-s */ table = wa_1409767108_buddy_page_masks; else table = tgl_buddy_page_masks; @@ -5379,7 +5372,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - /* Wa_14011294188:ehl,jsl,tgl,rkl */ + /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1) intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index bc30c479be53..f3ca5d5c9778 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -41,6 +41,14 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_G_LANES, POWER_DOMAIN_PORT_DDI_H_LANES, POWER_DOMAIN_PORT_DDI_I_LANES, + + POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_LANES_TC2, + POWER_DOMAIN_PORT_DDI_LANES_TC3, + POWER_DOMAIN_PORT_DDI_LANES_TC4, + POWER_DOMAIN_PORT_DDI_LANES_TC5, + POWER_DOMAIN_PORT_DDI_LANES_TC6, + POWER_DOMAIN_PORT_DDI_A_IO, POWER_DOMAIN_PORT_DDI_B_IO, POWER_DOMAIN_PORT_DDI_C_IO, @@ -50,6 +58,14 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_G_IO, POWER_DOMAIN_PORT_DDI_H_IO, POWER_DOMAIN_PORT_DDI_I_IO, + + POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_IO_TC2, + POWER_DOMAIN_PORT_DDI_IO_TC3, + POWER_DOMAIN_PORT_DDI_IO_TC4, + POWER_DOMAIN_PORT_DDI_IO_TC5, + POWER_DOMAIN_PORT_DDI_IO_TC6, + 
POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -64,6 +80,14 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_G, POWER_DOMAIN_AUX_H, POWER_DOMAIN_AUX_I, + + POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */ + POWER_DOMAIN_AUX_USBC2, + POWER_DOMAIN_AUX_USBC3, + POWER_DOMAIN_AUX_USBC4, + POWER_DOMAIN_AUX_USBC5, + POWER_DOMAIN_AUX_USBC6, + POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_C_TBT, POWER_DOMAIN_AUX_D_TBT, @@ -72,6 +96,14 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_G_TBT, POWER_DOMAIN_AUX_H_TBT, POWER_DOMAIN_AUX_I_TBT, + + POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */ + POWER_DOMAIN_AUX_TBT2, + POWER_DOMAIN_AUX_TBT3, + POWER_DOMAIN_AUX_TBT4, + POWER_DOMAIN_AUX_TBT5, + POWER_DOMAIN_AUX_TBT6, + POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 39397748b4b0..eaebba5889d2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -37,6 +37,7 @@ #include <drm/drm_dp_mst_helper.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> @@ -219,6 +220,16 @@ struct intel_encoder { * encoders have been disabled and suspended. */ void (*shutdown)(struct intel_encoder *encoder); + /* + * Enable/disable the clock to the port. + */ + void (*enable_clock)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); + void (*disable_clock)(struct intel_encoder *encoder); + /* + * Returns whether the port clock is enabled or not. + */ + bool (*is_clock_enabled)(struct intel_encoder *encoder); enum hpd_pin hpd_pin; enum intel_display_power_domain power_domain; /* for communication with audio component; protected by av_mutex */ @@ -714,9 +725,9 @@ struct intel_pipe_wm { struct skl_wm_level { u16 min_ddb_alloc; - u16 plane_res_b; - u8 plane_res_l; - bool plane_en; + u16 blocks; + u8 lines; + bool enable; bool ignore_lines; bool can_sagv; }; @@ -725,7 +736,10 @@ struct skl_plane_wm { struct skl_wm_level wm[8]; struct skl_wm_level uv_wm[8]; struct skl_wm_level trans_wm; - struct skl_wm_level sagv_wm0; + struct { + struct skl_wm_level wm0; + struct skl_wm_level trans_wm; + } sagv; bool is_planar; }; @@ -830,7 +844,6 @@ struct intel_crtc_wm_state { }; enum intel_output_format { - INTEL_OUTPUT_FORMAT_INVALID, INTEL_OUTPUT_FORMAT_RGB, INTEL_OUTPUT_FORMAT_YCBCR420, INTEL_OUTPUT_FORMAT_YCBCR444, @@ -1160,6 +1173,13 @@ struct intel_crtc_state { u8 pipeline_full; u16 flipline, vmin, vmax; } vrr; + + /* Stream Splitter for eDP MSO */ + struct { + bool enable; + u8 link_count; + u8 pixel_overlap; + } splitter; }; enum intel_pipe_crc_source { @@ -1415,6 +1435,44 @@ struct intel_pps { struct edp_power_seq pps_delays; }; +struct intel_psr { + /* Mutex for PSR state of the transcoder */ + struct mutex lock; + +#define I915_PSR_DEBUG_MODE_MASK 0x0f +#define I915_PSR_DEBUG_DEFAULT 0x00 +#define I915_PSR_DEBUG_DISABLE 0x01 +#define I915_PSR_DEBUG_ENABLE 0x02 +#define I915_PSR_DEBUG_FORCE_PSR1 0x03 +#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4 +#define I915_PSR_DEBUG_IRQ 0x10 + + u32 debug; + bool sink_support; + bool source_support; + bool enabled; + enum pipe pipe; + enum transcoder transcoder; + bool active; + struct work_struct work; + unsigned int busy_frontbuffer_bits; + bool sink_psr2_support; + bool link_standby; + bool 
colorimetry_support; + bool psr2_enabled; + bool psr2_sel_fetch_enabled; + u8 sink_sync_latency; + ktime_t last_entry_attempt; + ktime_t last_exit; + bool sink_not_reliable; + bool irq_aux_error; + u16 su_x_granularity; + bool dc3co_enabled; + u32 dc3co_exit_delay; + struct delayed_work dc3co_work; + struct drm_dp_vsc_sdp vsc; +}; + struct intel_dp { i915_reg_t output_reg; u32 DP; @@ -1449,6 +1507,8 @@ struct intel_dp { int max_link_lane_count; /* Max rate for the current link */ int max_link_rate; + int mso_link_count; + int mso_pixel_overlap; /* sink or branch descriptor */ struct drm_dp_desc desc; struct drm_dp_aux aux; @@ -1517,6 +1577,8 @@ struct intel_dp { bool hobl_active; struct intel_dp_pcon_frl frl; + + struct intel_psr psr; }; enum lspcon_vendor { @@ -1753,6 +1815,17 @@ dp_to_i915(struct intel_dp *intel_dp) return to_i915(dp_to_dig_port(intel_dp)->base.base.dev); } +#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ + (intel_dp)->psr.source_support) + +static inline bool intel_encoder_can_psr(struct intel_encoder *encoder) +{ + if (!intel_encoder_is_dp(encoder)) + return false; + + return CAN_PSR(enc_to_intel_dp(encoder)); +} + static inline struct intel_digital_port * hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) { @@ -1894,4 +1967,39 @@ static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv, return dev_priv->fdi_pll_freq; } +static inline bool is_ccs_modifier(u64 modifier) +{ + return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; +} + +static inline bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) +{ + if (!is_ccs_modifier(fb->modifier)) + return false; + + return plane >= fb->format->num_planes / 2; +} + +static inline bool is_gen12_ccs_modifier(u64 modifier) +{ + return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || + modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; +} + +static inline bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) +{ + return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); +} + +static inline bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane) +{ + return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC && + plane == 2; +} + #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8c12d5375607..b6b5776f5a66 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -50,6 +50,7 @@ #include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dpll.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" #include "intel_hdcp.h" @@ -788,10 +789,10 @@ intel_dp_mode_valid(struct drm_connector *connector, return MODE_H_ILLEGAL; if (intel_dp_is_edp(intel_dp) && fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) + if (mode->hdisplay != fixed_mode->hdisplay) return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) + if (mode->vdisplay != fixed_mode->vdisplay) return MODE_PANEL; target_clock = fixed_mode->clock; @@ -1663,12 +1664,10 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, const struct drm_connector_state *conn_state, struct drm_dp_vsc_sdp *vsc) { - struct drm_i915_private *dev_priv = 
dp_to_i915(intel_dp); - vsc->sdp_type = DP_SDP_VSC; - if (dev_priv->psr.psr2_enabled) { - if (dev_priv->psr.colorimetry_support && + if (intel_dp->psr.psr2_enabled) { + if (intel_dp->psr.colorimetry_support && intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { /* [PSR2, +Colorimetry] */ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, @@ -1724,6 +1723,7 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp, { struct intel_connector *intel_connector = intel_dp->attached_connector; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + int pixel_clock; if (pipe_config->vrr.enable) return; @@ -1742,10 +1742,18 @@ intel_dp_drrs_compute_config(struct intel_dp *intel_dp, return; pipe_config->has_drrs = true; - intel_link_compute_m_n(output_bpp, pipe_config->lane_count, - intel_connector->panel.downclock_mode->clock, + + pixel_clock = intel_connector->panel.downclock_mode->clock; + if (pipe_config->splitter.enable) + pixel_clock /= pipe_config->splitter.link_count; + + intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, pipe_config->port_clock, &pipe_config->dp_m2_n2, constant_n, pipe_config->fec_enable); + + /* FIXME: abstract this better */ + if (pipe_config->splitter.enable) + pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; } int @@ -1820,6 +1828,26 @@ intel_dp_compute_config(struct intel_encoder *encoder, output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_config->pipe_bpp); + if (intel_dp->mso_link_count) { + int n = intel_dp->mso_link_count; + int overlap = intel_dp->mso_pixel_overlap; + + pipe_config->splitter.enable = true; + pipe_config->splitter.link_count = n; + pipe_config->splitter.pixel_overlap = overlap; + + drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n", + n, overlap); + + adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap; + adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap; + adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap; + adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap; + adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap; + adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap; + adjusted_mode->crtc_clock /= n; + } + intel_link_compute_m_n(output_bpp, pipe_config->lane_count, adjusted_mode->crtc_clock, @@ -1827,6 +1855,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, &pipe_config->dp_m_n, constant_n, pipe_config->fec_enable); + /* FIXME: abstract this better */ + if (pipe_config->splitter.enable) + pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count; + if (!HAS_DDI(dev_priv)) intel_dp_set_clock(encoder, pipe_config); @@ -2359,7 +2391,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, return false; } - if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) { + if (CAN_PSR(intel_dp)) { drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n"); crtc_state->uapi.mode_changed = true; return false; @@ -2650,7 +2682,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp) if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { int ret, mode; - drm_dbg(&dev_priv->drm, "Couldnt set FRL mode, continuing with TMDS mode\n"); + drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); @@ -3517,6 +3549,64 @@ static void intel_dp_get_dsc_sink_cap(struct intel_dp 
*intel_dp) } } +static void intel_edp_mso_mode_fixup(struct intel_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); + int n = intel_dp->mso_link_count; + int overlap = intel_dp->mso_pixel_overlap; + + if (!mode || !n) + return; + + mode->hdisplay = (mode->hdisplay - overlap) * n; + mode->hsync_start = (mode->hsync_start - overlap) * n; + mode->hsync_end = (mode->hsync_end - overlap) * n; + mode->htotal = (mode->htotal - overlap) * n; + mode->clock *= n; + + drm_mode_set_name(mode); + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] using generated MSO mode: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(mode); +} + +static void intel_edp_mso_init(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 mso; + + if (intel_dp->edp_dpcd[0] < DP_EDP_14) + return; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { + drm_err(&i915->drm, "Failed to read MSO cap\n"); + return; + } + + /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ + mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; + if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { + drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso); + mso = 0; + } + + if (mso) { + drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n", + mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso); + if (!HAS_MSO(i915)) { + drm_err(&i915->drm, "No source MSO support, disabling\n"); + mso = 0; + } + } + + intel_dp->mso_link_count = mso; + intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */ +} + static bool intel_edp_init_dpcd(struct intel_dp *intel_dp) { @@ -3600,6 +3690,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) */ intel_edp_init_source_oui(intel_dp, true); + intel_edp_mso_init(intel_dp); + return true; } @@ -5548,19 +5640,18 @@ static int intel_dp_get_modes(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); struct edid *edid; + int num_modes = 0; edid = intel_connector->detect_edid; if (edid) { - int ret = intel_connector_update_modes(connector, edid); + num_modes = intel_connector_update_modes(connector, edid); if (intel_vrr_is_capable(connector)) drm_connector_set_vrr_capable_property(connector, true); - if (ret) - return ret; } - /* if eDP has no EDID, fall back to fixed mode */ + /* Also add fixed mode, which may or may not be present in EDID */ if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && intel_connector->panel.fixed_mode) { struct drm_display_mode *mode; @@ -5569,10 +5660,13 @@ static int intel_dp_get_modes(struct drm_connector *connector) intel_connector->panel.fixed_mode); if (mode) { drm_mode_probed_add(connector, mode); - return 1; + num_modes++; } } + if (num_modes) + return num_modes; + if (!edid) { struct intel_dp *intel_dp = intel_attached_dp(intel_connector); struct drm_display_mode *mode; @@ -5582,11 +5676,11 @@ static int intel_dp_get_modes(struct drm_connector *connector) intel_dp->downstream_ports); if (mode) { drm_mode_probed_add(connector, mode); - return 1; + num_modes++; } } - return 0; + return num_modes; } static int @@ -6459,6 +6553,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (fixed_mode) downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); + /* multiply the mode clock and horizontal timings for MSO */ + intel_edp_mso_mode_fixup(intel_connector, 
fixed_mode); + intel_edp_mso_mode_fixup(intel_connector, downclock_mode); + /* fallback to VBT if available for eDP */ if (!fixed_mode) fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); @@ -6641,6 +6739,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp->frl.is_trained = false; intel_dp->frl.trained_rate_gbps = 0; + intel_psr_init(intel_dp); + return true; fail: diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 4dba5bb15af5..40c516e90193 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -698,30 +698,6 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector, return 0; } -static bool intel_dp_mst_get_qses_status(struct intel_digital_port *dig_port, - struct intel_connector *connector) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct drm_dp_query_stream_enc_status_ack_reply reply; - struct intel_dp *intel_dp = &dig_port->dp; - int ret; - - ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr, - connector->port, &reply); - if (ret) { - drm_dbg_kms(&i915->drm, - "[%s:%d] failed QSES ret=%d\n", - connector->base.name, connector->base.base.id, ret); - return false; - } - - drm_dbg_kms(&i915->drm, "[%s:%d] QSES stream auth: %d stream enc: %d\n", - connector->base.name, connector->base.base.id, - reply.auth_completed, reply.encryption_enabled); - - return reply.auth_completed && reply.encryption_enabled; -} - static int intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, bool enable) @@ -757,11 +733,6 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, return 0; } -/* - * DP v2.0 I.3.3 ignore the stream signature L' in QSES reply msg reply. - * I.3.5 MST source device may use a QSES msg to query downstream status - * for a particular stream. - */ static int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port, struct intel_connector *connector) @@ -781,7 +752,7 @@ int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port, return ret; } - return intel_dp_mst_get_qses_status(dig_port, connector) ? 
0 : -EINVAL; + return 0; } static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 892d7db7d94f..19ba7c7cbaab 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -26,12 +26,13 @@ #include "intel_dp_link_training.h" static void -intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) +intel_dp_dump_link_status(struct drm_device *drm, + const u8 link_status[DP_LINK_STATUS_SIZE]) { - - DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x", - link_status[0], link_status[1], link_status[2], - link_status[3], link_status[4], link_status[5]); + drm_dbg_kms(drm, + "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", + link_status[0], link_status[1], link_status[2], + link_status[3], link_status[4], link_status[5]); } static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) @@ -642,7 +643,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, /* Make sure clock is still ok */ if (!drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { - intel_dp_dump_link_status(link_status); + intel_dp_dump_link_status(&i915->drm, link_status); drm_dbg_kms(&i915->drm, "Clock recovery check failed, cannot " "continue channel equalization\n"); @@ -669,7 +670,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, /* Try 5 times, else fail and try at lower BW */ if (tries == 5) { - intel_dp_dump_link_status(link_status); + intel_dp_dump_link_status(&i915->drm, link_status); drm_dbg_kms(&i915->drm, "Channel equalization failed 5 times\n"); } @@ -731,7 +732,7 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp, out: drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s", + "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n", intel_connector->base.base.id, intel_connector->base.name, ret ? 
"passed" : "failed", diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index b4621ed0127e..906860ad8eb8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -39,6 +39,7 @@ #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_hdcp.h" +#include "skl_scaler.h" static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, @@ -590,7 +591,7 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; - intel_ddi_get_config(&dig_port->base, pipe_config); + dig_port->base.get_config(&dig_port->base, pipe_config); } static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 7ba7f315aaee..166e9a3a8c09 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -3,11 +3,13 @@ * Copyright © 2020 Intel Corporation */ #include <linux/kernel.h> +#include "intel_crtc.h" #include "intel_display_types.h" #include "intel_display.h" #include "intel_dpll.h" #include "intel_lvds.h" #include "intel_panel.h" +#include "intel_sideband.h" struct intel_limit { struct { @@ -1361,3 +1363,510 @@ intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) else dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; } + +static bool i9xx_has_pps(struct drm_i915_private *dev_priv) +{ + if (IS_I830(dev_priv)) + return false; + + return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); +} + +void i9xx_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + i915_reg_t reg = DPLL(crtc->pipe); + u32 dpll = crtc_state->dpll_hw_state.dpll; + int i; + + assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); + + /* PLL is protected by panel, make sure we can write it */ + if (i9xx_has_pps(dev_priv)) + assert_panel_unlocked(dev_priv, crtc->pipe); + + /* + * Apparently we need to have VGA mode enabled prior to changing + * the P1/P2 dividers. Otherwise the DPLL will keep using the old + * dividers, even though the register value does change. + */ + intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS); + intel_de_write(dev_priv, reg, dpll); + + /* Wait for the clocks to stabilize. */ + intel_de_posting_read(dev_priv, reg); + udelay(150); + + if (INTEL_GEN(dev_priv) >= 4) { + intel_de_write(dev_priv, DPLL_MD(crtc->pipe), + crtc_state->dpll_hw_state.dpll_md); + } else { + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + intel_de_write(dev_priv, reg, dpll); + } + + /* We do this three times for luck */ + for (i = 0; i < 3; i++) { + intel_de_write(dev_priv, reg, dpll); + intel_de_posting_read(dev_priv, reg); + udelay(150); /* wait for warmup */ + } +} + +static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 reg_val; + + /* + * PLLB opamp always calibrates to max value of 0x3f, force enable it + * and set it to a reasonable value instead. 
+ */ + reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); + reg_val &= 0xffffff00; + reg_val |= 0x00000030; + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); + + reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); + reg_val &= 0x00ffffff; + reg_val |= 0x8c000000; + vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); + + reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); + reg_val &= 0xffffff00; + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); + + reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); + reg_val &= 0x00ffffff; + reg_val |= 0xb0000000; + vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); +} + +static void _vlv_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); + intel_de_posting_read(dev_priv, DPLL(pipe)); + udelay(150); + + if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) + drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe); +} + +void vlv_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); + + /* PLL is protected by panel, make sure we can write it */ + assert_panel_unlocked(dev_priv, pipe); + + if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) + _vlv_enable_pll(crtc, pipe_config); + + intel_de_write(dev_priv, DPLL_MD(pipe), + pipe_config->dpll_hw_state.dpll_md); + intel_de_posting_read(dev_priv, DPLL_MD(pipe)); +} + + +static void _chv_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + enum dpio_channel port = vlv_pipe_to_channel(pipe); + u32 tmp; + + vlv_dpio_get(dev_priv); + + /* Enable back the 10bit clock to display controller */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); + tmp |= DPIO_DCLKP_EN; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); + + vlv_dpio_put(dev_priv); + + /* + * Need to wait > 100ns between dclkp clock enable bit and PLL enable. + */ + udelay(1); + + /* Enable PLL */ + intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); + + /* Check PLL is locked */ + if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) + drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); +} + +void chv_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); + + /* PLL is protected by panel, make sure we can write it */ + assert_panel_unlocked(dev_priv, pipe); + + if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) + _chv_enable_pll(crtc, pipe_config); + + if (pipe != PIPE_A) { + /* + * WaPixelRepeatModeFixForC0:chv + * + * DPLLCMD is AWOL. Use chicken bits to propagate + * the value from DPLLBMD to either pipe B or C. + */ + intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); + intel_de_write(dev_priv, DPLL_MD(PIPE_B), + pipe_config->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, CBR4_VLV, 0); + dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; + + /* + * DPLLB VGA mode also seems to cause problems. 
+ * We should always have it disabled. + */ + drm_WARN_ON(&dev_priv->drm, + (intel_de_read(dev_priv, DPLL(PIPE_B)) & + DPLL_VGA_MODE_DIS) == 0); + } else { + intel_de_write(dev_priv, DPLL_MD(pipe), + pipe_config->dpll_hw_state.dpll_md); + intel_de_posting_read(dev_priv, DPLL_MD(pipe)); + } +} + +void vlv_prepare_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe = crtc->pipe; + u32 mdiv; + u32 bestn, bestm1, bestm2, bestp1, bestp2; + u32 coreclk, reg_val; + + /* Enable Refclk */ + intel_de_write(dev_priv, DPLL(pipe), + pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); + + /* No need to actually set up the DPLL with DSI */ + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) + return; + + vlv_dpio_get(dev_priv); + + bestn = pipe_config->dpll.n; + bestm1 = pipe_config->dpll.m1; + bestm2 = pipe_config->dpll.m2; + bestp1 = pipe_config->dpll.p1; + bestp2 = pipe_config->dpll.p2; + + /* See eDP HDMI DPIO driver vbios notes doc */ + + /* PLL B needs special handling */ + if (pipe == PIPE_B) + vlv_pllb_recal_opamp(dev_priv, pipe); + + /* Set up Tx target for periodic Rcomp update */ + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); + + /* Disable target IRef on PLL */ + reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); + reg_val &= 0x00ffffff; + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); + + /* Disable fast lock */ + vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); + + /* Set idtafcrecal before PLL is enabled */ + mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); + mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); + mdiv |= ((bestn << DPIO_N_SHIFT)); + mdiv |= (1 << DPIO_K_SHIFT); + + /* + * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, + * but we don't support that). + * Note: don't use the DAC post divider as it seems unstable. 
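To make the divider programming above easier to follow, here is a small, self-contained sketch of the arithmetic usually documented for these PLLs and performed by vlv_calc_dpll_params() (declared in intel_dpll.h further down): the VCO is roughly refclk * m1 * m2 / n and the dot clock is the VCO divided by p1 * p2. Treat the exact formula, the 100 MHz reference and the sample dividers as illustrative assumptions, not values taken from this patch.

#include <stdio.h>

int main(void)
{
	/* Hypothetical dividers; the real ones come from the DPLL search code. */
	int refclk = 100000;			/* kHz, assumed reference clock */
	int n = 1, m1 = 2, m2 = 27, p1 = 4, p2 = 2;
	int vco = refclk * m1 * m2 / n;		/* 5400000 kHz */
	int dot = vco / (p1 * p2);		/* 675000 kHz */

	printf("vco = %d kHz, dot clock = %d kHz\n", vco, dot);
	return 0;
}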
+ */ + mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); + + mdiv |= DPIO_ENABLE_CALIBRATION; + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); + + /* Set HBR and RBR LPF coefficients */ + if (pipe_config->port_clock == 162000 || + intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || + intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), + 0x009f0003); + else + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), + 0x00d0000f); + + if (intel_crtc_has_dp_encoder(pipe_config)) { + /* Use SSC source */ + if (pipe == PIPE_A) + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + 0x0df40000); + else + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + 0x0df70000); + } else { /* HDMI or VGA */ + /* Use bend source */ + if (pipe == PIPE_A) + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + 0x0df70000); + else + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + 0x0df40000); + } + + coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); + coreclk = (coreclk & 0x0000ff00) | 0x01c00000; + if (intel_crtc_has_dp_encoder(pipe_config)) + coreclk |= 0x01000000; + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); + + vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); + + vlv_dpio_put(dev_priv); +} + +void chv_prepare_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe = crtc->pipe; + enum dpio_channel port = vlv_pipe_to_channel(pipe); + u32 loopfilter, tribuf_calcntr; + u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; + u32 dpio_val; + int vco; + + /* Enable Refclk and SSC */ + intel_de_write(dev_priv, DPLL(pipe), + pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); + + /* No need to actually set up the DPLL with DSI */ + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) + return; + + bestn = pipe_config->dpll.n; + bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; + bestm1 = pipe_config->dpll.m1; + bestm2 = pipe_config->dpll.m2 >> 22; + bestp1 = pipe_config->dpll.p1; + bestp2 = pipe_config->dpll.p2; + vco = pipe_config->dpll.vco; + dpio_val = 0; + loopfilter = 0; + + vlv_dpio_get(dev_priv); + + /* p1 and p2 divider */ + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), + 5 << DPIO_CHV_S1_DIV_SHIFT | + bestp1 << DPIO_CHV_P1_DIV_SHIFT | + bestp2 << DPIO_CHV_P2_DIV_SHIFT | + 1 << DPIO_CHV_K_DIV_SHIFT); + + /* Feedback post-divider - m2 */ + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); + + /* Feedback refclk divider - n and m1 */ + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), + DPIO_CHV_M1_DIV_BY_2 | + 1 << DPIO_CHV_N_DIV_SHIFT); + + /* M2 fraction division */ + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); + + /* M2 fraction division enable */ + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); + dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); + dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); + if (bestm2_frac) + dpio_val |= DPIO_CHV_FRAC_DIV_EN; + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); + + /* Program digital lock detect threshold */ + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); + dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | + DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); + dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); + if (!bestm2_frac) + dpio_val |= 
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); + + /* Loop filter */ + if (vco == 5400000) { + loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x9; + } else if (vco <= 6200000) { + loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x9; + } else if (vco <= 6480000) { + loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x8; + } else { + /* Not supported. Apply the same limits as in the max case */ + loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0; + } + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); + + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); + dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; + dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); + + /* AFC Recal */ + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), + vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | + DPIO_AFC_RECAL); + + vlv_dpio_put(dev_priv); +} + +/** + * vlv_force_pll_on - forcibly enable just the PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to enable + * @dpll: PLL configuration + * + * Enable the PLL for @pipe using the supplied @dpll config. To be used + * in cases where we need the PLL enabled even when @pipe is not going to + * be enabled. 
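Since vlv_force_pll_on()/vlv_force_pll_off() are exported through intel_dpll.h by this patch (and intel_pps.c gains the include further down), a minimal, hypothetical caller might look like the fragment below. The divider values, the PIPE_A choice and the helper name are placeholders, and the fragment assumes the usual i915 headers are available.

static void example_with_forced_pll(struct drm_i915_private *dev_priv)
{
	/* Placeholder dividers; a real caller would use a validated config. */
	const struct dpll config = {
		.n = 1, .m1 = 2, .m2 = 27, .p1 = 4, .p2 = 2,
	};

	if (vlv_force_pll_on(dev_priv, PIPE_A, &config))
		return;	/* -ENOMEM allocating the temporary crtc state */

	/* ... do whatever needed a running pixel clock on pipe A ... */

	vlv_force_pll_off(dev_priv, PIPE_A);
}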
+ */ +int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, + const struct dpll *dpll) +{ + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc_state *pipe_config; + + pipe_config = intel_crtc_state_alloc(crtc); + if (!pipe_config) + return -ENOMEM; + + pipe_config->cpu_transcoder = (enum transcoder)pipe; + pipe_config->pixel_multiplier = 1; + pipe_config->dpll = *dpll; + + if (IS_CHERRYVIEW(dev_priv)) { + chv_compute_dpll(crtc, pipe_config); + chv_prepare_pll(crtc, pipe_config); + chv_enable_pll(crtc, pipe_config); + } else { + vlv_compute_dpll(crtc, pipe_config); + vlv_prepare_pll(crtc, pipe_config); + vlv_enable_pll(crtc, pipe_config); + } + + kfree(pipe_config); + + return 0; +} + +void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + u32 val; + + /* Make sure the pipe isn't still relying on us */ + assert_pipe_disabled(dev_priv, (enum transcoder)pipe); + + val = DPLL_INTEGRATED_REF_CLK_VLV | + DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (pipe != PIPE_A) + val |= DPLL_INTEGRATED_CRI_CLK_VLV; + + intel_de_write(dev_priv, DPLL(pipe), val); + intel_de_posting_read(dev_priv, DPLL(pipe)); +} + +void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + enum dpio_channel port = vlv_pipe_to_channel(pipe); + u32 val; + + /* Make sure the pipe isn't still relying on us */ + assert_pipe_disabled(dev_priv, (enum transcoder)pipe); + + val = DPLL_SSC_REF_CLK_CHV | + DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (pipe != PIPE_A) + val |= DPLL_INTEGRATED_CRI_CLK_VLV; + + intel_de_write(dev_priv, DPLL(pipe), val); + intel_de_posting_read(dev_priv, DPLL(pipe)); + + vlv_dpio_get(dev_priv); + + /* Disable 10bit clock to display controller */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); + val &= ~DPIO_DCLKP_EN; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); + + vlv_dpio_put(dev_priv); +} + +void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + /* Don't disable pipe or pipe PLLs if needed */ + if (IS_I830(dev_priv)) + return; + + /* Make sure the pipe isn't still relying on us */ + assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); + + intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); + intel_de_posting_read(dev_priv, DPLL(pipe)); +} + + +/** + * vlv_force_pll_off - forcibly disable just the PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to disable + * + * Disable the PLL for @pipe. To be used in cases where we need + * the PLL enabled even when @pipe is not going to be enabled. 
+ */ +void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + if (IS_CHERRYVIEW(dev_priv)) + chv_disable_pll(dev_priv, pipe); + else + vlv_disable_pll(dev_priv, pipe); +} diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index caf4615092e1..7ff4b0d29ed1 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -10,6 +10,7 @@ struct dpll; struct drm_i915_private; struct intel_crtc; struct intel_crtc_state; +enum pipe; void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); int vlv_calc_dpll_params(int refclk, struct dpll *clock); @@ -20,4 +21,21 @@ void vlv_compute_dpll(struct intel_crtc *crtc, void chv_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); +int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, + const struct dpll *dpll); +void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); +void i9xx_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state); +void vlv_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config); +void chv_enable_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config); +void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); +void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); +void i9xx_disable_pll(const struct intel_crtc_state *crtc_state); +void vlv_prepare_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config); +void chv_prepare_pll(struct intel_crtc *crtc, + const struct intel_crtc_state *pipe_config); + #endif diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index f6ad257a260e..22ee8e13b518 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -176,7 +176,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) return; mutex_lock(&dev_priv->dpll.lock); - drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask); + drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask); if (!pll->active_mask) { drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name); drm_WARN_ON(&dev_priv->drm, pll->on); @@ -198,7 +198,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - unsigned int crtc_mask = drm_crtc_mask(&crtc->base); + unsigned int pipe_mask = BIT(crtc->pipe); unsigned int old_mask; if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) @@ -207,16 +207,16 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) mutex_lock(&dev_priv->dpll.lock); old_mask = pll->active_mask; - if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) || - drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask)) + if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) || + drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask)) goto out; - pll->active_mask |= crtc_mask; + pll->active_mask |= pipe_mask; drm_dbg_kms(&dev_priv->drm, - "enable %s (active %x, on? %d) for crtc %d\n", + "enable %s (active 0x%x, on? 
%d) for [CRTC:%d:%s]\n", pll->info->name, pll->active_mask, pll->on, - crtc->base.base.id); + crtc->base.base.id, crtc->base.name); if (old_mask) { drm_WARN_ON(&dev_priv->drm, !pll->on); @@ -244,7 +244,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - unsigned int crtc_mask = drm_crtc_mask(&crtc->base); + unsigned int pipe_mask = BIT(crtc->pipe); /* PCH only available on ILK+ */ if (INTEL_GEN(dev_priv) < 5) @@ -254,18 +254,20 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) return; mutex_lock(&dev_priv->dpll.lock); - if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask))) + if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask), + "%s not used by [CRTC:%d:%s]\n", pll->info->name, + crtc->base.base.id, crtc->base.name)) goto out; drm_dbg_kms(&dev_priv->drm, - "disable %s (active %x, on? %d) for crtc %d\n", + "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n", pll->info->name, pll->active_mask, pll->on, - crtc->base.base.id); + crtc->base.base.id, crtc->base.name); assert_shared_dpll_enabled(dev_priv, pll); drm_WARN_ON(&dev_priv->drm, !pll->on); - pll->active_mask &= ~crtc_mask; + pll->active_mask &= ~pipe_mask; if (pll->active_mask) goto out; @@ -296,7 +298,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state, pll = &dev_priv->dpll.shared_dplls[i]; /* Only want to check enabled timings first */ - if (shared_dpll[i].crtc_mask == 0) { + if (shared_dpll[i].pipe_mask == 0) { if (!unused_pll) unused_pll = pll; continue; @@ -306,10 +308,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state, &shared_dpll[i].hw_state, sizeof(*pll_state)) == 0) { drm_dbg_kms(&dev_priv->drm, - "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", + "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n", crtc->base.base.id, crtc->base.name, pll->info->name, - shared_dpll[i].crtc_mask, + shared_dpll[i].pipe_mask, pll->active_mask); return pll; } @@ -338,13 +340,13 @@ intel_reference_shared_dpll(struct intel_atomic_state *state, shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - if (shared_dpll[id].crtc_mask == 0) + if (shared_dpll[id].pipe_mask == 0) shared_dpll[id].hw_state = *pll_state; drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name, pipe_name(crtc->pipe)); - shared_dpll[id].crtc_mask |= 1 << crtc->pipe; + shared_dpll[id].pipe_mask |= BIT(crtc->pipe); } static void intel_unreference_shared_dpll(struct intel_atomic_state *state, @@ -354,7 +356,7 @@ static void intel_unreference_shared_dpll(struct intel_atomic_state *state, struct intel_shared_dpll_state *shared_dpll; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe); + shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe); } static void intel_put_dpll(struct intel_atomic_state *state, @@ -3559,7 +3561,13 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); - if (IS_DG1(dev_priv)) { + if (IS_ALDERLAKE_S(dev_priv)) { + dpll_mask = + BIT(DPLL_ID_DG1_DPLL3) | + BIT(DPLL_ID_DG1_DPLL2) | + BIT(DPLL_ID_ICL_DPLL1) | + BIT(DPLL_ID_ICL_DPLL0); + } else if (IS_DG1(dev_priv)) { if (port == PORT_D || port == PORT_E) { dpll_mask = BIT(DPLL_ID_DG1_DPLL2) | @@ -3865,7 +3873,10 @@ static bool 
icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!(val & PLL_ENABLE)) goto out; - if (IS_DG1(dev_priv)) { + if (IS_ALDERLAKE_S(dev_priv)) { + hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id)); + } else if (IS_DG1(dev_priv)) { hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id)); } else if (IS_ROCKETLAKE(dev_priv)) { @@ -3921,7 +3932,10 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; i915_reg_t cfgcr0_reg, cfgcr1_reg; - if (IS_DG1(dev_priv)) { + if (IS_ALDERLAKE_S(dev_priv)) { + cfgcr0_reg = ADLS_DPLL_CFGCR0(id); + cfgcr1_reg = ADLS_DPLL_CFGCR1(id); + } else if (IS_DG1(dev_priv)) { cfgcr0_reg = DG1_DPLL_CFGCR0(id); cfgcr1_reg = DG1_DPLL_CFGCR1(id); } else if (IS_ROCKETLAKE(dev_priv)) { @@ -4384,6 +4398,22 @@ static const struct intel_dpll_mgr dg1_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct dpll_info adls_plls[] = { + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 }, + { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 }, + { }, +}; + +static const struct intel_dpll_mgr adls_pll_mgr = { + .dpll_info = adls_plls, + .get_dplls = icl_get_dplls, + .put_dplls = icl_put_dplls, + .update_ref_clks = icl_update_dpll_ref_clks, + .dump_hw_state = icl_dump_hw_state, +}; + /** * intel_shared_dpll_init - Initialize shared DPLLs * @dev: drm device @@ -4397,7 +4427,9 @@ void intel_shared_dpll_init(struct drm_device *dev) const struct dpll_info *dpll_info; int i; - if (IS_DG1(dev_priv)) + if (IS_ALDERLAKE_S(dev_priv)) + dpll_mgr = &adls_pll_mgr; + else if (IS_DG1(dev_priv)) dpll_mgr = &dg1_pll_mgr; else if (IS_ROCKETLAKE(dev_priv)) dpll_mgr = &rkl_pll_mgr; @@ -4567,27 +4599,30 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, POWER_DOMAIN_DPLL_DC_OFF); } - pll->state.crtc_mask = 0; + pll->state.pipe_mask = 0; for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); if (crtc_state->hw.active && crtc_state->shared_dpll == pll) - pll->state.crtc_mask |= 1 << crtc->pipe; + pll->state.pipe_mask |= BIT(crtc->pipe); } - pll->active_mask = pll->state.crtc_mask; + pll->active_mask = pll->state.pipe_mask; drm_dbg_kms(&i915->drm, - "%s hw state readout: crtc_mask 0x%08x, on %i\n", - pll->info->name, pll->state.crtc_mask, pll->on); + "%s hw state readout: pipe_mask 0x%x, on %i\n", + pll->info->name, pll->state.pipe_mask, pll->on); } -void intel_dpll_readout_hw_state(struct drm_i915_private *i915) +void intel_dpll_update_ref_clks(struct drm_i915_private *i915) { - int i; - if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks) i915->dpll.mgr->update_ref_clks(i915); +} + +void intel_dpll_readout_hw_state(struct drm_i915_private *i915) +{ + int i; for (i = 0; i < i915->dpll.num_shared_dpll; i++) readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 2eb7618ef957..7fd031a70cfd 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -241,9 +241,9 @@ struct intel_dpll_hw_state { */ struct intel_shared_dpll_state { /** - * @crtc_mask: mask of CRTC using this DPLL, active or not + * @pipe_mask: mask of pipes 
using this DPLL, active or not */ - unsigned crtc_mask; + u8 pipe_mask; /** * @hw_state: hardware configuration for the DPLL stored in @@ -351,9 +351,9 @@ struct intel_shared_dpll { struct intel_shared_dpll_state state; /** - * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL + * @active_mask: mask of active pipes (i.e. DPMS on) using this DPLL */ - unsigned active_mask; + u8 active_mask; /** * @on: is the PLL actually active? Disabled during modeset @@ -410,6 +410,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); void intel_shared_dpll_init(struct drm_device *dev); +void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv); void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv); void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index eed037ec0b29..e349caef1926 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -425,7 +425,7 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, const u16 slave_addr) { struct drm_device *drm_dev = intel_dsi->base.base.dev; - struct device *dev = &drm_dev->pdev->dev; + struct device *dev = drm_dev->dev; struct acpi_device *acpi_dev; struct list_head resource_list; struct i2c_adapter_lookup lookup; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 84f853f113b9..07db8e83f98e 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -167,7 +167,7 @@ static int intelfb_create(struct drm_fb_helper *helper, struct intel_framebuffer *intel_fb = ifbdev->fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; const struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL, diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index b2eb96ae10a2..60b29110099a 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -3,6 +3,8 @@ * Copyright © 2020 Intel Corporation */ #include "intel_atomic.h" +#include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" #include "intel_display_types.h" #include "intel_fdi.h" @@ -550,6 +552,142 @@ train_done: drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); } +/* Starting with Haswell, different DDI ports can work in FDI mode for + * connection to the PCH-located connectors. For this, it is necessary to train + * both the DDI port and PCH receiver for the desired DDI buffer settings. + * + * The recommended port to work in FDI mode is DDI E, which we use here. 
Also, + * please note that when FDI mode is active on DDI E, it shares 2 lines with + * DDI A (which is used for eDP) + */ +void hsw_fdi_link_train(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 temp, i, rx_ctl_val; + int n_entries; + + intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries); + + intel_prepare_dp_ddi_buffers(encoder, crtc_state); + + /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the + * mode set "sequence for CRT port" document: + * - TP1 to TP2 time with the default value + * - FDI delay to 90h + * + * WaFDIAutoLinkSetTimingOverrride:hsw + */ + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + + /* Enable the PCH Receiver FDI PLL */ + rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | + FDI_RX_PLL_ENABLE | + FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); + intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); + udelay(220); + + /* Switch from Rawclk to PCDclk */ + rx_ctl_val |= FDI_PCDCLK; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); + + /* Configure Port Clock Select */ + drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL); + intel_ddi_enable_clock(encoder, crtc_state); + + /* Start the training iterating through available voltages and emphasis, + * testing each value twice. */ + for (i = 0; i < n_entries * 2; i++) { + /* Configure DP_TP_CTL with auto-training */ + intel_de_write(dev_priv, DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_LINK_TRAIN_PAT1 | + DP_TP_CTL_ENABLE); + + /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. + * DDI E does not support port reversal, the functionality is + * achieved on the PCH side in FDI_RX_CTL, so no need to set the + * port reversal bit */ + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), + DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2)); + intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); + + udelay(600); + + /* Program PCH FDI Receiver TU */ + intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); + + /* Enable PCH FDI Receiver with auto-training */ + rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); + intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); + + /* Wait for FDI receiver lane calibration */ + udelay(30); + + /* Unset FDI_RX_MISC pwrdn lanes */ + temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); + temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); + + /* Wait for FDI auto training time */ + udelay(5); + + temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E)); + if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { + drm_dbg_kms(&dev_priv->drm, + "FDI link training done on step %d\n", i); + break; + } + + /* + * Leave things enabled even if we failed to train FDI. + * Results in less fireworks from the state checker. 
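As a side note on the training loop above: because each buffer-translation entry is tried twice, the loop index maps onto table entries via i / 2. A tiny standalone demonstration follows; the entry count of 10 is an assumption, the real value comes from intel_ddi_get_buf_trans_fdi().

#include <stdio.h>

int main(void)
{
	int n_entries = 10;	/* hypothetical table size */

	/* Entries are selected as 0,0,1,1,...,9,9 across the 2*n attempts. */
	for (int i = 0; i < n_entries * 2; i++)
		printf("attempt %2d -> DDI_BUF_TRANS_SELECT(%d)\n", i, i / 2);
	return 0;
}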
+ */ + if (i == n_entries * 2 - 1) { + drm_err(&dev_priv->drm, "FDI link training failed!\n"); + break; + } + + rx_ctl_val &= ~FDI_RX_ENABLE; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); + intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); + + temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); + temp &= ~DDI_BUF_CTL_ENABLE; + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp); + intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); + + /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ + temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E)); + temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + temp |= DP_TP_CTL_LINK_TRAIN_PAT1; + intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp); + intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); + + intel_wait_ddi_buf_idle(dev_priv, PORT_E); + + /* Reset FDI_RX_MISC pwrdn lanes */ + temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); + temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); + } + + /* Enable normal pixel sending for FDI */ + intel_de_write(dev_priv, DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); +} + void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h index a9cd21663eb8..af01d2c173a8 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.h +++ b/drivers/gpu/drm/i915/display/intel_fdi.h @@ -9,6 +9,7 @@ struct drm_i915_private; struct intel_crtc; struct intel_crtc_state; +struct intel_encoder; #define I915_DISPLAY_CONFIG_RETRY 1 int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, @@ -18,5 +19,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc); void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc); void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state); void intel_fdi_init_hook(struct drm_i915_private *dev_priv); +void hsw_fdi_link_train(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); #endif diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 7b38eee9980f..6fc6965b6133 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -224,6 +224,8 @@ static void frontbuffer_release(struct kref *ref) struct drm_i915_gem_object *obj = front->obj; struct i915_vma *vma; + drm_WARN_ON(obj->base.dev, atomic_read(&front->bits)); + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { i915_vma_clear_scanout(vma); diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index b0d71bbbf2ad..0c952e1d720e 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -840,7 +840,7 @@ static const struct i2c_lock_operations gmbus_lock_ops = { */ int intel_gmbus_setup(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_gmbus *bus; unsigned int pin; int ret; diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index d5f4b40a8460..7f384f259fc8 100644 --- 
a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2218,7 +2218,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, has_hdmi_sink)) return MODE_CLOCK_HIGH; - /* BXT DPLL can't generate 223-240 MHz */ + /* GLK DPLL can't generate 446-480 MHz */ + if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000) + return MODE_CLOCK_RANGE; + + /* BXT/GLK DPLL can't generate 223-240 MHz */ if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000) return MODE_CLOCK_RANGE; @@ -2229,6 +2233,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, return MODE_OK; } +static int intel_hdmi_port_clock(int clock, int bpc) +{ + /* + * Need to adjust the port link by: + * 1.5x for 12bpc + * 1.25x for 10bpc + */ + return clock * bpc / 8; +} + static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) @@ -2260,17 +2274,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector, clock /= 2; /* check if we can do 8bpc */ - status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink); + status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8), + true, has_hdmi_sink); if (has_hdmi_sink) { /* if we can't do 8bpc we may still be able to do 12bpc */ if (status != MODE_OK && !HAS_GMCH(dev_priv)) - status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, + status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12), true, has_hdmi_sink); /* if we can't do 8,12bpc we may still be able to do 10bpc */ if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11) - status = hdmi_port_clock_valid(hdmi, clock * 5 / 4, + status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10), true, has_hdmi_sink); } if (status != MODE_OK) @@ -2378,16 +2393,6 @@ intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state, return intel_pch_panel_fitting(crtc_state, conn_state); } -static int intel_hdmi_port_clock(int clock, int bpc) -{ - /* - * Need to adjust the port link by: - * 1.5x for 12bpc - * 1.25x for 10bpc - */ - return clock * bpc / 8; -} - static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int clock) @@ -3133,11 +3138,45 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) return GMBUS_PIN_1_BXT + phy; } +static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port) +{ + enum phy phy = intel_port_to_phy(i915, port); + + drm_WARN_ON(&i915->drm, port == PORT_A); + + /* + * Pin mapping for GEN9 BC depends on which PCH is present. With TGP, + * final two outputs use type-c pins, even though they're actually + * combo outputs. With CMP, the traditional DDI A-D pins are used for + * all outputs. + */ + if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C) + return GMBUS_PIN_9_TC1_ICP + phy - PHY_C; + + return GMBUS_PIN_1_BXT + phy; +} + static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { return intel_port_to_phy(dev_priv, port) + 1; } +static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +{ + enum phy phy = intel_port_to_phy(dev_priv, port); + + WARN_ON(port == PORT_B || port == PORT_C); + + /* + * Pin mapping for ADL-S requires TC pins for all combo phy outputs + * except first combo output. 
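Spelling out the adls_port_to_ddc_pin() mapping implemented just below: the first combo PHY keeps the legacy pin, every later combo PHY takes a type-C pin counted from PHY_B. A standalone sketch with simplified stand-in values (1 and 9 stand in for GMBUS_PIN_1_BXT and GMBUS_PIN_9_TC1_ICP, and the number of PHYs shown is an assumption):

#include <stdio.h>

enum phy { PHY_A, PHY_B, PHY_C, PHY_D, PHY_E };

static int adls_ddc_pin(int phy)
{
	if (phy == PHY_A)
		return 1;		/* GMBUS_PIN_1_BXT stand-in */
	return 9 + (phy - PHY_B);	/* GMBUS_PIN_9_TC1_ICP + n stand-in */
}

int main(void)
{
	for (int p = PHY_A; p <= PHY_E; p++)
		printf("PHY %c -> ddc pin %d\n", 'A' + p, adls_ddc_pin(p));
	return 0;
}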
+ */ + if (phy == PHY_A) + return GMBUS_PIN_1_BXT; + + return GMBUS_PIN_9_TC1_ICP + phy - PHY_B; +} + static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { @@ -3175,10 +3214,14 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) return ddc_pin; } - if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + if (HAS_PCH_ADP(dev_priv)) + ddc_pin = adls_port_to_ddc_pin(dev_priv, port); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) ddc_pin = dg1_port_to_ddc_pin(dev_priv, port); else if (IS_ROCKETLAKE(dev_priv)) ddc_pin = rkl_port_to_ddc_pin(dev_priv, port); + else if (IS_GEN9_BC(dev_priv) && HAS_PCH_TGP(dev_priv)) + ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_MCC(dev_priv)) ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 1c939f9c9bc9..7f3c638c8950 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -80,6 +80,7 @@ static struct platform_device * lpe_audio_platdev_create(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct platform_device_info pinfo = {}; struct resource *rsc; struct platform_device *platdev; @@ -99,9 +100,9 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) rsc[0].flags = IORESOURCE_IRQ; rsc[0].name = "hdmi-lpe-audio-irq"; - rsc[1].start = pci_resource_start(dev->pdev, 0) + + rsc[1].start = pci_resource_start(pdev, 0) + I915_HDMI_LPE_AUDIO_BASE; - rsc[1].end = pci_resource_start(dev->pdev, 0) + + rsc[1].end = pci_resource_start(pdev, 0) + I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1; rsc[1].flags = IORESOURCE_MEM; rsc[1].name = "hdmi-lpe-audio-mmio"; diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 4f77cf849171..dfd724e506b5 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -247,7 +247,7 @@ static int swsci(struct drm_i915_private *dev_priv, u32 function, u32 parm, u32 *parm_out) { struct opregion_swsci *swsci = dev_priv->opregion.swsci; - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 main_function, sub_function, scic; u16 swsci_val; u32 dslp; @@ -807,7 +807,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) if (!name || !*name) return -ENOENT; - ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev); + ret = request_firmware(&fw, name, dev_priv->drm.dev); if (ret) { drm_err(&dev_priv->drm, "Requesting VBT firmware \"%s\" failed (%d)\n", @@ -840,7 +840,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) int intel_opregion_setup(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 8edb9b2cdbeb..ef8f44f5e751 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -183,6 +183,7 @@ struct intel_overlay { struct intel_crtc *crtc; struct i915_vma *vma; struct i915_vma *old_vma; + struct intel_frontbuffer 
*frontbuffer; bool active; bool pfit_active; u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ @@ -202,7 +203,7 @@ struct intel_overlay { static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, bool enable) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u8 val; /* WA_OVERLAY_CLKGATE:alm */ @@ -283,21 +284,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, struct i915_vma *vma) { enum pipe pipe = overlay->crtc->pipe; - struct intel_frontbuffer *from = NULL, *to = NULL; + struct intel_frontbuffer *frontbuffer = NULL; drm_WARN_ON(&overlay->i915->drm, overlay->old_vma); - if (overlay->vma) - from = intel_frontbuffer_get(overlay->vma->obj); if (vma) - to = intel_frontbuffer_get(vma->obj); + frontbuffer = intel_frontbuffer_get(vma->obj); - intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe)); + intel_frontbuffer_track(overlay->frontbuffer, frontbuffer, + INTEL_FRONTBUFFER_OVERLAY(pipe)); - if (to) - intel_frontbuffer_put(to); - if (from) - intel_frontbuffer_put(from); + if (overlay->frontbuffer) + intel_frontbuffer_put(overlay->frontbuffer); + overlay->frontbuffer = frontbuffer; intel_frontbuffer_flip_prepare(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(pipe)); diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 5fdf52643150..4653b5ef382f 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -596,7 +596,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unuse if (panel->backlight.combination_mode) { u8 lbpc; - pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc); + pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc); val *= lbpc; } @@ -664,7 +664,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1; level /= lbpc; - pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc); + pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc); } if (IS_GEN(dev_priv, 4)) { diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index c4867a8020a5..f20ba71f4307 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dp.h" +#include "intel_dpll.h" #include "intel_pps.h" static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 850cb7f5b332..cd434285e3b7 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -32,6 +32,7 @@ #include "intel_hdmi.h" #include "intel_psr.h" #include "intel_sprite.h" +#include "skl_universal_plane.h" /** * DOC: Panel Self Refresh (PSR/SRD) @@ -80,9 +81,11 @@ * use page flips. 
*/ -static bool psr_global_enabled(struct drm_i915_private *i915) +static bool psr_global_enabled(struct intel_dp *intel_dp) { - switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: return i915->params.enable_psr; case I915_PSR_DEBUG_DISABLE: @@ -92,9 +95,9 @@ static bool psr_global_enabled(struct drm_i915_private *i915) } } -static bool psr2_global_enabled(struct drm_i915_private *dev_priv) +static bool psr2_global_enabled(struct intel_dp *intel_dp) { - switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { + switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; @@ -103,11 +106,12 @@ static bool psr2_global_enabled(struct drm_i915_private *dev_priv) } } -static void psr_irq_control(struct drm_i915_private *dev_priv) +static void psr_irq_control(struct intel_dp *intel_dp) { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder trans_shift; - u32 mask, val; i915_reg_t imr_reg; + u32 mask, val; /* * gen12+ has registers relative to transcoder and one per transcoder @@ -116,14 +120,14 @@ static void psr_irq_control(struct drm_i915_private *dev_priv) */ if (INTEL_GEN(dev_priv) >= 12) { trans_shift = 0; - imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); + imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); } else { - trans_shift = dev_priv->psr.transcoder; + trans_shift = intel_dp->psr.transcoder; imr_reg = EDP_PSR_IMR; } mask = EDP_PSR_ERROR(trans_shift); - if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ) + if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ) mask |= EDP_PSR_POST_EXIT(trans_shift) | EDP_PSR_PRE_ENTRY(trans_shift); @@ -172,30 +176,31 @@ static void psr_event_print(struct drm_i915_private *i915, drm_dbg_kms(&i915->drm, "\tPSR disabled\n"); } -void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) +void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) { - enum transcoder cpu_transcoder = dev_priv->psr.transcoder; + enum transcoder cpu_transcoder = intel_dp->psr.transcoder; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + ktime_t time_ns = ktime_get(); enum transcoder trans_shift; i915_reg_t imr_reg; - ktime_t time_ns = ktime_get(); if (INTEL_GEN(dev_priv) >= 12) { trans_shift = 0; - imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); + imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); } else { - trans_shift = dev_priv->psr.transcoder; + trans_shift = intel_dp->psr.transcoder; imr_reg = EDP_PSR_IMR; } if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { - dev_priv->psr.last_entry_attempt = time_ns; + intel_dp->psr.last_entry_attempt = time_ns; drm_dbg_kms(&dev_priv->drm, "[transcoder %s] PSR entry attempt in 2 vblanks\n", transcoder_name(cpu_transcoder)); } if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { - dev_priv->psr.last_exit = time_ns; + intel_dp->psr.last_exit = time_ns; drm_dbg_kms(&dev_priv->drm, "[transcoder %s] PSR exit completed\n", transcoder_name(cpu_transcoder)); @@ -203,7 +208,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) if (INTEL_GEN(dev_priv) >= 9) { u32 val = intel_de_read(dev_priv, PSR_EVENT(cpu_transcoder)); - bool psr2_enabled = dev_priv->psr.psr2_enabled; + bool psr2_enabled = intel_dp->psr.psr2_enabled; intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder), val); @@ -217,7 +222,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 
psr_iir) drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", transcoder_name(cpu_transcoder)); - dev_priv->psr.irq_aux_error = true; + intel_dp->psr.irq_aux_error = true; /* * If this interruption is not masked it will keep @@ -231,7 +236,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) val |= EDP_PSR_ERROR(trans_shift); intel_de_write(dev_priv, imr_reg, val); - schedule_work(&dev_priv->psr.work); + schedule_work(&intel_dp->psr.work); } } @@ -292,12 +297,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); - if (dev_priv->psr.dp) { - drm_warn(&dev_priv->drm, - "More than one eDP panel found, PSR support should be extended\n"); - return; - } - drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, sizeof(intel_dp->psr_dpcd)); @@ -318,12 +317,10 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) return; } - dev_priv->psr.sink_support = true; - dev_priv->psr.sink_sync_latency = + intel_dp->psr.sink_support = true; + intel_dp->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); - dev_priv->psr.dp = intel_dp; - if (INTEL_GEN(dev_priv) >= 9 && (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { bool y_req = intel_dp->psr_dpcd[1] & @@ -341,14 +338,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) * Y-coordinate requirement panels we would need to enable * GTC first. */ - dev_priv->psr.sink_psr2_support = y_req && alpm; + intel_dp->psr.sink_psr2_support = y_req && alpm; drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", - dev_priv->psr.sink_psr2_support ? "" : "not "); + intel_dp->psr.sink_psr2_support ? "" : "not "); - if (dev_priv->psr.sink_psr2_support) { - dev_priv->psr.colorimetry_support = + if (intel_dp->psr.sink_psr2_support) { + intel_dp->psr.colorimetry_support = intel_dp_get_colorimetry_status(intel_dp); - dev_priv->psr.su_x_granularity = + intel_dp->psr.su_x_granularity = intel_dp_get_su_x_granulartiy(intel_dp); } } @@ -374,7 +371,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) BUILD_BUG_ON(sizeof(aux_msg) > 20); for (i = 0; i < sizeof(aux_msg); i += 4) intel_de_write(dev_priv, - EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), + EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2), intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); @@ -385,7 +382,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) /* Select only valid bits for SRD_AUX_CTL */ aux_ctl &= psr_aux_mask; - intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), + intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder), aux_ctl); } @@ -395,14 +392,14 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) u8 dpcd_val = DP_PSR_ENABLE; /* Enable ALPM at sink for psr2 */ - if (dev_priv->psr.psr2_enabled) { + if (intel_dp->psr.psr2_enabled) { drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS; } else { - if (dev_priv->psr.link_standby) + if (intel_dp->psr.link_standby) dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; if (INTEL_GEN(dev_priv) >= 8) @@ -465,7 +462,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) * off-by-one issue that HW has in some cases. 
*/ idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); + idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1); if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf)) idle_frames = 0xf; @@ -485,7 +482,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (IS_HASWELL(dev_priv)) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; - if (dev_priv->psr.link_standby) + if (intel_dp->psr.link_standby) val |= EDP_PSR_LINK_STANDBY; val |= intel_psr1_get_tp_time(intel_dp); @@ -493,9 +490,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; - val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & + val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); - intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val); } static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) @@ -530,7 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) val |= EDP_Y_COORDINATE_ENABLE; - val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); + val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1); val |= intel_psr2_get_tp_time(intel_dp); if (INTEL_GEN(dev_priv) >= 12) { @@ -549,29 +546,29 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FAST_WAKE(7); } - if (dev_priv->psr.psr2_sel_fetch_enabled) { + if (intel_dp->psr.psr2_sel_fetch_enabled) { /* WA 1408330847 */ - if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) || IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, DIS_RAM_BYPASS_PSR2_MAN_TRACK); intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), + PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), PSR2_MAN_TRK_CTL_ENABLE); } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0); + PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0); } /* * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. 
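
The idle-frame value programmed by psr_compute_idle_frames() above folds together the VBT minimum, the sink's reported sync latency and a hardware off-by-one. A minimal stand-alone sketch of that arithmetic (the function and parameter names are simplified stand-ins, not the driver's helpers):

    #include <stdio.h>

    /* Illustrative only: mirrors the clamping done in psr_compute_idle_frames(). */
    static unsigned int compute_idle_frames(int vbt_idle_frames, int sink_sync_latency)
    {
            /* at least 6 idle frames, VBT may ask for more */
            int idle_frames = vbt_idle_frames > 6 ? vbt_idle_frames : 6;

            /* +1 guards against an off-by-one in some hardware */
            if (idle_frames < sink_sync_latency + 1)
                    idle_frames = sink_sync_latency + 1;

            /* the register field only holds 0..0xf */
            if (idle_frames > 0xf)
                    idle_frames = 0xf;

            return idle_frames;
    }

    int main(void)
    {
            printf("%u\n", compute_idle_frames(0, 8)); /* -> 9 */
            return 0;
    }
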
*/ - intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0); + intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0); - intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); } static bool @@ -594,55 +591,58 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) drm_mode_vrefresh(&cstate->hw.adjusted_mode)); } -static void psr2_program_idle_frames(struct drm_i915_private *dev_priv, +static void psr2_program_idle_frames(struct intel_dp *intel_dp, u32 idle_frames) { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val; idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; - val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)); + val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder)); val &= ~EDP_PSR2_IDLE_FRAME_MASK; val |= idle_frames; - intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); } -static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv) +static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) { - psr2_program_idle_frames(dev_priv, 0); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + psr2_program_idle_frames(intel_dp, 0); intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); } -static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv) +static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp) { - struct intel_dp *intel_dp = dev_priv->psr.dp; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); - psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp)); + psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp)); } static void tgl_dc3co_disable_work(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), psr.dc3co_work.work); + struct intel_dp *intel_dp = + container_of(work, typeof(*intel_dp), psr.dc3co_work.work); - mutex_lock(&dev_priv->psr.lock); + mutex_lock(&intel_dp->psr.lock); /* If delayed work is pending, it is not idle */ - if (delayed_work_pending(&dev_priv->psr.dc3co_work)) + if (delayed_work_pending(&intel_dp->psr.dc3co_work)) goto unlock; - tgl_psr2_disable_dc3co(dev_priv); + tgl_psr2_disable_dc3co(intel_dp); unlock: - mutex_unlock(&dev_priv->psr.lock); + mutex_unlock(&intel_dp->psr.lock); } -static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv) +static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp) { - if (!dev_priv->psr.dc3co_enabled) + if (!intel_dp->psr.dc3co_enabled) return; - cancel_delayed_work(&dev_priv->psr.dc3co_work); + cancel_delayed_work(&intel_dp->psr.dc3co_work); /* Before PSR2 exit disallow dc3co*/ - tgl_psr2_disable_dc3co(dev_priv); + tgl_psr2_disable_dc3co(intel_dp); } static void @@ -654,6 +654,13 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 exit_scanlines; + /* + * DMC's DC3CO exit mechanism has an issue with Selective Fecth + * TODO: when the issue is addressed, this restriction should be removed. 
+ */ + if (crtc_state->enable_psr2_sel_fetch) + return; + if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO)) return; @@ -684,7 +691,8 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, struct intel_plane *plane; int i; - if (!dev_priv->params.enable_psr2_sel_fetch) { + if (!dev_priv->params.enable_psr2_sel_fetch && + intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { drm_dbg_kms(&dev_priv->drm, "PSR2 sel fetch not enabled, disabled by parameter\n"); return false; @@ -715,9 +723,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; - if (!dev_priv->psr.sink_psr2_support) + if (!intel_dp->psr.sink_psr2_support) return false; + /* JSL and EHL only supports eDP 1.3 */ + if (IS_JSL_EHL(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n"); + return false; + } + if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not supported in transcoder %s\n", @@ -725,7 +739,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - if (!psr2_global_enabled(dev_priv)) { + if (!psr2_global_enabled(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n"); return false; } @@ -774,10 +788,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * only need to validate the SU block width is a multiple of * x granularity. */ - if (crtc_hdisplay % dev_priv->psr.su_x_granularity) { + if (crtc_hdisplay % intel_dp->psr.su_x_granularity) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, hdisplay(%d) not multiple of %d\n", - crtc_hdisplay, dev_priv->psr.su_x_granularity); + crtc_hdisplay, intel_dp->psr.su_x_granularity); return false; } @@ -806,7 +820,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -819,30 +832,15 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, if (crtc_state->vrr.enable) return; - if (!CAN_PSR(dev_priv)) + if (!CAN_PSR(intel_dp)) return; - if (intel_dp != dev_priv->psr.dp) - return; - - if (!psr_global_enabled(dev_priv)) { + if (!psr_global_enabled(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); return; } - /* - * HSW spec explicitly says PSR is tied to port A. 
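
intel_psr2_config_valid() above also rejects PSR2 whenever the active width is not a whole multiple of the sink's selective-update X granularity. A small illustrative check of that rule (hypothetical values, not taken from any real panel):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative check mirroring the hdisplay % su_x_granularity test above. */
    static bool psr2_su_width_ok(int crtc_hdisplay, int su_x_granularity)
    {
            if (su_x_granularity <= 0)
                    return true; /* nothing reported by the sink; treat as OK in this sketch */

            return (crtc_hdisplay % su_x_granularity) == 0;
    }

    int main(void)
    {
            printf("%d\n", psr2_su_width_ok(1920, 4)); /* 1: multiple of 4 */
            printf("%d\n", psr2_su_width_ok(1366, 4)); /* 0: PSR2 would be refused */
            return 0;
    }
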
- * BDW+ platforms have a instance of PSR registers per transcoder but - * for now it only supports one instance of PSR, so lets keep it - * hardcoded to PORT_A - */ - if (dig_port->base.port != PORT_A) { - drm_dbg_kms(&dev_priv->drm, - "PSR condition failed: Port not supported\n"); - return; - } - - if (dev_priv->psr.sink_not_reliable) { + if (intel_dp->psr.sink_not_reliable) { drm_dbg_kms(&dev_priv->drm, "PSR sink implementation is not reliable\n"); return; @@ -878,23 +876,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, static void intel_psr_activate(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum transcoder transcoder = intel_dp->psr.transcoder; - if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) + if (transcoder_has_psr2(dev_priv, transcoder)) drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); + intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); - drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active); - lockdep_assert_held(&dev_priv->psr.lock); + intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE); + drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active); + lockdep_assert_held(&intel_dp->psr.lock); /* psr1 and psr2 are mutually exclusive.*/ - if (dev_priv->psr.psr2_enabled) + if (intel_dp->psr.psr2_enabled) hsw_activate_psr2(intel_dp); else hsw_activate_psr1(intel_dp); - dev_priv->psr.active = true; + intel_dp->psr.active = true; } static void intel_psr_enable_source(struct intel_dp *intel_dp, @@ -910,7 +909,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) hsw_psr_setup_aux(intel_dp); - if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && + if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))) { i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); u32 chicken = intel_de_read(dev_priv, reg); @@ -934,10 +933,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (INTEL_GEN(dev_priv) < 11) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; - intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder), + intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder), mask); - psr_irq_control(dev_priv); + psr_irq_control(intel_dp); if (crtc_state->dc3co_exitline) { u32 val; @@ -955,30 +954,30 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, - dev_priv->psr.psr2_sel_fetch_enabled ? + intel_dp->psr.psr2_sel_fetch_enabled ? 
IGNORE_PSR2_HW_TRACKING : 0); } -static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, +static void intel_psr_enable_locked(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = dev_priv->psr.dp; struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dig_port->base; u32 val; - drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled); + drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); - dev_priv->psr.psr2_enabled = crtc_state->has_psr2; - dev_priv->psr.busy_frontbuffer_bits = 0; - dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; - dev_priv->psr.transcoder = crtc_state->cpu_transcoder; + intel_dp->psr.psr2_enabled = crtc_state->has_psr2; + intel_dp->psr.busy_frontbuffer_bits = 0; + intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; + intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; + intel_dp->psr.transcoder = crtc_state->cpu_transcoder; /* DC5/DC6 requires at least 6 idle frames */ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); - dev_priv->psr.dc3co_exit_delay = val; - dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; + intel_dp->psr.dc3co_exit_delay = val; + intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch; /* * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR @@ -990,27 +989,27 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, */ if (INTEL_GEN(dev_priv) >= 12) { val = intel_de_read(dev_priv, - TRANS_PSR_IIR(dev_priv->psr.transcoder)); + TRANS_PSR_IIR(intel_dp->psr.transcoder)); val &= EDP_PSR_ERROR(0); } else { val = intel_de_read(dev_priv, EDP_PSR_IIR); - val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); + val &= EDP_PSR_ERROR(intel_dp->psr.transcoder); } if (val) { - dev_priv->psr.sink_not_reliable = true; + intel_dp->psr.sink_not_reliable = true; drm_dbg_kms(&dev_priv->drm, "PSR interruption error set, not enabling PSR\n"); return; } drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", - dev_priv->psr.psr2_enabled ? "2" : "1"); + intel_dp->psr.psr2_enabled ? 
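
intel_psr_enable_locked() sizes the DC3CO exit delay as six frame times, since DC5/DC6 entry wants at least six idle frames. A rough sketch of that arithmetic, assuming the frame time is derived from the refresh rate as roughly 1,000,000 us / vrefresh (the driver converts the same value to jiffies):

    #include <stdio.h>

    /* Illustrative: frame time rounded up from the vertical refresh rate. */
    static unsigned int frame_time_us(unsigned int vrefresh)
    {
            return (1000000 + vrefresh - 1) / vrefresh;
    }

    static unsigned int dc3co_exit_delay_us(unsigned int vrefresh)
    {
            return frame_time_us(vrefresh) * 6; /* DC5/DC6 wants >= 6 idle frames */
    }

    int main(void)
    {
            printf("60 Hz -> %u us\n", dc3co_exit_delay_us(60)); /* about 100 ms */
            return 0;
    }
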
"2" : "1"); intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, - &dev_priv->psr.vsc); - intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc); + &intel_dp->psr.vsc); + intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc); intel_psr_enable_sink(intel_dp); intel_psr_enable_source(intel_dp, crtc_state); - dev_priv->psr.enabled = true; + intel_dp->psr.enabled = true; intel_psr_activate(intel_dp); } @@ -1029,7 +1028,7 @@ void intel_psr_enable(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp) + if (!CAN_PSR(intel_dp)) return; if (!crtc_state->has_psr) @@ -1037,46 +1036,47 @@ void intel_psr_enable(struct intel_dp *intel_dp, drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp); - mutex_lock(&dev_priv->psr.lock); - intel_psr_enable_locked(dev_priv, crtc_state, conn_state); - mutex_unlock(&dev_priv->psr.lock); + mutex_lock(&intel_dp->psr.lock); + intel_psr_enable_locked(intel_dp, crtc_state, conn_state); + mutex_unlock(&intel_dp->psr.lock); } -static void intel_psr_exit(struct drm_i915_private *dev_priv) +static void intel_psr_exit(struct intel_dp *intel_dp) { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val; - if (!dev_priv->psr.active) { - if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { + if (!intel_dp->psr.active) { + if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) { val = intel_de_read(dev_priv, - EDP_PSR2_CTL(dev_priv->psr.transcoder)); + EDP_PSR2_CTL(intel_dp->psr.transcoder)); drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); } val = intel_de_read(dev_priv, - EDP_PSR_CTL(dev_priv->psr.transcoder)); + EDP_PSR_CTL(intel_dp->psr.transcoder)); drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); return; } - if (dev_priv->psr.psr2_enabled) { - tgl_disallow_dc3co_on_psr2_exit(dev_priv); + if (intel_dp->psr.psr2_enabled) { + tgl_disallow_dc3co_on_psr2_exit(intel_dp); val = intel_de_read(dev_priv, - EDP_PSR2_CTL(dev_priv->psr.transcoder)); + EDP_PSR2_CTL(intel_dp->psr.transcoder)); drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); val &= ~EDP_PSR2_ENABLE; intel_de_write(dev_priv, - EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + EDP_PSR2_CTL(intel_dp->psr.transcoder), val); } else { val = intel_de_read(dev_priv, - EDP_PSR_CTL(dev_priv->psr.transcoder)); + EDP_PSR_CTL(intel_dp->psr.transcoder)); drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); val &= ~EDP_PSR_ENABLE; intel_de_write(dev_priv, - EDP_PSR_CTL(dev_priv->psr.transcoder), val); + EDP_PSR_CTL(intel_dp->psr.transcoder), val); } - dev_priv->psr.active = false; + intel_dp->psr.active = false; } static void intel_psr_disable_locked(struct intel_dp *intel_dp) @@ -1085,21 +1085,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) i915_reg_t psr_status; u32 psr_status_mask; - lockdep_assert_held(&dev_priv->psr.lock); + lockdep_assert_held(&intel_dp->psr.lock); - if (!dev_priv->psr.enabled) + if (!intel_dp->psr.enabled) return; drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", - dev_priv->psr.psr2_enabled ? "2" : "1"); + intel_dp->psr.psr2_enabled ? 
"2" : "1"); - intel_psr_exit(dev_priv); + intel_psr_exit(intel_dp); - if (dev_priv->psr.psr2_enabled) { - psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder); + if (intel_dp->psr.psr2_enabled) { + psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder); psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; } else { - psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder); + psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder); psr_status_mask = EDP_PSR_STATUS_STATE_MASK; } @@ -1109,8 +1109,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); /* WA 1408330847 */ - if (dev_priv->psr.psr2_sel_fetch_enabled && - (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + if (intel_dp->psr.psr2_sel_fetch_enabled && + (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) || IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); @@ -1118,10 +1118,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); - if (dev_priv->psr.psr2_enabled) + if (intel_dp->psr.psr2_enabled) drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); - dev_priv->psr.enabled = false; + intel_dp->psr.enabled = false; } /** @@ -1139,20 +1139,22 @@ void intel_psr_disable(struct intel_dp *intel_dp, if (!old_crtc_state->has_psr) return; - if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp))) return; - mutex_lock(&dev_priv->psr.lock); + mutex_lock(&intel_dp->psr.lock); intel_psr_disable_locked(intel_dp); - mutex_unlock(&dev_priv->psr.lock); - cancel_work_sync(&dev_priv->psr.work); - cancel_delayed_work_sync(&dev_priv->psr.dc3co_work); + mutex_unlock(&intel_dp->psr.lock); + cancel_work_sync(&intel_dp->psr.work); + cancel_delayed_work_sync(&intel_dp->psr.dc3co_work); } -static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) +static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + if (IS_TIGERLAKE(dev_priv)) /* * Writes to CURSURFLIVE in TGL are causing IOMMU errors and @@ -1166,7 +1168,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) * So using this workaround until this issue is root caused * and a better fix is found. */ - intel_psr_exit(dev_priv); + intel_psr_exit(intel_dp); else if (INTEL_GEN(dev_priv) >= 9) /* * Display WA #0884: skl+ @@ -1177,13 +1179,13 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) * but it makes more sense write to the current active * pipe. */ - intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0); + intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); else /* * A write to CURSURFLIVE do not cause HW tracking to exit PSR * on older gens so doing the manual exit instead. 
*/ - intel_psr_exit(dev_priv); + intel_psr_exit(intel_dp); } void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, @@ -1231,15 +1233,13 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct i915_psr *psr = &dev_priv->psr; + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (!HAS_PSR2_SEL_FETCH(dev_priv) || !crtc_state->enable_psr2_sel_fetch) return; - intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder), + intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder), crtc_state->psr2_man_track_ctl); } @@ -1435,29 +1435,30 @@ void intel_psr_update(struct intel_dp *intel_dp, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct i915_psr *psr = &dev_priv->psr; + struct intel_psr *psr = &intel_dp->psr; bool enable, psr2_enable; - if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp) + if (!CAN_PSR(intel_dp)) return; - mutex_lock(&dev_priv->psr.lock); + mutex_lock(&intel_dp->psr.lock); enable = crtc_state->has_psr; psr2_enable = crtc_state->has_psr2; - if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { + if (enable == psr->enabled && psr2_enable == psr->psr2_enabled && + crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) { /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ if (crtc_state->crc_enabled && psr->enabled) - psr_force_hw_tracking_exit(dev_priv); + psr_force_hw_tracking_exit(intel_dp); else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) { /* * Activate PSR again after a force exit when enabling * CRC in older gens */ - if (!dev_priv->psr.active && - !dev_priv->psr.busy_frontbuffer_bits) - schedule_work(&dev_priv->psr.work); + if (!intel_dp->psr.active && + !intel_dp->psr.busy_frontbuffer_bits) + schedule_work(&intel_dp->psr.work); } goto unlock; @@ -1467,34 +1468,23 @@ void intel_psr_update(struct intel_dp *intel_dp, intel_psr_disable_locked(intel_dp); if (enable) - intel_psr_enable_locked(dev_priv, crtc_state, conn_state); + intel_psr_enable_locked(intel_dp, crtc_state, conn_state); unlock: - mutex_unlock(&dev_priv->psr.lock); + mutex_unlock(&intel_dp->psr.lock); } /** - * intel_psr_wait_for_idle - wait for PSR1 to idle - * @new_crtc_state: new CRTC state + * psr_wait_for_idle - wait for PSR1 to idle + * @intel_dp: Intel DP * @out_value: PSR status in case of failure * - * This function is expected to be called from pipe_update_start() where it is - * not expected to race with PSR enable or disable. - * * Returns: 0 on success or -ETIMEOUT if PSR status does not idle. 
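
intel_psr_update() above only tears PSR down and brings it back up when the requested state actually differs; with this change the selective-fetch mode is part of that comparison. A simplified sketch of the decision (field names invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative: recompute only when the programmed and wanted states diverge. */
    struct psr_cfg {
            bool enabled;
            bool psr2;
            bool sel_fetch;
    };

    static bool psr_needs_recompute(const struct psr_cfg *cur, const struct psr_cfg *want)
    {
            return cur->enabled != want->enabled ||
                   cur->psr2 != want->psr2 ||
                   cur->sel_fetch != want->sel_fetch;
    }

    int main(void)
    {
            struct psr_cfg cur = { true, true, false };
            struct psr_cfg want = { true, true, true };

            printf("%d\n", psr_needs_recompute(&cur, &want)); /* 1: sel-fetch changed */
            return 0;
    }
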
+ * */ -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, - u32 *out_value) +static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) - return 0; - - /* FIXME: Update this for PSR2 if we need to wait for idle */ - if (READ_ONCE(dev_priv->psr.psr2_enabled)) - return 0; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* * From bspec: Panel Self Refresh (BDW+) @@ -1502,32 +1492,68 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, * exit training time + 1.5 ms of aux channel handshake. 50 ms is * defensive enough to cover everything. */ - return __intel_wait_for_register(&dev_priv->uncore, - EDP_PSR_STATUS(dev_priv->psr.transcoder), + EDP_PSR_STATUS(intel_dp->psr.transcoder), EDP_PSR_STATUS_STATE_MASK, EDP_PSR_STATUS_STATE_IDLE, 2, 50, out_value); } -static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) +/** + * intel_psr_wait_for_idle - wait for PSR1 to idle + * @new_crtc_state: new CRTC state + * + * This function is expected to be called from pipe_update_start() where it is + * not expected to race with PSR enable or disable. + */ +void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) { + struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); + struct intel_encoder *encoder; + + if (!new_crtc_state->has_psr) + return; + + for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, + new_crtc_state->uapi.encoder_mask) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + u32 psr_status; + + mutex_lock(&intel_dp->psr.lock); + if (!intel_dp->psr.enabled || + (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) { + mutex_unlock(&intel_dp->psr.lock); + continue; + } + + /* when the PSR1 is enabled */ + if (psr_wait_for_idle(intel_dp, &psr_status)) + drm_err(&dev_priv->drm, + "PSR idle timed out 0x%x, atomic update may fail\n", + psr_status); + mutex_unlock(&intel_dp->psr.lock); + } +} + +static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t reg; u32 mask; int err; - if (!dev_priv->psr.enabled) + if (!intel_dp->psr.enabled) return false; - if (dev_priv->psr.psr2_enabled) { - reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder); + if (intel_dp->psr.psr2_enabled) { + reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder); mask = EDP_PSR2_STATUS_STATE_MASK; } else { - reg = EDP_PSR_STATUS(dev_priv->psr.transcoder); + reg = EDP_PSR_STATUS(intel_dp->psr.transcoder); mask = EDP_PSR_STATUS_STATE_MASK; } - mutex_unlock(&dev_priv->psr.lock); + mutex_unlock(&intel_dp->psr.lock); err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); if (err) @@ -1535,8 +1561,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) "Timed out waiting for PSR Idle for re-enable\n"); /* After the unlocked wait, verify that PSR is still wanted! 
*/ - mutex_lock(&dev_priv->psr.lock); - return err == 0 && dev_priv->psr.enabled; + mutex_lock(&intel_dp->psr.lock); + return err == 0 && intel_dp->psr.enabled; } static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) @@ -1602,33 +1628,34 @@ retry: return err; } -int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) +int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val) { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; u32 old_mode; int ret; if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || - mode > I915_PSR_DEBUG_FORCE_PSR1) { + mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) { drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val); return -EINVAL; } - ret = mutex_lock_interruptible(&dev_priv->psr.lock); + ret = mutex_lock_interruptible(&intel_dp->psr.lock); if (ret) return ret; - old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; - dev_priv->psr.debug = val; + old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK; + intel_dp->psr.debug = val; /* * Do it right away if it's already enabled, otherwise it will be done * when enabling the source. */ - if (dev_priv->psr.enabled) - psr_irq_control(dev_priv); + if (intel_dp->psr.enabled) + psr_irq_control(intel_dp); - mutex_unlock(&dev_priv->psr.lock); + mutex_unlock(&intel_dp->psr.lock); if (old_mode != mode) ret = intel_psr_fastset_force(dev_priv); @@ -1636,28 +1663,28 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) return ret; } -static void intel_psr_handle_irq(struct drm_i915_private *dev_priv) +static void intel_psr_handle_irq(struct intel_dp *intel_dp) { - struct i915_psr *psr = &dev_priv->psr; + struct intel_psr *psr = &intel_dp->psr; - intel_psr_disable_locked(psr->dp); + intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; /* let's make sure that sink is awaken */ - drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0); + drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } static void intel_psr_work(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), psr.work); + struct intel_dp *intel_dp = + container_of(work, typeof(*intel_dp), psr.work); - mutex_lock(&dev_priv->psr.lock); + mutex_lock(&intel_dp->psr.lock); - if (!dev_priv->psr.enabled) + if (!intel_dp->psr.enabled) goto unlock; - if (READ_ONCE(dev_priv->psr.irq_aux_error)) - intel_psr_handle_irq(dev_priv); + if (READ_ONCE(intel_dp->psr.irq_aux_error)) + intel_psr_handle_irq(intel_dp); /* * We have to make sure PSR is ready for re-enable @@ -1665,7 +1692,7 @@ static void intel_psr_work(struct work_struct *work) * PSR might take some time to get fully disabled * and be ready for re-enable. */ - if (!__psr_wait_for_idle_locked(dev_priv)) + if (!__psr_wait_for_idle_locked(intel_dp)) goto unlock; /* @@ -1673,12 +1700,12 @@ static void intel_psr_work(struct work_struct *work) * recheck. Since psr_flush first clears this and then reschedules we * won't ever miss a flush when bailing out here. 
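
intel_psr_debug_set() validates the debugfs value as a mode field plus independent flag bits and rejects anything else with -EINVAL. A stand-alone sketch of that validation pattern; the mask values below are placeholders, not the real I915_PSR_DEBUG_* definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DBG_MODE_MASK 0x0f
    #define DBG_IRQ_FLAG  0x10
    #define DBG_MODE_MAX  0x04 /* highest mode this sketch understands */

    static bool debug_value_valid(uint64_t val)
    {
            uint64_t mode = val & DBG_MODE_MASK;

            if (val & ~(uint64_t)(DBG_MODE_MASK | DBG_IRQ_FLAG))
                    return false; /* unknown bits set */

            return mode <= DBG_MODE_MAX; /* mode must be one we know about */
    }

    int main(void)
    {
            printf("%d %d\n", debug_value_valid(0x13), debug_value_valid(0x100)); /* 1 0 */
            return 0;
    }
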
*/ - if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) + if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active) goto unlock; - intel_psr_activate(dev_priv->psr.dp); + intel_psr_activate(intel_dp); unlock: - mutex_unlock(&dev_priv->psr.lock); + mutex_unlock(&intel_dp->psr.lock); } /** @@ -1697,27 +1724,31 @@ unlock: void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { - if (!CAN_PSR(dev_priv)) - return; + struct intel_encoder *encoder; if (origin == ORIGIN_FLIP) return; - mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + unsigned int pipe_frontbuffer_bits = frontbuffer_bits; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + mutex_lock(&intel_dp->psr.lock); + if (!intel_dp->psr.enabled) { + mutex_unlock(&intel_dp->psr.lock); + continue; + } - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); - dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; + pipe_frontbuffer_bits &= + INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe); + intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits; - if (frontbuffer_bits) - intel_psr_exit(dev_priv); + if (pipe_frontbuffer_bits) + intel_psr_exit(intel_dp); - mutex_unlock(&dev_priv->psr.lock); + mutex_unlock(&intel_dp->psr.lock); + } } - /* * When we will be completely rely on PSR2 S/W tracking in future, * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP @@ -1725,15 +1756,15 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, * accordingly in future. */ static void -tgl_dc3co_flush(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits, enum fb_op_origin origin) +tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, + enum fb_op_origin origin) { - mutex_lock(&dev_priv->psr.lock); + mutex_lock(&intel_dp->psr.lock); - if (!dev_priv->psr.dc3co_enabled) + if (!intel_dp->psr.dc3co_enabled) goto unlock; - if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active) + if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active) goto unlock; /* @@ -1741,15 +1772,15 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv, * when delayed work schedules that means display has been idle. 
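
The frontbuffer invalidate path now walks every encoder that supports PSR and only considers the bits that belong to that encoder's pipe. A toy model of that loop (eight frontbuffer bits per pipe is an assumption made for this sketch, and the struct below is not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define BITS_PER_PIPE 8
    #define PIPE_MASK(pipe) (0xffu << ((pipe) * BITS_PER_PIPE))

    struct psr_state {
            int pipe;
            bool enabled;
            unsigned int busy_frontbuffer_bits;
    };

    /* Illustrative: each encoder reacts only to dirty bits on its own pipe. */
    static void psr_invalidate(struct psr_state *psr, int count, unsigned int frontbuffer_bits)
    {
            for (int i = 0; i < count; i++) {
                    unsigned int pipe_bits = frontbuffer_bits & PIPE_MASK(psr[i].pipe);

                    if (!psr[i].enabled)
                            continue;

                    psr[i].busy_frontbuffer_bits |= pipe_bits;
                    if (pipe_bits)
                            printf("exit PSR on pipe %d\n", psr[i].pipe);
            }
    }

    int main(void)
    {
            struct psr_state psr[] = { { .pipe = 0, .enabled = true },
                                       { .pipe = 1, .enabled = true } };

            psr_invalidate(psr, 2, 0x3); /* dirty bits on pipe 0 only */
            return 0;
    }
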
*/ if (!(frontbuffer_bits & - INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe))) + INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe))) goto unlock; - tgl_psr2_enable_dc3co(dev_priv); - mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work, - dev_priv->psr.dc3co_exit_delay); + tgl_psr2_enable_dc3co(intel_dp); + mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work, + intel_dp->psr.dc3co_exit_delay); unlock: - mutex_unlock(&dev_priv->psr.lock); + mutex_unlock(&intel_dp->psr.lock); } /** @@ -1768,46 +1799,69 @@ unlock: void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { - if (!CAN_PSR(dev_priv)) - return; + struct intel_encoder *encoder; - if (origin == ORIGIN_FLIP) { - tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin); - return; - } + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + unsigned int pipe_frontbuffer_bits = frontbuffer_bits; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } + if (origin == ORIGIN_FLIP) { + tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin); + continue; + } - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); - dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; + mutex_lock(&intel_dp->psr.lock); + if (!intel_dp->psr.enabled) { + mutex_unlock(&intel_dp->psr.lock); + continue; + } + + pipe_frontbuffer_bits &= + INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe); + intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits; - /* By definition flush = invalidate + flush */ - if (frontbuffer_bits) - psr_force_hw_tracking_exit(dev_priv); + /* By definition flush = invalidate + flush */ + if (pipe_frontbuffer_bits) + psr_force_hw_tracking_exit(intel_dp); - if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) - schedule_work(&dev_priv->psr.work); - mutex_unlock(&dev_priv->psr.lock); + if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits) + schedule_work(&intel_dp->psr.work); + mutex_unlock(&intel_dp->psr.lock); + } } /** * intel_psr_init - Init basic PSR work and mutex. - * @dev_priv: i915 device private + * @intel_dp: Intel DP * - * This function is called only once at driver load to initialize basic - * PSR stuff. + * This function is called after the connector has been initialized + * (connector initialization handles the connector capabilities) + * and initializes basic PSR stuff for each DP encoder. */ -void intel_psr_init(struct drm_i915_private *dev_priv) +void intel_psr_init(struct intel_dp *intel_dp) { + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + if (!HAS_PSR(dev_priv)) return; - if (!dev_priv->psr.sink_support) + /* + * HSW spec explicitly says PSR is tied to port A. + * BDW+ platforms have an instance of PSR registers per transcoder but + * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder + * than eDP one. + * For now it only supports one instance of PSR for BDW, GEN9 and GEN11. + * So let's keep it hardcoded to PORT_A for BDW, GEN9 and GEN11. + * But GEN12 supports an instance of PSR registers per transcoder.
+ */ + if (INTEL_GEN(dev_priv) < 12 && dig_port->base.port != PORT_A) { + drm_dbg_kms(&dev_priv->drm, + "PSR condition failed: Port not supported\n"); return; + } + + intel_dp->psr.source_support = true; if (IS_HASWELL(dev_priv)) /* @@ -1824,14 +1878,14 @@ void intel_psr_init(struct drm_i915_private *dev_priv) /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) /* HSW and BDW require workarounds that we don't implement. */ - dev_priv->psr.link_standby = false; + intel_dp->psr.link_standby = false; else if (INTEL_GEN(dev_priv) < 12) /* For new platforms up to TGL let's respect VBT back again */ - dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; + intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link; - INIT_WORK(&dev_priv->psr.work, intel_psr_work); - INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work); - mutex_init(&dev_priv->psr.lock); + INIT_WORK(&intel_dp->psr.work, intel_psr_work); + INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work); + mutex_init(&intel_dp->psr.lock); } static int psr_get_status_and_error_status(struct intel_dp *intel_dp, @@ -1857,7 +1911,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct drm_dp_aux *aux = &intel_dp->aux; - struct i915_psr *psr = &dev_priv->psr; + struct intel_psr *psr = &intel_dp->psr; u8 val; int r; @@ -1884,7 +1938,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp) static void psr_capability_changed_check(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct i915_psr *psr = &dev_priv->psr; + struct intel_psr *psr = &intel_dp->psr; u8 val; int r; @@ -1908,18 +1962,18 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp) void intel_psr_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct i915_psr *psr = &dev_priv->psr; + struct intel_psr *psr = &intel_dp->psr; u8 status, error_status; const u8 errors = DP_PSR_RFB_STORAGE_ERROR | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | DP_PSR_LINK_CRC_ERROR; - if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) + if (!CAN_PSR(intel_dp)) return; mutex_lock(&psr->lock); - if (!psr->enabled || psr->dp != intel_dp) + if (!psr->enabled) goto exit; if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { @@ -1962,15 +2016,14 @@ exit: bool intel_psr_enabled(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); bool ret; - if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) + if (!CAN_PSR(intel_dp)) return false; - mutex_lock(&dev_priv->psr.lock); - ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled); - mutex_unlock(&dev_priv->psr.lock); + mutex_lock(&intel_dp->psr.lock); + ret = intel_dp->psr.enabled; + mutex_unlock(&intel_dp->psr.lock); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 0a517978e8af..0491a49ffd50 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -18,7 +18,6 @@ struct intel_atomic_state; struct intel_plane_state; struct intel_plane; -#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) void intel_psr_init_dpcd(struct intel_dp *intel_dp); void intel_psr_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, @@ -28,20 +27,19 @@ void intel_psr_disable(struct intel_dp *intel_dp, void intel_psr_update(struct 
intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value); +int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value); void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); -void intel_psr_init(struct drm_i915_private *dev_priv); +void intel_psr_init(struct intel_dp *intel_dp); void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); -void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); +void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, - u32 *out_value); +void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state); bool intel_psr_enabled(struct intel_dp *intel_dp); int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 46beb155d835..98dd787b00e3 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -160,7 +160,7 @@ static struct intel_quirk intel_quirks[] = { void intel_init_quirks(struct drm_i915_private *i915) { - struct pci_dev *d = i915->drm.pdev; + struct pci_dev *d = to_pci_dev(i915->drm.dev); int i; for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 4eaa4aa86ecd..3fac60899d8e 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -3281,7 +3281,7 @@ static bool intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); sdvo->ddc.owner = THIS_MODULE; sdvo->ddc.class = I2C_CLASS_DDC; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 993543334a1e..4cbdb8fd4bb1 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -45,252 +45,10 @@ #include "intel_atomic_plane.h" #include "intel_display_types.h" #include "intel_frontbuffer.h" -#include "intel_pm.h" -#include "intel_psr.h" -#include "intel_dsi.h" #include "intel_sprite.h" #include "i9xx_plane.h" #include "intel_vrr.h" -int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, - int usecs) -{ - /* paranoia */ - if (!adjusted_mode->crtc_htotal) - return 1; - - return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock, - 1000 * adjusted_mode->crtc_htotal); -} - -static int intel_mode_vblank_start(const struct drm_display_mode *mode) -{ - int vblank_start = mode->crtc_vblank_start; - - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - vblank_start = DIV_ROUND_UP(vblank_start, 2); - - return vblank_start; -} - -/** - * intel_pipe_update_start() - start update of a set of display registers - * @new_crtc_state: the new crtc state - * - * Mark the start of an update to pipe registers that should be updated - * atomically regarding vblank. 
If the next vblank will happens within - * the next 100 us, this function waits until the vblank passes. - * - * After a successful call to this function, interrupts will be disabled - * until a subsequent call to intel_pipe_update_end(). That is done to - * avoid random delays. - */ -void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode; - long timeout = msecs_to_jiffies_timeout(1); - int scanline, min, max, vblank_start; - wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); - bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); - DEFINE_WAIT(wait); - u32 psr_status; - - if (new_crtc_state->uapi.async_flip) - return; - - if (new_crtc_state->vrr.enable) - vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); - else - vblank_start = intel_mode_vblank_start(adjusted_mode); - - /* FIXME needs to be calibrated sensibly */ - min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, - VBLANK_EVASION_TIME_US); - max = vblank_start - 1; - - if (min <= 0 || max <= 0) - goto irq_disable; - - if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base))) - goto irq_disable; - - /* - * Wait for psr to idle out after enabling the VBL interrupts - * VBL interrupts will start the PSR exit and prevent a PSR - * re-entry as well. - */ - if (intel_psr_wait_for_idle(new_crtc_state, &psr_status)) - drm_err(&dev_priv->drm, - "PSR idle timed out 0x%x, atomic update may fail\n", - psr_status); - - local_irq_disable(); - - crtc->debug.min_vbl = min; - crtc->debug.max_vbl = max; - trace_intel_pipe_update_start(crtc); - - for (;;) { - /* - * prepare_to_wait() has a memory barrier, which guarantees - * other CPUs can see the task state update by the time we - * read the scanline. - */ - prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); - - scanline = intel_get_crtc_scanline(crtc); - if (scanline < min || scanline > max) - break; - - if (!timeout) { - drm_err(&dev_priv->drm, - "Potential atomic update failure on pipe %c\n", - pipe_name(crtc->pipe)); - break; - } - - local_irq_enable(); - - timeout = schedule_timeout(timeout); - - local_irq_disable(); - } - - finish_wait(wq, &wait); - - drm_crtc_vblank_put(&crtc->base); - - /* - * On VLV/CHV DSI the scanline counter would appear to - * increment approx. 1/3 of a scanline before start of vblank. - * The registers still get latched at start of vblank however. - * This means we must not write any registers on the first - * line of vblank (since not the whole line is actually in - * vblank). And unfortunately we can't use the interrupt to - * wait here since it will fire too soon. We could use the - * frame start interrupt instead since it will fire after the - * critical scanline, but that would require more changes - * in the interrupt code. So for now we'll just do the nasty - * thing and poll for the bad scanline to pass us by. 
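
The removed intel_pipe_update_start() opens its critical section only when the current scanline is outside an evasion window derived from the mode timings; intel_usecs_to_scanlines() above gives the conversion. A small worked example with made-up timings (100 us matches the window mentioned in the comment):

    #include <stdio.h>

    /* Illustrative: same round-up conversion as intel_usecs_to_scanlines(). */
    static int usecs_to_scanlines(int usecs, int crtc_clock_khz, int crtc_htotal)
    {
            if (!crtc_htotal)
                    return 1; /* paranoia, as in the original helper */

            return (usecs * crtc_clock_khz + 1000 * crtc_htotal - 1) /
                   (1000 * crtc_htotal);
    }

    int main(void)
    {
            int vblank_start = 1080, crtc_clock = 148500, crtc_htotal = 2200; /* example values */
            int min = vblank_start - usecs_to_scanlines(100, crtc_clock, crtc_htotal);
            int max = vblank_start - 1;

            printf("evade scanlines %d..%d\n", min, max); /* -> 1073..1079 */
            return 0;
    }
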
- * - * FIXME figure out if BXT+ DSI suffers from this as well - */ - while (need_vlv_dsi_wa && scanline == vblank_start) - scanline = intel_get_crtc_scanline(crtc); - - crtc->debug.scanline_start = scanline; - crtc->debug.start_vbl_time = ktime_get(); - crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); - - trace_intel_pipe_update_vblank_evaded(crtc); - return; - -irq_disable: - local_irq_disable(); -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) -static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) -{ - u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time)); - unsigned int h; - - h = ilog2(delta >> 9); - if (h >= ARRAY_SIZE(crtc->debug.vbl.times)) - h = ARRAY_SIZE(crtc->debug.vbl.times) - 1; - crtc->debug.vbl.times[h]++; - - crtc->debug.vbl.sum += delta; - if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min) - crtc->debug.vbl.min = delta; - if (delta > crtc->debug.vbl.max) - crtc->debug.vbl.max = delta; - - if (delta > 1000 * VBLANK_EVASION_TIME_US) { - drm_dbg_kms(crtc->base.dev, - "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", - pipe_name(crtc->pipe), - div_u64(delta, 1000), - VBLANK_EVASION_TIME_US); - crtc->debug.vbl.over++; - } -} -#else -static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {} -#endif - -/** - * intel_pipe_update_end() - end update of a set of display registers - * @new_crtc_state: the new crtc state - * - * Mark the end of an update started with intel_pipe_update_start(). This - * re-enables interrupts and verifies the update was actually completed - * before a vblank. - */ -void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - enum pipe pipe = crtc->pipe; - int scanline_end = intel_get_crtc_scanline(crtc); - u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); - ktime_t end_vbl_time = ktime_get(); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (new_crtc_state->uapi.async_flip) - return; - - trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end); - - /* - * Incase of mipi dsi command mode, we need to set frame update - * request for every commit. - */ - if (INTEL_GEN(dev_priv) >= 11 && - intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) - icl_dsi_frame_update(new_crtc_state); - - /* We're still in the vblank-evade critical section, this can't race. - * Would be slightly nice to just grab the vblank count and arm the - * event outside of the critical section - the spinlock might spin for a - * while ... 
*/ - if (new_crtc_state->uapi.event) { - drm_WARN_ON(&dev_priv->drm, - drm_crtc_vblank_get(&crtc->base) != 0); - - spin_lock(&crtc->base.dev->event_lock); - drm_crtc_arm_vblank_event(&crtc->base, - new_crtc_state->uapi.event); - spin_unlock(&crtc->base.dev->event_lock); - - new_crtc_state->uapi.event = NULL; - } - - local_irq_enable(); - - /* Send VRR Push to terminate Vblank */ - intel_vrr_send_push(new_crtc_state); - - if (intel_vgpu_active(dev_priv)) - return; - - if (crtc->debug.start_vbl_count && - crtc->debug.start_vbl_count != end_vbl_count) { - drm_err(&dev_priv->drm, - "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n", - pipe_name(pipe), crtc->debug.start_vbl_count, - end_vbl_count, - ktime_us_delta(end_vbl_time, - crtc->debug.start_vbl_time), - crtc->debug.min_vbl, crtc->debug.max_vbl, - crtc->debug.scanline_start, scanline_end); - } - - dbg_vblank_evade(crtc, end_vbl_time); -} - int intel_plane_check_stride(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); @@ -380,584 +138,6 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) return 0; } -static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915) -{ - if (IS_ROCKETLAKE(i915)) - return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3); - else - return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5); -} - -bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - return INTEL_GEN(dev_priv) >= 11 && - icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id); -} - -bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) -{ - return INTEL_GEN(dev_priv) >= 11 && - icl_hdr_plane_mask() & BIT(plane_id); -} - -static void -skl_plane_ratio(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - unsigned int *num, unsigned int *den) -{ - struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - - if (fb->format->cpp[0] == 8) { - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - *num = 10; - *den = 8; - } else { - *num = 9; - *den = 8; - } - } else { - *num = 1; - *den = 1; - } -} - -static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); - unsigned int num, den; - unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); - - skl_plane_ratio(crtc_state, plane_state, &num, &den); - - /* two pixels per clock on glk+ */ - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - den *= 2; - - return DIV_ROUND_UP(pixel_rate * num, den); -} - -static int skl_plane_max_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - int cpp = fb->format->cpp[color_plane]; - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - /* - * Validated limit is 4k, but has 5k should - * work apart from the following features: - * - Ytile (already limited to 4k) - * - FP16 (already limited to 4k) - * - render compression (already limited to 4k) - * - KVMR sprite and cursor (don't care) - * - horizontal panning (TODO verify this) - * - pipe and plane scaling (TODO verify this) - */ - if (cpp == 8) - return 4096; - else - return 5120; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - case 
I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - /* FIXME AUX plane? */ - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - if (cpp == 8) - return 2048; - else - return 4096; - default: - MISSING_CASE(fb->modifier); - return 2048; - } -} - -static int glk_plane_max_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - int cpp = fb->format->cpp[color_plane]; - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - if (cpp == 8) - return 4096; - else - return 5120; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - /* FIXME AUX plane? */ - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - if (cpp == 8) - return 2048; - else - return 5120; - default: - MISSING_CASE(fb->modifier); - return 2048; - } -} - -static int icl_plane_min_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - /* Wa_14011264657, Wa_14011050563: gen11+ */ - switch (fb->format->format) { - case DRM_FORMAT_C8: - return 18; - case DRM_FORMAT_RGB565: - return 10; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - return 6; - case DRM_FORMAT_NV12: - return 20; - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - return 12; - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ARGB16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - return 4; - default: - return 1; - } -} - -static int icl_plane_max_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - return 5120; -} - -static int skl_plane_max_height(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - return 4096; -} - -static int icl_plane_max_height(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - return 4320; -} - -static unsigned int -skl_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) -{ - const struct drm_format_info *info = drm_format_info(pixel_format); - int cpp = info->cpp[0]; - - /* - * "The stride in bytes must not exceed the - * of the size of 8K pixels and 32K bytes." 
- */ - if (drm_rotation_90_or_270(rotation)) - return min(8192, 32768 / cpp); - else - return min(8192 * cpp, 32768); -} - -static void -skl_program_scaler(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - enum pipe pipe = plane->pipe; - int scaler_id = plane_state->scaler_id; - const struct intel_scaler *scaler = - &crtc_state->scaler_state.scalers[scaler_id]; - int crtc_x = plane_state->uapi.dst.x1; - int crtc_y = plane_state->uapi.dst.y1; - u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); - u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u16 y_hphase, uv_rgb_hphase; - u16 y_vphase, uv_rgb_vphase; - int hscale, vscale; - u32 ps_ctrl; - - hscale = drm_rect_calc_hscale(&plane_state->uapi.src, - &plane_state->uapi.dst, - 0, INT_MAX); - vscale = drm_rect_calc_vscale(&plane_state->uapi.src, - &plane_state->uapi.dst, - 0, INT_MAX); - - /* TODO: handle sub-pixel coordinates */ - if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && - !icl_is_hdr_plane(dev_priv, plane->id)) { - y_hphase = skl_scaler_calc_phase(1, hscale, false); - y_vphase = skl_scaler_calc_phase(1, vscale, false); - - /* MPEG2 chroma siting convention */ - uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true); - uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false); - } else { - /* not used */ - y_hphase = 0; - y_vphase = 0; - - uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); - uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); - } - - ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0); - ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode; - - skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0, - plane_state->hw.scaling_filter); - - intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); - intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id), - PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); - intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id), - PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); - intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id), - (crtc_x << 16) | crtc_y); - intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id), - (crtc_w << 16) | crtc_h); -} - -/* Preoffset values for YUV to RGB Conversion */ -#define PREOFF_YUV_TO_RGB_HI 0x1800 -#define PREOFF_YUV_TO_RGB_ME 0x0000 -#define PREOFF_YUV_TO_RGB_LO 0x1800 - -#define ROFF(x) (((x) & 0xffff) << 16) -#define GOFF(x) (((x) & 0xffff) << 0) -#define BOFF(x) (((x) & 0xffff) << 16) - -/* - * Programs the input color space conversion stage for ICL HDR planes. - * Note that it is assumed that this stage always happens after YUV - * range correction. Thus, the input to this stage is assumed to be - * in full-range YCbCr. 
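
The stride limit encoded in the removed skl_plane_max_stride() is the smaller of 8K pixels and 32K bytes, returned in pixels for 90/270 rotation and in bytes otherwise. A stand-alone sketch of that rule:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative: min(8K pixels, 32K bytes), unit depending on rotation. */
    static int plane_max_stride(int cpp, bool rotated_90_or_270)
    {
            if (rotated_90_or_270)
                    return 8192 < 32768 / cpp ? 8192 : 32768 / cpp;

            return 8192 * cpp < 32768 ? 8192 * cpp : 32768;
    }

    int main(void)
    {
            printf("%d\n", plane_max_stride(4, false)); /* 32768 bytes */
            printf("%d\n", plane_max_stride(4, true));  /* 8192 pixels */
            return 0;
    }
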
- */ -static void -icl_program_input_csc(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum pipe pipe = plane->pipe; - enum plane_id plane_id = plane->id; - - static const u16 input_csc_matrix[][9] = { - /* - * BT.601 full range YCbCr -> full range RGB - * The matrix required is : - * [1.000, 0.000, 1.371, - * 1.000, -0.336, -0.698, - * 1.000, 1.732, 0.0000] - */ - [DRM_COLOR_YCBCR_BT601] = { - 0x7AF8, 0x7800, 0x0, - 0x8B28, 0x7800, 0x9AC0, - 0x0, 0x7800, 0x7DD8, - }, - /* - * BT.709 full range YCbCr -> full range RGB - * The matrix required is : - * [1.000, 0.000, 1.574, - * 1.000, -0.187, -0.468, - * 1.000, 1.855, 0.0000] - */ - [DRM_COLOR_YCBCR_BT709] = { - 0x7C98, 0x7800, 0x0, - 0x9EF8, 0x7800, 0xAC00, - 0x0, 0x7800, 0x7ED8, - }, - /* - * BT.2020 full range YCbCr -> full range RGB - * The matrix required is : - * [1.000, 0.000, 1.474, - * 1.000, -0.1645, -0.5713, - * 1.000, 1.8814, 0.0000] - */ - [DRM_COLOR_YCBCR_BT2020] = { - 0x7BC8, 0x7800, 0x0, - 0x8928, 0x7800, 0xAA88, - 0x0, 0x7800, 0x7F10, - }, - }; - const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; - - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), - ROFF(csc[0]) | GOFF(csc[1])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), - BOFF(csc[2])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), - ROFF(csc[3]) | GOFF(csc[4])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), - BOFF(csc[5])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), - ROFF(csc[6]) | GOFF(csc[7])); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), - BOFF(csc[8])); - - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), - PREOFF_YUV_TO_RGB_HI); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); - intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), - PREOFF_YUV_TO_RGB_LO); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); -} - -static void -skl_plane_async_flip(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - bool async_flip) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - unsigned long irqflags; - enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; - u32 surf_addr = plane_state->color_plane[0].offset; - u32 plane_ctl = plane_state->ctl; - - plane_ctl |= skl_plane_ctl_crtc(crtc_state); - - if (async_flip) - plane_ctl |= PLANE_CTL_ASYNC_FLIP; - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -static void -skl_program_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; - const struct drm_intel_sprite_colorkey *key 
= &plane_state->ckey; - u32 surf_addr = plane_state->color_plane[color_plane].offset; - u32 stride = skl_plane_stride(plane_state, color_plane); - const struct drm_framebuffer *fb = plane_state->hw.fb; - int aux_plane = intel_main_to_aux_plane(fb, color_plane); - int crtc_x = plane_state->uapi.dst.x1; - int crtc_y = plane_state->uapi.dst.y1; - u32 x = plane_state->color_plane[color_plane].x; - u32 y = plane_state->color_plane[color_plane].y; - u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u8 alpha = plane_state->hw.alpha >> 8; - u32 plane_color_ctl = 0, aux_dist = 0; - unsigned long irqflags; - u32 keymsk, keymax; - u32 plane_ctl = plane_state->ctl; - - plane_ctl |= skl_plane_ctl_crtc(crtc_state); - - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - plane_color_ctl = plane_state->color_ctl | - glk_plane_color_ctl_crtc(crtc_state); - - /* Sizes are 0 based */ - src_w--; - src_h--; - - keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - - keymsk = key->channel_mask & 0x7ffffff; - if (alpha < 0xff) - keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; - - /* The scaler will handle the output position */ - if (plane_state->scaler_id >= 0) { - crtc_x = 0; - crtc_y = 0; - } - - if (aux_plane) { - aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr; - - if (INTEL_GEN(dev_priv) < 12) - aux_dist |= skl_plane_stride(plane_state, aux_plane); - } - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); - intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - (src_h << 16) | src_w); - - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); - - if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), - plane_state->cus_ctl); - - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), - plane_color_ctl); - - if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) - icl_program_input_csc(plane, crtc_state, plane_state); - - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) - intel_uncore_write64_fw(&dev_priv->uncore, - PLANE_CC_VAL(pipe, plane_id), plane_state->ccval); - - skl_write_plane_wm(plane, crtc_state); - - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), - key->min_value); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax); - - intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - (y << 16) | x); - - if (INTEL_GEN(dev_priv) < 11) - intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), - (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x); - - if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); - - /* - * The control register self-arms if the plane was previously - * disabled. Try to make the plane enable atomic by writing - * the control register just before the surface register. 
- */ - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); - - if (plane_state->scaler_id >= 0) - skl_program_scaler(plane, crtc_state, plane_state); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -static void -skl_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - int color_plane = 0; - - if (plane_state->planar_linked_plane && !plane_state->planar_slave) - /* Program the UV plane on planar master */ - color_plane = 1; - - skl_program_plane(plane, crtc_state, plane_state, color_plane); -} -static void -skl_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - if (icl_is_hdr_plane(dev_priv, plane_id)) - intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0); - - skl_write_plane_wm(plane, crtc_state); - - intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -static bool -skl_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum intel_display_power_domain power_domain; - enum plane_id plane_id = plane->id; - intel_wakeref_t wakeref; - bool ret; - - power_domain = POWER_DOMAIN_PIPE(plane->pipe); - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return false; - - ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; - - *pipe = plane->pipe; - - intel_display_power_put(dev_priv, power_domain, wakeref); - - return ret; -} - -static void -skl_plane_enable_flip_done(struct intel_plane *plane) -{ - struct drm_i915_private *i915 = to_i915(plane->base.dev); - enum pipe pipe = plane->pipe; - - spin_lock_irq(&i915->irq_lock); - bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); - spin_unlock_irq(&i915->irq_lock); -} - -static void -skl_plane_disable_flip_done(struct intel_plane *plane) -{ - struct drm_i915_private *i915 = to_i915(plane->base.dev); - enum pipe pipe = plane->pipe; - - spin_lock_irq(&i915->irq_lock); - bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); - spin_unlock_irq(&i915->irq_lock); -} - static void i9xx_plane_linear_gamma(u16 gamma[8]) { /* The points are not evenly spaced. 
*/ @@ -2123,19 +1303,18 @@ g4x_plane_get_hw_state(struct intel_plane *plane, return ret; } -static bool intel_fb_scalable(const struct drm_framebuffer *fb) +static bool g4x_fb_scalable(const struct drm_framebuffer *fb) { if (!fb) return false; switch (fb->format->format) { case DRM_FORMAT_C8: - return false; case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: - return INTEL_GEN(to_i915(fb->dev)) >= 11; + return false; default: return true; } @@ -2212,7 +1391,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, int max_scale = DRM_PLANE_HELPER_NO_SCALING; int ret; - if (intel_fb_scalable(plane_state->hw.fb)) { + if (g4x_fb_scalable(plane_state->hw.fb)) { if (INTEL_GEN(dev_priv) < 7) { min_scale = 1; max_scale = 16 << 16; @@ -2301,240 +1480,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, return 0; } -static bool intel_format_is_p01x(u32 format) -{ - switch (format) { - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - return true; - default: - return false; - } -} - -static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int rotation = plane_state->hw.rotation; - struct drm_format_name_buf format_name; - - if (!fb) - return 0; - - if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && - is_ccs_modifier(fb->modifier)) { - drm_dbg_kms(&dev_priv->drm, - "RC support only with 0/180 degree rotation (%x)\n", - rotation); - return -EINVAL; - } - - if (rotation & DRM_MODE_REFLECT_X && - fb->modifier == DRM_FORMAT_MOD_LINEAR) { - drm_dbg_kms(&dev_priv->drm, - "horizontal flip is not supported with linear surface formats\n"); - return -EINVAL; - } - - if (drm_rotation_90_or_270(rotation)) { - if (fb->modifier != I915_FORMAT_MOD_Y_TILED && - fb->modifier != I915_FORMAT_MOD_Yf_TILED) { - drm_dbg_kms(&dev_priv->drm, - "Y/Yf tiling required for 90/270!\n"); - return -EINVAL; - } - - /* - * 90/270 is not allowed with RGB64 16:16:16:16 and - * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards. 
- */ - switch (fb->format->format) { - case DRM_FORMAT_RGB565: - if (INTEL_GEN(dev_priv) >= 11) - break; - fallthrough; - case DRM_FORMAT_C8: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ARGB16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_Y210: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - drm_dbg_kms(&dev_priv->drm, - "Unsupported pixel format %s for 90/270!\n", - drm_get_format_name(fb->format->format, - &format_name)); - return -EINVAL; - default: - break; - } - } - - /* Y-tiling is not supported in IF-ID Interlace mode */ - if (crtc_state->hw.enable && - crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && - (fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED || - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) { - drm_dbg_kms(&dev_priv->drm, - "Y/Yf tiling not supported in IF-ID mode\n"); - return -EINVAL; - } - - /* Wa_1606054188:tgl */ - if (IS_TIGERLAKE(dev_priv) && - plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE && - intel_format_is_p01x(fb->format->format)) { - drm_dbg_kms(&dev_priv->drm, - "Source color keying not supported with P01x formats\n"); - return -EINVAL; - } - - return 0; -} - -static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - int crtc_x = plane_state->uapi.dst.x1; - int crtc_w = drm_rect_width(&plane_state->uapi.dst); - int pipe_src_w = crtc_state->pipe_src_w; - - /* - * Display WA #1175: cnl,glk - * Planes other than the cursor may cause FIFO underflow and display - * corruption if starting less than 4 pixels from the right edge of - * the screen. - * Besides the above WA fix the similar problem, where planes other - * than the cursor ending less than 4 pixels from the left edge of the - * screen may cause FIFO underflow and display corruption. - */ - if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && - (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) { - drm_dbg_kms(&dev_priv->drm, - "requested plane X %s position %d invalid (valid range %d-%d)\n", - crtc_x + crtc_w < 4 ? "end" : "start", - crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x, - 4, pipe_src_w - 4); - return -ERANGE; - } - - return 0; -} - -static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int rotation = plane_state->hw.rotation; - int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - - /* Display WA #1106 */ - if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && - src_w & 3 && - (rotation == DRM_MODE_ROTATE_270 || - rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { - DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); - return -EINVAL; - } - - return 0; -} - -static int skl_plane_max_scale(struct drm_i915_private *dev_priv, - const struct drm_framebuffer *fb) -{ - /* - * We don't yet know the final source width nor - * whether we can use the HQ scaler mode. Assume - * the best case. - * FIXME need to properly check this later. 
- */ - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || - !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) - return 0x30000 - 1; - else - return 0x20000 - 1; -} - -static int skl_plane_check(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - int min_scale = DRM_PLANE_HELPER_NO_SCALING; - int max_scale = DRM_PLANE_HELPER_NO_SCALING; - int ret; - - ret = skl_plane_check_fb(crtc_state, plane_state); - if (ret) - return ret; - - /* use scaler when colorkey is not required */ - if (!plane_state->ckey.flags && intel_fb_scalable(fb)) { - min_scale = 1; - max_scale = skl_plane_max_scale(dev_priv, fb); - } - - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - min_scale, max_scale, true); - if (ret) - return ret; - - ret = skl_check_plane_surface(plane_state); - if (ret) - return ret; - - if (!plane_state->uapi.visible) - return 0; - - ret = skl_plane_check_dst_coordinates(crtc_state, plane_state); - if (ret) - return ret; - - ret = intel_plane_check_src_coordinates(plane_state); - if (ret) - return ret; - - ret = skl_plane_check_nv12_rotation(plane_state); - if (ret) - return ret; - - /* HW only has 8 bits pixel precision, disable plane if invisible */ - if (!(plane_state->hw.alpha >> 8)) - plane_state->uapi.visible = false; - - plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); - - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - plane_state->color_ctl = glk_plane_color_ctl(crtc_state, - plane_state); - - if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && - icl_is_hdr_plane(dev_priv, plane->id)) - /* Enable and use MPEG-2 chroma siting */ - plane_state->cus_ctl = PLANE_CUS_ENABLE | - PLANE_CUS_HPHASE_0 | - PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25; - else - plane_state->cus_ctl = 0; - - return 0; -} - static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv) { return INTEL_GEN(dev_priv) >= 9; @@ -2712,186 +1657,6 @@ static const u32 chv_pipe_b_sprite_formats[] = { DRM_FORMAT_VYUY, }; -static const u32 skl_plane_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_XRGB16161616F, - DRM_FORMAT_XBGR16161616F, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_XYUV8888, -}; - -static const u32 skl_planar_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_XRGB16161616F, - DRM_FORMAT_XBGR16161616F, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_NV12, - DRM_FORMAT_XYUV8888, -}; - -static const u32 glk_planar_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_XRGB16161616F, - DRM_FORMAT_XBGR16161616F, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_NV12, - DRM_FORMAT_XYUV8888, - DRM_FORMAT_P010, - DRM_FORMAT_P012, - DRM_FORMAT_P016, -}; - -static const u32 icl_sdr_y_plane_formats[] = { - 
DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_ARGB2101010, - DRM_FORMAT_ABGR2101010, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_Y210, - DRM_FORMAT_Y212, - DRM_FORMAT_Y216, - DRM_FORMAT_XYUV8888, - DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, -}; - -static const u32 icl_sdr_uv_plane_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_ARGB2101010, - DRM_FORMAT_ABGR2101010, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_NV12, - DRM_FORMAT_P010, - DRM_FORMAT_P012, - DRM_FORMAT_P016, - DRM_FORMAT_Y210, - DRM_FORMAT_Y212, - DRM_FORMAT_Y216, - DRM_FORMAT_XYUV8888, - DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, -}; - -static const u32 icl_hdr_plane_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_ARGB2101010, - DRM_FORMAT_ABGR2101010, - DRM_FORMAT_XRGB16161616F, - DRM_FORMAT_XBGR16161616F, - DRM_FORMAT_ARGB16161616F, - DRM_FORMAT_ABGR16161616F, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_NV12, - DRM_FORMAT_P010, - DRM_FORMAT_P012, - DRM_FORMAT_P016, - DRM_FORMAT_Y210, - DRM_FORMAT_Y212, - DRM_FORMAT_Y216, - DRM_FORMAT_XYUV8888, - DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, -}; - -static const u64 skl_plane_format_modifiers_noccs[] = { - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 skl_plane_format_modifiers_ccs[] = { - I915_FORMAT_MOD_Yf_TILED_CCS, - I915_FORMAT_MOD_Y_TILED_CCS, - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_mc_ccs[] = { - I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_rc_ccs[] = { - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { @@ -2984,150 +1749,6 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, } } -static bool skl_plane_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) -{ - struct intel_plane *plane = to_intel_plane(_plane); - - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - if (!plane->has_ccs) - return false; - break; - default: - return false; - } - - switch (format) { - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case 
DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) - return true; - fallthrough; - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_XYUV8888: - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - case DRM_FORMAT_XVYU2101010: - if (modifier == I915_FORMAT_MOD_Yf_TILED) - return true; - fallthrough; - case DRM_FORMAT_C8: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: - case DRM_FORMAT_Y210: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - if (modifier == DRM_FORMAT_MOD_LINEAR || - modifier == I915_FORMAT_MOD_X_TILED || - modifier == I915_FORMAT_MOD_Y_TILED) - return true; - fallthrough; - default: - return false; - } -} - -static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) - return false; - - return plane_id < PLANE_SPRITE4; -} - -static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) -{ - struct drm_i915_private *dev_priv = to_i915(_plane->dev); - struct intel_plane *plane = to_intel_plane(_plane); - - switch (modifier) { - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) - return false; - fallthrough; - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - break; - default: - return false; - } - - switch (format) { - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) - return true; - fallthrough; - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_XYUV8888: - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) - return true; - fallthrough; - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_C8: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: - case DRM_FORMAT_Y210: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - if (modifier == DRM_FORMAT_MOD_LINEAR || - modifier == I915_FORMAT_MOD_X_TILED || - modifier == I915_FORMAT_MOD_Y_TILED) - return true; - fallthrough; - default: - return false; - } -} - static const struct drm_plane_funcs g4x_sprite_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -3155,257 +1776,6 @@ static const struct drm_plane_funcs vlv_sprite_funcs = { .format_mod_supported = vlv_sprite_format_mod_supported, }; -static const struct drm_plane_funcs skl_plane_funcs = { - .update_plane = 
drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = intel_plane_destroy, - .atomic_duplicate_state = intel_plane_duplicate_state, - .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = skl_plane_format_mod_supported, -}; - -static const struct drm_plane_funcs gen12_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = intel_plane_destroy, - .atomic_duplicate_state = intel_plane_duplicate_state, - .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = gen12_plane_format_mod_supported, -}; - -static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - if (!HAS_FBC(dev_priv)) - return false; - - return pipe == PIPE_A && plane_id == PLANE_PRIMARY; -} - -static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - /* Display WA #0870: skl, bxt */ - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return false; - - if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) - return false; - - if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) - return false; - - return true; -} - -static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id, - int *num_formats) -{ - if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { - *num_formats = ARRAY_SIZE(skl_planar_formats); - return skl_planar_formats; - } else { - *num_formats = ARRAY_SIZE(skl_plane_formats); - return skl_plane_formats; - } -} - -static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id, - int *num_formats) -{ - if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { - *num_formats = ARRAY_SIZE(glk_planar_formats); - return glk_planar_formats; - } else { - *num_formats = ARRAY_SIZE(skl_plane_formats); - return skl_plane_formats; - } -} - -static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id, - int *num_formats) -{ - if (icl_is_hdr_plane(dev_priv, plane_id)) { - *num_formats = ARRAY_SIZE(icl_hdr_plane_formats); - return icl_hdr_plane_formats; - } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) { - *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats); - return icl_sdr_y_plane_formats; - } else { - *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats); - return icl_sdr_uv_plane_formats; - } -} - -static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) - return gen12_plane_format_modifiers_mc_ccs; - else - return gen12_plane_format_modifiers_rc_ccs; -} - -static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - if (plane_id == PLANE_CURSOR) - return false; - - if (INTEL_GEN(dev_priv) >= 10) - return true; - - if (IS_GEMINILAKE(dev_priv)) - return pipe != PIPE_C; - - return pipe != PIPE_C && - (plane_id == PLANE_PRIMARY || - plane_id == PLANE_SPRITE0); -} - -struct intel_plane * -skl_universal_plane_create(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - const struct drm_plane_funcs *plane_funcs; - struct intel_plane *plane; - enum drm_plane_type plane_type; - unsigned int supported_rotations; - unsigned int supported_csc; - const u64 *modifiers; - const u32 *formats; - int num_formats; 
- int ret; - - plane = intel_plane_alloc(); - if (IS_ERR(plane)) - return plane; - - plane->pipe = pipe; - plane->id = plane_id; - plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); - - plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } - - if (INTEL_GEN(dev_priv) >= 11) { - plane->min_width = icl_plane_min_width; - plane->max_width = icl_plane_max_width; - plane->max_height = icl_plane_max_height; - } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - plane->max_width = glk_plane_max_width; - plane->max_height = skl_plane_max_height; - } else { - plane->max_width = skl_plane_max_width; - plane->max_height = skl_plane_max_height; - } - - plane->max_stride = skl_plane_max_stride; - plane->update_plane = skl_update_plane; - plane->disable_plane = skl_disable_plane; - plane->get_hw_state = skl_plane_get_hw_state; - plane->check_plane = skl_plane_check; - plane->min_cdclk = skl_plane_min_cdclk; - - if (plane_id == PLANE_PRIMARY) { - plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10); - plane->async_flip = skl_plane_async_flip; - plane->enable_flip_done = skl_plane_enable_flip_done; - plane->disable_flip_done = skl_plane_disable_flip_done; - } - - if (INTEL_GEN(dev_priv) >= 11) - formats = icl_get_plane_formats(dev_priv, pipe, - plane_id, &num_formats); - else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - formats = glk_get_plane_formats(dev_priv, pipe, - plane_id, &num_formats); - else - formats = skl_get_plane_formats(dev_priv, pipe, - plane_id, &num_formats); - - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); - if (INTEL_GEN(dev_priv) >= 12) { - modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); - plane_funcs = &gen12_plane_funcs; - } else { - if (plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; - plane_funcs = &skl_plane_funcs; - } - - if (plane_id == PLANE_PRIMARY) - plane_type = DRM_PLANE_TYPE_PRIMARY; - else - plane_type = DRM_PLANE_TYPE_OVERLAY; - - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - 0, plane_funcs, - formats, num_formats, modifiers, - plane_type, - "plane %d%c", plane_id + 1, - pipe_name(pipe)); - if (ret) - goto fail; - - supported_rotations = - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | - DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; - - if (INTEL_GEN(dev_priv) >= 10) - supported_rotations |= DRM_MODE_REFLECT_X; - - drm_plane_create_rotation_property(&plane->base, - DRM_MODE_ROTATE_0, - supported_rotations); - - supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709); - - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020); - - drm_plane_create_color_properties(&plane->base, - supported_csc, - BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | - BIT(DRM_COLOR_YCBCR_FULL_RANGE), - DRM_COLOR_YCBCR_BT709, - DRM_COLOR_YCBCR_LIMITED_RANGE); - - drm_plane_create_alpha_property(&plane->base); - drm_plane_create_blend_mode_property(&plane->base, - BIT(DRM_MODE_BLEND_PIXEL_NONE) | - BIT(DRM_MODE_BLEND_PREMULTI) | - BIT(DRM_MODE_BLEND_COVERAGE)); - - drm_plane_create_zpos_immutable_property(&plane->base, plane_id); - - if (INTEL_GEN(dev_priv) >= 12) - drm_plane_enable_fb_damage_clips(&plane->base); - - if (INTEL_GEN(dev_priv) >= 10) - drm_plane_create_scaling_filter_property(&plane->base, - BIT(DRM_SCALING_FILTER_DEFAULT) | - 
BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); - - drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); - - return plane; - -fail: - intel_plane_free(plane); - - return ERR_PTR(ret); -} - struct intel_plane * intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int sprite) @@ -3418,10 +1788,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, int num_formats; int ret, zpos; - if (INTEL_GEN(dev_priv) >= 9) - return skl_universal_plane_create(dev_priv, pipe, - PLANE_SPRITE0 + sprite); - plane = intel_plane_alloc(); if (IS_ERR(plane)) return plane; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 76126dd8d584..f6989da2dc4b 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -38,9 +38,6 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); int intel_plane_check_stride(const struct intel_plane_state *plane_state); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); int chv_plane_check_rotation(const struct intel_plane_state *plane_state); -struct intel_plane * -skl_universal_plane_create(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id); static inline u8 icl_hdr_plane_mask(void) { @@ -48,10 +45,6 @@ static inline u8 icl_hdr_plane_mask(void) BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1); } -bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv, - enum plane_id plane_id); -bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id); - int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 27dc2dad6809..2cefc13535a0 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) return names[mode]; } -static void -tc_port_load_fia_params(struct drm_i915_private *i915, - struct intel_digital_port *dig_port) -{ - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(i915, port); - u32 modular_fia; - - if (INTEL_INFO(i915)->display.has_modular_fia) { - modular_fia = intel_uncore_read(&i915->uncore, - PORT_TX_DFLEXDPSP(FIA1)); - drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff); - modular_fia &= MODULAR_FIA_MASK; - } else { - modular_fia = 0; - } - - /* - * Each Modular FIA instance houses 2 TC ports. In SOC that has more - * than two TC ports, there are multiple instances of Modular FIA. 
- */ - if (modular_fia) { - dig_port->tc_phy_fia = tc_port / 2; - dig_port->tc_phy_fia_idx = tc_port % 2; - } else { - dig_port->tc_phy_fia = FIA1; - dig_port->tc_phy_fia_idx = tc_port; - } -} - static enum intel_display_power_domain tc_cold_get_power_domain(struct intel_digital_port *dig_port) { @@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port) mutex_unlock(&dig_port->tc_lock); } +static bool +tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port) +{ + intel_wakeref_t wakeref; + u32 val; + + if (!INTEL_INFO(i915)->display.has_modular_fia) + return false; + + wakeref = tc_cold_block(dig_port); + val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1)); + tc_cold_unblock(dig_port, wakeref); + + drm_WARN_ON(&i915->drm, val == 0xffffffff); + + return val & MODULAR_FIA_MASK; +} + +static void +tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port) +{ + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(i915, port); + + /* + * Each Modular FIA instance houses 2 TC ports. In SOC that has more + * than two TC ports, there are multiple instances of Modular FIA. + */ + if (tc_has_modular_fia(i915, dig_port)) { + dig_port->tc_phy_fia = tc_port / 2; + dig_port->tc_phy_fia_idx = tc_port % 2; + } else { + dig_port->tc_phy_fia = FIA1; + dig_port->tc_phy_fia_idx = tc_port; + } +} + void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 187ec573de59..dbe24d7e7375 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -327,6 +327,10 @@ enum vbt_gmbus_ddi { ICL_DDC_BUS_PORT_4, TGL_DDC_BUS_PORT_5, TGL_DDC_BUS_PORT_6, + ADLS_DDC_BUS_PORT_TC1 = 0x2, + ADLS_DDC_BUS_PORT_TC2, + ADLS_DDC_BUS_PORT_TC3, + ADLS_DDC_BUS_PORT_TC4 }; #define DP_AUX_A 0x40 @@ -339,10 +343,21 @@ enum vbt_gmbus_ddi { #define DP_AUX_H 0x80 #define DP_AUX_I 0x90 -#define VBT_DP_MAX_LINK_RATE_HBR3 0 -#define VBT_DP_MAX_LINK_RATE_HBR2 1 -#define VBT_DP_MAX_LINK_RATE_HBR 2 -#define VBT_DP_MAX_LINK_RATE_LBR 3 +/* DP max link rate 216+ */ +#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR3 0 +#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR2 1 +#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR 2 +#define BDB_216_VBT_DP_MAX_LINK_RATE_LBR 3 + +/* DP max link rate 230+ */ +#define BDB_230_VBT_DP_MAX_LINK_RATE_DEF 0 +#define BDB_230_VBT_DP_MAX_LINK_RATE_LBR 1 +#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR 2 +#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR2 3 +#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR3 4 +#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10 5 +#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6 +#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7 /* * The child device config, aka the display device data structure, provides a @@ -441,8 +456,8 @@ struct child_device_config { u16 dp_gpio_pin_num; /* 195 */ u8 dp_iboost_level:4; /* 196 */ u8 hdmi_iboost_level:4; /* 196 */ - u8 dp_max_link_rate:2; /* 216 CNL+ */ - u8 dp_max_link_rate_reserved:6; /* 216 */ + u8 dp_max_link_rate:3; /* 216/230 CNL+ */ + u8 dp_max_link_rate_reserved:5; /* 216/230 */ } __packed; struct bdb_general_definitions { diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index be333699c515..5f8e4f53649d 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ 
b/drivers/gpu/drm/i915/display/intel_vga.c @@ -25,7 +25,7 @@ static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915) /* Disable the VGA plane that we never use */ void intel_vga_disable(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); u8 sr1; @@ -76,7 +76,7 @@ void intel_vga_redisable(struct drm_i915_private *i915) void intel_vga_reset_io_mem(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -136,7 +136,7 @@ intel_vga_set_decode(void *cookie, bool enable_decode) int intel_vga_register(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int ret; /* @@ -156,7 +156,7 @@ int intel_vga_register(struct drm_i915_private *i915) void intel_vga_unregister(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); vga_client_register(pdev, NULL, NULL, NULL); } diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c new file mode 100644 index 000000000000..b37a87bb190f --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -0,0 +1,556 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ +#include "intel_display_types.h" +#include "skl_scaler.h" +#include "skl_universal_plane.h" + +/* + * The hardware phase 0.0 refers to the center of the pixel. + * We want to start from the top/left edge which is phase + * -0.5. That matches how the hardware calculates the scaling + * factors (from top-left of the first pixel to bottom-right + * of the last pixel, as opposed to the pixel centers). + * + * For 4:2:0 subsampled chroma planes we obviously have to + * adjust that so that the chroma sample position lands in + * the right spot. + * + * Note that for packed YCbCr 4:2:2 formats there is no way to + * control chroma siting. The hardware simply replicates the + * chroma samples for both of the luma samples, and thus we don't + * actually get the expected MPEG2 chroma siting convention :( + * The same behaviour is observed on pre-SKL platforms as well. + * + * Theory behind the formula (note that we ignore sub-pixel + * source coordinates): + * s = source sample position + * d = destination sample position + * + * Downscaling 4:1: + * -0.5 + * | 0.0 + * | | 1.5 (initial phase) + * | | | + * v v v + * | s | s | s | s | + * | d | + * + * Upscaling 1:4: + * -0.5 + * | -0.375 (initial phase) + * | | 0.0 + * | | | + * v v v + * | s | + * | d | d | d | d | + */ +static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) +{ + int phase = -0x8000; + u16 trip = 0; + + if (chroma_cosited) + phase += (sub - 1) * 0x8000 / sub; + + phase += scale / (2 * sub); + + /* + * Hardware initial phase limited to [-0.5:1.5]. + * Since the max hardware scale factor is 3.0, we + * should never actually exceed 1.0 here.
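+ * For example, at the maximum 3.0 hardware scale factor (sub = 1, + * scale = 0x30000 in 16.16 fixed point) the formula above evaluates to + * -0x8000 + 0x30000 / 2 = 0x10000, i.e. exactly +1.0, while 1:4 upscaling + * (scale = 0x4000) gives -0x8000 + 0x2000 = -0x6000, matching the -0.375 + * initial phase shown in the diagram above.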
+ */ + WARN_ON(phase < -0x8000 || phase > 0x18000); + + if (phase < 0) + phase = 0x10000 + phase; + else + trip = PS_PHASE_TRIP; + + return ((phase >> 2) & PS_PHASE_MASK) | trip; +} + +#define SKL_MIN_SRC_W 8 +#define SKL_MAX_SRC_W 4096 +#define SKL_MIN_SRC_H 8 +#define SKL_MAX_SRC_H 4096 +#define SKL_MIN_DST_W 8 +#define SKL_MAX_DST_W 4096 +#define SKL_MIN_DST_H 8 +#define SKL_MAX_DST_H 4096 +#define ICL_MAX_SRC_W 5120 +#define ICL_MAX_SRC_H 4096 +#define ICL_MAX_DST_W 5120 +#define ICL_MAX_DST_H 4096 +#define SKL_MIN_YUV_420_SRC_W 16 +#define SKL_MIN_YUV_420_SRC_H 16 + +static int +skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, + unsigned int scaler_user, int *scaler_id, + int src_w, int src_h, int dst_w, int dst_h, + const struct drm_format_info *format, + u64 modifier, bool need_scaler) +{ + struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + struct intel_crtc *intel_crtc = + to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + */ + if (src_w != dst_w || src_h != dst_h) + need_scaler = true; + + /* + * Scaling/fitting not supported in IF-ID mode in GEN9+ + * TODO: Interlace fetch mode doesn't support YUV420 planar formats. + * Once NV12 is enabled, handle it here while allocating scaler + * for NV12. + */ + if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable && + need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + drm_dbg_kms(&dev_priv->drm, + "Pipe/Plane scaling not supported with IF-ID mode\n"); + return -EINVAL; + } + + /* + * if the plane is being disabled or the scaler is no longer required or force detach + * - free the scaler bound to this plane/crtc + * - in order to do this, update scaler_state->scaler_users + * + * Here scaler state in crtc_state is set free so that + * scaler can be assigned to another user. Actual register + * update to free the scaler is done in plane/panel-fit programming. + * For this purpose crtc/plane_state->scaler_id isn't reset here.
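+ * The in_use flag cleared here is what skl_detach_scalers() checks later + * when it writes the SKL_PS_CTRL/WIN_POS/WIN_SZ registers of any unused + * scaler back to zero.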
+ */ + if (force_detach || !need_scaler) { + if (*scaler_id >= 0) { + scaler_state->scaler_users &= ~(1 << scaler_user); + scaler_state->scalers[*scaler_id].in_use = 0; + + drm_dbg_kms(&dev_priv->drm, + "scaler_user index %u.%u: " + "Staged freeing scaler id %d scaler_users = 0x%x\n", + intel_crtc->pipe, scaler_user, *scaler_id, + scaler_state->scaler_users); + *scaler_id = -1; + } + return 0; + } + + if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && + (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { + drm_dbg_kms(&dev_priv->drm, + "Planar YUV: src dimensions not met\n"); + return -EINVAL; + } + + /* range checks */ + if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || + dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || + (INTEL_GEN(dev_priv) >= 11 && + (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || + dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || + (INTEL_GEN(dev_priv) < 11 && + (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || + dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { + drm_dbg_kms(&dev_priv->drm, + "scaler_user index %u.%u: src %ux%u dst %ux%u " + "size is out of scaler range\n", + intel_crtc->pipe, scaler_user, src_w, src_h, + dst_w, dst_h); + return -EINVAL; + } + + /* mark this plane as a scaler user in crtc_state */ + scaler_state->scaler_users |= (1 << scaler_user); + drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " + "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", + intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, + scaler_state->scaler_users); + + return 0; +} + +int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; + int width, height; + + if (crtc_state->pch_pfit.enabled) { + width = drm_rect_width(&crtc_state->pch_pfit.dst); + height = drm_rect_height(&crtc_state->pch_pfit.dst); + } else { + width = pipe_mode->crtc_hdisplay; + height = pipe_mode->crtc_vdisplay; + } + return skl_update_scaler(crtc_state, !crtc_state->hw.active, + SKL_CRTC_INDEX, + &crtc_state->scaler_state.scaler_id, + crtc_state->pipe_src_w, crtc_state->pipe_src_h, + width, height, NULL, 0, + crtc_state->pch_pfit.enabled); +} + +/** + * skl_update_scaler_plane - Stages update to scaler state for a given plane. + * @crtc_state: crtc's scaler state + * @plane_state: atomic plane state to update + * + * Return + * 0 - scaler_usage updated successfully + * error - requested scaling cannot be supported or other error condition + */ +int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) +{ + struct intel_plane *intel_plane = + to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); + struct drm_framebuffer *fb = plane_state->hw.fb; + int ret; + bool force_detach = !fb || !plane_state->uapi.visible; + bool need_scaler = false; + + /* Pre-gen11 and SDR planes always need a scaler for planar formats. */ + if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && + fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + need_scaler = true; + + ret = skl_update_scaler(crtc_state, force_detach, + drm_plane_index(&intel_plane->base), + &plane_state->scaler_id, + drm_rect_width(&plane_state->uapi.src) >> 16, + drm_rect_height(&plane_state->uapi.src) >> 16, + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst), + fb ? fb->format : NULL, + fb ? 
fb->modifier : 0, + need_scaler); + + if (ret || plane_state->scaler_id < 0) + return ret; + + /* check colorkey */ + if (plane_state->ckey.flags) { + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] scaling with color key not allowed", + intel_plane->base.base.id, + intel_plane->base.name); + return -EINVAL; + } + + /* Check src format */ + switch (fb->format->format) { + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + break; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + if (INTEL_GEN(dev_priv) >= 11) + break; + fallthrough; + default: + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", + intel_plane->base.base.id, intel_plane->base.name, + fb->base.id, fb->format->format); + return -EINVAL; + } + + return 0; +} + +static int cnl_coef_tap(int i) +{ + return i % 7; +} + +static u16 cnl_nearest_filter_coef(int t) +{ + return t == 3 ? 0x0800 : 0x3000; +} + +/* + * Theory behind setting nearest-neighbor integer scaling: + * + * 17 phases of 7 taps require 119 coefficients in 60 dwords per set. + * The letter represents the filter tap (D is the center tap) and the number + * represents the coefficient set for a phase (0-16). + * + * +------------+------------------------+------------------------+ + * |Index value | Data value coefficient 1 | Data value coefficient 2 | + * +------------+------------------------+------------------------+ + * | 00h | B0 | A0 | + * +------------+------------------------+------------------------+ + * | 01h | D0 | C0 | + * +------------+------------------------+------------------------+ + * | 02h | F0 | E0 | + * +------------+------------------------+------------------------+ + * | 03h | A1 | G0 | + * +------------+------------------------+------------------------+ + * | 04h | C1 | B1 | + * +------------+------------------------+------------------------+ + * | ... | ... | ...
| + * +------------+------------------------+------------------------+ + * | 38h | B16 | A16 | + * +------------+------------------------+------------------------+ + * | 39h | D16 | C16 | + * +------------+------------------------+------------------------+ + * | 3Ah | F16 | E16 | + * +------------+------------------------+------------------------+ + * | 3Bh | Reserved | G16 | + * +------------+------------------------+------------------------+ + * + * To enable nearest-neighbor scaling: program scaler coefficients with + * the center tap (Dxx) values set to 1 and all other values set to 0 as per + * SCALER_COEFFICIENT_FORMAT + * + */ + +static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv, + enum pipe pipe, int id, int set) +{ + int i; + + intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), + PS_COEE_INDEX_AUTO_INC); + + for (i = 0; i < 17 * 7; i += 2) { + u32 tmp; + int t; + + t = cnl_coef_tap(i); + tmp = cnl_nearest_filter_coef(t); + + t = cnl_coef_tap(i + 1); + tmp |= cnl_nearest_filter_coef(t) << 16; + + intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set), + tmp); + } + + intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0); +} + +static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set) +{ + if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) { + return (PS_FILTER_PROGRAMMED | + PS_Y_VERT_FILTER_SELECT(set) | + PS_Y_HORZ_FILTER_SELECT(set) | + PS_UV_VERT_FILTER_SELECT(set) | + PS_UV_HORZ_FILTER_SELECT(set)); + } + + return PS_FILTER_MEDIUM; +} + +static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe, + int id, int set, enum drm_scaling_filter filter) +{ + switch (filter) { + case DRM_SCALING_FILTER_DEFAULT: + break; + case DRM_SCALING_FILTER_NEAREST_NEIGHBOR: + cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set); + break; + default: + MISSING_CASE(filter); + } +} + +void skl_pfit_enable(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + struct drm_rect src = { + .x2 = crtc_state->pipe_src_w << 16, + .y2 = crtc_state->pipe_src_h << 16, + }; + const struct drm_rect *dst = &crtc_state->pch_pfit.dst; + u16 uv_rgb_hphase, uv_rgb_vphase; + enum pipe pipe = crtc->pipe; + int width = drm_rect_width(dst); + int height = drm_rect_height(dst); + int x = dst->x1; + int y = dst->y1; + int hscale, vscale; + unsigned long irqflags; + int id; + u32 ps_ctrl; + + if (!crtc_state->pch_pfit.enabled) + return; + + if (drm_WARN_ON(&dev_priv->drm, + crtc_state->scaler_state.scaler_id < 0)) + return; + + hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX); + vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX); + + uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); + uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); + + id = scaler_state->scaler_id; + + ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0); + ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + skl_scaler_setup_filter(dev_priv, pipe, id, 0, + crtc_state->hw.scaling_filter); + + intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl); + + intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), + PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), + PS_Y_PHASE(0)
| PS_UV_RGB_PHASE(uv_rgb_hphase)); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), + x << 16 | y); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), + width << 16 | height); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +void +skl_program_plane_scaler(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + enum pipe pipe = plane->pipe; + int scaler_id = plane_state->scaler_id; + const struct intel_scaler *scaler = + &crtc_state->scaler_state.scalers[scaler_id]; + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); + u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); + u16 y_hphase, uv_rgb_hphase; + u16 y_vphase, uv_rgb_vphase; + int hscale, vscale; + u32 ps_ctrl; + + hscale = drm_rect_calc_hscale(&plane_state->uapi.src, + &plane_state->uapi.dst, + 0, INT_MAX); + vscale = drm_rect_calc_vscale(&plane_state->uapi.src, + &plane_state->uapi.dst, + 0, INT_MAX); + + /* TODO: handle sub-pixel coordinates */ + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && + !icl_is_hdr_plane(dev_priv, plane->id)) { + y_hphase = skl_scaler_calc_phase(1, hscale, false); + y_vphase = skl_scaler_calc_phase(1, vscale, false); + + /* MPEG2 chroma siting convention */ + uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true); + uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false); + } else { + /* not used */ + y_hphase = 0; + y_vphase = 0; + + uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); + uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); + } + + ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0); + ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode; + + skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0, + plane_state->hw.scaling_filter); + + intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); + intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id), + PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id), + PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id), + (crtc_x << 16) | crtc_y); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id), + (crtc_w << 16) | crtc_h); +} + +static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +/* + * This function detaches (aka. 
unbinds) unused scalers in hardware + */ +void skl_detach_scalers(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + int i; + + /* loop through and disable scalers that aren't in use */ + for (i = 0; i < intel_crtc->num_scalers; i++) { + if (!scaler_state->scalers[i].in_use) + skl_detach_scaler(intel_crtc, i); + } +} + +void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); + int i; + + for (i = 0; i < crtc->num_scalers; i++) + skl_detach_scaler(crtc, i); +} diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h new file mode 100644 index 000000000000..0097d5d08e10 --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_scaler.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ +#ifndef INTEL_SCALER_H +#define INTEL_SCALER_H + +#include <linux/types.h> + +enum drm_scaling_filter; +struct drm_i915_private; +struct intel_crtc_state; +struct intel_plane_state; +struct intel_plane; +enum pipe; + +int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); + +int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); + +void skl_pfit_enable(const struct intel_crtc_state *crtc_state); + +void skl_program_plane_scaler(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void skl_detach_scalers(const struct intel_crtc_state *crtc_state); +void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); +#endif diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c new file mode 100644 index 000000000000..1f335cb09149 --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -0,0 +1,2266 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_plane_helper.h> + +#include "i915_drv.h" +#include "intel_atomic_plane.h" +#include "intel_display_types.h" +#include "intel_pm.h" +#include "intel_psr.h" +#include "intel_sprite.h" +#include "skl_scaler.h" +#include "skl_universal_plane.h" + +static const u32 skl_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_XYUV8888, +}; + +static const u32 skl_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_XYUV8888, +}; + +static const u32 glk_planar_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + 
DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_XYUV8888, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, +}; + +static const u32 icl_sdr_y_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XYUV8888, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const u32 icl_sdr_uv_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XYUV8888, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const u32 icl_hdr_plane_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, + DRM_FORMAT_ARGB16161616F, + DRM_FORMAT_ABGR16161616F, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XYUV8888, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, +}; + +static const u64 skl_plane_format_modifiers_noccs[] = { + I915_FORMAT_MOD_Yf_TILED, + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const u64 skl_plane_format_modifiers_ccs[] = { + I915_FORMAT_MOD_Yf_TILED_CCS, + I915_FORMAT_MOD_Y_TILED_CCS, + I915_FORMAT_MOD_Yf_TILED, + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const u64 gen12_plane_format_modifiers_mc_ccs[] = { + I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const u64 gen12_plane_format_modifiers_rc_ccs[] = { + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) +{ + switch (format) { + case PLANE_CTL_FORMAT_RGB_565: + return DRM_FORMAT_RGB565; + case PLANE_CTL_FORMAT_NV12: + return DRM_FORMAT_NV12; + case PLANE_CTL_FORMAT_XYUV: + return DRM_FORMAT_XYUV8888; + case PLANE_CTL_FORMAT_P010: + return DRM_FORMAT_P010; + case PLANE_CTL_FORMAT_P012: + return DRM_FORMAT_P012; + case PLANE_CTL_FORMAT_P016: + return DRM_FORMAT_P016; + case PLANE_CTL_FORMAT_Y210: + return DRM_FORMAT_Y210; + case 
PLANE_CTL_FORMAT_Y212: + return DRM_FORMAT_Y212; + case PLANE_CTL_FORMAT_Y216: + return DRM_FORMAT_Y216; + case PLANE_CTL_FORMAT_Y410: + return DRM_FORMAT_XVYU2101010; + case PLANE_CTL_FORMAT_Y412: + return DRM_FORMAT_XVYU12_16161616; + case PLANE_CTL_FORMAT_Y416: + return DRM_FORMAT_XVYU16161616; + default: + case PLANE_CTL_FORMAT_XRGB_8888: + if (rgb_order) { + if (alpha) + return DRM_FORMAT_ABGR8888; + else + return DRM_FORMAT_XBGR8888; + } else { + if (alpha) + return DRM_FORMAT_ARGB8888; + else + return DRM_FORMAT_XRGB8888; + } + case PLANE_CTL_FORMAT_XRGB_2101010: + if (rgb_order) { + if (alpha) + return DRM_FORMAT_ABGR2101010; + else + return DRM_FORMAT_XBGR2101010; + } else { + if (alpha) + return DRM_FORMAT_ARGB2101010; + else + return DRM_FORMAT_XRGB2101010; + } + case PLANE_CTL_FORMAT_XRGB_16161616F: + if (rgb_order) { + if (alpha) + return DRM_FORMAT_ABGR16161616F; + else + return DRM_FORMAT_XBGR16161616F; + } else { + if (alpha) + return DRM_FORMAT_ARGB16161616F; + else + return DRM_FORMAT_XRGB16161616F; + } + } +} + +static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915) +{ + if (HAS_D12_PLANE_MINIMIZATION(i915)) + return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3); + else + return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5); +} + +bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv, + enum plane_id plane_id) +{ + return INTEL_GEN(dev_priv) >= 11 && + icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id); +} + +bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) +{ + return INTEL_GEN(dev_priv) >= 11 && + icl_hdr_plane_mask() & BIT(plane_id); +} + +static void +skl_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + if (fb->format->cpp[0] == 8) { + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + *num = 10; + *den = 8; + } else { + *num = 9; + *den = 8; + } + } else { + *num = 1; + *den = 1; + } +} + +static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); + unsigned int num, den; + unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); + + skl_plane_ratio(crtc_state, plane_state, &num, &den); + + /* two pixels per clock on glk+ */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + den *= 2; + + return DIV_ROUND_UP(pixel_rate * num, den); +} + +static int skl_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + int cpp = fb->format->cpp[color_plane]; + + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + /* + * Validated limit is 4k, but has 5k should + * work apart from the following features: + * - Ytile (already limited to 4k) + * - FP16 (already limited to 4k) + * - render compression (already limited to 4k) + * - KVMR sprite and cursor (don't care) + * - horizontal panning (TODO verify this) + * - pipe and plane scaling (TODO verify this) + */ + if (cpp == 8) + return 4096; + else + return 5120; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + /* FIXME AUX plane? 
*/ + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + if (cpp == 8) + return 2048; + else + return 4096; + default: + MISSING_CASE(fb->modifier); + return 2048; + } +} + +static int glk_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + int cpp = fb->format->cpp[color_plane]; + + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + if (cpp == 8) + return 4096; + else + return 5120; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + /* FIXME AUX plane? */ + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + if (cpp == 8) + return 2048; + else + return 5120; + default: + MISSING_CASE(fb->modifier); + return 2048; + } +} + +static int icl_plane_min_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + /* Wa_14011264657, Wa_14011050563: gen11+ */ + switch (fb->format->format) { + case DRM_FORMAT_C8: + return 18; + case DRM_FORMAT_RGB565: + return 10; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + return 6; + case DRM_FORMAT_NV12: + return 20; + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return 12; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + return 4; + default: + return 1; + } +} + +static int icl_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 5120; +} + +static int skl_plane_max_height(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 4096; +} + +static int icl_plane_max_height(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 4320; +} + +static unsigned int +skl_plane_max_stride(struct intel_plane *plane, + u32 pixel_format, u64 modifier, + unsigned int rotation) +{ + const struct drm_format_info *info = drm_format_info(pixel_format); + int cpp = info->cpp[0]; + + /* + * "The stride in bytes must not exceed the + * of the size of 8K pixels and 32K bytes." + */ + if (drm_rotation_90_or_270(rotation)) + return min(8192, 32768 / cpp); + else + return min(8192 * cpp, 32768); +} + + +/* Preoffset values for YUV to RGB Conversion */ +#define PREOFF_YUV_TO_RGB_HI 0x1800 +#define PREOFF_YUV_TO_RGB_ME 0x0000 +#define PREOFF_YUV_TO_RGB_LO 0x1800 + +#define ROFF(x) (((x) & 0xffff) << 16) +#define GOFF(x) (((x) & 0xffff) << 0) +#define BOFF(x) (((x) & 0xffff) << 16) + +/* + * Programs the input color space conversion stage for ICL HDR planes. + * Note that it is assumed that this stage always happens after YUV + * range correction. Thus, the input to this stage is assumed to be + * in full-range YCbCr. 
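The matrices in the function below are plain full-range YCbCr-to-RGB conversions, with the chroma channels first re-centered around zero, which is the usual role of the pre-offset stage. As a minimal floating-point sketch of the BT.709 case (the helper name is made up, and the hardware's own fixed-point coefficient encoding is ignored):

        /* Illustrative only: the arithmetic behind the BT.709 entry below. */
        static void bt709_full_ycbcr_to_rgb(double y, double cb, double cr,
                                            double *r, double *g, double *b)
        {
                cb -= 128.0;            /* re-center 8-bit full-range chroma */
                cr -= 128.0;

                *r = y + 1.574 * cr;
                *g = y - 0.187 * cb - 0.468 * cr;
                *b = y + 1.855 * cb;
        }

The 0x7xxx register values encode these same coefficients in the plane CSC's fixed-point format; the sketch shows the arithmetic only, not that encoding.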
+ */ +static void +icl_program_input_csc(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + + static const u16 input_csc_matrix[][9] = { + /* + * BT.601 full range YCbCr -> full range RGB + * The matrix required is : + * [1.000, 0.000, 1.371, + * 1.000, -0.336, -0.698, + * 1.000, 1.732, 0.0000] + */ + [DRM_COLOR_YCBCR_BT601] = { + 0x7AF8, 0x7800, 0x0, + 0x8B28, 0x7800, 0x9AC0, + 0x0, 0x7800, 0x7DD8, + }, + /* + * BT.709 full range YCbCr -> full range RGB + * The matrix required is : + * [1.000, 0.000, 1.574, + * 1.000, -0.187, -0.468, + * 1.000, 1.855, 0.0000] + */ + [DRM_COLOR_YCBCR_BT709] = { + 0x7C98, 0x7800, 0x0, + 0x9EF8, 0x7800, 0xAC00, + 0x0, 0x7800, 0x7ED8, + }, + /* + * BT.2020 full range YCbCr -> full range RGB + * The matrix required is : + * [1.000, 0.000, 1.474, + * 1.000, -0.1645, -0.5713, + * 1.000, 1.8814, 0.0000] + */ + [DRM_COLOR_YCBCR_BT2020] = { + 0x7BC8, 0x7800, 0x0, + 0x8928, 0x7800, 0xAA88, + 0x0, 0x7800, 0x7F10, + }, + }; + const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; + + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), + ROFF(csc[0]) | GOFF(csc[1])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), + BOFF(csc[2])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), + ROFF(csc[3]) | GOFF(csc[4])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), + BOFF(csc[5])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), + ROFF(csc[6]) | GOFF(csc[7])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), + BOFF(csc[8])); + + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), + PREOFF_YUV_TO_RGB_HI); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), + PREOFF_YUV_TO_RGB_LO); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); +} + +static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) +{ + return fb->modifier == DRM_FORMAT_MOD_LINEAR || + is_gen12_ccs_plane(fb, color_plane); +} + +static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, + int color_plane, unsigned int rotation) +{ + /* + * The stride is either expressed as a multiple of 64 bytes chunks for + * linear buffers or in number of tiles for tiled buffers. 
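A rough worked example of that unit convention (the helper name is invented, and the classic 512-byte-wide X tile and 128-byte-wide Y tile are assumed; Yf tile widths depend on the pixel size and are left out):

        /* Illustrative only: byte stride -> PLANE_STRIDE units. */
        static unsigned int stride_to_reg_units(unsigned int stride_bytes,
                                                bool linear, bool x_tiled)
        {
                if (linear)
                        return stride_bytes / 64;       /* 64-byte chunks */
                if (x_tiled)
                        return stride_bytes / 512;      /* 512-byte-wide X tiles */
                return stride_bytes / 128;              /* 128-byte-wide Y tiles */
        }

So a linear XRGB8888 framebuffer 4096 pixels wide (16384-byte stride) would program 256, while the same stride on a Y-tiled surface would program 128; for 90/270 rotation the divisor becomes the tile height instead, as the helper below shows.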
+ */ + if (is_surface_linear(fb, color_plane)) + return 64; + else if (drm_rotation_90_or_270(rotation)) + return intel_tile_height(fb, color_plane); + else + return intel_tile_width_bytes(fb, color_plane); +} + +static u32 skl_plane_stride(const struct intel_plane_state *plane_state, + int color_plane) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + u32 stride = plane_state->color_plane[color_plane].stride; + + if (color_plane >= fb->format->num_planes) + return 0; + + return stride / skl_plane_stride_mult(fb, color_plane, rotation); +} + +static void +skl_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + if (icl_is_hdr_plane(dev_priv, plane_id)) + intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0); + + skl_write_plane_wm(plane, crtc_state); + + intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static bool +skl_plane_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum plane_id plane_id = plane->id; + intel_wakeref_t wakeref; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(plane->pipe); + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) + return false; + + ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; + + *pipe = plane->pipe; + + intel_display_power_put(dev_priv, power_domain, wakeref); + + return ret; +} + +static u32 skl_plane_ctl_format(u32 pixel_format) +{ + switch (pixel_format) { + case DRM_FORMAT_C8: + return PLANE_CTL_FORMAT_INDEXED; + case DRM_FORMAT_RGB565: + return PLANE_CTL_FORMAT_RGB_565; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return PLANE_CTL_FORMAT_XRGB_8888; + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + return PLANE_CTL_FORMAT_XRGB_2101010; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + return PLANE_CTL_FORMAT_XRGB_16161616F; + case DRM_FORMAT_XYUV8888: + return PLANE_CTL_FORMAT_XYUV; + case DRM_FORMAT_YUYV: + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; + case DRM_FORMAT_YVYU: + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; + case DRM_FORMAT_UYVY: + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; + case DRM_FORMAT_VYUY: + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; + case DRM_FORMAT_NV12: + return PLANE_CTL_FORMAT_NV12; + case DRM_FORMAT_P010: + return PLANE_CTL_FORMAT_P010; + case DRM_FORMAT_P012: + return PLANE_CTL_FORMAT_P012; + case DRM_FORMAT_P016: + return PLANE_CTL_FORMAT_P016; + case DRM_FORMAT_Y210: + return PLANE_CTL_FORMAT_Y210; + case DRM_FORMAT_Y212: + return PLANE_CTL_FORMAT_Y212; + case DRM_FORMAT_Y216: + return 
PLANE_CTL_FORMAT_Y216; + case DRM_FORMAT_XVYU2101010: + return PLANE_CTL_FORMAT_Y410; + case DRM_FORMAT_XVYU12_16161616: + return PLANE_CTL_FORMAT_Y412; + case DRM_FORMAT_XVYU16161616: + return PLANE_CTL_FORMAT_Y416; + default: + MISSING_CASE(pixel_format); + } + + return 0; +} + +static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) +{ + if (!plane_state->hw.fb->format->has_alpha) + return PLANE_CTL_ALPHA_DISABLE; + + switch (plane_state->hw.pixel_blend_mode) { + case DRM_MODE_BLEND_PIXEL_NONE: + return PLANE_CTL_ALPHA_DISABLE; + case DRM_MODE_BLEND_PREMULTI: + return PLANE_CTL_ALPHA_SW_PREMULTIPLY; + case DRM_MODE_BLEND_COVERAGE: + return PLANE_CTL_ALPHA_HW_PREMULTIPLY; + default: + MISSING_CASE(plane_state->hw.pixel_blend_mode); + return PLANE_CTL_ALPHA_DISABLE; + } +} + +static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) +{ + if (!plane_state->hw.fb->format->has_alpha) + return PLANE_COLOR_ALPHA_DISABLE; + + switch (plane_state->hw.pixel_blend_mode) { + case DRM_MODE_BLEND_PIXEL_NONE: + return PLANE_COLOR_ALPHA_DISABLE; + case DRM_MODE_BLEND_PREMULTI: + return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; + case DRM_MODE_BLEND_COVERAGE: + return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; + default: + MISSING_CASE(plane_state->hw.pixel_blend_mode); + return PLANE_COLOR_ALPHA_DISABLE; + } +} + +static u32 skl_plane_ctl_tiling(u64 fb_modifier) +{ + switch (fb_modifier) { + case DRM_FORMAT_MOD_LINEAR: + break; + case I915_FORMAT_MOD_X_TILED: + return PLANE_CTL_TILED_X; + case I915_FORMAT_MOD_Y_TILED: + return PLANE_CTL_TILED_Y; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: + return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + return PLANE_CTL_TILED_Y | + PLANE_CTL_RENDER_DECOMPRESSION_ENABLE | + PLANE_CTL_CLEAR_COLOR_DISABLE; + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE; + case I915_FORMAT_MOD_Yf_TILED: + return PLANE_CTL_TILED_YF; + case I915_FORMAT_MOD_Yf_TILED_CCS: + return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; + default: + MISSING_CASE(fb_modifier); + } + + return 0; +} + +static u32 skl_plane_ctl_rotate(unsigned int rotate) +{ + switch (rotate) { + case DRM_MODE_ROTATE_0: + break; + /* + * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr + * while i915 HW rotation is clockwise, thats why this swapping. 
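Put differently, a counter-clockwise DRM angle has to be programmed as its clockwise complement, so only 90 and 270 trade places. A one-liner (hypothetical helper, degrees only) capturing the swap performed by the cases below:

        /* DRM rotation is counter-clockwise, the hardware rotates clockwise. */
        static unsigned int ccw_to_cw_degrees(unsigned int ccw)
        {
                return (360 - ccw) % 360;   /* 90 -> 270, 270 -> 90, 0/180 unchanged */
        }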
+ */ + case DRM_MODE_ROTATE_90: + return PLANE_CTL_ROTATE_270; + case DRM_MODE_ROTATE_180: + return PLANE_CTL_ROTATE_180; + case DRM_MODE_ROTATE_270: + return PLANE_CTL_ROTATE_90; + default: + MISSING_CASE(rotate); + } + + return 0; +} + +static u32 cnl_plane_ctl_flip(unsigned int reflect) +{ + switch (reflect) { + case 0: + break; + case DRM_MODE_REFLECT_X: + return PLANE_CTL_FLIP_HORIZONTAL; + case DRM_MODE_REFLECT_Y: + default: + MISSING_CASE(reflect); + } + + return 0; +} + +static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + u32 plane_ctl = 0; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + return plane_ctl; + + if (crtc_state->gamma_enable) + plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; + + return plane_ctl; +} + +static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 plane_ctl; + + plane_ctl = PLANE_CTL_ENABLE; + + if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { + plane_ctl |= skl_plane_ctl_alpha(plane_state); + plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; + + if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) + plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709; + + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE; + } + + plane_ctl |= skl_plane_ctl_format(fb->format->format); + plane_ctl |= skl_plane_ctl_tiling(fb->modifier); + plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK); + + if (INTEL_GEN(dev_priv) >= 10) + plane_ctl |= cnl_plane_ctl_flip(rotation & + DRM_MODE_REFLECT_MASK); + + if (key->flags & I915_SET_COLORKEY_DESTINATION) + plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; + + return plane_ctl; +} + +static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + u32 plane_color_ctl = 0; + + if (INTEL_GEN(dev_priv) >= 11) + return plane_color_ctl; + + if (crtc_state->gamma_enable) + plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; + + return plane_color_ctl; +} + +static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + u32 plane_color_ctl = 0; + + plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; + plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); + + if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { + switch (plane_state->hw.color_encoding) { + case DRM_COLOR_YCBCR_BT709: + plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; + break; + case DRM_COLOR_YCBCR_BT2020: + plane_color_ctl |= + PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020; + break; + default: + plane_color_ctl |= + 
PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601; + } + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; + } else if (fb->format->is_yuv) { + plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; + } + + return plane_color_ctl; +} + +static int +main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) +{ + drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + (main_plane && main_plane >= fb->format->num_planes / 2)); + + return fb->format->num_planes / 2 + main_plane; +} + +int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) +{ + drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + ccs_plane < fb->format->num_planes / 2); + + if (is_gen12_ccs_cc_plane(fb, ccs_plane)) + return 0; + + return ccs_plane - fb->format->num_planes / 2; +} + +static int +skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) +{ + struct drm_i915_private *i915 = to_i915(fb->dev); + + if (is_ccs_modifier(fb->modifier)) + return main_to_ccs_plane(fb, main_plane); + else if (INTEL_GEN(i915) < 11 && + intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 1; + else + return 0; +} + +static void +skl_program_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 surf_addr = plane_state->color_plane[color_plane].offset; + u32 stride = skl_plane_stride(plane_state, color_plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = skl_main_to_aux_plane(fb, color_plane); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + u32 x = plane_state->color_plane[color_plane].x; + u32 y = plane_state->color_plane[color_plane].y; + u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + u8 alpha = plane_state->hw.alpha >> 8; + u32 plane_color_ctl = 0, aux_dist = 0; + unsigned long irqflags; + u32 keymsk, keymax; + u32 plane_ctl = plane_state->ctl; + + plane_ctl |= skl_plane_ctl_crtc(crtc_state); + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + plane_color_ctl = plane_state->color_ctl | + glk_plane_color_ctl_crtc(crtc_state); + + /* Sizes are 0 based */ + src_w--; + src_h--; + + keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); + + keymsk = key->channel_mask & 0x7ffffff; + if (alpha < 0xff) + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; + + /* The scaler will handle the output position */ + if (plane_state->scaler_id >= 0) { + crtc_x = 0; + crtc_y = 0; + } + + if (aux_plane) { + aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr; + + if (INTEL_GEN(dev_priv) < 12) + aux_dist |= skl_plane_stride(plane_state, aux_plane); + } + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); + intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), + (src_h << 16) | src_w); + + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); + + if (icl_is_hdr_plane(dev_priv, 
plane_id)) + intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), + plane_state->cus_ctl); + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); + + if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) + icl_program_input_csc(plane, crtc_state, plane_state); + + if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) + intel_uncore_write64_fw(&dev_priv->uncore, + PLANE_CC_VAL(pipe, plane_id), plane_state->ccval); + + skl_write_plane_wm(plane, crtc_state); + + intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), + key->min_value); + intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); + intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax); + + intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), + (y << 16) | x); + + if (INTEL_GEN(dev_priv) < 11) + intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), + (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x); + + if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + + /* + * The control register self-arms if the plane was previously + * disabled. Try to make the plane enable atomic by writing + * the control register just before the surface register. + */ + intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), + intel_plane_ggtt_offset(plane_state) + surf_addr); + + if (plane_state->scaler_id >= 0) + skl_program_plane_scaler(plane, crtc_state, plane_state); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +skl_plane_async_flip(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + bool async_flip) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + unsigned long irqflags; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + u32 surf_addr = plane_state->color_plane[0].offset; + u32 plane_ctl = plane_state->ctl; + + plane_ctl |= skl_plane_ctl_crtc(crtc_state); + + if (async_flip) + plane_ctl |= PLANE_CTL_ASYNC_FLIP; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), + intel_plane_ggtt_offset(plane_state) + surf_addr); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +skl_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int color_plane = 0; + + if (plane_state->planar_linked_plane && !plane_state->planar_slave) + /* Program the UV plane on planar master */ + color_plane = 1; + + skl_program_plane(plane, crtc_state, plane_state, color_plane); +} + +static bool intel_format_is_p01x(u32 format) +{ + switch (format) { + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return true; + default: + return false; + } +} + +static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + struct drm_format_name_buf format_name; + + 
if (!fb) + return 0; + + if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && + is_ccs_modifier(fb->modifier)) { + drm_dbg_kms(&dev_priv->drm, + "RC support only with 0/180 degree rotation (%x)\n", + rotation); + return -EINVAL; + } + + if (rotation & DRM_MODE_REFLECT_X && + fb->modifier == DRM_FORMAT_MOD_LINEAR) { + drm_dbg_kms(&dev_priv->drm, + "horizontal flip is not supported with linear surface formats\n"); + return -EINVAL; + } + + if (drm_rotation_90_or_270(rotation)) { + if (fb->modifier != I915_FORMAT_MOD_Y_TILED && + fb->modifier != I915_FORMAT_MOD_Yf_TILED) { + drm_dbg_kms(&dev_priv->drm, + "Y/Yf tiling required for 90/270!\n"); + return -EINVAL; + } + + /* + * 90/270 is not allowed with RGB64 16:16:16:16 and + * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards. + */ + switch (fb->format->format) { + case DRM_FORMAT_RGB565: + if (INTEL_GEN(dev_priv) >= 11) + break; + fallthrough; + case DRM_FORMAT_C8: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + drm_dbg_kms(&dev_priv->drm, + "Unsupported pixel format %s for 90/270!\n", + drm_get_format_name(fb->format->format, + &format_name)); + return -EINVAL; + default: + break; + } + } + + /* Y-tiling is not supported in IF-ID Interlace mode */ + if (crtc_state->hw.enable && + crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && + (fb->modifier == I915_FORMAT_MOD_Y_TILED || + fb->modifier == I915_FORMAT_MOD_Yf_TILED || + fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || + fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) { + drm_dbg_kms(&dev_priv->drm, + "Y/Yf tiling not supported in IF-ID mode\n"); + return -EINVAL; + } + + /* Wa_1606054188:tgl,adl-s */ + if ((IS_ALDERLAKE_S(dev_priv) || IS_TIGERLAKE(dev_priv)) && + plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE && + intel_format_is_p01x(fb->format->format)) { + drm_dbg_kms(&dev_priv->drm, + "Source color keying not supported with P01x formats\n"); + return -EINVAL; + } + + return 0; +} + +static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + int crtc_x = plane_state->uapi.dst.x1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int pipe_src_w = crtc_state->pipe_src_w; + + /* + * Display WA #1175: cnl,glk + * Planes other than the cursor may cause FIFO underflow and display + * corruption if starting less than 4 pixels from the right edge of + * the screen. + * Besides the above WA fix the similar problem, where planes other + * than the cursor ending less than 4 pixels from the left edge of the + * screen may cause FIFO underflow and display corruption. + */ + if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && + (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) { + drm_dbg_kms(&dev_priv->drm, + "requested plane X %s position %d invalid (valid range %d-%d)\n", + crtc_x + crtc_w < 4 ? "end" : "start", + crtc_x + crtc_w < 4 ? 
crtc_x + crtc_w : crtc_x, + 4, pipe_src_w - 4); + return -ERANGE; + } + + return 0; +} + +static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + + /* Display WA #1106 */ + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && + src_w & 3 && + (rotation == DRM_MODE_ROTATE_270 || + rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { + DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); + return -EINVAL; + } + + return 0; +} + +static int skl_plane_max_scale(struct drm_i915_private *dev_priv, + const struct drm_framebuffer *fb) +{ + /* + * We don't yet know the final source width nor + * whether we can use the HQ scaler mode. Assume + * the best case. + * FIXME need to properly check this later. + */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || + !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 0x30000 - 1; + else + return 0x20000 - 1; +} + +static int intel_plane_min_width(struct intel_plane *plane, + const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (plane->min_width) + return plane->min_width(fb, color_plane, rotation); + else + return 1; +} + +static int intel_plane_max_width(struct intel_plane *plane, + const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (plane->max_width) + return plane->max_width(fb, color_plane, rotation); + else + return INT_MAX; +} + +static int intel_plane_max_height(struct intel_plane *plane, + const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (plane->max_height) + return plane->max_height(fb, color_plane, rotation); + else + return INT_MAX; +} + +static bool +skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, + int main_x, int main_y, u32 main_offset, + int ccs_plane) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_x = plane_state->color_plane[ccs_plane].x; + int aux_y = plane_state->color_plane[ccs_plane].y; + u32 aux_offset = plane_state->color_plane[ccs_plane].offset; + u32 alignment = intel_surf_alignment(fb, ccs_plane); + int hsub; + int vsub; + + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); + while (aux_offset >= main_offset && aux_y <= main_y) { + int x, y; + + if (aux_x == main_x && aux_y == main_y) + break; + + if (aux_offset == 0) + break; + + x = aux_x / hsub; + y = aux_y / vsub; + aux_offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + ccs_plane, + aux_offset, + aux_offset - + alignment); + aux_x = x * hsub + aux_x % hsub; + aux_y = y * vsub + aux_y % vsub; + } + + if (aux_x != main_x || aux_y != main_y) + return false; + + plane_state->color_plane[ccs_plane].offset = aux_offset; + plane_state->color_plane[ccs_plane].x = aux_x; + plane_state->color_plane[ccs_plane].y = aux_y; + + return true; +} + + +int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, + int *x, int *y, u32 *offset) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + const int aux_plane = skl_main_to_aux_plane(fb, 0); + const u32 aux_offset = plane_state->color_plane[aux_plane].offset; + const u32 alignment = intel_surf_alignment(fb, 0); + const int w = 
drm_rect_width(&plane_state->uapi.src) >> 16; + + intel_add_fb_offsets(x, y, plane_state, 0); + *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0); + if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment))) + return -EINVAL; + + /* + * AUX surface offset is specified as the distance from the + * main surface offset, and it must be non-negative. Make + * sure that is what we will get. + */ + if (aux_plane && *offset > aux_offset) + *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, + *offset, + aux_offset & ~(alignment - 1)); + + /* + * When using an X-tiled surface, the plane blows up + * if the x offset + width exceed the stride. + * + * TODO: linear and Y-tiled seem fine, Yf untested, + */ + if (fb->modifier == I915_FORMAT_MOD_X_TILED) { + int cpp = fb->format->cpp[0]; + + while ((*x + w) * cpp > plane_state->color_plane[0].stride) { + if (*offset == 0) { + drm_dbg_kms(&dev_priv->drm, + "Unable to find suitable display surface offset due to X-tiling\n"); + return -EINVAL; + } + + *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, + *offset, + *offset - alignment); + } + } + + return 0; +} + +static int skl_check_main_surface(struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + const unsigned int rotation = plane_state->hw.rotation; + int x = plane_state->uapi.src.x1 >> 16; + int y = plane_state->uapi.src.y1 >> 16; + const int w = drm_rect_width(&plane_state->uapi.src) >> 16; + const int h = drm_rect_height(&plane_state->uapi.src) >> 16; + const int min_width = intel_plane_min_width(plane, fb, 0, rotation); + const int max_width = intel_plane_max_width(plane, fb, 0, rotation); + const int max_height = intel_plane_max_height(plane, fb, 0, rotation); + const int aux_plane = skl_main_to_aux_plane(fb, 0); + const u32 alignment = intel_surf_alignment(fb, 0); + u32 offset; + int ret; + + if (w > max_width || w < min_width || h > max_height) { + drm_dbg_kms(&dev_priv->drm, + "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", + w, h, min_width, max_width, max_height); + return -EINVAL; + } + + ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); + if (ret) + return ret; + + /* + * CCS AUX surface doesn't have its own x/y offsets, we must make sure + * they match with the main surface x/y offsets. + */ + if (is_ccs_modifier(fb->modifier)) { + while (!skl_check_main_ccs_coordinates(plane_state, x, y, + offset, aux_plane)) { + if (offset == 0) + break; + + offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, + offset, offset - alignment); + } + + if (x != plane_state->color_plane[aux_plane].x || + y != plane_state->color_plane[aux_plane].y) { + drm_dbg_kms(&dev_priv->drm, + "Unable to find suitable display surface offset due to CCS\n"); + return -EINVAL; + } + } + + drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191); + + plane_state->color_plane[0].offset = offset; + plane_state->color_plane[0].x = x; + plane_state->color_plane[0].y = y; + + /* + * Put the final coordinates back so that the src + * coordinate checks will see the right values. 
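For readers new to this dance: the surface address programmed into the hardware must honour the tile/page alignment, so whatever part of the offset cannot go into the aligned base is pushed back into the plane's x/y offsets and, per the comment above, into the 16.16 src rectangle. A conceptual sketch for the simple linear case only (hypothetical helper; the real aligned-offset helpers used above also handle the tiled layouts and rotation):

        /*
         * Illustrative only: split a byte offset into a base aligned to
         * 'alignment' (a power of two, as the WARN above enforces) plus an
         * (x, y) remainder in pixels, for a linear surface.
         */
        static unsigned int split_linear_offset(unsigned int offset,
                                                unsigned int stride, unsigned int cpp,
                                                unsigned int alignment,
                                                int *x, int *y)
        {
                unsigned int base = offset & ~(alignment - 1);
                unsigned int rem = offset - base;

                *y = rem / stride;
                *x = (rem % stride) / cpp;

                return base;
        }

The x/y that come out of this are what end up in color_plane[0] and, just below, back in plane_state->uapi.src.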
+ */ + drm_rect_translate_to(&plane_state->uapi.src, + x << 16, y << 16); + + return 0; +} + +static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *i915 = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + int uv_plane = 1; + int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation); + int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation); + int x = plane_state->uapi.src.x1 >> 17; + int y = plane_state->uapi.src.y1 >> 17; + int w = drm_rect_width(&plane_state->uapi.src) >> 17; + int h = drm_rect_height(&plane_state->uapi.src) >> 17; + u32 offset; + + /* FIXME not quite sure how/if these apply to the chroma plane */ + if (w > max_width || h > max_height) { + drm_dbg_kms(&i915->drm, + "CbCr source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); + return -EINVAL; + } + + intel_add_fb_offsets(&x, &y, plane_state, uv_plane); + offset = intel_plane_compute_aligned_offset(&x, &y, + plane_state, uv_plane); + + if (is_ccs_modifier(fb->modifier)) { + int ccs_plane = main_to_ccs_plane(fb, uv_plane); + u32 aux_offset = plane_state->color_plane[ccs_plane].offset; + u32 alignment = intel_surf_alignment(fb, uv_plane); + + if (offset > aux_offset) + offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + uv_plane, + offset, + aux_offset & ~(alignment - 1)); + + while (!skl_check_main_ccs_coordinates(plane_state, x, y, + offset, ccs_plane)) { + if (offset == 0) + break; + + offset = intel_plane_adjust_aligned_offset(&x, &y, + plane_state, + uv_plane, + offset, offset - alignment); + } + + if (x != plane_state->color_plane[ccs_plane].x || + y != plane_state->color_plane[ccs_plane].y) { + drm_dbg_kms(&i915->drm, + "Unable to find suitable display surface offset due to CCS\n"); + return -EINVAL; + } + } + + drm_WARN_ON(&i915->drm, x > 8191 || y > 8191); + + plane_state->color_plane[uv_plane].offset = offset; + plane_state->color_plane[uv_plane].x = x; + plane_state->color_plane[uv_plane].y = y; + + return 0; +} + +static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + int src_x = plane_state->uapi.src.x1 >> 16; + int src_y = plane_state->uapi.src.y1 >> 16; + u32 offset; + int ccs_plane; + + for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) { + int main_hsub, main_vsub; + int hsub, vsub; + int x, y; + + if (!is_ccs_plane(fb, ccs_plane) || + is_gen12_ccs_cc_plane(fb, ccs_plane)) + continue; + + intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, + skl_ccs_to_main_plane(fb, ccs_plane)); + intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); + + hsub *= main_hsub; + vsub *= main_vsub; + x = src_x / hsub; + y = src_y / vsub; + + intel_add_fb_offsets(&x, &y, plane_state, ccs_plane); + + offset = intel_plane_compute_aligned_offset(&x, &y, + plane_state, + ccs_plane); + + plane_state->color_plane[ccs_plane].offset = offset; + plane_state->color_plane[ccs_plane].x = (x * hsub + + src_x % hsub) / + main_hsub; + plane_state->color_plane[ccs_plane].y = (y * vsub + + src_y % vsub) / + main_vsub; + } + + return 0; +} + +static int skl_check_plane_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + int ret, i; + + ret = intel_plane_compute_gtt(plane_state); + if (ret) + return ret; + + 
if (!plane_state->uapi.visible) + return 0; + + /* + * Handle the AUX surface first since the main surface setup depends on + * it. + */ + if (is_ccs_modifier(fb->modifier)) { + ret = skl_check_ccs_aux_surface(plane_state); + if (ret) + return ret; + } + + if (intel_format_info_is_yuv_semiplanar(fb->format, + fb->modifier)) { + ret = skl_check_nv12_aux_surface(plane_state); + if (ret) + return ret; + } + + for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) { + plane_state->color_plane[i].offset = 0; + plane_state->color_plane[i].x = 0; + plane_state->color_plane[i].y = 0; + } + + ret = skl_check_main_surface(plane_state); + if (ret) + return ret; + + return 0; +} + +static bool skl_fb_scalable(const struct drm_framebuffer *fb) +{ + if (!fb) + return false; + + switch (fb->format->format) { + case DRM_FORMAT_C8: + return false; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + return INTEL_GEN(to_i915(fb->dev)) >= 11; + default: + return true; + } +} + +static int skl_plane_check(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int min_scale = DRM_PLANE_HELPER_NO_SCALING; + int max_scale = DRM_PLANE_HELPER_NO_SCALING; + int ret; + + ret = skl_plane_check_fb(crtc_state, plane_state); + if (ret) + return ret; + + /* use scaler when colorkey is not required */ + if (!plane_state->ckey.flags && skl_fb_scalable(fb)) { + min_scale = 1; + max_scale = skl_plane_max_scale(dev_priv, fb); + } + + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + min_scale, max_scale, true); + if (ret) + return ret; + + ret = skl_check_plane_surface(plane_state); + if (ret) + return ret; + + if (!plane_state->uapi.visible) + return 0; + + ret = skl_plane_check_dst_coordinates(crtc_state, plane_state); + if (ret) + return ret; + + ret = intel_plane_check_src_coordinates(plane_state); + if (ret) + return ret; + + ret = skl_plane_check_nv12_rotation(plane_state); + if (ret) + return ret; + + /* HW only has 8 bits pixel precision, disable plane if invisible */ + if (!(plane_state->hw.alpha >> 8)) + plane_state->uapi.visible = false; + + plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + plane_state->color_ctl = glk_plane_color_ctl(crtc_state, + plane_state); + + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && + icl_is_hdr_plane(dev_priv, plane->id)) + /* Enable and use MPEG-2 chroma siting */ + plane_state->cus_ctl = PLANE_CUS_ENABLE | + PLANE_CUS_HPHASE_0 | + PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25; + else + plane_state->cus_ctl = 0; + + return 0; +} + +static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + if (!HAS_FBC(dev_priv)) + return false; + + return pipe == PIPE_A && plane_id == PLANE_PRIMARY; +} + +static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + /* Display WA #0870: skl, bxt */ + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) + return false; + + if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) + return false; + + if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) + return false; + + return true; +} + 
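A side note on the scale limits used in skl_plane_check() above: they are 16.16 fixed-point ratios of source extent to destination extent, so DRM_PLANE_HELPER_NO_SCALING (1 << 16) means exactly 1:1, and the 0x30000 - 1 / 0x20000 - 1 values from skl_plane_max_scale() permit downscaling by just under 3x or 2x. A tiny worked example (hypothetical helper):

        /* Illustrative only: the ratio checked against min_scale/max_scale. */
        static unsigned int scale_16_16(unsigned int src, unsigned int dst)
        {
                return (src << 16) / dst;
        }

scale_16_16(1920, 800) is 0x26666, roughly 2.4x downscaling: that fits under a max_scale of 0x30000 - 1, but is rejected when the plane is not scalable and max_scale stays at 1 << 16.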
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { + *num_formats = ARRAY_SIZE(skl_planar_formats); + return skl_planar_formats; + } else { + *num_formats = ARRAY_SIZE(skl_plane_formats); + return skl_plane_formats; + } +} + +static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { + *num_formats = ARRAY_SIZE(glk_planar_formats); + return glk_planar_formats; + } else { + *num_formats = ARRAY_SIZE(skl_plane_formats); + return skl_plane_formats; + } +} + +static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (icl_is_hdr_plane(dev_priv, plane_id)) { + *num_formats = ARRAY_SIZE(icl_hdr_plane_formats); + return icl_hdr_plane_formats; + } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) { + *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats); + return icl_sdr_y_plane_formats; + } else { + *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats); + return icl_sdr_uv_plane_formats; + } +} + +static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + if (plane_id == PLANE_CURSOR) + return false; + + if (INTEL_GEN(dev_priv) >= 10) + return true; + + if (IS_GEMINILAKE(dev_priv)) + return pipe != PIPE_C; + + return pipe != PIPE_C && + (plane_id == PLANE_PRIMARY || + plane_id == PLANE_SPRITE0); +} + +static bool skl_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + struct intel_plane *plane = to_intel_plane(_plane); + + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + break; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + if (!plane->has_ccs) + return false; + break; + default: + return false; + } + + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + if (is_ccs_modifier(modifier)) + return true; + fallthrough; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_XVYU2101010: + if (modifier == I915_FORMAT_MOD_Yf_TILED) + return true; + fallthrough; + case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED || + modifier == I915_FORMAT_MOD_Y_TILED) + return true; + fallthrough; + default: + return false; + } +} + +static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, + enum plane_id plane_id) +{ + /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ + if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) || + IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_C0)) + return false; + + return plane_id < 
PLANE_SPRITE4; +} + +static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + struct drm_i915_private *dev_priv = to_i915(_plane->dev); + struct intel_plane *plane = to_intel_plane(_plane); + + switch (modifier) { + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) + return false; + fallthrough; + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: + break; + default: + return false; + } + + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + if (is_ccs_modifier(modifier)) + return true; + fallthrough; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) + return true; + fallthrough; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED || + modifier == I915_FORMAT_MOD_Y_TILED) + return true; + fallthrough; + default: + return false; + } +} + +static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, + enum plane_id plane_id) +{ + if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) + return gen12_plane_format_modifiers_mc_ccs; + else + return gen12_plane_format_modifiers_rc_ccs; +} + +static const struct drm_plane_funcs skl_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = skl_plane_format_mod_supported, +}; + +static const struct drm_plane_funcs gen12_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = gen12_plane_format_mod_supported, +}; + +static void +skl_plane_enable_flip_done(struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + spin_lock_irq(&i915->irq_lock); + bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); + spin_unlock_irq(&i915->irq_lock); +} + +static void +skl_plane_disable_flip_done(struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + spin_lock_irq(&i915->irq_lock); + bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); + spin_unlock_irq(&i915->irq_lock); +} + +struct intel_plane * +skl_universal_plane_create(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + 
const struct drm_plane_funcs *plane_funcs; + struct intel_plane *plane; + enum drm_plane_type plane_type; + unsigned int supported_rotations; + unsigned int supported_csc; + const u64 *modifiers; + const u32 *formats; + int num_formats; + int ret; + + plane = intel_plane_alloc(); + if (IS_ERR(plane)) + return plane; + + plane->pipe = pipe; + plane->id = plane_id; + plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); + + plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); + if (plane->has_fbc) { + struct intel_fbc *fbc = &dev_priv->fbc; + + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; + } + + if (INTEL_GEN(dev_priv) >= 11) { + plane->min_width = icl_plane_min_width; + plane->max_width = icl_plane_max_width; + plane->max_height = icl_plane_max_height; + } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + plane->max_width = glk_plane_max_width; + plane->max_height = skl_plane_max_height; + } else { + plane->max_width = skl_plane_max_width; + plane->max_height = skl_plane_max_height; + } + + plane->max_stride = skl_plane_max_stride; + plane->update_plane = skl_update_plane; + plane->disable_plane = skl_disable_plane; + plane->get_hw_state = skl_plane_get_hw_state; + plane->check_plane = skl_plane_check; + plane->min_cdclk = skl_plane_min_cdclk; + + if (plane_id == PLANE_PRIMARY) { + plane->need_async_flip_disable_wa = IS_GEN_RANGE(dev_priv, 9, 10); + plane->async_flip = skl_plane_async_flip; + plane->enable_flip_done = skl_plane_enable_flip_done; + plane->disable_flip_done = skl_plane_disable_flip_done; + } + + if (INTEL_GEN(dev_priv) >= 11) + formats = icl_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); + else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + formats = glk_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); + else + formats = skl_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); + + plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); + if (INTEL_GEN(dev_priv) >= 12) { + modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); + plane_funcs = &gen12_plane_funcs; + } else { + if (plane->has_ccs) + modifiers = skl_plane_format_modifiers_ccs; + else + modifiers = skl_plane_format_modifiers_noccs; + plane_funcs = &skl_plane_funcs; + } + + if (plane_id == PLANE_PRIMARY) + plane_type = DRM_PLANE_TYPE_PRIMARY; + else + plane_type = DRM_PLANE_TYPE_OVERLAY; + + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + 0, plane_funcs, + formats, num_formats, modifiers, + plane_type, + "plane %d%c", plane_id + 1, + pipe_name(pipe)); + if (ret) + goto fail; + + supported_rotations = + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; + + if (INTEL_GEN(dev_priv) >= 10) + supported_rotations |= DRM_MODE_REFLECT_X; + + drm_plane_create_rotation_property(&plane->base, + DRM_MODE_ROTATE_0, + supported_rotations); + + supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709); + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020); + + drm_plane_create_color_properties(&plane->base, + supported_csc, + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_LIMITED_RANGE); + + drm_plane_create_alpha_property(&plane->base); + drm_plane_create_blend_mode_property(&plane->base, + BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE)); + + 
drm_plane_create_zpos_immutable_property(&plane->base, plane_id); + + if (INTEL_GEN(dev_priv) >= 12) + drm_plane_enable_fb_damage_clips(&plane->base); + + if (INTEL_GEN(dev_priv) >= 10) + drm_plane_create_scaling_filter_property(&plane->base, + BIT(DRM_SCALING_FILTER_DEFAULT) | + BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); + + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); + + return plane; + +fail: + intel_plane_free(plane); + + return ERR_PTR(ret); +} + +void +skl_get_initial_plane_config(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) +{ + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + enum plane_id plane_id = plane->id; + enum pipe pipe; + u32 val, base, offset, stride_mult, tiling, alpha; + int fourcc, pixel_format; + unsigned int aligned_height; + struct drm_framebuffer *fb; + struct intel_framebuffer *intel_fb; + + if (!plane->get_hw_state(plane, &pipe)) + return; + + drm_WARN_ON(dev, pipe != crtc->pipe); + + if (crtc_state->bigjoiner) { + drm_dbg_kms(&dev_priv->drm, + "Unsupported bigjoiner configuration for initial FB\n"); + return; + } + + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + if (!intel_fb) { + drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); + return; + } + + fb = &intel_fb->base; + + fb->dev = dev; + + val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id)); + + if (INTEL_GEN(dev_priv) >= 11) + pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; + else + pixel_format = val & PLANE_CTL_FORMAT_MASK; + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + alpha = intel_de_read(dev_priv, + PLANE_COLOR_CTL(pipe, plane_id)); + alpha &= PLANE_COLOR_ALPHA_MASK; + } else { + alpha = val & PLANE_CTL_ALPHA_MASK; + } + + fourcc = skl_format_to_fourcc(pixel_format, + val & PLANE_CTL_ORDER_RGBX, alpha); + fb->format = drm_format_info(fourcc); + + tiling = val & PLANE_CTL_TILED_MASK; + switch (tiling) { + case PLANE_CTL_TILED_LINEAR: + fb->modifier = DRM_FORMAT_MOD_LINEAR; + break; + case PLANE_CTL_TILED_X: + plane_config->tiling = I915_TILING_X; + fb->modifier = I915_FORMAT_MOD_X_TILED; + break; + case PLANE_CTL_TILED_Y: + plane_config->tiling = I915_TILING_Y; + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) + fb->modifier = INTEL_GEN(dev_priv) >= 12 ? + I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS : + I915_FORMAT_MOD_Y_TILED_CCS; + else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE) + fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; + else + fb->modifier = I915_FORMAT_MOD_Y_TILED; + break; + case PLANE_CTL_TILED_YF: + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) + fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; + else + fb->modifier = I915_FORMAT_MOD_Yf_TILED; + break; + default: + MISSING_CASE(tiling); + goto error; + } + + /* + * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr + * while i915 HW rotation is clockwise, that's why this swapping.
+ */ + switch (val & PLANE_CTL_ROTATE_MASK) { + case PLANE_CTL_ROTATE_0: + plane_config->rotation = DRM_MODE_ROTATE_0; + break; + case PLANE_CTL_ROTATE_90: + plane_config->rotation = DRM_MODE_ROTATE_270; + break; + case PLANE_CTL_ROTATE_180: + plane_config->rotation = DRM_MODE_ROTATE_180; + break; + case PLANE_CTL_ROTATE_270: + plane_config->rotation = DRM_MODE_ROTATE_90; + break; + } + + if (INTEL_GEN(dev_priv) >= 10 && + val & PLANE_CTL_FLIP_HORIZONTAL) + plane_config->rotation |= DRM_MODE_REFLECT_X; + + /* 90/270 degree rotation would require extra work */ + if (drm_rotation_90_or_270(plane_config->rotation)) + goto error; + + base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000; + plane_config->base = base; + + offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); + + val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); + fb->height = ((val >> 16) & 0xffff) + 1; + fb->width = ((val >> 0) & 0xffff) + 1; + + val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); + stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); + fb->pitches[0] = (val & 0x3ff) * stride_mult; + + aligned_height = intel_fb_align_height(fb, 0, fb->height); + + plane_config->size = fb->pitches[0] * aligned_height; + + drm_dbg_kms(&dev_priv->drm, + "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", + crtc->base.name, plane->base.name, fb->width, fb->height, + fb->format->cpp[0] * 8, base, fb->pitches[0], + plane_config->size); + + plane_config->fb = intel_fb; + return; + +error: + kfree(intel_fb); +} + diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h new file mode 100644 index 000000000000..818266653630 --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef _SKL_UNIVERSAL_PLANE_H_ +#define _SKL_UNIVERSAL_PLANE_H_ + +#include <linux/types.h> + +struct drm_framebuffer; +struct drm_i915_private; +struct intel_crtc; +struct intel_initial_plane_config; +struct intel_plane_state; + +enum pipe; +enum plane_id; + +struct intel_plane * +skl_universal_plane_create(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id); + +void skl_get_initial_plane_config(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config); + +int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); + +int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane); +int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, + int *x, int *y, u32 *offset); + +bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv, + enum plane_id plane_id); +bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id); + +#endif diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index f94025ec603a..1059a26c1f58 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -38,6 +38,7 @@ #include "intel_fifo_underrun.h" #include "intel_panel.h" #include "intel_sideband.h" +#include "skl_scaler.h" /* return pixels in terms of txbyteclkhs */ static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 3c0b157e2a35..01fe89afe8c0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -35,7 +35,7 
@@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) * to handle all possible callers, and given typical object sizes, * the alignment of the buddy allocation will naturally match. */ - vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev, + vaddr = dma_alloc_coherent(obj->base.dev->dev, roundup_pow_of_two(obj->base.size), &dma, GFP_KERNEL); if (!vaddr) @@ -83,7 +83,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) err_st: kfree(st); err_pci: - dma_free_coherent(&obj->base.dev->pdev->dev, + dma_free_coherent(obj->base.dev->dev, roundup_pow_of_two(obj->base.size), vaddr, dma); return -ENOMEM; @@ -129,7 +129,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, sg_free_table(pages); kfree(pages); - dma_free_coherent(&obj->base.dev->pdev->dev, + dma_free_coherent(obj->base.dev->dev, roundup_pow_of_two(obj->base.size), vaddr, dma); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 215766cc22bf..000e1cd8e920 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -85,6 +85,47 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) wbinvd_on_all_cpus(); } +int i915_gem_freeze(struct drm_i915_private *i915) +{ + /* Discard all purgeable objects, let userspace recover those as + * required after resuming. + */ + i915_gem_shrink_all(i915); + + return 0; +} + +int i915_gem_freeze_late(struct drm_i915_private *i915) +{ + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + + /* + * Called just before we write the hibernation image. + * + * We need to update the domain tracking to reflect that the CPU + * will be accessing all the pages to create and restore from the + * hibernation, and so upon restoration those pages will be in the + * CPU domain. + * + * To make sure the hibernation image contains the latest state, + * we update that state just before writing out the image. 
+ * + * To try and reduce the hibernation image, we manually shrink + * the objects as well, see i915_gem_freeze() + */ + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + i915_gem_shrink(i915, -1UL, NULL, ~0); + i915_gem_drain_freed_objects(i915); + + wbinvd_on_all_cpus(); + list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) + __start_cpu_write(obj); + + return 0; +} + void i915_gem_resume(struct drm_i915_private *i915) { GEM_TRACE("%s\n", dev_name(i915->drm.dev)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h index 26b78dbdc225..c9a66630e92e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h @@ -19,4 +19,7 @@ void i915_gem_idle_work_handler(struct work_struct *work); void i915_gem_suspend(struct drm_i915_private *i915); void i915_gem_suspend_late(struct drm_i915_private *i915); +int i915_gem_freeze(struct drm_i915_private *i915); +int i915_gem_freeze_late(struct drm_i915_private *i915); + #endif /* __I915_GEM_PM_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index cf83c208688c..680b370a8ef3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -172,7 +172,7 @@ rebuild_st: max_segment = PAGE_SIZE; goto rebuild_st; } else { - dev_warn(&i915->drm.pdev->dev, + dev_warn(i915->drm.dev, "Failed to DMA remap %lu pages\n", page_count); goto err_pages; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 551935348ad8..a1e197a6e999 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -753,22 +753,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, mutex_lock(&i915->mm.stolen_lock); ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); mutex_unlock(&i915->mm.stolen_lock); - if (ret) { - obj = ERR_PTR(ret); + if (ret) goto err_free; - } obj = i915_gem_object_alloc(); if (!obj) { - obj = ERR_PTR(-ENOMEM); + ret = -ENOMEM; goto err_stolen; } ret = __i915_gem_object_create_stolen(mem, obj, stolen); - if (ret) { - obj = ERR_PTR(ret); + if (ret) goto err_object_free; - } i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); return obj; @@ -779,7 +775,7 @@ err_stolen: i915_gem_stolen_remove_node(i915, stolen); err_free: kfree(stolen); - return obj; + return ERR_PTR(ret); } bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index 8551e6de50e8..de575fdb033f 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -240,7 +240,7 @@ gen7_emit_state_base_address(struct batch_chunk *batch, /* general */ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; /* surface */ - *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY; + *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY; /* dynamic */ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; /* indirect */ @@ -393,6 +393,7 @@ static void emit_batch(struct i915_vma * const vma, desc_count); /* Reset inherited context registers */ + gen7_emit_pipeline_flush(&cmds); gen7_emit_pipeline_invalidate(&cmds); batch_add(&cmds, MI_LOAD_REGISTER_IMM(2)); batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7)); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 
fb1b1d096975..40024f448c1c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -713,9 +713,12 @@ static int engine_setup_common(struct intel_engine_cs *engine) goto err_status; } + err = intel_engine_init_cmd_parser(engine); + if (err) + goto err_cmd_parser; + intel_engine_init_active(engine, ENGINE_PHYSICAL); intel_engine_init_execlists(engine); - intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); intel_engine_init_retire(engine); @@ -732,6 +735,8 @@ static int engine_setup_common(struct intel_engine_cs *engine) return 0; +err_cmd_parser: + intel_breadcrumbs_free(engine->breadcrumbs); err_status: cleanup_status_page(engine); return err; @@ -1269,7 +1274,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) /* Waiting to drain ELSP? */ if (execlists_active(&engine->execlists)) { - synchronize_hardirq(engine->i915->drm.pdev->irq); + synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq); intel_engine_flush_submission(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index eece0844fbe9..ec2bf963ced9 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -535,16 +535,39 @@ static int init_ggtt(struct i915_ggtt *ggtt) mutex_init(&ggtt->error_mutex); if (ggtt->mappable_end) { - /* Reserve a mappable slot for our lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, - &ggtt->error_capture, - PAGE_SIZE, 0, - I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (ret) - return ret; + /* + * Reserve a mappable slot for our lockless error capture. + * + * We strongly prefer taking address 0x0 in order to protect + * other critical buffers against accidental overwrites, + * as writing to address 0 is a very common mistake. + * + * Since 0 may already be in use by the system (e.g. the BIOS + * framebuffer), we let the reservation fail quietly and hope + * 0 remains reserved always. + * + * If we fail to reserve 0, and then fail to find any space + * for an error-capture, remain silent. We can afford not + * to reserve an error_capture node as we have fallback + * paths, and we trust that 0 will remain reserved. However, + * the only likely reason for failure to insert is a driver + * bug, which we expect to cause other failures... 
+ */ + ggtt->error_capture.size = I915_GTT_PAGE_SIZE; + ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; + if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) + drm_mm_insert_node_in_range(&ggtt->vm.mm, + &ggtt->error_capture, + ggtt->error_capture.size, 0, + ggtt->error_capture.color, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); } + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_dbg(&ggtt->vm.i915->drm, + "Reserved GGTT:[%llx, %llx] for use by error capture\n", + ggtt->error_capture.start, + ggtt->error_capture.start + ggtt->error_capture.size); /* * The upper portion of the GuC address space has a sizeable hole @@ -557,9 +580,9 @@ static int init_ggtt(struct i915_ggtt *ggtt) /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - drm_dbg_kms(&ggtt->vm.i915->drm, - "clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); + drm_dbg(&ggtt->vm.i915->drm, + "clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); ggtt->vm.clear_range(&ggtt->vm, hole_start, hole_end - hole_start); } @@ -769,7 +792,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { struct drm_i915_private *i915 = ggtt->vm.i915; - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); phys_addr_t phys_addr; int ret; @@ -839,7 +862,7 @@ static struct resource pci_resource(struct pci_dev *pdev, int bar) static int gen8_gmch_probe(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); unsigned int size; u16 snb_gmch_ctl; @@ -983,7 +1006,7 @@ static u64 iris_pte_encode(dma_addr_t addr, static int gen6_gmch_probe(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); unsigned int size; u16 snb_gmch_ctl; @@ -1046,7 +1069,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) phys_addr_t gmadr_base; int ret; - ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL); + ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL); if (!ret) { drm_err(&i915->drm, "failed to set up gmch\n"); return -EIO; @@ -1091,7 +1114,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) ggtt->vm.gt = gt; ggtt->vm.i915 = i915; - ggtt->vm.dma = &i915->drm.pdev->dev; + ggtt->vm.dma = i915->drm.dev; if (INTEL_GEN(i915) <= 5) ret = i915_gmch_probe(ggtt); diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 96b85a10ef33..3f940ae27028 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -301,7 +301,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) ppgtt->vm.gt = gt; ppgtt->vm.i915 = i915; - ppgtt->vm.dma = &i915->drm.pdev->dev; + ppgtt->vm.dma = i915->drm.dev; ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 35504c97f11d..9843e1d4327f 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -485,14 +485,14 @@ static bool rc6_supported(struct intel_rc6 *rc6) static void rpm_get(struct intel_rc6 *rc6) { GEM_BUG_ON(rc6->wakeref); - pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev); + 
pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev); rc6->wakeref = true; } static void rpm_put(struct intel_rc6 *rc6) { GEM_BUG_ON(!rc6->wakeref); - pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev); + pm_runtime_put(rc6_to_i915(rc6)->drm.dev); rc6->wakeref = false; } diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 60393ce5614d..e326d3c0bc10 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -26,12 +26,12 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem) if (ret) return ret; - mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev, + mem->remap_addr = dma_map_resource(i915->drm.dev, mem->region.start, mem->fake_mappable.size, PCI_DMA_BIDIRECTIONAL, DMA_ATTR_FORCE_CONTIGUOUS); - if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) { + if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) { drm_mm_remove_node(&mem->fake_mappable); return -EINVAL; } @@ -56,7 +56,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem) drm_mm_remove_node(&mem->fake_mappable); - dma_unmap_resource(&mem->i915->drm.pdev->dev, + dma_unmap_resource(mem->i915->drm.dev, mem->remap_addr, mem->fake_mappable.size, PCI_DMA_BIDIRECTIONAL, @@ -104,7 +104,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = { struct intel_memory_region * intel_setup_fake_lmem(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct intel_memory_region *mem; resource_size_t mappable_end; resource_size_t io_start; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 61410cd62927..afe0342dcd47 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -178,7 +178,7 @@ static int i915_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = gt->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); int err; /* Assert reset for at least 20 usec, and wait for acknowledgement. 
*/ @@ -207,7 +207,7 @@ static int g33_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = gt->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); return wait_for_atomic(g4x_reset_complete(pdev), 50); @@ -217,7 +217,7 @@ static int g4x_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = gt->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev); struct intel_uncore *uncore = gt->uncore; int ret; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 3fdcd5ff71dd..8c0c050c4af9 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -71,17 +71,25 @@ const struct i915_rev_steppings kbl_revids[] = { [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 }, }; -const struct i915_rev_steppings tgl_uy_revids[] = { - [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 }, - [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 }, - [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 }, - [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 }, +const struct i915_rev_steppings tgl_uy_revid_step_tbl[] = { + [0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 }, + [1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_C0 }, + [2] = { .gt_stepping = STEP_B1, .disp_stepping = STEP_C0 }, + [3] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_D0 }, }; /* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */ -const struct i915_rev_steppings tgl_revids[] = { - [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 }, - [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 }, +const struct i915_rev_steppings tgl_revid_step_tbl[] = { + [0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_B0 }, + [1] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_D0 }, +}; + +const struct i915_rev_steppings adls_revid_step_tbl[] = { + [0x0] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A0 }, + [0x1] = { .gt_stepping = STEP_A0, .disp_stepping = STEP_A2 }, + [0x4] = { .gt_stepping = STEP_B0, .disp_stepping = STEP_B0 }, + [0x8] = { .gt_stepping = STEP_C0, .disp_stepping = STEP_B0 }, + [0xC] = { .gt_stepping = STEP_D0, .disp_stepping = STEP_C0 }, }; static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) @@ -722,7 +730,8 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (IS_DG1(i915)) dg1_ctx_workarounds_init(engine, wal); - else if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) + else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) || + IS_TIGERLAKE(i915)) tgl_ctx_workarounds_init(engine, wal); else if (IS_GEN(i915, 12)) gen12_ctx_workarounds_init(engine, wal); @@ -1123,19 +1132,19 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) gen12_gt_workarounds_init(i915, wal); /* Wa_1409420604:tgl */ - if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS); /* Wa_1607087056:tgl also know as BUG:1409180338 */ - if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | 
L3_CR2X_CLKGATE_DIS); /* Wa_1408615072:tgl[a0] */ - if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); } @@ -1613,7 +1622,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) struct drm_i915_private *i915 = engine->i915; if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) || - IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { + IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) { /* * Wa_1607138336:tgl[a0],dg1[a0] * Wa_1607063988:tgl[a0],dg1[a0] @@ -1623,7 +1632,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); } - if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { + if (IS_TGL_UY_GT_STEPPING(i915, STEP_A0, STEP_A0)) { /* * Wa_1606679103:tgl * (see also Wa_1606682166:icl) @@ -1633,45 +1642,45 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_DISABLE_SAMPLER_PREFETCH); } - if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { - /* Wa_1606931601:tgl,rkl,dg1 */ + if (IS_ALDERLAKE_S(i915) || IS_DG1(i915) || + IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { + /* Wa_1606931601:tgl,rkl,dg1,adl-s */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ); /* * Wa_1407928979:tgl A* * Wa_18011464164:tgl[B0+],dg1[B0+] * Wa_22010931296:tgl[B0+],dg1[B0+] - * Wa_14010919138:rkl, dg1 + * Wa_14010919138:rkl,dg1,adl-s */ wa_write_or(wal, GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE); /* * Wa_1606700617:tgl,dg1 - * Wa_22010271021:tgl,rkl,dg1 + * Wa_22010271021:tgl,rkl,dg1, adl-s */ wa_masked_en(wal, GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE); - - /* Wa_1406941453:tgl,rkl,dg1 */ - wa_masked_en(wal, - GEN10_SAMPLER_MODE, - ENABLE_SMALLPL); } - if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) || + if (IS_ALDERLAKE_S(i915) || IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { - /* Wa_1409804808:tgl,rkl,dg1[a0] */ + /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_PUSH_CONST_DEREF_HOLD_DIS); /* * Wa_1409085225:tgl - * Wa_14010229206:tgl,rkl,dg1[a0] + * Wa_14010229206:tgl,rkl,dg1[a0],adl-s */ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); + } + + if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) || + IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* * Wa_1607030317:tgl * Wa_1607186500:tgl @@ -1688,6 +1697,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_RC_SEMA_IDLE_MSG_DISABLE); } + if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { + /* Wa_1406941453:tgl,rkl,dg1 */ + wa_masked_en(wal, + GEN10_SAMPLER_MODE, + ENABLE_SMALLPL); + } + if (IS_GEN(i915, 11)) { /* This is not an Wa. 
Enable for better image quality */ wa_masked_en(wal, @@ -1834,6 +1850,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, GEN8_L3SQCREG4, GEN8_LQSC_FLUSH_COHERENT_LINES); + + /* Disable atomics in L3 to prevent unrecoverable hangs */ + wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1, + GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0); + wa_write_clr_set(wal, GEN8_L3SQCREG4, + GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0); + wa_write_clr_set(wal, GEN9_SCRATCH1, + EVICTION_PERF_FIX_ENABLE, 0); } if (IS_HASWELL(i915)) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 67b06fde1225..984fa79e0fa7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -44,9 +44,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * List of required GuC and HuC binaries per-platform. * Must be ordered based on platform + revid, from newer to older. * - * Note that RKL uses the same firmware as TGL. + * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same + * firmware as TGL. */ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ + fw_def(ALDERLAKE_S, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \ fw_def(ROCKETLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \ fw_def(TIGERLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \ fw_def(JASPERLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \ diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index ad86c5eb5bba..b490e3db2e38 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -374,6 +374,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, bool primary) { struct intel_gvt *gvt = vgpu->gvt; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); const struct intel_gvt_device_info *info = &gvt->device_info; u16 *gmch_ctl; u8 next; @@ -407,9 +408,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = - pci_resource_len(gvt->gt->i915->drm.pdev, 0); + pci_resource_len(pdev, 0); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = - pci_resource_len(gvt->gt->i915->drm.pdev, 2); + pci_resource_len(pdev, 2); memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 7fb91de06557..fef1e857cefc 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -41,6 +41,7 @@ #include "gt/intel_lrc.h" #include "gt/intel_ring.h" #include "gt/intel_gt_requests.h" +#include "gt/shmem_utils.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" @@ -3094,70 +3095,28 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) */ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) { + const unsigned long start = LRC_STATE_PN * PAGE_SIZE; struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; - const unsigned long start = LRC_STATE_PN * PAGE_SIZE; - struct i915_request *rq; - struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_request *requests[I915_NUM_ENGINES] = {}; - bool is_ctx_pinned[I915_NUM_ENGINES] = {}; - int ret; if (gvt->is_reg_whitelist_updated) return; - for_each_engine(engine, &dev_priv->gt, id) { - ret = 
intel_context_pin(s->shadow[id]); - if (ret) { - gvt_vgpu_err("fail to pin shadow ctx\n"); - goto out; - } - is_ctx_pinned[id] = true; - - rq = i915_request_create(s->shadow[id]); - if (IS_ERR(rq)) { - gvt_vgpu_err("fail to alloc default request\n"); - ret = -EIO; - goto out; - } - requests[id] = i915_request_get(rq); - i915_request_add(rq); - } - - if (intel_gt_wait_for_idle(&dev_priv->gt, - I915_GEM_IDLE_TIMEOUT) == -ETIME) { - ret = -EIO; - goto out; - } - /* scan init ctx to update cmd accessible list */ - for_each_engine(engine, &dev_priv->gt, id) { - int size = engine->context_size - PAGE_SIZE; - void *vaddr; + for_each_engine(engine, gvt->gt, id) { struct parser_exec_state s; - struct drm_i915_gem_object *obj; - struct i915_request *rq; - - rq = requests[id]; - GEM_BUG_ON(!i915_request_completed(rq)); - GEM_BUG_ON(!intel_context_is_pinned(rq->context)); - obj = rq->context->state->obj; - - if (!obj) { - ret = -EIO; - goto out; - } + void *vaddr; + int ret; - i915_gem_object_set_cache_coherency(obj, - I915_CACHE_LLC); + if (!engine->default_state) + continue; - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + vaddr = shmem_pin_map(engine->default_state); if (IS_ERR(vaddr)) { - gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n", - id, PTR_ERR(vaddr)); - goto out; + gvt_err("failed to map %s->default state, err:%zd\n", + engine->name, PTR_ERR(vaddr)); + return; } s.buf_type = RING_BUFFER_CTX; @@ -3165,9 +3124,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) s.vgpu = vgpu; s.engine = engine; s.ring_start = 0; - s.ring_size = size; + s.ring_size = engine->context_size - start; s.ring_head = 0; - s.ring_tail = size; + s.ring_tail = s.ring_size; s.rb_va = vaddr + start; s.workload = NULL; s.is_ctx_wa = false; @@ -3175,29 +3134,18 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) /* skipping the first RING_CTX_SIZE(0x50) dwords */ ret = ip_gma_set(&s, RING_CTX_SIZE); - if (ret) { - i915_gem_object_unpin_map(obj); - goto out; + if (ret == 0) { + ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size); + if (ret) + gvt_err("Scan init ctx error\n"); } - ret = command_scan(&s, 0, size, 0, size); + shmem_unpin_map(engine->default_state, vaddr); if (ret) - gvt_err("Scan init ctx error\n"); - - i915_gem_object_unpin_map(obj); + return; } -out: - if (!ret) - gvt->is_reg_whitelist_updated = true; - - for (id = 0; id < I915_NUM_ENGINES ; id++) { - if (requests[id]) - i915_request_put(requests[id]); - - if (is_ctx_pinned[id]) - intel_context_unpin(s->shadow[id]); - } + gvt->is_reg_whitelist_updated = true; } int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 62a5b0dd2003..034c060f89d4 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -516,11 +516,27 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) port->dpcd = NULL; } +static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data) +{ + struct intel_vgpu_vblank_timer *vblank_timer; + struct intel_vgpu *vgpu; + + vblank_timer = container_of(data, struct intel_vgpu_vblank_timer, timer); + vgpu = container_of(vblank_timer, struct intel_vgpu, vblank_timer); + + /* Set vblank emulation request per-vGPU bit */ + intel_gvt_request_service(vgpu->gvt, + INTEL_GVT_REQUEST_EMULATE_VBLANK + vgpu->id); + hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period); + return HRTIMER_RESTART; +} + static int 
setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, int type, unsigned int resolution) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); + struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer; if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM)) return -EINVAL; @@ -544,6 +560,14 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, port->dpcd->data[DPCD_SINK_COUNT] = 0x1; port->type = type; port->id = resolution; + port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC; + vgpu->display.port_num = port_num; + + /* Init hrtimer based on default refresh rate */ + hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + vblank_timer->timer.function = vblank_timer_fn; + vblank_timer->vrefresh_k = port->vrefresh_k; + vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k); emulate_monitor_status_change(vgpu); @@ -551,41 +575,44 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, } /** - * intel_gvt_check_vblank_emulation - check if vblank emulation timer should - * be turned on/off when a virtual pipe is enabled/disabled. - * @gvt: a GVT device + * vgpu_update_vblank_emulation - Update per-vGPU vblank_timer + * @vgpu: vGPU operated + * @turnon: Turn ON/OFF vblank_timer * - * This function is used to turn on/off vblank timer according to currently - * enabled/disabled virtual pipes. + * This function is used to turn on/off or update the per-vGPU vblank_timer + * when PIPECONF is enabled or disabled. vblank_timer period is also updated + * if guest changed the refresh rate. * */ -void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt) +void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon) { - struct intel_gvt_irq *irq = &gvt->irq; - struct intel_vgpu *vgpu; - int pipe, id; - int found = false; - - mutex_lock(&gvt->lock); - for_each_active_vgpu(gvt, vgpu, id) { - for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { - if (pipe_is_enabled(vgpu, pipe)) { - found = true; - break; - } + struct intel_vgpu_vblank_timer *vblank_timer = &vgpu->vblank_timer; + struct intel_vgpu_port *port = + intel_vgpu_port(vgpu, vgpu->display.port_num); + + if (turnon) { + /* + * Skip the re-enable if already active and vrefresh unchanged. + * Otherwise, stop timer if already active and restart with new + * period. 
+ */ + if (vblank_timer->vrefresh_k != port->vrefresh_k || + !hrtimer_active(&vblank_timer->timer)) { + /* Stop timer before start with new period if active */ + if (hrtimer_active(&vblank_timer->timer)) + hrtimer_cancel(&vblank_timer->timer); + + /* Make sure new refresh rate updated to timer period */ + vblank_timer->vrefresh_k = port->vrefresh_k; + vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k); + hrtimer_start(&vblank_timer->timer, + ktime_add_ns(ktime_get(), vblank_timer->period), + HRTIMER_MODE_ABS); } - if (found) - break; + } else { + /* Caller request to stop vblank */ + hrtimer_cancel(&vblank_timer->timer); } - - /* all the pipes are disabled */ - if (!found) - hrtimer_cancel(&irq->vblank_timer.timer); - else - hrtimer_start(&irq->vblank_timer.timer, - ktime_add_ns(ktime_get(), irq->vblank_timer.period), - HRTIMER_MODE_ABS); - mutex_unlock(&gvt->lock); } static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) @@ -617,7 +644,7 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) } } -static void emulate_vblank(struct intel_vgpu *vgpu) +void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu) { int pipe; @@ -628,24 +655,6 @@ static void emulate_vblank(struct intel_vgpu *vgpu) } /** - * intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device - * @gvt: a GVT device - * - * This function is used to trigger vblank interrupts for vGPUs on GVT device - * - */ -void intel_gvt_emulate_vblank(struct intel_gvt *gvt) -{ - struct intel_vgpu *vgpu; - int id; - - mutex_lock(&gvt->lock); - for_each_active_vgpu(gvt, vgpu, id) - emulate_vblank(vgpu); - mutex_unlock(&gvt->lock); -} - -/** * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU * @vgpu: a vGPU * @connected: link state @@ -753,6 +762,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); + + vgpu_update_vblank_emulation(vgpu, false); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index b59b34046e1e..f5616f99ef2f 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -36,6 +36,7 @@ #define _GVT_DISPLAY_H_ #include <linux/types.h> +#include <linux/hrtimer.h> struct intel_gvt; struct intel_vgpu; @@ -157,6 +158,7 @@ enum intel_vgpu_edid { GVT_EDID_NUM, }; +#define GVT_DEFAULT_REFRESH_RATE 60 struct intel_vgpu_port { /* per display EDID information */ struct intel_vgpu_edid_data *edid; @@ -164,6 +166,14 @@ struct intel_vgpu_port { struct intel_vgpu_dpcd_data *dpcd; int type; enum intel_vgpu_edid id; + /* x1000 to get accurate 59.94, 24.976, 29.94, etc. in timing std. 
*/ + u32 vrefresh_k; +}; + +struct intel_vgpu_vblank_timer { + struct hrtimer timer; + u32 vrefresh_k; + u64 period; }; static inline char *vgpu_edid_str(enum intel_vgpu_edid id) @@ -202,8 +212,8 @@ static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id) } } -void intel_gvt_emulate_vblank(struct intel_gvt *gvt); -void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); +void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu); +void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon); int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution); void intel_vgpu_reset_display(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 158873f269b1..c8dcda6d4f0d 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, static void clean_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { - struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; - struct intel_engine_cs *engine; struct intel_vgpu_submission *s = &vgpu->submission; + struct intel_engine_cs *engine; intel_engine_mask_t tmp; - for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) { + for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { kfree(s->ring_scan_buffer[engine->id]); s->ring_scan_buffer[engine->id] = NULL; s->ring_scan_buffer_size[engine->id] = 0; @@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu, static void reset_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { - struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; intel_engine_mask_t tmp; - for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) + for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) init_vgpu_execlist(vgpu, engine); } diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 990a181094e3..1a8274a3f4b1 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -76,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) static int expose_firmware_sysfs(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = gvt->gt->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); struct gvt_firmware_header *h; void *firmware; void *p; @@ -127,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) static void clean_firmware_sysfs(struct intel_gvt *gvt) { - struct pci_dev *pdev = gvt->gt->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); device_remove_bin_file(&pdev->dev, &firmware_attr); vfree(firmware_attr.private); @@ -151,7 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt, const struct firmware *fw) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = gvt->gt->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); struct gvt_firmware_header *h; unsigned long id, crc32_start; const void *mem; @@ -205,7 +205,7 @@ invalid_firmware: int intel_gvt_load_firmware(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = gvt->gt->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); struct intel_gvt_firmware *firmware = &gvt->firmware; struct gvt_firmware_header *h; const struct 
firmware *fw; @@ -240,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) gvt_dbg_core("request hw state firmware %s...\n", path); - ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev); + ret = request_firmware(&fw, path, gvt->gt->i915->drm.dev); kfree(path); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 897c007ea96a..67a26923aa0e 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -746,7 +746,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu, static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) { - struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev; + struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev; trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); @@ -831,7 +831,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type) { - struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev; + struct device *kdev = vgpu->gvt->gt->i915->drm.dev; struct intel_vgpu_ppgtt_spt *spt = NULL; dma_addr_t daddr; int ret; @@ -1159,8 +1159,8 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, * @vgpu: target vgpu * @entry: target pfn's gtt entry * - * Return 1 if 2MB huge gtt shadowing is possilbe, 0 if miscondition, - * negtive if found err. + * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition, + * negative if found err. */ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) @@ -2402,7 +2402,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, vgpu->gvt->device_info.gtt_entry_size_shift; void *scratch_pt; int i; - struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; + struct device *dev = vgpu->gvt->gt->i915->drm.dev; dma_addr_t daddr; if (drm_WARN_ON(&i915->drm, @@ -2460,7 +2460,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, static int release_scratch_page_tree(struct intel_vgpu *vgpu) { int i; - struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; + struct device *dev = vgpu->gvt->gt->i915->drm.dev; dma_addr_t daddr; for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { @@ -2732,7 +2732,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; void *page; - struct device *dev = &gvt->gt->i915->drm.pdev->dev; + struct device *dev = gvt->gt->i915->drm.dev; dma_addr_t daddr; gvt_dbg_core("init gtt\n"); @@ -2781,7 +2781,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) */ void intel_gvt_clean_gtt(struct intel_gvt *gvt) { - struct device *dev = &gvt->gt->i915->drm.pdev->dev; + struct device *dev = gvt->gt->i915->drm.dev; dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << I915_GTT_PAGE_SHIFT); diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index d1d8ee4a5f16..2ecb8534930b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -50,7 +50,7 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, const char *name) { const char *driver_name = - dev_driver_string(&gvt->gt->i915->drm.pdev->dev); + dev_driver_string(gvt->gt->i915->drm.dev); int i; name += strlen(driver_name) + 1; @@ -189,7 +189,7 @@ static const struct intel_gvt_ops intel_gvt_ops = { static void init_device_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = gvt->gt->i915->drm.pdev; + struct pci_dev *pdev = 
to_pci_dev(gvt->gt->i915->drm.dev); info->max_support_vgpus = 8; info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; @@ -203,6 +203,22 @@ static void init_device_info(struct intel_gvt *gvt) info->msi_cap_offset = pdev->msi_cap; } +static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int id; + + mutex_lock(&gvt->lock); + idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { + if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, + (void *)&gvt->service_request)) { + if (vgpu->active) + intel_vgpu_emulate_vblank(vgpu); + } + } + mutex_unlock(&gvt->lock); +} + static int gvt_service_thread(void *data) { struct intel_gvt *gvt = (struct intel_gvt *)data; @@ -220,9 +236,7 @@ static int gvt_service_thread(void *data) if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) continue; - if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK, - (void *)&gvt->service_request)) - intel_gvt_emulate_vblank(gvt); + intel_gvt_test_and_emulate_vblank(gvt); if (test_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request) || @@ -278,7 +292,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915) intel_gvt_clean_sched_policy(gvt); intel_gvt_clean_workload_scheduler(gvt); intel_gvt_clean_gtt(gvt); - intel_gvt_clean_irq(gvt); intel_gvt_free_firmware(gvt); intel_gvt_clean_mmio_info(gvt); idr_destroy(&gvt->vgpu_idr); @@ -337,7 +350,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915) ret = intel_gvt_init_gtt(gvt); if (ret) - goto out_clean_irq; + goto out_free_firmware; ret = intel_gvt_init_workload_scheduler(gvt); if (ret) @@ -376,7 +389,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); gvt_dbg_core("gvt device initialization is done\n"); - intel_gvt_host.dev = &i915->drm.pdev->dev; + intel_gvt_host.dev = i915->drm.dev; intel_gvt_host.initialized = true; return 0; @@ -392,8 +405,6 @@ out_clean_workload_scheduler: intel_gvt_clean_workload_scheduler(gvt); out_clean_gtt: intel_gvt_clean_gtt(gvt); -out_clean_irq: - intel_gvt_clean_irq(gvt); out_free_firmware: intel_gvt_free_firmware(gvt); out_clean_mmio_info: diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 03c993d68f10..8dc8170ba00f 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -133,6 +133,7 @@ struct intel_vgpu_display { struct intel_vgpu_i2c_edid i2c_edid; struct intel_vgpu_port ports[I915_MAX_PORTS]; struct intel_vgpu_sbi sbi; + enum port port_num; }; struct vgpu_sched_ctl { @@ -214,6 +215,7 @@ struct intel_vgpu { struct list_head dmabuf_obj_list_head; struct mutex dmabuf_lock; struct idr object_idr; + struct intel_vgpu_vblank_timer vblank_timer; u32 scan_nonprivbb; }; @@ -346,13 +348,16 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) } enum { - INTEL_GVT_REQUEST_EMULATE_VBLANK = 0, - /* Scheduling trigger by timer */ - INTEL_GVT_REQUEST_SCHED = 1, + INTEL_GVT_REQUEST_SCHED = 0, /* Scheduling trigger by event */ - INTEL_GVT_REQUEST_EVENT_SCHED = 2, + INTEL_GVT_REQUEST_EVENT_SCHED = 1, + + /* per-vGPU vblank emulation request */ + INTEL_GVT_REQUEST_EMULATE_VBLANK = 2, + INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK + + GVT_MAX_VGPU, }; static inline void intel_gvt_request_service(struct intel_gvt *gvt, diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 6eeaeecb7f85..477badfcb258 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -39,6 +39,7 
@@ #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" +#include "display/intel_display_types.h" /* XXX FIXME i915 has changed PP_XXX definition */ #define PCH_PP_STATUS _MMIO(0xc7200) @@ -443,6 +444,254 @@ static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +/* + * Only PIPE_A is enabled in current vGPU display and PIPE_A is tied to + * TRANSCODER_A in HW. DDI/PORT could be PORT_x depends on + * setup_virtual_dp_monitor(). + * emulate_monitor_status_change() set up PLL for PORT_x as the initial enabled + * DPLL. Later guest driver may setup a different DPLLx when setting mode. + * So the correct sequence to find DP stream clock is: + * Check TRANS_DDI_FUNC_CTL on TRANSCODER_A to get PORT_x. + * Check correct PLLx for PORT_x to get PLL frequency and DP bitrate. + * Then Refresh rate then can be calculated based on follow equations: + * Pixel clock = h_total * v_total * refresh_rate + * stream clock = Pixel clock + * ls_clk = DP bitrate + * Link M/N = strm_clk / ls_clk + */ + +static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) +{ + u32 dp_br = 0; + u32 ddi_pll_sel = vgpu_vreg_t(vgpu, PORT_CLK_SEL(port)); + + switch (ddi_pll_sel) { + case PORT_CLK_SEL_LCPLL_2700: + dp_br = 270000 * 2; + break; + case PORT_CLK_SEL_LCPLL_1350: + dp_br = 135000 * 2; + break; + case PORT_CLK_SEL_LCPLL_810: + dp_br = 81000 * 2; + break; + case PORT_CLK_SEL_SPLL: + { + switch (vgpu_vreg_t(vgpu, SPLL_CTL) & SPLL_FREQ_MASK) { + case SPLL_FREQ_810MHz: + dp_br = 81000 * 2; + break; + case SPLL_FREQ_1350MHz: + dp_br = 135000 * 2; + break; + case SPLL_FREQ_2700MHz: + dp_br = 270000 * 2; + break; + default: + gvt_dbg_dpy("vgpu-%d PORT_%c can't get freq from SPLL 0x%08x\n", + vgpu->id, port_name(port), vgpu_vreg_t(vgpu, SPLL_CTL)); + break; + } + break; + } + case PORT_CLK_SEL_WRPLL1: + case PORT_CLK_SEL_WRPLL2: + { + u32 wrpll_ctl; + int refclk, n, p, r; + + if (ddi_pll_sel == PORT_CLK_SEL_WRPLL1) + wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL1)); + else + wrpll_ctl = vgpu_vreg_t(vgpu, WRPLL_CTL(DPLL_ID_WRPLL2)); + + switch (wrpll_ctl & WRPLL_REF_MASK) { + case WRPLL_REF_PCH_SSC: + refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc; + break; + case WRPLL_REF_LCPLL: + refclk = 2700000; + break; + default: + gvt_dbg_dpy("vgpu-%d PORT_%c WRPLL can't get refclk 0x%08x\n", + vgpu->id, port_name(port), wrpll_ctl); + goto out; + } + + r = wrpll_ctl & WRPLL_DIVIDER_REF_MASK; + p = (wrpll_ctl & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; + n = (wrpll_ctl & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; + + dp_br = (refclk * n / 10) / (p * r) * 2; + break; + } + default: + gvt_dbg_dpy("vgpu-%d PORT_%c has invalid clock select 0x%08x\n", + vgpu->id, port_name(port), vgpu_vreg_t(vgpu, PORT_CLK_SEL(port))); + break; + } + +out: + return dp_br; +} + +static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) +{ + u32 dp_br = 0; + int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc; + enum dpio_phy phy = DPIO_PHY0; + enum dpio_channel ch = DPIO_CH0; + struct dpll clock = {0}; + u32 temp; + + /* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */ + switch (port) { + case PORT_A: + phy = DPIO_PHY1; + ch = DPIO_CH0; + break; + case PORT_B: + phy = DPIO_PHY0; + ch = DPIO_CH0; + break; + case PORT_C: + phy = DPIO_PHY0; + ch = DPIO_CH1; + break; + default: + gvt_dbg_dpy("vgpu-%d no PHY for PORT_%c\n", vgpu->id, port_name(port)); + goto out; + } + + temp = vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)); + if (!(temp & 
PORT_PLL_ENABLE) || !(temp & PORT_PLL_LOCK)) { + gvt_dbg_dpy("vgpu-%d PORT_%c PLL_ENABLE 0x%08x isn't enabled or locked\n", + vgpu->id, port_name(port), temp); + goto out; + } + + clock.m1 = 2; + clock.m2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0)) & PORT_PLL_M2_MASK) << 22; + if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)) & PORT_PLL_M2_FRAC_MASK; + clock.n = (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)) & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; + clock.p1 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; + clock.p2 = (vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)) & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; + clock.m = clock.m1 * clock.m2; + clock.p = clock.p1 * clock.p2; + + if (clock.n == 0 || clock.p == 0) { + gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port)); + goto out; + } + + clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22); + clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p); + + dp_br = clock.dot / 5; + +out: + return dp_br; +} + +static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) +{ + u32 dp_br = 0; + enum intel_dpll_id dpll_id = DPLL_ID_SKL_DPLL0; + + /* Find the enabled DPLL for the DDI/PORT */ + if (!(vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port)) && + (vgpu_vreg_t(vgpu, DPLL_CTRL2) & DPLL_CTRL2_DDI_SEL_OVERRIDE(port))) { + dpll_id += (vgpu_vreg_t(vgpu, DPLL_CTRL2) & + DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >> + DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port); + } else { + gvt_dbg_dpy("vgpu-%d DPLL for PORT_%c isn't turned on\n", + vgpu->id, port_name(port)); + return dp_br; + } + + /* Find PLL output frequency from correct DPLL, and get bit rate */ + switch ((vgpu_vreg_t(vgpu, DPLL_CTRL1) & + DPLL_CTRL1_LINK_RATE_MASK(dpll_id)) >> + DPLL_CTRL1_LINK_RATE_SHIFT(dpll_id)) { + case DPLL_CTRL1_LINK_RATE_810: + dp_br = 81000 * 2; + break; + case DPLL_CTRL1_LINK_RATE_1080: + dp_br = 108000 * 2; + break; + case DPLL_CTRL1_LINK_RATE_1350: + dp_br = 135000 * 2; + break; + case DPLL_CTRL1_LINK_RATE_1620: + dp_br = 162000 * 2; + break; + case DPLL_CTRL1_LINK_RATE_2160: + dp_br = 216000 * 2; + break; + case DPLL_CTRL1_LINK_RATE_2700: + dp_br = 270000 * 2; + break; + default: + dp_br = 0; + gvt_dbg_dpy("vgpu-%d PORT_%c fail to get DPLL-%d freq\n", + vgpu->id, port_name(port), dpll_id); + } + + return dp_br; +} + +static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + enum port port; + u32 dp_br, link_m, link_n, htotal, vtotal; + + /* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */ + port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) & + TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; + if (port != PORT_B && port != PORT_D) { + gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port)); + return; + } + + /* Calculate DP bitrate from PLL */ + if (IS_BROADWELL(dev_priv)) + dp_br = bdw_vgpu_get_dp_bitrate(vgpu, port); + else if (IS_BROXTON(dev_priv)) + dp_br = bxt_vgpu_get_dp_bitrate(vgpu, port); + else + dp_br = skl_vgpu_get_dp_bitrate(vgpu, port); + + /* Get DP link symbol clock M/N */ + link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)); + link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)); + + /* Get H/V total from transcoder timing */ + htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT) + 1; + vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> 
TRANS_VTOTAL_SHIFT) + 1; + + if (dp_br && link_n && htotal && vtotal) { + u64 pixel_clk = 0; + u32 new_rate = 0; + u32 *old_rate = &(intel_vgpu_port(vgpu, vgpu->display.port_num)->vrefresh_k); + + /* Calculate pixel clock by (ls_clk * M / N) */ + pixel_clk = div_u64(mul_u32_u32(link_m, dp_br), link_n); + pixel_clk *= MSEC_PER_SEC; + + /* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */ + new_rate = DIV64_U64_ROUND_CLOSEST(pixel_clk, div64_u64(mul_u32_u32(htotal, vtotal), MSEC_PER_SEC)); + + if (*old_rate != new_rate) + *old_rate = new_rate; + + gvt_dbg_dpy("vgpu-%d PIPE_%c refresh rate updated to %d\n", + vgpu->id, pipe_name(PIPE_A), new_rate); + } +} + static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -451,14 +700,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & PIPECONF_ENABLE) + if (data & PIPECONF_ENABLE) { vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; - else + vgpu_update_refresh_rate(vgpu); + vgpu_update_vblank_emulation(vgpu, true); + } else { vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; - /* vgpu_lock already hold by emulate mmio r/w */ - mutex_unlock(&vgpu->vgpu_lock); - intel_gvt_check_vblank_emulation(vgpu->gvt); - mutex_lock(&vgpu->vgpu_lock); + vgpu_update_vblank_emulation(vgpu, false); + } return 0; } diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 7498878e6289..497d28ce47df 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -647,38 +647,6 @@ static void init_events( } } -static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data) -{ - struct intel_gvt_vblank_timer *vblank_timer; - struct intel_gvt_irq *irq; - struct intel_gvt *gvt; - - vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer); - irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer); - gvt = container_of(irq, struct intel_gvt, irq); - - intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK); - hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period); - return HRTIMER_RESTART; -} - -/** - * intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem - * @gvt: a GVT device - * - * This function is called at driver unloading stage, to clean up GVT-g IRQ - * emulation subsystem. 
- * - */ -void intel_gvt_clean_irq(struct intel_gvt *gvt) -{ - struct intel_gvt_irq *irq = &gvt->irq; - - hrtimer_cancel(&irq->vblank_timer.timer); -} - -#define VBLANK_TIMER_PERIOD 16000000 - /** * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem * @gvt: a GVT device @@ -692,7 +660,6 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt) int intel_gvt_init_irq(struct intel_gvt *gvt) { struct intel_gvt_irq *irq = &gvt->irq; - struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer; gvt_dbg_core("init irq framework\n"); @@ -707,9 +674,5 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) init_irq_map(irq); - hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - vblank_timer->timer.function = vblank_timer_fn; - vblank_timer->period = VBLANK_TIMER_PERIOD; - return 0; } diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 287cd142629e..6c47d3e33161 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -201,11 +201,6 @@ struct intel_gvt_irq_map { u32 down_irq_bitmask; }; -struct intel_gvt_vblank_timer { - struct hrtimer timer; - u64 period; -}; - /* structure containing device specific IRQ state */ struct intel_gvt_irq { struct intel_gvt_irq_ops *ops; @@ -214,11 +209,9 @@ struct intel_gvt_irq { struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX]; DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX); struct intel_gvt_irq_map *irq_map; - struct intel_gvt_vblank_timer vblank_timer; }; int intel_gvt_init_irq(struct intel_gvt *gvt); -void intel_gvt_clean_irq(struct intel_gvt *gvt); void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu, enum intel_gvt_event_type event); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 60f1a386dd06..d089770795b8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -221,7 +221,7 @@ err: static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t *dma_addr, unsigned long size) { - struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; + struct device *dev = vgpu->gvt->gt->i915->drm.dev; struct page *page = NULL; int ret; @@ -244,7 +244,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t dma_addr, unsigned long size) { - struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; + struct device *dev = vgpu->gvt->gt->i915->drm.dev; dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); gvt_unpin_guest_page(vgpu, gfn, size); @@ -1703,7 +1703,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) return -EINVAL; } - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvmgt_gfn_is_write_protected(info, gfn)) goto out; @@ -1712,7 +1712,7 @@ static int kvmgt_page_track_add(unsigned long handle, u64 gfn) kvmgt_protect_table_add(info, gfn); out: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } @@ -1737,7 +1737,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) return -EINVAL; } - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (!kvmgt_gfn_is_write_protected(info, gfn)) goto out; @@ -1746,7 +1746,7 @@ static int kvmgt_page_track_remove(unsigned long handle, u64 gfn) kvmgt_protect_table_del(info, gfn); out: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); return 0; } @@ -1772,7 
+1772,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, struct kvmgt_guest_info *info = container_of(node, struct kvmgt_guest_info, track_node); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < slot->npages; i++) { gfn = slot->base_gfn + i; if (kvmgt_gfn_is_write_protected(info, gfn)) { @@ -1781,7 +1781,7 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, kvmgt_protect_table_del(info, gfn); } } - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 43f31c2eab14..fc735692f21f 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -412,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) if (!wa_ctx->indirect_ctx.obj) return; + i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL); i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); + i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); i915_gem_object_put(wa_ctx->indirect_ctx.obj); wa_ctx->indirect_ctx.obj = NULL; @@ -520,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) struct intel_gvt *gvt = workload->vgpu->gvt; const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd; struct intel_vgpu_shadow_bb *bb; + struct i915_gem_ww_ctx ww; int ret; list_for_each_entry(bb, &workload->shadow_bb, list) { @@ -544,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) * directly */ if (!bb->ppgtt) { - bb->vma = i915_gem_object_ggtt_pin(bb->obj, - NULL, 0, 0, 0); + i915_gem_ww_ctx_init(&ww, false); +retry: + i915_gem_object_lock(bb->obj, &ww); + + bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww, + NULL, 0, 0, 0); if (IS_ERR(bb->vma)) { ret = PTR_ERR(bb->vma); + if (ret == -EDEADLK) { + ret = i915_gem_ww_ctx_backoff(&ww); + if (!ret) + goto retry; + } goto err; } @@ -561,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) 0); if (ret) goto err; - } - /* No one is going to touch shadow bb from now on. */ - i915_gem_object_flush_map(bb->obj); + /* No one is going to touch shadow bb from now on. 
*/ + i915_gem_object_flush_map(bb->obj); + i915_gem_object_unlock(bb->obj); + } } return 0; err: + i915_gem_ww_ctx_fini(&ww); release_shadow_batch_buffer(workload); return ret; } @@ -594,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) unsigned char *per_ctx_va = (unsigned char *)wa_ctx->indirect_ctx.shadow_va + wa_ctx->indirect_ctx.size; + struct i915_gem_ww_ctx ww; + int ret; if (wa_ctx->indirect_ctx.size == 0) return 0; - vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, - 0, CACHELINE_BYTES, 0); - if (IS_ERR(vma)) - return PTR_ERR(vma); + i915_gem_ww_ctx_init(&ww, false); +retry: + i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww); + + vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL, + 0, CACHELINE_BYTES, 0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + if (ret == -EDEADLK) { + ret = i915_gem_ww_ctx_backoff(&ww); + if (!ret) + goto retry; + } + return ret; + } + + i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); /* FIXME: we are not tracking our pinned VMA leaving it * up to the core to fix up the stray pin_count upon @@ -635,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { if (bb->obj) { + i915_gem_object_lock(bb->obj, NULL); if (bb->va && !IS_ERR(bb->va)) i915_gem_object_unpin_map(bb->obj); if (bb->vma && !IS_ERR(bb->vma)) i915_vma_unpin(bb->vma); + i915_gem_object_unlock(bb->obj); i915_gem_object_put(bb->obj); } list_del(&bb->list); @@ -1015,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; - struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; struct intel_vgpu_workload *pos, *n; intel_engine_mask_t tmp; /* free the unsubmited workloads in the queues. 
*/ - for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) { + for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { list_del_init(&pos->list); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 6a16d0ca7cda..9039787f123a 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -300,8 +300,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) mutex_unlock(&vgpu->vgpu_lock); mutex_lock(&gvt->lock); - if (idr_is_empty(&gvt->vgpu_idr)) - intel_gvt_clean_irq(gvt); intel_gvt_update_vgpu_types(gvt); mutex_unlock(&gvt->lock); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index ab4382841c6b..3bc616cc1ad2 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -628,24 +628,26 @@ static int flush_lazy_signals(struct i915_active *ref) int __i915_active_wait(struct i915_active *ref, int state) { - int err; - might_sleep(); - if (!i915_active_acquire_if_busy(ref)) - return 0; - /* Any fence added after the wait begins will not be auto-signaled */ - err = flush_lazy_signals(ref); - i915_active_release(ref); - if (err) - return err; + if (i915_active_acquire_if_busy(ref)) { + int err; - if (!i915_active_is_idle(ref) && - ___wait_var_event(ref, i915_active_is_idle(ref), - state, 0, 0, schedule())) - return -EINTR; + err = flush_lazy_signals(ref); + i915_active_release(ref); + if (err) + return err; + if (___wait_var_event(ref, i915_active_is_idle(ref), + state, 0, 0, schedule())) + return -EINTR; + } + + /* + * After the wait is complete, the caller may free the active. + * We have to flush any concurrent retirement before returning. + */ flush_work(&ref->work); return 0; } diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index ced9a96d7c34..5f86f5b2caf6 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -940,7 +940,7 @@ static void fini_hash_table(struct intel_engine_cs *engine) * struct intel_engine_cs based on whether the platform requires software * command parsing. 
*/ -void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine) { const struct drm_i915_cmd_table *cmd_tables; int cmd_table_count; @@ -948,7 +948,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) && engine->class == COPY_ENGINE_CLASS)) - return; + return 0; switch (engine->class) { case RENDER_CLASS: @@ -1013,19 +1013,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) break; default: MISSING_CASE(engine->class); - return; + goto out; } if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) { drm_err(&engine->i915->drm, "%s: command descriptions are not sorted\n", engine->name); - return; + goto out; } if (!validate_regs_sorted(engine)) { drm_err(&engine->i915->drm, "%s: registers are not sorted\n", engine->name); - return; + goto out; } ret = init_hash_table(engine, cmd_tables, cmd_table_count); @@ -1033,10 +1033,17 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) drm_err(&engine->i915->drm, "%s: initialised failed!\n", engine->name); fini_hash_table(engine); - return; + goto out; } engine->flags |= I915_ENGINE_USING_CMD_PARSER; + +out: + if (intel_engine_requires_cmd_parser(engine) && + !intel_engine_using_cmd_parser(engine)) + return -EINVAL; + + return 0; } /** diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 88336ff4bf09..51133b8fabb4 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -677,7 +677,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) static int i915_runtime_pm_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); if (!HAS_RUNTIME_PM(dev_priv)) seq_puts(m, "Runtime power management not supported\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2febda554bca..43ac73861a4c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -38,7 +38,6 @@ #include <linux/slab.h> #include <linux/vga_switcheroo.h> #include <linux/vt.h> -#include <acpi/video.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_ioctl.h> @@ -47,11 +46,9 @@ #include <drm/drm_probe_helper.h> #include "display/intel_acpi.h" -#include "display/intel_audio.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" #include "display/intel_csr.h" -#include "display/intel_display_debugfs.h" #include "display/intel_display_types.h" #include "display/intel_dp.h" #include "display/intel_fbdev.h" @@ -65,6 +62,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_mman.h" +#include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_rc6.h" @@ -92,7 +90,7 @@ static const struct drm_driver driver; static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { - int domain = pci_domain_nr(dev_priv->drm.pdev->bus); + int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus); dev_priv->bridge_dev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0)); @@ -351,7 +349,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_irq_init(dev_priv); intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); - intel_init_audio_hooks(dev_priv); 
intel_detect_preproduction_hw(dev_priv); @@ -460,7 +457,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) */ static int i915_set_dma_info(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size; int ret; @@ -470,9 +466,9 @@ static int i915_set_dma_info(struct drm_i915_private *i915) * We don't have a max segment size, so set it to the max so sg's * debugging layer doesn't complain */ - dma_set_max_seg_size(&pdev->dev, UINT_MAX); + dma_set_max_seg_size(i915->drm.dev, UINT_MAX); - ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); + ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size)); if (ret) goto mask_err; @@ -492,7 +488,7 @@ static int i915_set_dma_info(struct drm_i915_private *i915) if (IS_I965G(i915) || IS_I965GM(i915)) mask_size = 32; - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size)); + ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size)); if (ret) goto mask_err; @@ -512,7 +508,7 @@ mask_err: */ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (i915_inject_probe_failure(dev_priv)) @@ -640,7 +636,7 @@ err_perf: */ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); i915_perf_fini(dev_priv); @@ -665,43 +661,21 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) intel_vgpu_register(dev_priv); /* Reveal our presence to userspace */ - if (drm_dev_register(dev, 0) == 0) { - i915_debugfs_register(dev_priv); - if (HAS_DISPLAY(dev_priv)) - intel_display_debugfs_register(dev_priv); - i915_setup_sysfs(dev_priv); - - /* Depends on sysfs having been initialized */ - i915_perf_register(dev_priv); - } else + if (drm_dev_register(dev, 0)) { drm_err(&dev_priv->drm, "Failed to register driver for userspace access!\n"); - - if (HAS_DISPLAY(dev_priv)) { - /* Must be done after probing outputs */ - intel_opregion_register(dev_priv); - acpi_video_register(); + return; } - intel_gt_driver_register(&dev_priv->gt); + i915_debugfs_register(dev_priv); + i915_setup_sysfs(dev_priv); - intel_audio_init(dev_priv); + /* Depends on sysfs having been initialized */ + i915_perf_register(dev_priv); - /* - * Some ports require correctly set-up hpd registers for detection to - * work properly (leading to ghost connected connector status), e.g. VGA - * on gm45. Hence we can only set up the initial fbdev config after hpd - * irqs are fully enabled. We do it last so that the async config - * cannot run before the connectors are registered. - */ - intel_fbdev_initial_config_async(dev); + intel_gt_driver_register(&dev_priv->gt); - /* - * We need to coordinate the hotplugs with the asynchronous fbdev - * configuration, for which we use the fbdev->async_cookie. - */ - if (HAS_DISPLAY(dev_priv)) - drm_kms_helper_poll_init(dev); + intel_display_driver_register(dev_priv); intel_power_domains_enable(dev_priv); intel_runtime_pm_enable(&dev_priv->runtime_pm); @@ -725,20 +699,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) intel_runtime_pm_disable(&dev_priv->runtime_pm); intel_power_domains_disable(dev_priv); - intel_fbdev_unregister(dev_priv); - intel_audio_deinit(dev_priv); - - /* - * After flushing the fbdev (incl. 
a late async config which will - * have delayed queuing of a hotplug event), then flush the hotplug - * events. - */ - drm_kms_helper_poll_fini(&dev_priv->drm); - drm_atomic_helper_shutdown(&dev_priv->drm); + intel_display_driver_unregister(dev_priv); intel_gt_driver_unregister(&dev_priv->gt); - acpi_video_unregister(); - intel_opregion_unregister(dev_priv); i915_perf_unregister(dev_priv); i915_pmu_unregister(dev_priv); @@ -1048,6 +1011,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) void i915_driver_shutdown(struct drm_i915_private *i915) { disable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_runtime_pm_disable(&i915->runtime_pm); + intel_power_domains_disable(i915); i915_gem_suspend(i915); @@ -1063,7 +1028,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_suspend_encoders(i915); intel_shutdown_encoders(i915); + /* + * The only requirement is to reboot with display DC states disabled, + * for now leaving all display power wells in the INIT power domain + * enabled matching the driver reload sequence. + */ + intel_power_domains_driver_remove(i915); enable_rpm_wakeref_asserts(&i915->runtime_pm); + + intel_runtime_pm_driver_release(&i915->runtime_pm); } static bool suspend_to_idle(struct drm_i915_private *dev_priv) @@ -1093,7 +1066,7 @@ static int i915_drm_prepare(struct drm_device *dev) static int i915_drm_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); pci_power_t opregion_target_state; disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -1150,7 +1123,7 @@ get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) { struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret; @@ -1280,7 +1253,7 @@ static int i915_drm_resume(struct drm_device *dev) static int i915_drm_resume_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; /* diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 574f3575879d..b8c5c7d867bb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -475,42 +475,6 @@ struct i915_drrs { enum drrs_support_type type; }; -struct i915_psr { - struct mutex lock; - -#define I915_PSR_DEBUG_MODE_MASK 0x0f -#define I915_PSR_DEBUG_DEFAULT 0x00 -#define I915_PSR_DEBUG_DISABLE 0x01 -#define I915_PSR_DEBUG_ENABLE 0x02 -#define I915_PSR_DEBUG_FORCE_PSR1 0x03 -#define I915_PSR_DEBUG_IRQ 0x10 - - u32 debug; - bool sink_support; - bool enabled; - struct intel_dp *dp; - enum pipe pipe; - enum transcoder transcoder; - bool active; - struct work_struct work; - unsigned busy_frontbuffer_bits; - bool sink_psr2_support; - bool link_standby; - bool colorimetry_support; - bool psr2_enabled; - bool psr2_sel_fetch_enabled; - u8 sink_sync_latency; - ktime_t last_entry_attempt; - ktime_t last_exit; - bool sink_not_reliable; - bool irq_aux_error; - u16 su_x_granularity; - bool dc3co_enabled; - u32 dc3co_exit_delay; - struct delayed_work dc3co_work; - struct drm_dp_vsc_sdp vsc; -}; - #define QUIRK_LVDS_SSC_DISABLE (1<<1) #define QUIRK_INVERT_BRIGHTNESS (1<<2) #define QUIRK_BACKLIGHT_PRESENT 
(1<<3) @@ -1038,8 +1002,6 @@ struct drm_i915_private { struct i915_power_domains power_domains; - struct i915_psr psr; - struct i915_gpu_error gpu_error; struct drm_i915_gem_object *vlv_pctx; @@ -1133,7 +1095,9 @@ struct drm_i915_private { INTEL_DRAM_DDR3, INTEL_DRAM_DDR4, INTEL_DRAM_LPDDR3, - INTEL_DRAM_LPDDR4 + INTEL_DRAM_LPDDR4, + INTEL_DRAM_DDR5, + INTEL_DRAM_LPDDR5, } type; u8 num_qgv_points; } dram_info; @@ -1280,7 +1244,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id) #define REVID_FOREVER 0xff -#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) +#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision) #define INTEL_GEN_MASK(s, e) ( \ BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ @@ -1334,7 +1298,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) { const unsigned int pi = __platform_mask_index(info, p); - return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; + return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1); } static __always_inline bool @@ -1408,6 +1372,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1) +#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -1550,54 +1515,60 @@ extern const struct i915_rev_steppings kbl_revids[]; (IS_JSL_EHL(p) && IS_REVID(p, since, until)) enum { - TGL_REVID_A0, - TGL_REVID_B0, - TGL_REVID_B1, - TGL_REVID_C0, - TGL_REVID_D0, + STEP_A0, + STEP_A2, + STEP_B0, + STEP_B1, + STEP_C0, + STEP_D0, }; -#define TGL_UY_REVIDS_SIZE 4 -#define TGL_REVIDS_SIZE 2 +#define TGL_UY_REVID_STEP_TBL_SIZE 4 +#define TGL_REVID_STEP_TBL_SIZE 2 +#define ADLS_REVID_STEP_TBL_SIZE 13 -extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE]; -extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE]; +extern const struct i915_rev_steppings tgl_uy_revid_step_tbl[TGL_UY_REVID_STEP_TBL_SIZE]; +extern const struct i915_rev_steppings tgl_revid_step_tbl[TGL_REVID_STEP_TBL_SIZE]; +extern const struct i915_rev_steppings adls_revid_step_tbl[ADLS_REVID_STEP_TBL_SIZE]; static inline const struct i915_rev_steppings * -tgl_revids_get(struct drm_i915_private *dev_priv) +tgl_stepping_get(struct drm_i915_private *dev_priv) { u8 revid = INTEL_REVID(dev_priv); u8 size; - const struct i915_rev_steppings *tgl_revid_tbl; - - if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { - tgl_revid_tbl = tgl_uy_revids; - size = ARRAY_SIZE(tgl_uy_revids); + const struct i915_rev_steppings *revid_step_tbl; + + if (IS_ALDERLAKE_S(dev_priv)) { + revid_step_tbl = adls_revid_step_tbl; + size = ARRAY_SIZE(adls_revid_step_tbl); + } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { + revid_step_tbl = tgl_uy_revid_step_tbl; + size = ARRAY_SIZE(tgl_uy_revid_step_tbl); } else { - tgl_revid_tbl = tgl_revids; - size = ARRAY_SIZE(tgl_revids); + revid_step_tbl = tgl_revid_step_tbl; + size = ARRAY_SIZE(tgl_revid_step_tbl); } revid = min_t(u8, revid, size - 1); - return &tgl_revid_tbl[revid]; + return &revid_step_tbl[revid]; } -#define IS_TGL_DISP_REVID(p, since, until) \ +#define IS_TGL_DISP_STEPPING(p, since, until) \ (IS_TIGERLAKE(p) && \ - 
tgl_revids_get(p)->disp_stepping >= (since) && \ - tgl_revids_get(p)->disp_stepping <= (until)) + tgl_stepping_get(p)->disp_stepping >= (since) && \ + tgl_stepping_get(p)->disp_stepping <= (until)) -#define IS_TGL_UY_GT_REVID(p, since, until) \ +#define IS_TGL_UY_GT_STEPPING(p, since, until) \ ((IS_TGL_U(p) || IS_TGL_Y(p)) && \ - tgl_revids_get(p)->gt_stepping >= (since) && \ - tgl_revids_get(p)->gt_stepping <= (until)) + tgl_stepping_get(p)->gt_stepping >= (since) && \ + tgl_stepping_get(p)->gt_stepping <= (until)) -#define IS_TGL_GT_REVID(p, since, until) \ +#define IS_TGL_GT_STEPPING(p, since, until) \ (IS_TIGERLAKE(p) && \ !(IS_TGL_U(p) || IS_TGL_Y(p)) && \ - tgl_revids_get(p)->gt_stepping >= (since) && \ - tgl_revids_get(p)->gt_stepping <= (until)) + tgl_stepping_get(p)->gt_stepping >= (since) && \ + tgl_stepping_get(p)->gt_stepping <= (until)) #define RKL_REVID_A0 0x0 #define RKL_REVID_B0 0x1 @@ -1612,6 +1583,22 @@ tgl_revids_get(struct drm_i915_private *dev_priv) #define IS_DG1_REVID(p, since, until) \ (IS_DG1(p) && IS_REVID(p, since, until)) +#define ADLS_REVID_A0 0x0 +#define ADLS_REVID_A2 0x1 +#define ADLS_REVID_B0 0x4 +#define ADLS_REVID_G0 0x8 +#define ADLS_REVID_C0 0xC /*Same as H0 ADLS SOC stepping*/ + +#define IS_ADLS_DISP_STEPPING(p, since, until) \ + (IS_ALDERLAKE_S(p) && \ + tgl_stepping_get(p)->disp_stepping >= (since) && \ + tgl_stepping_get(p)->disp_stepping <= (until)) + +#define IS_ADLS_GT_STEPPING(p, since, until) \ + (IS_ALDERLAKE_S(p) && \ + tgl_stepping_get(p)->gt_stepping >= (since) && \ + tgl_stepping_get(p)->gt_stepping <= (until)) + #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) @@ -1703,7 +1690,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv) #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) -#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg) +#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg) #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) @@ -1718,6 +1705,8 @@ tgl_revids_get(struct drm_i915_private *dev_priv) #define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr) +#define HAS_MSO(i915) (INTEL_GEN(i915) >= 12) + #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) @@ -1735,7 +1724,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv) #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) -#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9) +#define HAS_LSPCON(dev_priv) (IS_GEN_RANGE(dev_priv, 9, 10)) /* DPF == dynamic parity feature */ #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) @@ -1760,6 +1749,9 @@ static inline bool run_as_guest(void) return !hypervisor_is_type(X86_HYPER_NATIVE); } +#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ + IS_ALDERLAKE_S(dev_priv)) + static inline bool intel_vtd_active(void) { #ifdef CONFIG_INTEL_IOMMU @@ -1800,8 +1792,6 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv); void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); void i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct 
drm_i915_private *dev_priv); -int i915_gem_freeze(struct drm_i915_private *dev_priv); -int i915_gem_freeze_late(struct drm_i915_private *dev_priv); struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915); @@ -1954,7 +1944,7 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); -void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9b04dff5eb32..aa4490934469 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1145,47 +1145,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count); } -int i915_gem_freeze(struct drm_i915_private *dev_priv) -{ - /* Discard all purgeable objects, let userspace recover those as - * required after resuming. - */ - i915_gem_shrink_all(dev_priv); - - return 0; -} - -int i915_gem_freeze_late(struct drm_i915_private *i915) -{ - struct drm_i915_gem_object *obj; - intel_wakeref_t wakeref; - - /* - * Called just before we write the hibernation image. - * - * We need to update the domain tracking to reflect that the CPU - * will be accessing all the pages to create and restore from the - * hibernation, and so upon restoration those pages will be in the - * CPU domain. - * - * To make sure the hibernation image contains the latest state, - * we update that state just before writing out the image. 
- * - * To try and reduce the hibernation image, we manually shrink - * the objects as well, see i915_gem_freeze() - */ - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - i915_gem_shrink(i915, -1UL, NULL, ~0); - i915_gem_drain_freed_objects(i915); - - wbinvd_on_all_cpus(); - list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) - __start_cpu_write(obj); - - return 0; -} - int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3ee2f682eff6..486c9953e5b6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -28,7 +28,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { do { - if (dma_map_sg_attrs(&obj->base.dev->pdev->dev, + if (dma_map_sg_attrs(obj->base.dev->dev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC | @@ -63,8 +63,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, /* Wait a bit, in the hope it avoids the hang */ usleep_range(100, 250); - dma_unmap_sg(&i915->drm.pdev->dev, - pages->sgl, pages->nents, + dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); } diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 75c3bfc2486e..24e18219eb50 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -12,6 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_i915_private *i915 = to_i915(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); const struct sseu_dev_info *sseu = &i915->gt.info.sseu; drm_i915_getparam_t *param = data; int value; @@ -24,10 +25,10 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, /* Reject all old ums/dri params. 
*/ return -ENODEV; case I915_PARAM_CHIPSET_ID: - value = i915->drm.pdev->device; + value = pdev->device; break; case I915_PARAM_REVISION: - value = i915->drm.pdev->revision; + value = pdev->revision; break; case I915_PARAM_NUM_FENCES_AVAIL: value = i915->ggtt.num_fences; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index f962693404b7..bb181fe5d47e 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -644,7 +644,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m, static void err_print_pciid(struct drm_i915_error_state_buf *m, struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); err_printf(m, "PCI ID: 0x%04x\n", pdev->device); err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1a701367a718..67c6d71f2675 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -209,8 +209,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) if (HAS_PCH_DG1(dev_priv)) hpd->pch_hpd = hpd_sde_dg1; - else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) || - HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv)) + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) hpd->pch_hpd = hpd_icp; else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) hpd->pch_hpd = hpd_spt; @@ -795,7 +794,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) int position, vtotal; if (!crtc->active) - return -1; + return 0; vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; mode = &vblank->hwmode; @@ -2095,10 +2094,19 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, ivb_err_int_handler(dev_priv); if (de_iir & DE_EDP_PSR_INT_HSW) { - u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR); + struct intel_encoder *encoder; + + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + u32 psr_iir = intel_uncore_read(&dev_priv->uncore, + EDP_PSR_IIR); - intel_psr_irq_handler(dev_priv, psr_iir); - intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir); + intel_psr_irq_handler(intel_dp, psr_iir); + intel_uncore_write(&dev_priv->uncore, + EDP_PSR_IIR, psr_iir); + break; + } } if (de_iir & DE_AUX_CHANNEL_A_IVB) @@ -2290,7 +2298,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) { - if (IS_ROCKETLAKE(dev_priv)) + if (HAS_D12_PLANE_MINIMIZATION(dev_priv)) return RKL_DE_PIPE_IRQ_FAULT_ERRORS; else if (INTEL_GEN(dev_priv) >= 11) return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; @@ -2311,21 +2319,30 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) } if (iir & GEN8_DE_EDP_PSR) { + struct intel_encoder *encoder; u32 psr_iir; i915_reg_t iir_reg; - if (INTEL_GEN(dev_priv) >= 12) - iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder); - else - iir_reg = EDP_PSR_IIR; + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg); - intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir); + if (INTEL_GEN(dev_priv) >= 12) + iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder); + else + iir_reg = EDP_PSR_IIR; + + psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg); + intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir); 
- if (psr_iir) - found = true; + if (psr_iir) + found = true; - intel_psr_irq_handler(dev_priv, psr_iir); + intel_psr_irq_handler(intel_dp, psr_iir); + + /* prior GEN12 only have one EDP PSR */ + if (INTEL_GEN(dev_priv) < 12) + break; + } } if (!found) @@ -3023,6 +3040,24 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } +static void cnp_display_clock_wa(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + + /* + * Wa_14010685332:cnp/cmp,tgp,adp + * TODO: Clarify which platforms this applies to + * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as + * on earlier platforms and whether the workaround is also needed for runtime suspend/resume + */ + if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP || + (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) { + intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, + SBCLK_RUN_REFCLK_DIS); + intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); + } +} + static void gen8_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3046,6 +3081,8 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_reset(dev_priv); + + cnp_display_clock_wa(dev_priv); } static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) @@ -3087,15 +3124,7 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(uncore, SDE); - /* Wa_14010685332:cnp/cmp,tgp,adp */ - if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP || - (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && - INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) { - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, - SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, - SBCLK_RUN_REFCLK_DIS, 0); - } + cnp_display_clock_wa(dev_priv); } static void gen11_irq_reset(struct drm_i915_private *dev_priv) @@ -3747,9 +3776,19 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) } } +static void icp_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + u32 mask = SDE_GMBUS_ICP; + + GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); +} + static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) { - if (HAS_PCH_SPLIT(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + icp_irq_postinstall(dev_priv); + else if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_postinstall(dev_priv); gen8_gt_irq_postinstall(&dev_priv->gt); @@ -3758,13 +3797,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) gen8_master_intr_enable(dev_priv->uncore.regs); } -static void icp_irq_postinstall(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 mask = SDE_GMBUS_ICP; - - GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); -} static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { @@ -4287,6 +4319,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; else if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; else @@ -4392,7 +4426,7 @@ static void intel_irq_postinstall(struct drm_i915_private 
*dev_priv) */ int intel_irq_install(struct drm_i915_private *dev_priv) { - int irq = dev_priv->drm.pdev->irq; + int irq = to_pci_dev(dev_priv->drm.dev)->irq; int ret; /* @@ -4427,7 +4461,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { - int irq = dev_priv->drm.pdev->irq; + int irq = to_pci_dev(dev_priv->drm.dev)->irq; /* * FIXME we can get called twice during driver probe @@ -4487,5 +4521,5 @@ bool intel_irqs_enabled(struct drm_i915_private *dev_priv) void intel_synchronize_irq(struct drm_i915_private *i915) { - synchronize_irq(i915->drm.pdev->irq); + synchronize_irq(to_pci_dev(i915->drm.dev)->irq); } diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index f031966af5b7..48f47e44e848 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -54,8 +54,8 @@ struct drm_printer; param(int, enable_dc, -1, 0400) \ param(int, enable_fbc, -1, 0600) \ param(int, enable_psr, -1, 0600) \ - param(bool, psr_safest_params, false, 0600) \ - param(bool, enable_psr2_sel_fetch, false, 0600) \ + param(bool, psr_safest_params, false, 0400) \ + param(bool, enable_psr2_sel_fetch, false, 0400) \ param(int, disable_power_well, -1, 0400) \ param(int, enable_ips, 1, 0600) \ param(int, invert_brightness, 0, 0600) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 020b5f561f07..9a481ad5a8f6 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -538,7 +538,7 @@ static const struct intel_device_info vlv_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ - .has_fpga_dbg = 1, \ + .display.has_fpga_dbg = 1, \ .display.has_psr = 1, \ .display.has_psr_hw_tracking = 1, \ .display.has_dp_mst = 1, \ @@ -689,7 +689,7 @@ static const struct intel_device_info skl_gt4_info = { BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ - .has_fpga_dbg = 1, \ + .display.has_fpga_dbg = 1, \ .display.has_fbc = 1, \ .display.has_hdcp = 1, \ .display.has_psr = 1, \ @@ -897,7 +897,6 @@ static const struct intel_device_info rkl_info = { .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), - .require_force_probe = 1, .display.has_hti = 1, .display.has_psr_hw_tracking = 0, .platform_engine_mask = @@ -924,6 +923,18 @@ static const struct intel_device_info dg1_info __maybe_unused = { .ppgtt_size = 47, }; +static const struct intel_device_info adl_s_info = { + GEN12_FEATURES, + PLATFORM(INTEL_ALDERLAKE_S), + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .require_force_probe = 1, + .display.has_hti = 1, + .display.has_psr_hw_tracking = 0, + .platform_engine_mask = + BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), + .dma_mask_size = 46, +}; + #undef GEN #undef PLATFORM @@ -1000,6 +1011,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_JSL_IDS(&jsl_info), INTEL_TGL_12_IDS(&tgl_info), INTEL_RKL_IDS(&rkl_info), + INTEL_ADLS_IDS(&adl_s_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 112ba5f2ce90..41ad5a66657e 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -302,7 +302,7 @@ static u32 i915_oa_max_sample_rate = 100000; * code assumes all reports have a 
power-of-two size and ~(size - 1) can * be used as a mask to align the OA tail pointer. */ -static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { +static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_A13] = { 0, 64 }, [I915_OA_FORMAT_A29] = { 1, 128 }, [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, @@ -311,17 +311,9 @@ static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, [I915_OA_FORMAT_C4_B8] = { 7, 64 }, -}; - -static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_A12] = { 0, 64 }, [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 }, [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, - [I915_OA_FORMAT_C4_B8] = { 7, 64 }, -}; - -static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = { - [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, }; #define SAMPLE_OA_REPORT (1<<0) @@ -603,7 +595,6 @@ static int append_oa_sample(struct i915_perf_stream *stream, { int report_size = stream->oa_buffer.format_size; struct drm_i915_perf_record_header header; - u32 sample_flags = stream->sample_flags; header.type = DRM_I915_PERF_RECORD_SAMPLE; header.pad = 0; @@ -617,10 +608,8 @@ static int append_oa_sample(struct i915_perf_stream *stream, return -EFAULT; buf += sizeof(header); - if (sample_flags & SAMPLE_OA_REPORT) { - if (copy_to_user(buf, report, report_size)) - return -EFAULT; - } + if (copy_to_user(buf, report, report_size)) + return -EFAULT; (*offset) += header.size; @@ -733,11 +722,6 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, (IS_GEN(stream->perf->i915, 12) ? OAREPORT_REASON_MASK_EXTENDED : OAREPORT_REASON_MASK)); - if (reason == 0) { - if (__ratelimit(&stream->perf->spurious_report_rs)) - DRM_NOTE("Skipping spurious, invalid OA report\n"); - continue; - } ctx_id = report32[2] & stream->specific_ctx_id_mask; @@ -2682,7 +2666,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) stream->perf->ops.oa_enable(stream); - if (stream->periodic) + if (stream->sample_flags & SAMPLE_OA_REPORT) hrtimer_start(&stream->poll_check_timer, ns_to_ktime(stream->poll_oa_period), HRTIMER_MODE_REL_PINNED); @@ -2745,7 +2729,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream) { stream->perf->ops.oa_disable(stream); - if (stream->periodic) + if (stream->sample_flags & SAMPLE_OA_REPORT) hrtimer_cancel(&stream->poll_check_timer); } @@ -3028,7 +3012,7 @@ static ssize_t i915_perf_read(struct file *file, * disabled stream as an error. In particular it might otherwise lead * to a deadlock for blocking file descriptors... 
*/ - if (!stream->enabled) + if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) return -EIO; if (!(file->f_flags & O_NONBLOCK)) { @@ -3524,6 +3508,18 @@ static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent) 2ULL << exponent); } +static __always_inline bool +oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format) +{ + return test_bit(format, perf->format_mask); +} + +static __always_inline void +oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format) +{ + __set_bit(format, perf->format_mask); +} + /** * read_properties_unlocked - validate + copy userspace stream open properties * @perf: i915 perf instance @@ -3615,7 +3611,7 @@ static int read_properties_unlocked(struct i915_perf *perf, value); return -EINVAL; } - if (!perf->oa_formats[value].size) { + if (!oa_format_valid(perf, value)) { DRM_DEBUG("Unsupported OA report format %llu\n", value); return -EINVAL; @@ -4259,6 +4255,50 @@ static struct ctl_table dev_root[] = { {} }; +static void oa_init_supported_formats(struct i915_perf *perf) +{ + struct drm_i915_private *i915 = perf->i915; + enum intel_platform platform = INTEL_INFO(i915)->platform; + + switch (platform) { + case INTEL_HASWELL: + oa_format_add(perf, I915_OA_FORMAT_A13); + oa_format_add(perf, I915_OA_FORMAT_A13); + oa_format_add(perf, I915_OA_FORMAT_A29); + oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8); + oa_format_add(perf, I915_OA_FORMAT_B4_C8); + oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8); + oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16); + oa_format_add(perf, I915_OA_FORMAT_C4_B8); + break; + + case INTEL_BROADWELL: + case INTEL_CHERRYVIEW: + case INTEL_SKYLAKE: + case INTEL_BROXTON: + case INTEL_KABYLAKE: + case INTEL_GEMINILAKE: + case INTEL_COFFEELAKE: + case INTEL_COMETLAKE: + case INTEL_CANNONLAKE: + case INTEL_ICELAKE: + case INTEL_ELKHARTLAKE: + case INTEL_JASPERLAKE: + case INTEL_TIGERLAKE: + case INTEL_ROCKETLAKE: + case INTEL_DG1: + case INTEL_ALDERLAKE_S: + oa_format_add(perf, I915_OA_FORMAT_A12); + oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8); + oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8); + oa_format_add(perf, I915_OA_FORMAT_C4_B8); + break; + + default: + MISSING_CASE(platform); + } +} + /** * i915_perf_init - initialize i915-perf state on module bind * @i915: i915 device instance @@ -4274,6 +4314,7 @@ void i915_perf_init(struct drm_i915_private *i915) /* XXX const struct i915_perf_ops! 
*/ + perf->oa_formats = oa_formats; if (IS_HASWELL(i915)) { perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr; @@ -4284,8 +4325,6 @@ void i915_perf_init(struct drm_i915_private *i915) perf->ops.oa_disable = gen7_oa_disable; perf->ops.read = gen7_oa_read; perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read; - - perf->oa_formats = hsw_oa_formats; } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) { /* Note: that although we could theoretically also support the * legacy ringbuffer mode on BDW (and earlier iterations of @@ -4296,8 +4335,6 @@ void i915_perf_init(struct drm_i915_private *i915) perf->ops.read = gen8_oa_read; if (IS_GEN_RANGE(i915, 8, 9)) { - perf->oa_formats = gen8_plus_oa_formats; - perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; perf->ops.is_valid_mux_reg = @@ -4328,8 +4365,6 @@ void i915_perf_init(struct drm_i915_private *i915) perf->gen8_valid_ctx_bit = BIT(16); } } else if (IS_GEN_RANGE(i915, 10, 11)) { - perf->oa_formats = gen8_plus_oa_formats; - perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; perf->ops.is_valid_mux_reg = @@ -4352,8 +4387,6 @@ void i915_perf_init(struct drm_i915_private *i915) } perf->gen8_valid_ctx_bit = BIT(16); } else if (IS_GEN(i915, 12)) { - perf->oa_formats = gen12_oa_formats; - perf->ops.is_valid_b_counter_reg = gen12_is_valid_b_counter_addr; perf->ops.is_valid_mux_reg = @@ -4408,6 +4441,8 @@ void i915_perf_init(struct drm_i915_private *i915) 500 * 1000 /* 500us */); perf->i915 = i915; + + oa_init_supported_formats(perf); } } diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index a36a455ae336..aa14354a5120 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/uuid.h> #include <linux/wait.h> +#include <uapi/drm/i915_drm.h> #include "gt/intel_sseu.h" #include "i915_reg.h" @@ -441,6 +442,13 @@ struct i915_perf { struct i915_oa_ops ops; const struct i915_oa_format *oa_formats; + /** + * Use a format mask to store the supported formats + * for a platform. 
+ */ +#define FORMAT_MASK_SIZE DIV_ROUND_UP(I915_OA_FORMAT_MAX - 1, BITS_PER_LONG) + unsigned long format_mask[FORMAT_MASK_SIZE]; + atomic64_t noa_programming_delay; }; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 2b88c0baa1bf..41651ac255fa 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -1124,7 +1124,7 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) static bool is_igp(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); /* IGP is 0000:00:02.0 */ return pci_domain_nr(pdev->bus) == 0 && diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 598abd2f5b5f..765ba64442ab 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1874,10 +1874,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _ICL_COMBOPHY_B 0x6C000 #define _EHL_COMBOPHY_C 0x160000 #define _RKL_COMBOPHY_D 0x161000 +#define _ADL_COMBOPHY_E 0x16B000 + #define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \ _ICL_COMBOPHY_B, \ _EHL_COMBOPHY_C, \ - _RKL_COMBOPHY_D) + _RKL_COMBOPHY_D, \ + _ADL_COMBOPHY_E) /* CNL/ICL Port CL_DW registers */ #define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ @@ -2927,7 +2930,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MBUS_BBOX_CTL_S2 _MMIO(0x45044) #define HDPORT_STATE _MMIO(0x45050) -#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12) +#define HDPORT_DPLL_USED_MASK REG_GENMASK(15, 12) #define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1) #define HDPORT_ENABLED REG_BIT(0) @@ -3316,7 +3319,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS (1 << 22) -#define ILK_PABSTRETCH_DIS (1 << 21) +#define ILK_PABSTRETCH_DIS REG_BIT(21) +#define ILK_SABSTRETCH_DIS REG_BIT(20) +#define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20) +#define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0) +#define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1) +#define IVB_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 2) +#define IVB_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 3) +#define IVB_SPR_STRETCH_MAX_MASK REG_GENMASK(19, 18) +#define IVB_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 0) +#define IVB_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 1) +#define IVB_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 2) +#define IVB_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(IVB_SPR_STRETCH_MAX_MASK, 3) /* @@ -8039,6 +8053,16 @@ enum { #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 +#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27) +#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0) +#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1) +#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2) +#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3) +#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25) +#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0) +#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1) +#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2) +#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3) #define HSW_FBCQ_DIS (1 << 22) #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) #define 
CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) @@ -8225,6 +8249,7 @@ enum { #define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6) #define GEN8_LQSC_RO_PERF_DIS (1 << 27) #define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21) +#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22) /* GEN8 chicken */ #define HDC_CHICKEN0 _MMIO(0x7300) @@ -10356,7 +10381,7 @@ enum skl_power_gate { /* ICL Clocks */ #define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) -#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24)) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5)) #define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10) #define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? \ (tc_port) + 12 : \ @@ -10391,14 +10416,38 @@ enum skl_power_gate { #define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy) \ (((clk_sel) >> DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + _DG1_PHY_DPLL_MAP(phy)) +/* ADLS Clocks */ +#define _ADLS_DPCLKA_CFGCR0 0x164280 +#define _ADLS_DPCLKA_CFGCR1 0x1642BC +#define ADLS_DPCLKA_CFGCR(phy) _MMIO_PHY((phy) / 3, \ + _ADLS_DPCLKA_CFGCR0, \ + _ADLS_DPCLKA_CFGCR1) +#define ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy) (((phy) % 3) * 2) +/* ADLS DPCLKA_CFGCR0 DDI mask */ +#define ADLS_DPCLKA_DDII_SEL_MASK REG_GENMASK(5, 4) +#define ADLS_DPCLKA_DDIB_SEL_MASK REG_GENMASK(3, 2) +#define ADLS_DPCLKA_DDIA_SEL_MASK REG_GENMASK(1, 0) +/* ADLS DPCLKA_CFGCR1 DDI mask */ +#define ADLS_DPCLKA_DDIK_SEL_MASK REG_GENMASK(3, 2) +#define ADLS_DPCLKA_DDIJ_SEL_MASK REG_GENMASK(1, 0) +#define ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy) _PICK((phy), \ + ADLS_DPCLKA_DDIA_SEL_MASK, \ + ADLS_DPCLKA_DDIB_SEL_MASK, \ + ADLS_DPCLKA_DDII_SEL_MASK, \ + ADLS_DPCLKA_DDIJ_SEL_MASK, \ + ADLS_DPCLKA_DDIK_SEL_MASK) + /* CNL PLL */ #define DPLL0_ENABLE 0x46010 #define DPLL1_ENABLE 0x46014 +#define _ADLS_DPLL2_ENABLE 0x46018 +#define _ADLS_DPLL3_ENABLE 0x46030 #define PLL_ENABLE (1 << 31) #define PLL_LOCK (1 << 30) #define PLL_POWER_ENABLE (1 << 27) #define PLL_POWER_STATE (1 << 26) -#define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE) +#define CNL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ + _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE) #define TBT_PLL_ENABLE _MMIO(0x46020) @@ -10644,6 +10693,21 @@ enum skl_power_gate { _DG1_DPLL2_CFGCR1, \ _DG1_DPLL3_CFGCR1) +/* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */ +#define _ADLS_DPLL3_CFGCR0 0x1642C0 +#define _ADLS_DPLL4_CFGCR0 0x164294 +#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ + _TGL_DPLL1_CFGCR0, \ + _ADLS_DPLL4_CFGCR0, \ + _ADLS_DPLL3_CFGCR0) + +#define _ADLS_DPLL3_CFGCR1 0x1642C4 +#define _ADLS_DPLL4_CFGCR1 0x164298 +#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ + _TGL_DPLL1_CFGCR1, \ + _ADLS_DPLL4_CFGCR1, \ + _ADLS_DPLL3_CFGCR1) + #define _DKL_PHY1_BASE 0x168000 #define _DKL_PHY2_BASE 0x169000 #define _DKL_PHY3_BASE 0x16A000 @@ -11405,6 +11469,9 @@ enum skl_power_gate { #define BIG_JOINER_ENABLE (1 << 29) #define MASTER_BIG_JOINER_ENABLE (1 << 28) #define VGA_CENTERING_ENABLE (1 << 27) +#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) +#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) +#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) #define _ICL_PIPE_DSS_CTL2_PB 0x78204 #define _ICL_PIPE_DSS_CTL2_PC 0x78404 @@ -12107,6 +12174,12 @@ enum skl_power_gate { #define __GEN11_VCS2_MOCS0 0x10000 #define GEN11_MFX2_MOCS(i) 
_MMIO(__GEN11_VCS2_MOCS0 + (i) * 4) +#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008) +#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0) + +#define GEN9_SCRATCH1 _MMIO(0xb11c) +#define EVICTION_PERF_FIX_ENABLE REG_BIT(8) + #define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0) #define PMFLUSHDONE_LNICRSDROP (1 << 20) #define PMFLUSH_GAPL3UNBLOCK (1 << 21) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 63212df33c9e..0bc7b49f843c 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -85,7 +85,7 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv) void i915_save_display(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); /* Display arbitration control */ if (INTEL_GEN(dev_priv) <= 4) @@ -100,7 +100,7 @@ void i915_save_display(struct drm_i915_private *dev_priv) void i915_restore_display(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); intel_restore_swf(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index b3a24eac21f1..de0e224b56ce 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -54,14 +54,14 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { int i915_switcheroo_register(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false); } void i915_switcheroo_unregister(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); vga_switcheroo_unregister_client(pdev); } diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 70fca72f5162..172799277dd5 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -61,7 +61,7 @@ */ void intel_vgpu_detect(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u64 magic; u16 version_major; void __iomem *shared_area; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index f2d5ae59081e..8aaa0f8f6cfd 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -66,6 +66,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(TIGERLAKE), PLATFORM_NAME(ROCKETLAKE), PLATFORM_NAME(DG1), + PLATFORM_NAME(ALDERLAKE_S), }; #undef PLATFORM_NAME @@ -204,7 +205,7 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } if (IS_TIGERLAKE(i915)) { - struct pci_dev *root, *pdev = i915->drm.pdev; + struct pci_dev *root, *pdev = to_pci_dev(i915->drm.dev); root = list_first_entry(&pdev->bus->devices, typeof(*root), bus_list); @@ -249,7 +250,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv); enum pipe pipe; - if (INTEL_GEN(dev_priv) >= 10) { + /* Wa_14011765242: adl-s A0 */ + if (IS_ADLS_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0)) + for_each_pipe(dev_priv, pipe) + runtime->num_scalers[pipe] = 0; + else if (INTEL_GEN(dev_priv) >= 10) { for_each_pipe(dev_priv, pipe) runtime->num_scalers[pipe] = 2; } else if (IS_GEN(dev_priv, 9)) { @@ -260,7 +265,7 @@ 
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); - if (IS_ROCKETLAKE(dev_priv)) + if (HAS_D12_PLANE_MINIMIZATION(dev_priv)) for_each_pipe(dev_priv, pipe) runtime->num_sprites[pipe] = 4; else if (INTEL_GEN(dev_priv) >= 11) diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index cf2d528c6e9b..efd138761e14 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -84,6 +84,7 @@ enum intel_platform { INTEL_TIGERLAKE, INTEL_ROCKETLAKE, INTEL_DG1, + INTEL_ALDERLAKE_S, INTEL_MAX_PLATFORMS }; @@ -116,7 +117,6 @@ enum intel_ppgtt_type { func(has_64bit_reloc); \ func(gpu_reset_clobbers_display); \ func(has_reset_engine); \ - func(has_fpga_dbg); \ func(has_global_mocs); \ func(has_gt_uc); \ func(has_l3_dpf); \ @@ -143,6 +143,7 @@ enum intel_ppgtt_type { func(has_dsb); \ func(has_dsc); \ func(has_fbc); \ + func(has_fpga_dbg); \ func(has_gmch); \ func(has_hdcp); \ func(has_hotplug); \ diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 73d256fc6830..1e53c017c30d 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -427,6 +427,12 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv) case 0: dram_info->type = INTEL_DRAM_DDR4; break; + case 1: + dram_info->type = INTEL_DRAM_DDR5; + break; + case 2: + dram_info->type = INTEL_DRAM_LPDDR5; + break; case 3: dram_info->type = INTEL_DRAM_LPDDR4; break; diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index ecaf314d60b6..7476f0e063c6 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -121,13 +121,18 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) case INTEL_PCH_TGP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) && - !IS_ROCKETLAKE(dev_priv)); + !IS_ROCKETLAKE(dev_priv) && + !IS_GEN9_BC(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv)); return PCH_JSP; + case INTEL_PCH_ADP_DEVICE_ID_TYPE: + drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n"); + drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv)); + return PCH_ADP; default: return PCH_NONE; } @@ -156,7 +161,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv, * make an educated guess as to which PCH is really there. 
*/ - if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) + if (IS_ALDERLAKE_S(dev_priv)) + id = INTEL_PCH_ADP_DEVICE_ID_TYPE; + else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) id = INTEL_PCH_TGP_DEVICE_ID_TYPE; else if (IS_JSL_EHL(dev_priv)) id = INTEL_PCH_MCC_DEVICE_ID_TYPE; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 06d2cd50af0b..7318377503b0 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -26,6 +26,7 @@ enum intel_pch { PCH_JSP, /* Jasper Lake PCH */ PCH_MCC, /* Mule Creek Canyon PCH */ PCH_TGP, /* Tiger Lake PCH */ + PCH_ADP, /* Alder Lake PCH */ /* Fake PCHs, functionality handled on the same PCI dev */ PCH_DG1 = 1024, @@ -53,12 +54,14 @@ enum intel_pch { #define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 #define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 +#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) +#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP) #define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1) #define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP) #define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0c3e63f27c29..4c07745a6fdb 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -38,6 +38,7 @@ #include "display/intel_display_types.h" #include "display/intel_fbc.h" #include "display/intel_sprite.h" +#include "display/skl_universal_plane.h" #include "gt/intel_llc.h" @@ -3875,6 +3876,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum plane_id plane_id; + int max_level = INT_MAX; if (!intel_has_sagv(dev_priv)) return false; @@ -3891,20 +3893,31 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) int level; /* Skip this plane if it's not enabled */ - if (!wm->wm[0].plane_en) + if (!wm->wm[0].enable) continue; /* Find the highest enabled wm level for this plane */ for (level = ilk_wm_max_level(dev_priv); - !wm->wm[level].plane_en; --level) + !wm->wm[level].enable; --level) { } + /* Highest common enabled wm level for all planes */ + max_level = min(level, max_level); + } + + /* No enabled planes? */ + if (max_level == INT_MAX) + return true; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + /* - * If any of the planes on this pipe don't enable wm levels that - * incur memory latencies higher than sagv_block_time_us we - * can't enable SAGV. 
+ * All enabled planes must have enabled a common wm level that + * can tolerate memory latencies higher than sagv_block_time_us */ - if (!wm->wm[level].can_sagv) + if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) return false; } @@ -3920,12 +3933,10 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) return true; for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_ddb_entry *plane_alloc = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - if (skl_ddb_entry_size(plane_alloc) < wm->sagv_wm0.min_ddb_alloc) + if (wm->wm[0].enable && !wm->sagv.wm0.enable) return false; } @@ -4746,20 +4757,61 @@ icl_get_total_relative_data_rate(struct intel_atomic_state *state, return total_data_rate; } -static const struct skl_wm_level * -skl_plane_wm_level(const struct intel_crtc_state *crtc_state, +const struct skl_wm_level * +skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, enum plane_id plane_id, int level) { - const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; if (level == 0 && pipe_wm->use_sagv_wm) - return &wm->sagv_wm0; + return &wm->sagv.wm0; return &wm->wm[level]; } +const struct skl_wm_level * +skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm, + enum plane_id plane_id) +{ + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + if (pipe_wm->use_sagv_wm) + return &wm->sagv.trans_wm; + + return &wm->trans_wm; +} + +/* + * We only disable the watermarks for each plane if + * they exceed the ddb allocation of said plane. This + * is done so that we don't end up touching cursor + * watermarks needlessly when some other plane reduces + * our max possible watermark level. + * + * Bspec has this to say about the PLANE_WM enable bit: + * "All the watermarks at this level for all enabled + * planes must be enabled before the level will be used." + * So this is actually safe to do. + */ +static void +skl_check_wm_level(struct skl_wm_level *wm, u64 total) +{ + if (wm->min_ddb_alloc > total) + memset(wm, 0, sizeof(*wm)); +} + +static void +skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, + u64 total, u64 uv_total) +{ + if (wm->min_ddb_alloc > total || + uv_wm->min_ddb_alloc > uv_total) { + memset(wm, 0, sizeof(*wm)); + memset(uv_wm, 0, sizeof(*uv_wm)); + } +} + static int skl_allocate_plane_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) @@ -4927,45 +4979,33 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - /* - * We only disable the watermarks for each plane if - * they exceed the ddb allocation of said plane. This - * is done so that we don't end up touching cursor - * watermarks needlessly when some other plane reduces - * our max possible watermark level. - * - * Bspec has this to say about the PLANE_WM enable bit: - * "All the watermarks at this level for all enabled - * planes must be enabled before the level will be used." - * So this is actually safe to do. 
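The reworked skl_crtc_can_enable_sagv() above first computes, for each enabled plane, the highest watermark level that plane still has enabled, takes the minimum of those maxima across all planes, and only then requires that common level to tolerate the extra SAGV block time. A minimal standalone sketch of that selection logic, with the i915 state structures reduced to plain arrays (the struct and function names here are illustrative, not the driver's; max_level is assumed to be below 8):

#include <stdbool.h>
#include <limits.h>

struct wm_level { bool enable; bool can_sagv; };
struct plane_wm { struct wm_level wm[8]; };	/* wm[0] enabled == plane active */

static bool can_enable_sagv(const struct plane_wm *p, int nplanes, int max_level)
{
	int i, level, common_level = INT_MAX;

	for (i = 0; i < nplanes; i++) {
		if (!p[i].wm[0].enable)
			continue;			/* skip disabled planes */

		/* highest enabled wm level for this plane */
		for (level = max_level; !p[i].wm[level].enable; level--)
			;
		if (level < common_level)
			common_level = level;		/* keep the lowest of the maxima */
	}

	if (common_level == INT_MAX)
		return true;				/* no enabled planes at all */

	/* every active plane must tolerate SAGV latency at the common level */
	for (i = 0; i < nplanes; i++) {
		if (p[i].wm[0].enable && !p[i].wm[common_level].can_sagv)
			return false;
	}
	return true;
}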
- */ - if (wm->wm[level].min_ddb_alloc > total[plane_id] || - wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id]) - memset(&wm->wm[level], 0, sizeof(wm->wm[level])); + skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], + total[plane_id], uv_total[plane_id]); /* * Wa_1408961008:icl, ehl * Underruns with WM1+ disabled */ if (IS_GEN(dev_priv, 11) && - level == 1 && wm->wm[0].plane_en) { - wm->wm[level].plane_res_b = wm->wm[0].plane_res_b; - wm->wm[level].plane_res_l = wm->wm[0].plane_res_l; + level == 1 && wm->wm[0].enable) { + wm->wm[level].blocks = wm->wm[0].blocks; + wm->wm[level].lines = wm->wm[0].lines; wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; } } } /* - * Go back and disable the transition watermark if it turns out we - * don't have enough DDB blocks for it. + * Go back and disable the transition and SAGV watermarks + * if it turns out we don't have enough DDB blocks for them. */ for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - if (wm->trans_wm.plane_res_b >= total[plane_id]) - memset(&wm->trans_wm, 0, sizeof(wm->trans_wm)); + skl_check_wm_level(&wm->trans_wm, total[plane_id]); + skl_check_wm_level(&wm->sagv.wm0, total[plane_id]); + skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]); } return 0; @@ -5170,7 +5210,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; - u32 res_blocks, res_lines, min_ddb_alloc = 0; + u32 blocks, lines, min_ddb_alloc = 0; if (latency == 0) { /* reject it */ @@ -5216,24 +5256,22 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } } - res_blocks = fixed16_to_u32_round_up(selected_result) + 1; - res_lines = div_round_up_fixed16(selected_result, - wp->plane_blocks_per_line); + blocks = fixed16_to_u32_round_up(selected_result) + 1; + lines = div_round_up_fixed16(selected_result, + wp->plane_blocks_per_line); if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { /* Display WA #1125: skl,bxt,kbl */ if (level == 0 && wp->rc_surface) - res_blocks += - fixed16_to_u32_round_up(wp->y_tile_minimum); + blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); /* Display WA #1126: skl,bxt,kbl */ if (level >= 1 && level <= 7) { if (wp->y_tiled) { - res_blocks += - fixed16_to_u32_round_up(wp->y_tile_minimum); - res_lines += wp->y_min_scanlines; + blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); + lines += wp->y_min_scanlines; } else { - res_blocks++; + blocks++; } /* @@ -5242,8 +5280,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, * Assumption in DDB algorithm optimization for special * cases. Also covers Display WA #1125 for RC. 
*/ - if (result_prev->plane_res_b > res_blocks) - res_blocks = result_prev->plane_res_b; + if (result_prev->blocks > blocks) + blocks = result_prev->blocks; } } @@ -5251,40 +5289,39 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, if (wp->y_tiled) { int extra_lines; - if (res_lines % wp->y_min_scanlines == 0) + if (lines % wp->y_min_scanlines == 0) extra_lines = wp->y_min_scanlines; else extra_lines = wp->y_min_scanlines * 2 - - res_lines % wp->y_min_scanlines; + lines % wp->y_min_scanlines; - min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines, + min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines, wp->plane_blocks_per_line); } else { - min_ddb_alloc = res_blocks + - DIV_ROUND_UP(res_blocks, 10); + min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10); } } if (!skl_wm_has_lines(dev_priv, level)) - res_lines = 0; + lines = 0; - if (res_lines > 31) { + if (lines > 31) { /* reject it */ result->min_ddb_alloc = U16_MAX; return; } /* - * If res_lines is valid, assume we can use this watermark level + * If lines is valid, assume we can use this watermark level * for now. We'll come back and disable it after we calculate the * DDB allocation if it turns out we don't actually have enough * blocks to satisfy it. */ - result->plane_res_b = res_blocks; - result->plane_res_l = res_lines; + result->blocks = blocks; + result->lines = lines; /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ - result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1; - result->plane_en = true; + result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1; + result->enable = true; if (INTEL_GEN(dev_priv) < 12) result->can_sagv = latency >= dev_priv->sagv_block_time_us; @@ -5315,7 +5352,7 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, struct skl_plane_wm *plane_wm) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct skl_wm_level *sagv_wm = &plane_wm->sagv_wm0; + struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; struct skl_wm_level *levels = plane_wm->wm; unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us; @@ -5324,14 +5361,13 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, sagv_wm); } -static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, - const struct skl_wm_params *wp, - struct skl_plane_wm *wm) +static void skl_compute_transition_wm(struct drm_i915_private *dev_priv, + struct skl_wm_level *trans_wm, + const struct skl_wm_level *wm0, + const struct skl_wm_params *wp) { - struct drm_device *dev = crtc_state->uapi.crtc->dev; - const struct drm_i915_private *dev_priv = to_i915(dev); u16 trans_min, trans_amount, trans_y_tile_min; - u16 wm0_sel_res_b, trans_offset_b, res_blocks; + u16 wm0_blocks, trans_offset, blocks; /* Transition WM don't make any sense if ipc is disabled */ if (!dev_priv->ipc_enabled) @@ -5355,36 +5391,37 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, else trans_amount = 10; /* This is configurable amount */ - trans_offset_b = trans_min + trans_amount; + trans_offset = trans_min + trans_amount; /* * The spec asks for Selected Result Blocks for wm0 (the real value), * not Result Blocks (the integer value). Pay attention to the capital - * letters. The value wm_l0->plane_res_b is actually Result Blocks, but + * letters. 
The value wm_l0->blocks is actually Result Blocks, but * since Result Blocks is the ceiling of Selected Result Blocks plus 1, * and since we later will have to get the ceiling of the sum in the * transition watermarks calculation, we can just pretend Selected * Result Blocks is Result Blocks minus 1 and it should work for the * current platforms. */ - wm0_sel_res_b = wm->wm[0].plane_res_b - 1; + wm0_blocks = wm0->blocks - 1; if (wp->y_tiled) { trans_y_tile_min = (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); - res_blocks = max(wm0_sel_res_b, trans_y_tile_min) + - trans_offset_b; + blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset; } else { - res_blocks = wm0_sel_res_b + trans_offset_b; + blocks = wm0_blocks + trans_offset; } + blocks++; /* * Just assume we can enable the transition watermark. After * computing the DDB we'll come back and disable it if that * assumption turns out to be false. */ - wm->trans_wm.plane_res_b = res_blocks + 1; - wm->trans_wm.plane_en = true; + trans_wm->blocks = blocks; + trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1); + trans_wm->enable = true; } static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, @@ -5404,10 +5441,15 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, skl_compute_wm_levels(crtc_state, &wm_params, wm->wm); - if (INTEL_GEN(dev_priv) >= 12) + skl_compute_transition_wm(dev_priv, &wm->trans_wm, + &wm->wm[0], &wm_params); + + if (INTEL_GEN(dev_priv) >= 12) { tgl_compute_sagv_wm(crtc_state, &wm_params, wm); - skl_compute_transition_wm(crtc_state, &wm_params, wm); + skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm, + &wm->sagv.wm0, &wm_params); + } return 0; } @@ -5554,12 +5596,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, { u32 val = 0; - if (level->plane_en) + if (level->enable) val |= PLANE_WM_EN; if (level->ignore_lines) val |= PLANE_WM_IGNORE_LINES; - val |= level->plane_res_b; - val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; + val |= level->blocks; + val |= level->lines << PLANE_WM_LINES_SHIFT; intel_de_write_fw(dev_priv, reg, val); } @@ -5571,23 +5613,19 @@ void skl_write_plane_wm(struct intel_plane *plane, int level, max_level = ilk_wm_max_level(dev_priv); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; + const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; const struct skl_ddb_entry *ddb_y = &crtc_state->wm.skl.plane_ddb_y[plane_id]; const struct skl_ddb_entry *ddb_uv = &crtc_state->wm.skl.plane_ddb_uv[plane_id]; - for (level = 0; level <= max_level; level++) { - const struct skl_wm_level *wm_level; - - wm_level = skl_plane_wm_level(crtc_state, plane_id, level); - + for (level = 0; level <= max_level; level++) skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), - wm_level); - } + skl_plane_wm_level(pipe_wm, plane_id, level)); + skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), - &wm->trans_wm); + skl_plane_trans_wm(pipe_wm, plane_id)); if (INTEL_GEN(dev_priv) >= 11) { skl_ddb_entry_write(dev_priv, @@ -5611,20 +5649,16 @@ void skl_write_cursor_wm(struct intel_plane *plane, int level, max_level = ilk_wm_max_level(dev_priv); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; + const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; 
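The transition watermark arithmetic above reduces to a small formula: take the level-0 result minus one (the Selected Result Blocks approximation described in the comment), take the max against twice the Y-tile minimum when the surface is Y-tiled, add the platform transition offset, and round up by one block. A self-contained sketch of that calculation follows; the numbers in the usage comment are assumed for illustration only, and trans_min in particular is platform dependent and not taken from this patch:

#include <stdbool.h>
#include <stdint.h>

static uint16_t trans_wm_blocks(uint16_t wm0_result_blocks, bool y_tiled,
				uint16_t trans_y_tile_min, /* already 2 * y_tile_minimum */
				uint16_t trans_min, uint16_t trans_amount)
{
	uint16_t trans_offset = trans_min + trans_amount;
	uint16_t wm0_blocks = wm0_result_blocks - 1; /* Result Blocks -> Selected Result Blocks */
	uint16_t blocks;

	if (y_tiled)
		blocks = (wm0_blocks > trans_y_tile_min ? wm0_blocks : trans_y_tile_min)
			 + trans_offset;
	else
		blocks = wm0_blocks + trans_offset;

	return blocks + 1;
}

/*
 * Example with assumed inputs: wm0 result = 31 blocks, linear surface,
 * trans_min = 14, trans_amount = 10 -> (31 - 1) + 24 + 1 = 55 blocks,
 * and min_ddb_alloc then becomes max(wm0 min_ddb_alloc, 55 + 1).
 */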
const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb_y[plane_id]; - for (level = 0; level <= max_level; level++) { - const struct skl_wm_level *wm_level; - - wm_level = skl_plane_wm_level(crtc_state, plane_id, level); - + for (level = 0; level <= max_level; level++) skl_write_wm_level(dev_priv, CUR_WM(pipe, level), - wm_level); - } - skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); + skl_plane_wm_level(pipe_wm, plane_id, level)); + + skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), + skl_plane_trans_wm(pipe_wm, plane_id)); skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb); } @@ -5632,10 +5666,10 @@ void skl_write_cursor_wm(struct intel_plane *plane, bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2) { - return l1->plane_en == l2->plane_en && + return l1->enable == l2->enable && l1->ignore_lines == l2->ignore_lines && - l1->plane_res_l == l2->plane_res_l && - l1->plane_res_b == l2->plane_res_b; + l1->lines == l2->lines && + l1->blocks == l2->blocks; } static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, @@ -5654,7 +5688,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, return false; } - return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); + return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) && + skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) && + skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm); } static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, @@ -5884,85 +5920,114 @@ skl_print_wm_changes(struct intel_atomic_state *state) continue; drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm" - " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm\n", + "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), - enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), - enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), - enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en), - enast(old_wm->trans_wm.plane_en), - enast(old_wm->sagv_wm0.plane_en), - enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), - enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), - enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), - enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), - enast(new_wm->trans_wm.plane_en), - enast(new_wm->sagv_wm0.plane_en)); + enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), + enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), + enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), + enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), + enast(old_wm->trans_wm.enable), + enast(old_wm->sagv.wm0.enable), + enast(old_wm->sagv.trans_wm.enable), + enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), + enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), + enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), + enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), + enast(new_wm->trans_wm.enable), + enast(new_wm->sagv.wm0.enable), + enast(new_wm->sagv.trans_wm.enable)); drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" - " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", + "[PLANE:%d:%s] 
lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" + " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, - enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, - enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, - enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, - enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, - enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, - enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, - enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, - enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, - enast(old_wm->sagv_wm0.ignore_lines), old_wm->sagv_wm0.plane_res_l, - - enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, - enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, - enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, - enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, - enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, - enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, - enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, - enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, - enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l, - enast(new_wm->sagv_wm0.ignore_lines), new_wm->sagv_wm0.plane_res_l); + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, + enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, + enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, + enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, + enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines, + enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, + enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane->base.base.id, plane->base.name, - old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, - old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, - old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, - old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, - old_wm->trans_wm.plane_res_b, - old_wm->sagv_wm0.plane_res_b, - new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, - new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, - 
new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, - new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, - new_wm->trans_wm.plane_res_b, - new_wm->sagv_wm0.plane_res_b); + old_wm->wm[0].blocks, old_wm->wm[1].blocks, + old_wm->wm[2].blocks, old_wm->wm[3].blocks, + old_wm->wm[4].blocks, old_wm->wm[5].blocks, + old_wm->wm[6].blocks, old_wm->wm[7].blocks, + old_wm->trans_wm.blocks, + old_wm->sagv.wm0.blocks, + old_wm->sagv.trans_wm.blocks, + new_wm->wm[0].blocks, new_wm->wm[1].blocks, + new_wm->wm[2].blocks, new_wm->wm[3].blocks, + new_wm->wm[4].blocks, new_wm->wm[5].blocks, + new_wm->wm[6].blocks, new_wm->wm[7].blocks, + new_wm->trans_wm.blocks, + new_wm->sagv.wm0.blocks, + new_wm->sagv.trans_wm.blocks); drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc, - old_wm->sagv_wm0.min_ddb_alloc, + old_wm->sagv.wm0.min_ddb_alloc, + old_wm->sagv.trans_wm.min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, - new_wm->sagv_wm0.min_ddb_alloc); + new_wm->sagv.wm0.min_ddb_alloc, + new_wm->sagv.trans_wm.min_ddb_alloc); } } } +static bool skl_plane_selected_wm_equals(struct intel_plane *plane, + const struct skl_pipe_wm *old_pipe_wm, + const struct skl_pipe_wm *new_pipe_wm) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + int level, max_level = ilk_wm_max_level(i915); + + for (level = 0; level <= max_level; level++) { + /* + * We don't check uv_wm as the hardware doesn't actually + * use it. It only gets used for calculating the required + * ddb allocation. + */ + if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level), + skl_plane_wm_level(new_pipe_wm, plane->id, level))) + return false; + } + + return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id), + skl_plane_trans_wm(new_pipe_wm, plane->id)); +} + /* * To make sure the cursor watermark registers are always consistent * with our computed state the following scenario needs special @@ -6008,9 +6073,9 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, * with the software state.
*/ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && - skl_plane_wm_equals(dev_priv, - &old_crtc_state->wm.skl.optimal.planes[plane_id], - &new_crtc_state->wm.skl.optimal.planes[plane_id])) + skl_plane_selected_wm_equals(plane, + &old_crtc_state->wm.skl.optimal, + &new_crtc_state->wm.skl.optimal)) continue; plane_state = intel_atomic_get_plane_state(state, plane); @@ -6141,10 +6206,10 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state, static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) { - level->plane_en = val & PLANE_WM_EN; + level->enable = val & PLANE_WM_EN; level->ignore_lines = val & PLANE_WM_IGNORE_LINES; - level->plane_res_b = val & PLANE_WM_BLOCKS_MASK; - level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) & + level->blocks = val & PLANE_WM_BLOCKS_MASK; + level->lines = (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK; } @@ -6171,19 +6236,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, skl_wm_level_from_reg_val(val, &wm->wm[level]); } - if (INTEL_GEN(dev_priv) >= 12) - wm->sagv_wm0 = wm->wm[0]; - if (plane_id != PLANE_CURSOR) val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id)); else val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe)); skl_wm_level_from_reg_val(val, &wm->trans_wm); - } - if (!crtc->active) - return; + if (INTEL_GEN(dev_priv) >= 12) { + wm->sagv.wm0 = wm->wm[0]; + wm->sagv.trans_wm = wm->trans_wm; + } + } } void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) @@ -7072,7 +7136,7 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_1409825376:tgl (pre-prod)*/ - if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1)) + if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_B1)) intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | TGL_VRH_GATING_DIS); @@ -7245,11 +7309,16 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); - /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ for_each_pipe(dev_priv, pipe) { + /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) | BDW_DPRS_MASK_VBLANK_SRD); + + /* Undocumented but fixes async flip + VT-d corruption */ + if (intel_vtd_active()) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); } /* WaVSRefCountFullforceMissDisable:bdw */ @@ -7285,11 +7354,20 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { + enum pipe pipe; + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | HSW_FBCQ_DIS); + for_each_pipe(dev_priv, pipe) { + /* Undocumented but fixes async flip + VT-d corruption */ + if (intel_vtd_active()) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); + } + /* This is required by WaCatErrorRejectionIssue:hsw */ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | diff --git a/drivers/gpu/drm/i915/intel_pm.h 
b/drivers/gpu/drm/i915/intel_pm.h index 97550cf0b6df..669c8d505677 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -52,6 +52,11 @@ bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); void intel_sagv_post_plane_update(struct intel_atomic_state *state); +const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, + enum plane_id plane_id, + int level); +const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm, + enum plane_id plane_id); bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2); bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 153ca9e65382..4970ef0843dc 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -625,7 +625,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) { struct drm_i915_private *i915 = container_of(rpm, struct drm_i915_private, runtime_pm); - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct device *kdev = &pdev->dev; rpm->kdev = kdev; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9ac501bcfdad..661b50191f2b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -465,6 +465,22 @@ fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore) if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM))) return false; + /* + * Bugs in PCI programming (or failing hardware) can occasionally cause + * us to lose access to the MMIO BAR. When this happens, register + * reads will come back with 0xFFFFFFFF for every register and things + * go bad very quickly. Let's try to detect that special case and at + * least try to print a more informative message about what has + * happened. + * + * During normal operation the FPGA_DBG register has several unused + * bits that will always read back as 0's so we can use them as canaries + * to recognize when MMIO accesses are just busted. 
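The comment above describes a canary pattern that the check added just below implements: FPGA_DBG has bits that are always supposed to read back as zero, so a read returning all ones can only mean the MMIO BAR itself has gone away rather than a genuine register value. The same idea in a driver-agnostic sketch (the register offset and read helper here are placeholders, not i915 API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder 32-bit MMIO read of a register known to have always-zero bits. */
static uint32_t mmio_read32(const volatile uint32_t *regs, unsigned int offset)
{
	return regs[offset / 4];
}

static bool mmio_bar_lost(const volatile uint32_t *regs, unsigned int canary_offset)
{
	uint32_t val = mmio_read32(regs, canary_offset);

	if (val == UINT32_MAX) {	/* all ones: reads no longer reach the device */
		fprintf(stderr, "Lost access to MMIO BAR; registers read back as 0xFFFFFFFF\n");
		return true;
	}
	return false;
}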
+ */ + if (unlikely(dbg == ~0)) + drm_err(&uncore->i915->drm, + "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n"); + __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); return true; @@ -1780,7 +1796,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, static int uncore_mmio_setup(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); int mmio_bar; int mmio_size; @@ -1812,7 +1828,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) static void uncore_mmio_cleanup(struct intel_uncore *uncore) { - struct pci_dev *pdev = uncore->i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev); pci_iounmap(pdev, uncore->regs); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 412e21604a05..dc394fb7ccfa 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -8,6 +8,7 @@ #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +#include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 70e07e9b78c2..c1adea8765a9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1881,7 +1881,7 @@ static int igt_cs_tlb(void *arg) vma = i915_vma_instance(out, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto out_put_batch; + goto out_put_out; } err = i915_vma_pin(vma, 0, 0, diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 7270fc8ca801..5c7ae40bba63 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -74,7 +74,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) ppgtt->vm.i915 = i915; ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); - ppgtt->vm.dma = &i915->drm.pdev->dev; + ppgtt->vm.dma = i915->drm.dev; i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c index 03ba88f7f995..044d3bdf313c 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-plane.c +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -6,7 +6,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> #include "dcss-dev.h" @@ -137,11 +137,13 @@ static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt) } static int dcss_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct dcss_plane *dcss_plane = to_dcss_plane(plane); struct dcss_dev *dcss = plane->dev->dev_private; - struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *fb = new_plane_state->fb; bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY; struct drm_gem_cma_object *cma_obj; struct drm_crtc_state *crtc_state; @@ -149,20 +151,20 @@ static int dcss_plane_atomic_check(struct drm_plane *plane, int min, max; int ret; - if (!fb || !state->crtc) + if (!fb || 
!new_plane_state->crtc) return 0; cma_obj = drm_fb_cma_get_gem_obj(fb, 0); WARN_ON(!cma_obj); - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_plane_state->crtc); hdisplay = crtc_state->adjusted_mode.hdisplay; vdisplay = crtc_state->adjusted_mode.vdisplay; - if (!dcss_plane_is_source_size_allowed(state->src_w >> 16, - state->src_h >> 16, + if (!dcss_plane_is_source_size_allowed(new_plane_state->src_w >> 16, + new_plane_state->src_h >> 16, fb->format->format)) { DRM_DEBUG_KMS("Source plane size is not allowed!\n"); return -EINVAL; @@ -171,26 +173,26 @@ static int dcss_plane_atomic_check(struct drm_plane *plane, dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num, &min, &max); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, min, max, !is_primary_plane, false); if (ret) return ret; - if (!state->visible) + if (!new_plane_state->visible) return 0; if (!dcss_plane_can_rotate(fb->format, !!(fb->flags & DRM_MODE_FB_MODIFIERS), fb->modifier, - state->rotation)) { + new_plane_state->rotation)) { DRM_DEBUG_KMS("requested rotation is not allowed!\n"); return -EINVAL; } - if ((state->crtc_x < 0 || state->crtc_y < 0 || - state->crtc_x + state->crtc_w > hdisplay || - state->crtc_y + state->crtc_h > vdisplay) && + if ((new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 || + new_plane_state->crtc_x + new_plane_state->crtc_w > hdisplay || + new_plane_state->crtc_y + new_plane_state->crtc_h > vdisplay) && !dcss_plane_fb_is_linear(fb)) { DRM_DEBUG_KMS("requested cropping operation is not allowed!\n"); return -EINVAL; @@ -262,12 +264,15 @@ static bool dcss_plane_needs_setup(struct drm_plane_state *state, } static void dcss_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct dcss_plane *dcss_plane = to_dcss_plane(plane); struct dcss_dev *dcss = plane->dev->dev_private; - struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *fb = new_state->fb; struct drm_crtc_state *crtc_state; bool modifiers_present; u32 src_w, src_h, dst_w, dst_h; @@ -275,14 +280,14 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, bool enable = true; bool is_rotation_90_or_270; - if (!fb || !state->crtc || !state->visible) + if (!fb || !new_state->crtc || !new_state->visible) return; - crtc_state = state->crtc->state; + crtc_state = new_state->crtc->state; modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS); if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) && - !dcss_plane_needs_setup(state, old_state)) { + !dcss_plane_needs_setup(new_state, old_state)) { dcss_plane_atomic_set_base(dcss_plane); return; } @@ -302,23 +307,24 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR) modifiers_present = false; - dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format, + dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, + new_state->fb->format, modifiers_present ? 
fb->modifier : DRM_FORMAT_MOD_LINEAR); dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h); dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num, - state->rotation); + new_state->rotation); dcss_plane_atomic_set_base(dcss_plane); - is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 | + is_rotation_90_or_270 = new_state->rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270); dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num, - state->scaling_filter); + new_state->scaling_filter); dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num, - state->fb->format, + new_state->fb->format, is_rotation_90_or_270 ? src_h : src_w, is_rotation_90_or_270 ? src_w : src_h, dst_w, dst_h, @@ -327,9 +333,9 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, dst.x1, dst.y1, dst_w, dst_h); dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num, - fb->format, state->alpha >> 8); + fb->format, new_state->alpha >> 8); - if (!dcss_plane->ch_num && (state->alpha >> 8) == 0) + if (!dcss_plane->ch_num && (new_state->alpha >> 8) == 0) enable = false; dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable); @@ -343,7 +349,7 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, } static void dcss_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct dcss_plane *dcss_plane = to_dcss_plane(plane); struct dcss_dev *dcss = plane->dev->dev_private; @@ -355,7 +361,7 @@ static void dcss_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = dcss_plane_atomic_check, .atomic_update = dcss_plane_atomic_update, .atomic_disable = dcss_plane_atomic_disable, diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 075508051b5f..fa5009705365 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -9,8 +9,8 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> @@ -337,12 +337,15 @@ static const struct drm_plane_funcs ipu_plane_funcs = { }; static int ipu_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_plane_state *old_state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct drm_crtc_state *crtc_state; struct device *dev = plane->dev->dev; - struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *fb = new_state->fb; struct drm_framebuffer *old_fb = old_state->fb; unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba; bool can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); @@ -352,15 +355,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (!fb) return 0; - if (WARN_ON(!state->crtc)) + if (WARN_ON(!new_state->crtc)) return -EINVAL; crtc_state = - drm_atomic_get_existing_crtc_state(state->state, state->crtc); + drm_atomic_get_existing_crtc_state(state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; - ret = drm_atomic_helper_check_plane_state(state, 
crtc_state, + ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, can_position, true); @@ -374,7 +378,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: /* full plane minimum width is 13 pixels */ - if (drm_rect_width(&state->dst) < 13) + if (drm_rect_width(&new_state->dst) < 13) return -EINVAL; break; case DRM_PLANE_TYPE_OVERLAY: @@ -384,7 +388,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - if (drm_rect_height(&state->dst) < 2) + if (drm_rect_height(&new_state->dst) < 2) return -EINVAL; /* @@ -395,12 +399,12 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, * callback. */ if (old_fb && - (drm_rect_width(&state->dst) != drm_rect_width(&old_state->dst) || - drm_rect_height(&state->dst) != drm_rect_height(&old_state->dst) || + (drm_rect_width(&new_state->dst) != drm_rect_width(&old_state->dst) || + drm_rect_height(&new_state->dst) != drm_rect_height(&old_state->dst) || fb->format != old_fb->format)) crtc_state->mode_changed = true; - eba = drm_plane_state_to_eba(state, 0); + eba = drm_plane_state_to_eba(new_state, 0); if (eba & 0x7) return -EINVAL; @@ -426,7 +430,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, * - Only EBA may be changed while scanout is active * - The strides of U and V planes must be identical. */ - vbo = drm_plane_state_to_vbo(state); + vbo = drm_plane_state_to_vbo(new_state); if (vbo & 0x7 || vbo > 0xfffff8) return -EINVAL; @@ -443,7 +447,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, fallthrough; case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: - ubo = drm_plane_state_to_ubo(state); + ubo = drm_plane_state_to_ubo(new_state); if (ubo & 0x7 || ubo > 0xfffff8) return -EINVAL; @@ -464,8 +468,8 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, * The x/y offsets must be even in case of horizontal/vertical * chroma subsampling. 
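The dcss-plane and ipuv3-plane hunks around this point all follow the same mechanical conversion: the plane helper callbacks now receive the full struct drm_atomic_state and look up their own old and new plane state instead of being handed a drm_plane_state directly. A skeleton of the converted .atomic_check shape, using only the DRM helpers already visible in the diff; the scaling limits and positioning flags passed at the end are illustrative, and the function name is hypothetical:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	/* Nothing to validate while the plane has no framebuffer or CRTC. */
	if (!new_state->fb || !new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/* Clip against the CRTC; no scaling, no positioning off the primary. */
	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   false, true);
}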
*/ - if (((state->src.x1 >> 16) & (fb->format->hsub - 1)) || - ((state->src.y1 >> 16) & (fb->format->vsub - 1))) + if (((new_state->src.x1 >> 16) & (fb->format->hsub - 1)) || + ((new_state->src.y1 >> 16) & (fb->format->vsub - 1))) return -EINVAL; break; case DRM_FORMAT_RGB565_A8: @@ -474,7 +478,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, case DRM_FORMAT_BGR888_A8: case DRM_FORMAT_RGBX8888_A8: case DRM_FORMAT_BGRX8888_A8: - alpha_eba = drm_plane_state_to_eba(state, 1); + alpha_eba = drm_plane_state_to_eba(new_state, 1); if (alpha_eba & 0x7) return -EINVAL; @@ -490,7 +494,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, } static void ipu_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); @@ -535,14 +539,17 @@ static void ipu_calculate_bursts(u32 width, u32 cpp, u32 stride, } static void ipu_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct ipu_plane *ipu_plane = to_ipu_plane(plane); - struct drm_plane_state *state = plane->state; - struct ipu_plane_state *ipu_state = to_ipu_plane_state(state); - struct drm_crtc_state *crtc_state = state->crtc->state; - struct drm_framebuffer *fb = state->fb; - struct drm_rect *dst = &state->dst; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct ipu_plane_state *ipu_state = to_ipu_plane_state(new_state); + struct drm_crtc_state *crtc_state = new_state->crtc->state; + struct drm_framebuffer *fb = new_state->fb; + struct drm_rect *dst = &new_state->dst; unsigned long eba, ubo, vbo; unsigned long alpha_eba = 0; enum ipu_color_space ics; @@ -557,7 +564,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, switch (ipu_plane->dp_flow) { case IPU_DP_FLOW_SYNC_BG: - if (state->normalized_zpos == 1) { + if (new_state->normalized_zpos == 1) { ipu_dp_set_global_alpha(ipu_plane->dp, !fb->format->has_alpha, 0xff, true); @@ -566,7 +573,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, } break; case IPU_DP_FLOW_SYNC_FG: - if (state->normalized_zpos == 1) { + if (new_state->normalized_zpos == 1) { ipu_dp_set_global_alpha(ipu_plane->dp, !fb->format->has_alpha, 0xff, false); @@ -574,7 +581,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; } - eba = drm_plane_state_to_eba(state, 0); + eba = drm_plane_state_to_eba(new_state, 0); /* * Configure PRG channel and attached PRE, this changes the EBA to an @@ -583,8 +590,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, if (ipu_state->use_pre) { axi_id = ipu_chan_assign_axi_id(ipu_plane->dma); ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16, + drm_rect_width(&new_state->src) >> 16, + drm_rect_height(&new_state->src) >> 16, fb->pitches[0], fb->format->format, fb->modifier, &eba); } @@ -618,8 +625,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst)); - width = drm_rect_width(&state->src) >> 16; - height = drm_rect_height(&state->src) >> 16; + width = drm_rect_width(&new_state->src) >> 16; + height = drm_rect_height(&new_state->src) >> 16; info = drm_format_info(fb->format->format); ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0], &burstsize, &num_bursts); @@ 
-641,8 +648,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case DRM_FORMAT_YVU444: - ubo = drm_plane_state_to_ubo(state); - vbo = drm_plane_state_to_vbo(state); + ubo = drm_plane_state_to_ubo(new_state); + vbo = drm_plane_state_to_vbo(new_state); if (fb->format->format == DRM_FORMAT_YVU420 || fb->format->format == DRM_FORMAT_YVU422 || fb->format->format == DRM_FORMAT_YVU444) @@ -653,18 +660,18 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, dev_dbg(ipu_plane->base.dev->dev, "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo, - state->src.x1 >> 16, state->src.y1 >> 16); + new_state->src.x1 >> 16, new_state->src.y1 >> 16); break; case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: - ubo = drm_plane_state_to_ubo(state); + ubo = drm_plane_state_to_ubo(new_state); ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, fb->pitches[1], ubo, ubo); dev_dbg(ipu_plane->base.dev->dev, "phy = %lu %lu, x = %d, y = %d", eba, ubo, - state->src.x1 >> 16, state->src.y1 >> 16); + new_state->src.x1 >> 16, new_state->src.y1 >> 16); break; case DRM_FORMAT_RGB565_A8: case DRM_FORMAT_BGR565_A8: @@ -672,18 +679,19 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, case DRM_FORMAT_BGR888_A8: case DRM_FORMAT_RGBX8888_A8: case DRM_FORMAT_BGRX8888_A8: - alpha_eba = drm_plane_state_to_eba(state, 1); + alpha_eba = drm_plane_state_to_eba(new_state, 1); num_bursts = 0; dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d", - eba, alpha_eba, state->src.x1 >> 16, state->src.y1 >> 16); + eba, alpha_eba, new_state->src.x1 >> 16, + new_state->src.y1 >> 16); ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16); ipu_cpmem_zero(ipu_plane->alpha_ch); ipu_cpmem_set_resolution(ipu_plane->alpha_ch, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16); + drm_rect_width(&new_state->src) >> 16, + drm_rect_height(&new_state->src) >> 16); ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8); ipu_cpmem_set_high_priority(ipu_plane->alpha_ch); ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1); @@ -694,7 +702,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; default: dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d", - eba, state->src.x1 >> 16, state->src.y1 >> 16); + eba, new_state->src.x1 >> 16, new_state->src.y1 >> 16); break; } ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba); @@ -704,7 +712,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = ipu_plane_atomic_check, .atomic_disable = ipu_plane_atomic_disable, .atomic_update = ipu_plane_atomic_update, diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 7bb31fbee29d..54ee2cb61f3c 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -28,6 +28,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> #include <drm/drm_managed.h> @@ -359,21 +360,26 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc, } static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_plane_state = 
drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct ingenic_drm *priv = drm_device_get_priv(plane->dev); struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc = state->crtc ?: plane->state->crtc; + struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; int ret; if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + crtc); if (WARN_ON(!crtc_state)) return -EINVAL; - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, priv->soc_info->has_osd, @@ -386,9 +392,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, * Note that state->src_* are in 16.16 fixed-point format. */ if (!priv->soc_info->has_osd && - (state->src_x != 0 || - (state->src_w >> 16) != state->crtc_w || - (state->src_h >> 16) != state->crtc_h)) + (new_plane_state->src_x != 0 || + (new_plane_state->src_w >> 16) != new_plane_state->crtc_w || + (new_plane_state->src_h >> 16) != new_plane_state->crtc_h)) return -EINVAL; /* @@ -396,12 +402,12 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, * its position, size or depth. */ if (priv->soc_info->has_osd && - (!plane->state->fb || !state->fb || - plane->state->crtc_x != state->crtc_x || - plane->state->crtc_y != state->crtc_y || - plane->state->crtc_w != state->crtc_w || - plane->state->crtc_h != state->crtc_h || - plane->state->fb->format->format != state->fb->format->format)) + (!old_plane_state->fb || !new_plane_state->fb || + old_plane_state->crtc_x != new_plane_state->crtc_x || + old_plane_state->crtc_y != new_plane_state->crtc_y || + old_plane_state->crtc_w != new_plane_state->crtc_w || + old_plane_state->crtc_h != new_plane_state->crtc_h || + old_plane_state->fb->format->format != new_plane_state->fb->format->format)) crtc_state->mode_changed = true; return 0; @@ -438,7 +444,7 @@ void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane) } static void ingenic_drm_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_device_get_priv(plane->dev); @@ -536,23 +542,24 @@ static void ingenic_drm_update_palette(struct ingenic_drm *priv, } static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_device_get_priv(plane->dev); - struct drm_plane_state *state = plane->state; + struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; struct ingenic_dma_hwdesc *hwdesc; unsigned int width, height, cpp, offset; dma_addr_t addr; u32 fourcc; - if (state && state->fb) { - crtc_state = state->crtc->state; + if (newstate && newstate->fb) { + crtc_state = newstate->crtc->state; - addr = drm_fb_cma_get_gem_addr(state->fb, state, 0); - width = state->src_w >> 16; - height = state->src_h >> 16; - cpp = state->fb->format->cpp[0]; + addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0); + width = newstate->src_w >> 16; + height = newstate->src_h >> 16; + cpp = newstate->fb->format->cpp[0]; if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY) hwdesc = &priv->dma_hwdescs->hwdesc_f0; @@ -563,7 +570,7 @@ static void 
ingenic_drm_plane_atomic_update(struct drm_plane *plane, hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4); if (drm_atomic_crtc_needs_modeset(crtc_state)) { - fourcc = state->fb->format->format; + fourcc = newstate->fb->format->format; ingenic_drm_plane_config(priv->dev, plane, fourcc); @@ -780,7 +787,7 @@ static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = { .atomic_update = ingenic_drm_plane_atomic_update, .atomic_check = ingenic_drm_plane_atomic_check, .atomic_disable = ingenic_drm_plane_atomic_disable, - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = { diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index e52777ef85fd..5ae6adab8306 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -23,7 +23,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> #include <drm/drm_property.h> @@ -282,19 +282,20 @@ static inline bool osd_changed(struct drm_plane_state *state, } static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane); - struct drm_plane_state *state = plane->state; + struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, + plane); const struct drm_format_info *finfo; u32 ctrl, stride = 0, coef_index = 0, format = 0; bool needs_modeset, upscaling_w, upscaling_h; int err; - if (!state || !state->fb) + if (!newstate || !newstate->fb) return; - finfo = drm_format_info(state->fb->format->format); + finfo = drm_format_info(newstate->fb->format->format); if (!ipu->clk_enabled) { err = clk_enable(ipu->clk); @@ -307,7 +308,7 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, } /* Reset all the registers if needed */ - needs_modeset = drm_atomic_crtc_needs_modeset(state->crtc->state); + needs_modeset = drm_atomic_crtc_needs_modeset(newstate->crtc->state); if (needs_modeset) { regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RST); @@ -317,11 +318,13 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, } /* New addresses will be committed in vblank handler... 
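The imx/ipuv3 and ingenic hunks above are part of the tree-wide conversion of the plane helper callbacks from taking a struct drm_plane_state to taking the whole struct drm_atomic_state. A minimal sketch of the resulting pattern, with a hypothetical example_plane_atomic_update (not from this patch); each callback now looks up its own old and new plane state:

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	/* Old/new per-plane states are fetched from the global atomic state. */
	struct drm_plane_state *old_pstate =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_pstate =
		drm_atomic_get_new_plane_state(state, plane);

	if (!new_pstate || !new_pstate->fb)
		return;

	/* Program the hardware from new_pstate (fb, src/dst rectangles, ...). */
	(void)old_pstate;
}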
*/ - ipu->addr_y = drm_fb_cma_get_gem_addr(state->fb, state, 0); + ipu->addr_y = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0); if (finfo->num_planes > 1) - ipu->addr_u = drm_fb_cma_get_gem_addr(state->fb, state, 1); + ipu->addr_u = drm_fb_cma_get_gem_addr(newstate->fb, newstate, + 1); if (finfo->num_planes > 2) - ipu->addr_v = drm_fb_cma_get_gem_addr(state->fb, state, 2); + ipu->addr_v = drm_fb_cma_get_gem_addr(newstate->fb, newstate, + 2); if (!needs_modeset) return; @@ -338,21 +341,21 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, /* Set the input height/width/strides */ if (finfo->num_planes > 2) - stride = ((state->src_w >> 16) * finfo->cpp[2] / finfo->hsub) + stride = ((newstate->src_w >> 16) * finfo->cpp[2] / finfo->hsub) << JZ_IPU_UV_STRIDE_V_LSB; if (finfo->num_planes > 1) - stride |= ((state->src_w >> 16) * finfo->cpp[1] / finfo->hsub) + stride |= ((newstate->src_w >> 16) * finfo->cpp[1] / finfo->hsub) << JZ_IPU_UV_STRIDE_U_LSB; regmap_write(ipu->map, JZ_REG_IPU_UV_STRIDE, stride); - stride = ((state->src_w >> 16) * finfo->cpp[0]) << JZ_IPU_Y_STRIDE_Y_LSB; + stride = ((newstate->src_w >> 16) * finfo->cpp[0]) << JZ_IPU_Y_STRIDE_Y_LSB; regmap_write(ipu->map, JZ_REG_IPU_Y_STRIDE, stride); regmap_write(ipu->map, JZ_REG_IPU_IN_GS, (stride << JZ_IPU_IN_GS_W_LSB) | - ((state->src_h >> 16) << JZ_IPU_IN_GS_H_LSB)); + ((newstate->src_h >> 16) << JZ_IPU_IN_GS_H_LSB)); switch (finfo->format) { case DRM_FORMAT_XRGB1555: @@ -421,9 +424,9 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, /* Set the output height/width/stride */ regmap_write(ipu->map, JZ_REG_IPU_OUT_GS, - ((state->crtc_w * 4) << JZ_IPU_OUT_GS_W_LSB) - | state->crtc_h << JZ_IPU_OUT_GS_H_LSB); - regmap_write(ipu->map, JZ_REG_IPU_OUT_STRIDE, state->crtc_w * 4); + ((newstate->crtc_w * 4) << JZ_IPU_OUT_GS_W_LSB) + | newstate->crtc_h << JZ_IPU_OUT_GS_H_LSB); + regmap_write(ipu->map, JZ_REG_IPU_OUT_STRIDE, newstate->crtc_w * 4); if (finfo->is_yuv) { regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_CSC_EN); @@ -508,55 +511,59 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, JZ_IPU_CTRL_RUN | JZ_IPU_CTRL_FM_IRQ_EN); dev_dbg(ipu->dev, "Scaling %ux%u to %ux%u (%u:%u horiz, %u:%u vert)\n", - state->src_w >> 16, state->src_h >> 16, - state->crtc_w, state->crtc_h, + newstate->src_w >> 16, newstate->src_h >> 16, + newstate->crtc_w, newstate->crtc_h, ipu->num_w, ipu->denom_w, ipu->num_h, ipu->denom_h); } static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); unsigned int num_w, denom_w, num_h, denom_h, xres, yres, max_w, max_h; struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane); - struct drm_crtc *crtc = state->crtc ?: plane->state->crtc; + struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; struct drm_crtc_state *crtc_state; if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; /* Request a full modeset if we are enabling or disabling the IPU. 
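The atomic_check conversions in these hunks all follow the same shape: the CRTC state is looked up from the top-level atomic state and the clipping helper is run on the new plane state. A minimal sketch under those assumptions; example_plane_atomic_check and the no-scaling limits are illustrative, not taken from any one driver here:

#include <linux/err.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_pstate =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_pstate->crtc)
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state, new_pstate->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Clip against the CRTC and reject any scaling. */
	return drm_atomic_helper_check_plane_state(new_pstate, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   true, true);
}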
*/ - if (!plane->state->crtc ^ !state->crtc) + if (!old_plane_state->crtc ^ !new_plane_state->crtc) crtc_state->mode_changed = true; - if (!state->crtc || + if (!new_plane_state->crtc || !crtc_state->mode.hdisplay || !crtc_state->mode.vdisplay) return 0; /* Plane must be fully visible */ - if (state->crtc_x < 0 || state->crtc_y < 0 || - state->crtc_x + state->crtc_w > crtc_state->mode.hdisplay || - state->crtc_y + state->crtc_h > crtc_state->mode.vdisplay) + if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 || + new_plane_state->crtc_x + new_plane_state->crtc_w > crtc_state->mode.hdisplay || + new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->mode.vdisplay) return -EINVAL; /* Minimum size is 4x4 */ - if ((state->src_w >> 16) < 4 || (state->src_h >> 16) < 4) + if ((new_plane_state->src_w >> 16) < 4 || (new_plane_state->src_h >> 16) < 4) return -EINVAL; /* Input and output lines must have an even number of pixels. */ - if (((state->src_w >> 16) & 1) || (state->crtc_w & 1)) + if (((new_plane_state->src_w >> 16) & 1) || (new_plane_state->crtc_w & 1)) return -EINVAL; - if (!osd_changed(state, plane->state)) + if (!osd_changed(new_plane_state, old_plane_state)) return 0; crtc_state->mode_changed = true; - xres = state->src_w >> 16; - yres = state->src_h >> 16; + xres = new_plane_state->src_w >> 16; + yres = new_plane_state->src_h >> 16; /* * Increase the scaled image's theorical width/height until we find a @@ -568,13 +575,13 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, max_w = crtc_state->mode.hdisplay * 102 / 100; max_h = crtc_state->mode.vdisplay * 102 / 100; - for (denom_w = xres, num_w = state->crtc_w; num_w <= max_w; num_w++) + for (denom_w = xres, num_w = new_plane_state->crtc_w; num_w <= max_w; num_w++) if (!reduce_fraction(&num_w, &denom_w)) break; if (num_w > max_w) return -EINVAL; - for (denom_h = yres, num_h = state->crtc_h; num_h <= max_h; num_h++) + for (denom_h = yres, num_h = new_plane_state->crtc_h; num_h <= max_h; num_h++) if (!reduce_fraction(&num_h, &denom_h)) break; if (num_h > max_h) @@ -589,7 +596,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, } static void ingenic_ipu_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane); @@ -608,7 +615,7 @@ static const struct drm_plane_helper_funcs ingenic_ipu_plane_helper_funcs = { .atomic_update = ingenic_ipu_plane_atomic_update, .atomic_check = ingenic_ipu_plane_atomic_check, .atomic_disable = ingenic_ipu_plane_atomic_disable, - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static int diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index be8eea3830c1..d5b6195856d1 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -77,36 +77,40 @@ static unsigned int check_pixel_format(struct drm_plane *plane, u32 format) } static int kmb_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_framebuffer *fb; int ret; struct drm_crtc_state *crtc_state; bool can_position; - fb = state->fb; - if (!fb || !state->crtc) + fb = new_plane_state->fb; + if (!fb || !new_plane_state->crtc) return 0; ret = check_pixel_format(plane, fb->format->format); if (ret) return ret; - if (state->crtc_w > 
KMB_MAX_WIDTH || state->crtc_h > KMB_MAX_HEIGHT) + if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT) return -EINVAL; - if (state->crtc_w < KMB_MIN_WIDTH || state->crtc_h < KMB_MIN_HEIGHT) + if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT) return -EINVAL; can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); crtc_state = - drm_atomic_get_existing_crtc_state(state->state, state->crtc); - return drm_atomic_helper_check_plane_state(state, crtc_state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - can_position, true); + drm_atomic_get_existing_crtc_state(state, + new_plane_state->crtc); + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + can_position, true); } static void kmb_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { struct kmb_plane *kmb_plane = to_kmb_plane(plane); int plane_id = kmb_plane->id; @@ -274,8 +278,12 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id) } static void kmb_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_framebuffer *fb; struct kmb_drm_private *kmb; unsigned int width; @@ -289,10 +297,10 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, int num_planes; static dma_addr_t addr[MAX_SUB_PLANES]; - if (!plane || !plane->state || !state) + if (!plane || !new_plane_state || !old_plane_state) return; - fb = plane->state->fb; + fb = new_plane_state->fb; if (!fb) return; num_planes = fb->format->num_planes; @@ -309,10 +317,10 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, } spin_unlock_irq(&kmb->irq_lock); - src_w = (plane->state->src_w >> 16); - src_h = plane->state->src_h >> 16; - crtc_x = plane->state->crtc_x; - crtc_y = plane->state->crtc_y; + src_w = (new_plane_state->src_w >> 16); + src_h = new_plane_state->src_h >> 16; + crtc_x = new_plane_state->crtc_x; + crtc_y = new_plane_state->crtc_y; drm_dbg(&kmb->drm, "src_w=%d src_h=%d, fb->format->format=0x%x fb->flags=0x%x\n", @@ -329,7 +337,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id), (width * fb->format->cpp[0])); - addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, 0); + addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0); kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id), addr[Y_PLANE] + fb->offsets[0]); val = get_pixel_format(fb->format->format); @@ -341,7 +349,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id), (width * fb->format->cpp[0])); - addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, + addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state, U_PLANE); /* check if Cb/Cr is swapped*/ if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER)) @@ -363,7 +371,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, ((width) * fb->format->cpp[0])); addr[V_PLANE] = drm_fb_cma_get_gem_addr(fb, - plane->state, + new_plane_state, V_PLANE); /* check if Cb/Cr is swapped*/ diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c index 
5686ad4aaf7c..4f64940b9055 100644 --- a/drivers/gpu/drm/lima/lima_devfreq.c +++ b/drivers/gpu/drm/lima/lima_devfreq.c @@ -81,6 +81,7 @@ static int lima_devfreq_get_dev_status(struct device *dev, } static struct devfreq_dev_profile lima_devfreq_profile = { + .timer = DEVFREQ_TIMER_DELAYED, .polling_ms = 50, /* ~3 frames */ .target = lima_devfreq_target, .get_dev_status = lima_devfreq_get_dev_status, @@ -163,8 +164,16 @@ int lima_devfreq_init(struct lima_device *ldev) lima_devfreq_profile.initial_freq = cur_freq; dev_pm_opp_put(opp); + /* + * Setup default thresholds for the simple_ondemand governor. + * The values are chosen based on experiments. + */ + ldevfreq->gov_data.upthreshold = 30; + ldevfreq->gov_data.downdifferential = 5; + devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile, - DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL); + DEVFREQ_GOV_SIMPLE_ONDEMAND, + &ldevfreq->gov_data); if (IS_ERR(devfreq)) { dev_err(dev, "Couldn't initialize GPU devfreq\n"); ret = PTR_ERR(devfreq); diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h index 2d9b3008ce77..b0c7c736e81a 100644 --- a/drivers/gpu/drm/lima/lima_devfreq.h +++ b/drivers/gpu/drm/lima/lima_devfreq.h @@ -4,6 +4,7 @@ #ifndef __LIMA_DEVFREQ_H__ #define __LIMA_DEVFREQ_H__ +#include <linux/devfreq.h> #include <linux/spinlock.h> #include <linux/ktime.h> @@ -18,6 +19,7 @@ struct lima_devfreq { struct opp_table *clkname_opp_table; struct opp_table *regulators_opp_table; struct thermal_cooling_device *cooling; + struct devfreq_simple_ondemand_data gov_data; ktime_t busy_time; ktime_t idle_time; diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 63b4c5643f9c..ecf3267334ff 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -201,7 +201,7 @@ static int lima_pm_busy(struct lima_device *ldev) int ret; /* resume GPU if it has been suspended by runtime PM */ - ret = pm_runtime_get_sync(ldev->dev); + ret = pm_runtime_resume_and_get(ldev->dev); if (ret < 0) return ret; @@ -415,7 +415,7 @@ out: mutex_unlock(&dev->error_task_list_lock); } -static void lima_sched_timedout_job(struct drm_sched_job *job) +static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job) { struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); struct lima_sched_task *task = to_lima_task(job); @@ -449,6 +449,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job) drm_sched_resubmit_jobs(&pipe->base); drm_sched_start(&pipe->base, true); + + return DRM_GPU_SCHED_STAT_NOMINAL; } static void lima_sched_free_job(struct drm_sched_job *job) @@ -507,7 +509,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) return drm_sched_init(&pipe->base, &lima_sched_ops, 1, lima_job_hang_limit, msecs_to_jiffies(timeout), - name); + NULL, name); } void lima_sched_pipe_fini(struct lima_sched_pipe *pipe) diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 7c2e0b865441..4ddc55d58f38 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -13,8 +13,8 @@ #include <drm/drm_device.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_bridge.h> @@ -1161,7 +1161,6 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe, int 
dsi_pkt_size; int fifo_wtrmrk; int cpp = fb->format->cpp[0]; - struct drm_format_name_buf tmp; u32 dsi_formatter_frame; u32 val; int ret; @@ -1173,9 +1172,8 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe, return; } - dev_info(drm->dev, "enable MCDE, %d x %d format %s\n", - mode->hdisplay, mode->vdisplay, - drm_get_format_name(format, &tmp)); + dev_info(drm->dev, "enable MCDE, %d x %d format %p4cc\n", + mode->hdisplay, mode->vdisplay, &format); /* Clear any pending interrupts */ @@ -1481,7 +1479,7 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = { .update = mcde_display_update, .enable_vblank = mcde_display_enable_vblank, .disable_vblank = mcde_display_disable_vblank, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; int mcde_display_init(struct drm_device *drm) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 8b0de90156c6..54ab3a324752 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -522,7 +522,7 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, } void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); const struct drm_plane_helper_funcs *plane_helper_funcs = @@ -531,7 +531,7 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, if (!mtk_crtc->enabled) return; - plane_helper_funcs->atomic_update(plane, new_state); + plane_helper_funcs->atomic_update(plane, state); mtk_drm_crtc_hw_config(mtk_crtc); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 45cfd0a032de..cb9a36c48d4f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -21,6 +21,6 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, - struct drm_plane_state *plane_state); + struct drm_atomic_state *plane_state); #endif /* MTK_DRM_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 92141a19681b..b5582dcf564c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -6,10 +6,10 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_fourcc.h> #include <drm/drm_atomic_uapi.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include "mtk_drm_crtc.h" #include "mtk_drm_ddp_comp.h" @@ -77,12 +77,14 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane, } static int mtk_plane_atomic_async_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; int ret; - if (plane != state->crtc->cursor) + if (plane != new_plane_state->crtc->cursor) return -EINVAL; if (!plane->state) @@ -91,16 +93,16 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane, if (!plane->state->fb) return -EINVAL; - ret = 
mtk_drm_crtc_plane_check(state->crtc, plane, - to_mtk_plane_state(state)); + ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane, + to_mtk_plane_state(new_plane_state)); if (ret) return ret; - if (state->state) - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + if (state) + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_plane_state->crtc); else /* Special case for asynchronous cursor updates. */ - crtc_state = state->crtc->state; + crtc_state = new_plane_state->crtc->state; return drm_atomic_helper_check_plane_state(plane->state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, @@ -109,9 +111,11 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane, } static void mtk_plane_atomic_async_update(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *new_plane_state = to_mtk_plane_state(plane->state); plane->state->crtc_x = new_state->crtc_x; plane->state->crtc_y = new_state->crtc_y; @@ -122,9 +126,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane, plane->state->src_h = new_state->src_h; plane->state->src_w = new_state->src_w; swap(plane->state->fb, new_state->fb); - state->pending.async_dirty = true; + new_plane_state->pending.async_dirty = true; - mtk_drm_crtc_async_update(new_state->crtc, plane, new_state); + mtk_drm_crtc_async_update(new_state->crtc, plane, state); } static const struct drm_plane_funcs mtk_plane_funcs = { @@ -137,49 +141,56 @@ static const struct drm_plane_funcs mtk_plane_funcs = { }; static int mtk_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_framebuffer *fb = state->fb; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; struct drm_crtc_state *crtc_state; int ret; if (!fb) return 0; - if (WARN_ON(!state->crtc)) + if (WARN_ON(!new_plane_state->crtc)) return 0; - ret = mtk_drm_crtc_plane_check(state->crtc, plane, - to_mtk_plane_state(state)); + ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane, + to_mtk_plane_state(new_plane_state)); if (ret) return ret; - crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - return drm_atomic_helper_check_plane_state(state, crtc_state, + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true); } static void mtk_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); - - state->pending.enable = false; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + mtk_plane_state->pending.enable = false; wmb(); /* Make sure the above parameter is set before update */ - state->pending.dirty = true; + mtk_plane_state->pending.dirty = true; } static void mtk_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct mtk_plane_state *state = 
to_mtk_plane_state(plane->state); - struct drm_crtc *crtc = plane->state->crtc; - struct drm_framebuffer *fb = plane->state->fb; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + struct drm_crtc *crtc = new_state->crtc; + struct drm_framebuffer *fb = new_state->fb; struct drm_gem_object *gem; struct mtk_drm_gem_obj *mtk_gem; unsigned int pitch, format; @@ -188,8 +199,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return; - if (!plane->state->visible) { - mtk_plane_atomic_disable(plane, old_state); + if (!new_state->visible) { + mtk_plane_atomic_disable(plane, state); return; } @@ -199,24 +210,24 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, pitch = fb->pitches[0]; format = fb->format->format; - addr += (plane->state->src.x1 >> 16) * fb->format->cpp[0]; - addr += (plane->state->src.y1 >> 16) * pitch; - - state->pending.enable = true; - state->pending.pitch = pitch; - state->pending.format = format; - state->pending.addr = addr; - state->pending.x = plane->state->dst.x1; - state->pending.y = plane->state->dst.y1; - state->pending.width = drm_rect_width(&plane->state->dst); - state->pending.height = drm_rect_height(&plane->state->dst); - state->pending.rotation = plane->state->rotation; + addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; + addr += (new_state->src.y1 >> 16) * pitch; + + mtk_plane_state->pending.enable = true; + mtk_plane_state->pending.pitch = pitch; + mtk_plane_state->pending.format = format; + mtk_plane_state->pending.addr = addr; + mtk_plane_state->pending.x = new_state->dst.x1; + mtk_plane_state->pending.y = new_state->dst.y1; + mtk_plane_state->pending.width = drm_rect_width(&new_state->dst); + mtk_plane_state->pending.height = drm_rect_height(&new_state->dst); + mtk_plane_state->pending.rotation = new_state->rotation; wmb(); /* Make sure the above parameters are set before update */ - state->pending.dirty = true; + mtk_plane_state->pending.dirty = true; } static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = mtk_plane_atomic_check, .atomic_update = mtk_plane_atomic_update, .atomic_disable = mtk_plane_atomic_disable, diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 42c5d3246cfc..453d8b4c5763 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -482,6 +482,16 @@ static int meson_probe_remote(struct platform_device *pdev, return count; } +static void meson_drv_shutdown(struct platform_device *pdev) +{ + struct meson_drm *priv = dev_get_drvdata(&pdev->dev); + struct drm_device *drm = priv->drm; + + DRM_DEBUG_DRIVER("\n"); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); +} + static int meson_drv_probe(struct platform_device *pdev) { struct component_match *match = NULL; @@ -553,6 +563,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = { static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, + .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c index 1ffbbecafa22..ed063152aecd 100644 --- a/drivers/gpu/drm/meson/meson_overlay.c +++ b/drivers/gpu/drm/meson/meson_overlay.c @@ -10,11 +10,11 @@ #include <drm/drm_atomic.h> 
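The meson change above adds a platform .shutdown hook so the display pipeline is disabled cleanly on reboot or power-off. A minimal sketch of that pattern, assuming a hypothetical driver private struct that carries the drm_device pointer:

#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>

struct example_drm_priv {
	struct drm_device *drm;
};

static void example_drv_shutdown(struct platform_device *pdev)
{
	struct example_drm_priv *priv = platform_get_drvdata(pdev);

	/* Stop connector polling, then disable all CRTCs and planes. */
	drm_kms_helper_poll_fini(priv->drm);
	drm_atomic_helper_shutdown(priv->drm);
}

static struct platform_driver example_drm_driver = {
	/* .probe/.remove omitted for brevity */
	.shutdown	= example_drv_shutdown,
	.driver = {
		.name	= "example-drm",
	},
};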
#include <drm/drm_atomic_helper.h> #include <drm/drm_device.h> +#include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_plane_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_fb_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_plane_helper.h> #include "meson_overlay.h" #include "meson_registers.h" @@ -165,18 +165,22 @@ struct meson_overlay { #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) static int meson_overlay_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; - if (!state->crtc) + if (!new_plane_state->crtc) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - return drm_atomic_helper_check_plane_state(state, crtc_state, + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, FRAC_16_16(1, 5), FRAC_16_16(5, 1), true, true); @@ -464,11 +468,12 @@ static void meson_overlay_setup_scaler_params(struct meson_drm *priv, } static void meson_overlay_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct meson_overlay *meson_overlay = to_meson_overlay(plane); - struct drm_plane_state *state = plane->state; - struct drm_framebuffer *fb = state->fb; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_state->fb; struct meson_drm *priv = meson_overlay->priv; struct drm_gem_cma_object *gem; unsigned long flags; @@ -476,7 +481,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, DRM_DEBUG_DRIVER("\n"); - interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE; + interlace_mode = new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE; spin_lock_irqsave(&priv->drm->event_lock, flags); @@ -717,7 +722,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, } static void meson_overlay_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct meson_overlay *meson_overlay = to_meson_overlay(plane); struct meson_drm *priv = meson_overlay->priv; @@ -742,7 +747,7 @@ static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = { .atomic_check = meson_overlay_atomic_check, .atomic_disable = meson_overlay_atomic_disable, .atomic_update = meson_overlay_atomic_update, - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static bool meson_overlay_format_mod_supported(struct drm_plane *plane, diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 35338ed18209..a18510dae4c8 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -16,8 +16,8 @@ #include <drm/drm_device.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include "meson_plane.h" @@ -71,14 +71,17 @@ struct meson_plane { #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) static int meson_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + 
struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; - if (!state->crtc) + if (!new_plane_state->crtc) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ -87,7 +90,8 @@ static int meson_plane_atomic_check(struct drm_plane *plane, * - Upscaling up to 5x, vertical and horizontal * - Final coordinates must match crtc size */ - return drm_atomic_helper_check_plane_state(state, crtc_state, + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, FRAC_16_16(1, 5), DRM_PLANE_HELPER_NO_SCALING, false, true); @@ -126,13 +130,14 @@ static u32 meson_g12a_afbcd_line_stride(struct meson_drm *priv) } static void meson_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct meson_plane *meson_plane = to_meson_plane(plane); - struct drm_plane_state *state = plane->state; - struct drm_rect dest = drm_plane_state_dest(state); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_rect dest = drm_plane_state_dest(new_state); struct meson_drm *priv = meson_plane->priv; - struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *fb = new_state->fb; struct drm_gem_cma_object *gem; unsigned long flags; int vsc_ini_rcv_num, vsc_ini_rpt_p0_num; @@ -245,7 +250,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, hf_bank_len = 4; vf_bank_len = 4; - if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { + if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { vsc_bot_rcv_num = 6; vsc_bot_rpt_p0_num = 2; } @@ -255,10 +260,10 @@ static void meson_plane_atomic_update(struct drm_plane *plane, hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1; vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1; - src_w = fixed16_to_int(state->src_w); - src_h = fixed16_to_int(state->src_h); - dst_w = state->crtc_w; - dst_h = state->crtc_h; + src_w = fixed16_to_int(new_state->src_w); + src_h = fixed16_to_int(new_state->src_h); + dst_w = new_state->crtc_w; + dst_h = new_state->crtc_h; /* * When the output is interlaced, the OSD must switch between @@ -267,7 +272,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, * But the vertical scaler can provide such funtionnality if * is configured for 2:1 scaling with interlace options enabled. */ - if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { + if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { dest.y1 /= 2; dest.y2 /= 2; dst_h /= 2; @@ -276,7 +281,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, hf_phase_step = ((src_w << 18) / dst_w) << 6; vf_phase_step = (src_h << 20) / dst_h; - if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) + if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) bot_ini_phase = ((vf_phase_step / 2) >> 4); else bot_ini_phase = 0; @@ -308,7 +313,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) | VSC_VERTICAL_SCALER_EN; - if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) + if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) priv->viu.osd_sc_v_ctrl0 |= VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) | VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) | @@ -343,11 +348,11 @@ static void meson_plane_atomic_update(struct drm_plane *plane, * e.g. 
+30x1920 would be (1919 << 16) | 30 */ priv->viu.osd1_blk0_cfg[1] = - ((fixed16_to_int(state->src.x2) - 1) << 16) | - fixed16_to_int(state->src.x1); + ((fixed16_to_int(new_state->src.x2) - 1) << 16) | + fixed16_to_int(new_state->src.x1); priv->viu.osd1_blk0_cfg[2] = - ((fixed16_to_int(state->src.y2) - 1) << 16) | - fixed16_to_int(state->src.y1); + ((fixed16_to_int(new_state->src.y2) - 1) << 16) | + fixed16_to_int(new_state->src.y1); priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1; priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1; @@ -391,7 +396,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, } static void meson_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct meson_plane *meson_plane = to_meson_plane(plane); struct meson_drm *priv = meson_plane->priv; @@ -417,7 +422,7 @@ static const struct drm_plane_helper_funcs meson_plane_helper_funcs = { .atomic_check = meson_plane_atomic_check, .atomic_disable = meson_plane_atomic_disable, .atomic_update = meson_plane_atomic_update, - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static bool meson_plane_format_mod_supported(struct drm_plane *plane, diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 1dfc42170059..cece3e57fb27 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -17,6 +17,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_print.h> @@ -706,13 +707,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) static int mga_g200er_set_plls(struct mga_device *mdev, long clock) { + static const unsigned int m_div_val[] = { 1, 2, 4, 8 }; unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; int testr, testn, testm, testo; unsigned int p, m, n; unsigned int computed, vco; int tmp; - const unsigned int m_div_val[] = { 1, 2, 4, 8 }; m = n = p = 0; vcomax = 1488000; @@ -1549,22 +1550,12 @@ mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe, static void mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, - struct drm_rect *clip) + struct drm_rect *clip, const struct dma_buf_map *map) { - struct drm_device *dev = &mdev->base; - struct dma_buf_map map; - void *vmap; - int ret; - - ret = drm_gem_shmem_vmap(fb->obj[0], &map); - if (drm_WARN_ON(dev, ret)) - return; /* BUG: SHMEM BO should always be vmapped */ - vmap = map.vaddr; /* TODO: Use mapping abstraction properly */ + void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip); - drm_gem_shmem_vunmap(fb->obj[0], &map); - /* Always scanout image at VRAM offset 0 */ mgag200_set_startadd(mdev, (u32)0); mgag200_set_offset(mdev, fb); @@ -1580,6 +1571,7 @@ mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, struct mga_device *mdev = to_mga_device(dev); struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; struct drm_framebuffer *fb = plane_state->fb; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_rect fullscreen = { .x1 = 0, .x2 = fb->width, @@ -1608,7 +1600,7 @@ mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, 
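The mgag200 hunks switch the simple display pipe to the shadow-plane helpers: the framebuffer BO is vmapped by prepare_fb and handed to the driver through drm_shadow_plane_state, so the damage handler no longer vmaps and vunmaps around every blit. A minimal sketch of how a driver consumes that mapping; names prefixed example_ are hypothetical:

#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>

static void example_pipe_update(struct drm_simple_display_pipe *pipe,
				struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_shadow_plane_state *shadow =
		to_drm_shadow_plane_state(state);
	void *vaddr = shadow->map[0].vaddr; /* already vmapped by the helper */

	/* Copy the damaged region from vaddr into VRAM here. */
	(void)vaddr;
	(void)old_state;
}

static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
	.update = example_pipe_update,
	/* Provides prepare_fb/cleanup_fb and the shadow plane state hooks. */
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};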
mga_crtc_load_lut(crtc); mgag200_enable_display(mdev); - mgag200_handle_damage(mdev, fb, &fullscreen); + mgag200_handle_damage(mdev, fb, &fullscreen, &shadow_plane_state->map[0]); } static void @@ -1649,6 +1641,7 @@ mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_device *dev = plane->dev; struct mga_device *mdev = to_mga_device(dev); struct drm_plane_state *state = plane->state; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); struct drm_framebuffer *fb = state->fb; struct drm_rect damage; @@ -1656,7 +1649,7 @@ mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe, return; if (drm_atomic_helper_damage_merged(old_state, state, &damage)) - mgag200_handle_damage(mdev, fb, &damage); + mgag200_handle_damage(mdev, fb, &damage, &shadow_plane_state->map[0]); } static const struct drm_simple_display_pipe_funcs @@ -1666,7 +1659,7 @@ mgag200_simple_display_pipe_funcs = { .disable = mgag200_simple_display_pipe_disable, .check = mgag200_simple_display_pipe_check, .update = mgag200_simple_display_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, }; static const uint32_t mgag200_simple_display_pipe_formats[] = { diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h index 346cc6ff3a36..7b9fcfe95c04 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h @@ -2367,6 +2367,8 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val) #define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80 +#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81 + #define REG_A5XX_UCHE_SVM_CNTL 0x00000e82 #define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87 diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index a5af223eaf50..7e553d3efeb2 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -222,7 +222,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) a5xx_preempt_trigger(gpu); } -static const struct { +static const struct adreno_five_hwcg_regs { u32 offset; u32 value; } a5xx_hwcg[] = { @@ -318,16 +318,124 @@ static const struct { {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} +}, a50x_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 
0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, +}, a512_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, }; void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - unsigned int i; + const struct 
adreno_five_hwcg_regs *regs; + unsigned int i, sz; + + if (adreno_is_a508(adreno_gpu)) { + regs = a50x_hwcg; + sz = ARRAY_SIZE(a50x_hwcg); + } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) { + regs = a512_hwcg; + sz = ARRAY_SIZE(a512_hwcg); + } else { + regs = a5xx_hwcg; + sz = ARRAY_SIZE(a5xx_hwcg); + } - for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) - gpu_write(gpu, a5xx_hwcg[i].offset, - state ? a5xx_hwcg[i].value : 0); + for (i = 0; i < sz; i++) + gpu_write(gpu, regs[i].offset, + state ? regs[i].value : 0); if (adreno_is_a540(adreno_gpu)) { gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0); @@ -538,11 +646,13 @@ static int a5xx_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + u32 regbit; int ret; gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); - if (adreno_is_a540(adreno_gpu)) + if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a540(adreno_gpu)) gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); /* Make all blocks contribute to the GPU BUSY perf counter */ @@ -604,29 +714,48 @@ static int a5xx_hw_init(struct msm_gpu *gpu) 0x00100000 + adreno_gpu->gmem - 1); gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); - if (adreno_is_a510(adreno_gpu)) { + if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) { gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); - gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); + if (adreno_is_a508(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); + else + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, - (0x200 << 11 | 0x200 << 22)); } else { gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); if (adreno_is_a530(adreno_gpu)) gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); - if (adreno_is_a540(adreno_gpu)) + else gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); + } + + if (adreno_is_a508(adreno_gpu)) + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x100 << 11 | 0x100 << 22)); + else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) || + adreno_is_a512(adreno_gpu)) + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x200 << 11 | 0x200 << 22)); + else gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); - } if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); - gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); + /* + * Disable the RB sampler datapath DP2 clock gating optimization + * for 1-SP GPUs, as it is enabled by default. 
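The a5xx changes replace the single hardware clock-gating table with per-variant tables selected at runtime. A minimal sketch of that table-selection pattern; the struct, register offsets and values below are placeholders, not the real Adreno programming:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_gpu {
	void __iomem *mmio;
	bool is_small_variant;
};

struct example_hwcg_reg {
	u32 offset;
	u32 value;
};

static const struct example_hwcg_reg small_gpu_hwcg[] = {
	{ 0x0e80, 0x22222222 },
};

static const struct example_hwcg_reg big_gpu_hwcg[] = {
	{ 0x0e80, 0x22222222 },
	{ 0x0e84, 0x00222222 },
};

static void example_set_hwcg(struct example_gpu *gpu, bool enable)
{
	const struct example_hwcg_reg *regs;
	unsigned int i, sz;

	/* Pick the table that matches the detected GPU variant. */
	if (gpu->is_small_variant) {
		regs = small_gpu_hwcg;
		sz = ARRAY_SIZE(small_gpu_hwcg);
	} else {
		regs = big_gpu_hwcg;
		sz = ARRAY_SIZE(big_gpu_hwcg);
	}

	for (i = 0; i < sz; i++)
		writel(enable ? regs[i].value : 0, gpu->mmio + regs[i].offset);
}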
+ */ + if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || + adreno_is_a512(adreno_gpu)) + gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9)); + + /* Disable UCHE global filter as SP can invalidate/flush independently */ + gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29)); /* Enable USE_RETENTION_FLOPS */ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000); @@ -653,10 +782,20 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); /* Set the highest bank bit */ - gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7); - gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1); if (adreno_is_a540(adreno_gpu)) - gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2); + regbit = 2; + else + regbit = 1; + + gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7); + gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1); + + if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a540(adreno_gpu)) + gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit); + + /* Disable All flat shading optimization (ALLFLATOPTDIS) */ + gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10)); /* Protect registers from the CP */ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007); @@ -688,12 +827,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* VPC */ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8)); - gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16)); /* UCHE */ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); - if (adreno_is_a530(adreno_gpu) || adreno_is_a510(adreno_gpu)) + if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || + adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a530(adreno_gpu)) gpu_write(gpu, REG_A5XX_CP_PROTECT(17), ADRENO_PROTECT_RW(0x10000, 0x8000)); @@ -735,7 +876,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; - if (!adreno_is_a510(adreno_gpu)) + if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || + adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu))) a5xx_gpmu_ucode_init(gpu); ret = a5xx_ucode_init(gpu); @@ -1168,7 +1310,8 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) if (ret) return ret; - if (adreno_is_a510(adreno_gpu)) { + /* Adreno 508, 509, 510, 512 needs manual RBBM sus/res control */ + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) { /* Halt the sp_input_clk at HM level */ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); a5xx_set_hwcg(gpu, true); @@ -1210,8 +1353,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) u32 mask = 0xf; int i, ret; - /* A510 has 3 XIN ports in VBIF */ - if (adreno_is_a510(adreno_gpu)) + /* A508, A510 have 3 XIN ports in VBIF */ + if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) mask = 0x7; /* Clear the VBIF pipe before shutting down */ @@ -1223,10 +1366,12 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) /* * Reset the VBIF before power collapse to avoid issue with FIFO - * entries + * entries on Adreno A510 and A530 (the others will tend to lock up) */ - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) { + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + } ret = msm_gpu_pm_suspend(gpu); if (ret) diff --git 
a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index f176a6f3eff6..5ccc9da455a1 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -298,7 +298,7 @@ int a5xx_power_init(struct msm_gpu *gpu) int ret; /* Not all A5xx chips have a GPMU */ - if (adreno_is_a510(adreno_gpu)) + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) return 0; /* Set up the limits management */ @@ -330,7 +330,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) unsigned int *data, *ptr, *cmds; unsigned int cmds_size; - if (adreno_is_a510(adreno_gpu)) + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) return; if (a5xx_gpu->gpmu_bo) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index e6703ae98760..71c917f909af 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -134,7 +134,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (!gmu->legacy) { a6xx_hfi_set_freq(gmu, perf_index); - dev_pm_opp_set_bw(&gpu->pdev->dev, opp); + dev_pm_opp_set_opp(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); return; } @@ -158,7 +158,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (ret) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); - dev_pm_opp_set_bw(&gpu->pdev->dev, opp); + dev_pm_opp_set_opp(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); } @@ -245,37 +245,66 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) return ret; } +struct a6xx_gmu_oob_bits { + int set, ack, set_new, ack_new; + const char *name; +}; + +/* These are the interrupt / ack bits for each OOB request that are set + * in a6xx_gmu_set_oob and a6xx_clear_oob + */ +static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = { + [GMU_OOB_GPU_SET] = { + .name = "GPU_SET", + .set = 16, + .ack = 24, + .set_new = 30, + .ack_new = 31, + }, + + [GMU_OOB_PERFCOUNTER_SET] = { + .name = "PERFCOUNTER", + .set = 17, + .ack = 25, + .set_new = 28, + .ack_new = 30, + }, + + [GMU_OOB_BOOT_SLUMBER] = { + .name = "BOOT_SLUMBER", + .set = 22, + .ack = 30, + }, + + [GMU_OOB_DCVS_SET] = { + .name = "GPU_DCVS", + .set = 23, + .ack = 31, + }, +}; + /* Trigger a OOB (out of band) request to the GMU */ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { int ret; u32 val; int request, ack; - const char *name; - switch (state) { - case GMU_OOB_GPU_SET: - if (gmu->legacy) { - request = GMU_OOB_GPU_SET_REQUEST; - ack = GMU_OOB_GPU_SET_ACK; - } else { - request = GMU_OOB_GPU_SET_REQUEST_NEW; - ack = GMU_OOB_GPU_SET_ACK_NEW; - } - name = "GPU_SET"; - break; - case GMU_OOB_BOOT_SLUMBER: - request = GMU_OOB_BOOT_SLUMBER_REQUEST; - ack = GMU_OOB_BOOT_SLUMBER_ACK; - name = "BOOT_SLUMBER"; - break; - case GMU_OOB_DCVS_SET: - request = GMU_OOB_DCVS_REQUEST; - ack = GMU_OOB_DCVS_ACK; - name = "GPU_DCVS"; - break; - default: + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return -EINVAL; + + if (gmu->legacy) { + request = a6xx_gmu_oob_bits[state].set; + ack = a6xx_gmu_oob_bits[state].ack; + } else { + request = a6xx_gmu_oob_bits[state].set_new; + ack = a6xx_gmu_oob_bits[state].ack_new; + if (!request || !ack) { + DRM_DEV_ERROR(gmu->dev, + "Invalid non-legacy GMU request %s\n", + a6xx_gmu_oob_bits[state].name); + return -EINVAL; + } } /* Trigger the equested OOB operation */ @@ -288,7 +317,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) if (ret) DRM_DEV_ERROR(gmu->dev, "Timeout waiting for 
GMU OOB set %s: 0x%x\n", - name, + a6xx_gmu_oob_bits[state].name, gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); /* Clear the acknowledge interrupt */ @@ -300,27 +329,17 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) /* Clear a pending OOB state in the GMU */ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { - if (!gmu->legacy) { - WARN_ON(state != GMU_OOB_GPU_SET); - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_GPU_SET_CLEAR_NEW); + int bit; + + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return; - } - switch (state) { - case GMU_OOB_GPU_SET: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_GPU_SET_CLEAR); - break; - case GMU_OOB_BOOT_SLUMBER: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_BOOT_SLUMBER_CLEAR); - break; - case GMU_OOB_DCVS_SET: - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, - 1 << GMU_OOB_DCVS_CLEAR); - break; - } + if (gmu->legacy) + bit = a6xx_gmu_oob_bits[state].ack; + else + bit = a6xx_gmu_oob_bits[state].ack_new; + + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit); } /* Enable CPU control of SPTP power power collapse */ @@ -866,7 +885,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) if (IS_ERR_OR_NULL(gpu_opp)) return; - dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp); + dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); dev_pm_opp_put(gpu_opp); } @@ -1072,7 +1091,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) a6xx_gmu_shutdown(gmu); /* Remove the bus vote */ - dev_pm_opp_set_bw(&gpu->pdev->dev, NULL); + dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); /* * Make sure the GX domain is off before turning off the GMU (CX) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index c6d2bced8e5d..71dfa60070cc 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -153,44 +153,27 @@ static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value) */ enum a6xx_gmu_oob_state { + /* + * Let the GMU know that a boot or slumber operation has started. The value in + * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are + * doing + */ GMU_OOB_BOOT_SLUMBER = 0, + /* + * Let the GMU know to not turn off any GPU registers while the CPU is in a + * critical section + */ GMU_OOB_GPU_SET, + /* + * Set a new power level for the GPU when the CPU is doing frequency scaling + */ GMU_OOB_DCVS_SET, + /* + * Used to keep the GPU on for CPU-side reads of performance counters. + */ + GMU_OOB_PERFCOUNTER_SET, }; -/* These are the interrupt / ack bits for each OOB request that are set - * in a6xx_gmu_set_oob and a6xx_clear_oob - */ - -/* - * Let the GMU know that a boot or slumber operation has started. 
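The per-state documentation being added here pairs with the a6xx_gmu_oob_bits[] table introduced above: each OOB request is now described by its set/ack (and optional set_new/ack_new) bit positions instead of per-state #defines and switch cases. The following is a minimal, compilable userspace sketch of that lookup pattern; the bit values mirror the patch, but the enum names, harness and printfs are illustrative only, not the kernel code.

#include <stdio.h>
#include <stddef.h>

/* Sketch of the table-driven OOB bit lookup used by a6xx_gmu_set_oob(). */
enum oob_state { OOB_BOOT_SLUMBER, OOB_GPU_SET, OOB_DCVS_SET, OOB_PERFCOUNTER_SET };

struct oob_bits { int set, ack, set_new, ack_new; const char *name; };

static const struct oob_bits oob_bits[] = {
	[OOB_GPU_SET]         = { .name = "GPU_SET",      .set = 16, .ack = 24, .set_new = 30, .ack_new = 31 },
	[OOB_PERFCOUNTER_SET] = { .name = "PERFCOUNTER",  .set = 17, .ack = 25, .set_new = 28, .ack_new = 30 },
	[OOB_BOOT_SLUMBER]    = { .name = "BOOT_SLUMBER", .set = 22, .ack = 30 },
	[OOB_DCVS_SET]        = { .name = "GPU_DCVS",     .set = 23, .ack = 31 },
};

/* Return the request bit to raise, or -1 if the state has no encoding for this mode. */
static int oob_request_bit(enum oob_state state, int legacy)
{
	if ((size_t)state >= sizeof(oob_bits) / sizeof(oob_bits[0]))
		return -1;
	if (legacy)
		return oob_bits[state].set;
	/* BOOT_SLUMBER and DCVS have no "new" bits; zero means unsupported here. */
	return oob_bits[state].set_new ? oob_bits[state].set_new : -1;
}

int main(void)
{
	printf("non-legacy GPU_SET request bit: %d\n", oob_request_bit(OOB_GPU_SET, 0));
	printf("non-legacy BOOT_SLUMBER request bit: %d\n", oob_request_bit(OOB_BOOT_SLUMBER, 0));
	return 0;
}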
The value in - * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are - * doing - */ -#define GMU_OOB_BOOT_SLUMBER_REQUEST 22 -#define GMU_OOB_BOOT_SLUMBER_ACK 30 -#define GMU_OOB_BOOT_SLUMBER_CLEAR 30 - -/* - * Set a new power level for the GPU when the CPU is doing frequency scaling - */ -#define GMU_OOB_DCVS_REQUEST 23 -#define GMU_OOB_DCVS_ACK 31 -#define GMU_OOB_DCVS_CLEAR 31 - -/* - * Let the GMU know to not turn off any GPU registers while the CPU is in a - * critical section - */ -#define GMU_OOB_GPU_SET_REQUEST 16 -#define GMU_OOB_GPU_SET_ACK 24 -#define GMU_OOB_GPU_SET_CLEAR 24 - -#define GMU_OOB_GPU_SET_REQUEST_NEW 30 -#define GMU_OOB_GPU_SET_ACK_NEW 31 -#define GMU_OOB_GPU_SET_CLEAR_NEW 31 - - void a6xx_hfi_init(struct a6xx_gmu *gmu); int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state); void a6xx_hfi_stop(struct a6xx_gmu *gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 130661898546..ba8e9d3cf0fe 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -10,6 +10,7 @@ #include <linux/bitfield.h> #include <linux/devfreq.h> +#include <linux/nvmem-consumer.h> #include <linux/soc/qcom/llcc-qcom.h> #define GPU_PAS_ID 13 @@ -1117,7 +1118,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev, a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); - if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice)) + if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); } @@ -1169,14 +1170,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + static DEFINE_MUTEX(perfcounter_oob); + + mutex_lock(&perfcounter_oob); /* Force the GPU power on so we can read this register */ - a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO, REG_A6XX_RBBM_PERFCTR_CP_0_HI); - a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); + mutex_unlock(&perfcounter_oob); return 0; } @@ -1208,6 +1213,10 @@ static void a6xx_destroy(struct msm_gpu *gpu) a6xx_gmu_remove(a6xx_gpu); adreno_gpu_cleanup(adreno_gpu); + + if (a6xx_gpu->opp_table) + dev_pm_opp_put_supported_hw(a6xx_gpu->opp_table); + kfree(a6xx_gpu); } @@ -1240,6 +1249,50 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) } static struct msm_gem_address_space * +a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct iommu_domain *iommu; + struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; + u64 start, size; + + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + /* + * This allows GPU to set the bus attributes required to use system + * cache on behalf of the iommu page table walker. + */ + if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) + adreno_set_llc_attributes(iommu); + + mmu = msm_iommu_new(&pdev->dev, iommu); + if (IS_ERR(mmu)) { + iommu_domain_free(iommu); + return ERR_CAST(mmu); + } + + /* + * Use the aperture start or SZ_16M, whichever is greater. 
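The comment here describes how the new a6xx_create_address_space() picks its range: start at the larger of SZ_16M and the IOMMU aperture start, size it up to aperture_end, and mask the start to the low 49 bits (the actual assignments follow just below). A minimal standalone sketch of that arithmetic, with made-up geometry values standing in for the real IOMMU report:

#include <stdint.h>
#include <stdio.h>

#define SZ_16M (16ULL * 1024 * 1024)

/* Sketch of the start/size math used when creating the GPU address space. */
static void aspace_range(uint64_t aperture_start, uint64_t aperture_end,
			 uint64_t *start, uint64_t *size)
{
	*start = aperture_start > SZ_16M ? aperture_start : SZ_16M;
	*size = aperture_end - *start + 1;
	/* The driver additionally masks the start to 49 bits, GENMASK_ULL(48, 0). */
	*start &= (1ULL << 49) - 1;
}

int main(void)
{
	uint64_t start, size;

	/* Hypothetical IOMMU geometry: 0 .. 2^48 - 1 */
	aspace_range(0, (1ULL << 48) - 1, &start, &size);
	printf("start=0x%llx size=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}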
This will + * ensure that we align with the allocated pagetable range while still + * allowing room in the lower 32 bits for GMEM and whatnot + */ + start = max_t(u64, SZ_16M, iommu->geometry.aperture_start); + size = iommu->geometry.aperture_end - start + 1; + + aspace = msm_gem_address_space_create(mmu, "gpu", + start & GENMASK_ULL(48, 0), size); + + if (IS_ERR(aspace) && !IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + + return aspace; +} + +static struct msm_gem_address_space * a6xx_create_private_address_space(struct msm_gpu *gpu) { struct msm_mmu *mmu; @@ -1264,6 +1317,78 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); } +static u32 a618_get_speed_bin(u32 fuse) +{ + if (fuse == 0) + return 0; + else if (fuse == 169) + return 1; + else if (fuse == 174) + return 2; + + return UINT_MAX; +} + +static u32 fuse_to_supp_hw(struct device *dev, u32 revn, u32 fuse) +{ + u32 val = UINT_MAX; + + if (revn == 618) + val = a618_get_speed_bin(fuse); + + if (val == UINT_MAX) { + DRM_DEV_ERROR(dev, + "missing support for speed-bin: %u. Some OPPs may not be supported by hardware", + fuse); + return UINT_MAX; + } + + return (1 << val); +} + +static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu, + u32 revn) +{ + struct opp_table *opp_table; + struct nvmem_cell *cell; + u32 supp_hw = UINT_MAX; + void *buf; + + cell = nvmem_cell_get(dev, "speed_bin"); + /* + * -ENOENT means that the platform doesn't support speedbin which is + * fine + */ + if (PTR_ERR(cell) == -ENOENT) + return 0; + else if (IS_ERR(cell)) { + DRM_DEV_ERROR(dev, + "failed to read speed-bin. Some OPPs may not be supported by hardware"); + goto done; + } + + buf = nvmem_cell_read(cell, NULL); + if (IS_ERR(buf)) { + nvmem_cell_put(cell); + DRM_DEV_ERROR(dev, + "failed to read speed-bin. 
Some OPPs may not be supported by hardware"); + goto done; + } + + supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf)); + + kfree(buf); + nvmem_cell_put(cell); + +done: + opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1); + if (IS_ERR(opp_table)) + return PTR_ERR(opp_table); + + a6xx_gpu->opp_table = opp_table; + return 0; +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -1285,7 +1410,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, #endif - .create_address_space = adreno_iommu_create_address_space, + .create_address_space = a6xx_create_address_space, .create_private_address_space = a6xx_create_private_address_space, .get_rptr = a6xx_get_rptr, }, @@ -1325,6 +1450,12 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) a6xx_llc_slices_init(pdev, a6xx_gpu); + ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info->revn); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a6xx_destroy(&(a6xx_gpu->base.base)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index e793d329e77b..ce0610c5256f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -33,6 +33,8 @@ struct a6xx_gpu { void *llc_slice; void *htw_llc_slice; bool have_mmu500; + + struct opp_table *opp_table; }; #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 12e75ba360f9..600d445fabe8 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -134,6 +134,41 @@ static const struct adreno_info gpulist[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a4xx_gpu_init, }, { + .rev = ADRENO_REV(5, 0, 8, ANY_ID), + .revn = 508, + .name = "A508", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_128K + SZ_8K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a508_zap.mdt", + }, { + .rev = ADRENO_REV(5, 0, 9, ANY_ID), + .revn = 509, + .name = "A509", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_256K + SZ_16K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + /* Adreno 509 uses the same ZAP as 512 */ + .zapfw = "a512_zap.mdt", + }, { .rev = ADRENO_REV(5, 1, 0, ANY_ID), .revn = 510, .name = "A510", @@ -149,6 +184,23 @@ static const struct adreno_info gpulist[] = { .inactive_period = 250, .init = a5xx_gpu_init, }, { + .rev = ADRENO_REV(5, 1, 2, ANY_ID), + .revn = 512, + .name = "A512", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_256K + SZ_16K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a512_zap.mdt", + }, { .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", @@ -168,7 +220,7 @@ static const struct 
adreno_info gpulist[] = { .init = a5xx_gpu_init, .zapfw = "a530_zap.mdt", }, { - .rev = ADRENO_REV(5, 4, 0, 2), + .rev = ADRENO_REV(5, 4, 0, ANY_ID), .revn = 540, .name = "A540", .fw = { diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f09175698827..0f184c3dd9d9 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -186,11 +186,18 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); } +void adreno_set_llc_attributes(struct iommu_domain *iommu) +{ + struct io_pgtable_domain_attr pgtbl_cfg; + + pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; + iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg); +} + struct msm_gem_address_space * adreno_iommu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct iommu_domain *iommu; struct msm_mmu *mmu; struct msm_gem_address_space *aspace; @@ -200,20 +207,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu, if (!iommu) return NULL; - - if (adreno_is_a6xx(adreno_gpu)) { - struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - struct io_pgtable_domain_attr pgtbl_cfg; - /* - * This allows GPU to set the bus attributes required to use system - * cache on behalf of the iommu page table walker. - */ - if (!IS_ERR(a6xx_gpu->htw_llc_slice)) { - pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; - iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg); - } - } - mmu = msm_iommu_new(&pdev->dev, iommu); if (IS_ERR(mmu)) { iommu_domain_free(iommu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index b3d9a333591b..ccac275aa7a2 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -197,11 +197,26 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu) return gpu->revn == 430; } +static inline int adreno_is_a508(struct adreno_gpu *gpu) +{ + return gpu->revn == 508; +} + +static inline int adreno_is_a509(struct adreno_gpu *gpu) +{ + return gpu->revn == 509; +} + static inline int adreno_is_a510(struct adreno_gpu *gpu) { return gpu->revn == 510; } +static inline int adreno_is_a512(struct adreno_gpu *gpu) +{ + return gpu->revn == 512; +} + static inline int adreno_is_a530(struct adreno_gpu *gpu) { return gpu->revn == 530; @@ -212,11 +227,6 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu) return gpu->revn == 540; } -static inline bool adreno_is_a6xx(struct adreno_gpu *gpu) -{ - return ((gpu->revn < 700 && gpu->revn > 599)); -} - static inline int adreno_is_a618(struct adreno_gpu *gpu) { return gpu->revn == 618; @@ -278,6 +288,8 @@ struct msm_gem_address_space * adreno_iommu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev); +void adreno_set_llc_attributes(struct iommu_domain *iommu); + /* * For a5xx and a6xx targets load the zap shader that is used to pull the GPU * out of secure mode diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 56eb22554197..9607a7644d4b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -71,7 +71,6 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, { struct dpu_hw_mixer *lm = mixer->hw_lm; uint32_t blend_op; - struct drm_format_name_buf format_name; /* default to opaque blending */ blend_op = 
DPU_BLEND_FG_ALPHA_FG_CONST | @@ -87,9 +86,8 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, lm->ops.setup_blend_config(lm, pstate->stage, 0xFF, 0, blend_op); - DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n", - drm_get_format_name(format->base.pixel_format, &format_name), - format->alpha_enable, blend_op); + DPU_DEBUG("format:%p4cc, alpha_en:%u blend_op:0x%x\n", + &format->base.pixel_format, format->alpha_enable, blend_op); } static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) @@ -576,7 +574,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, * of those planes explicitly here prior to plane flush. */ drm_atomic_crtc_for_each_plane(plane, crtc) - dpu_plane_restore(plane); + dpu_plane_restore(plane, state); /* update performance setting before crtc kickoff */ dpu_core_perf_crtc_update(crtc, 1, false); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 5a056c1191df..b2be39b9144e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -4,8 +4,10 @@ */ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ +#include <linux/delay.h> #include "dpu_encoder_phys.h" #include "dpu_hw_interrupts.h" +#include "dpu_hw_pingpong.h" #include "dpu_core_irq.h" #include "dpu_formats.h" #include "dpu_trace.h" @@ -35,6 +37,8 @@ #define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000 +#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000 + static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc) { return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false; @@ -368,15 +372,12 @@ static void dpu_encoder_phys_cmd_tearcheck_config( tc_cfg.vsync_count = vsync_hz / (mode->vtotal * drm_mode_vrefresh(mode)); - /* enable external TE after kickoff to avoid premature autorefresh */ - tc_cfg.hw_vsync_mode = 0; - /* - * By setting sync_cfg_height to near max register value, we essentially - * disable dpu hw generated TE signal, since hw TE will arrive first. - * Only caveat is if due to error, we hit wrap-around. 
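The replacement tearcheck settings just below switch to hardware vsync mode and size sync_cfg_height to twice vtotal, so a single missed TE pulse from the panel no longer stalls the pipe. A rough standalone sketch of how those fields are derived; the mode numbers and threshold values are illustrative placeholders, not taken from any real panel or the driver's DEFAULT_TEARCHECK_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative subset of the DPU command-mode tearcheck configuration. */
struct tc_cfg {
	uint32_t vsync_count;      /* divider for the TE counter clock */
	uint32_t hw_vsync_mode;    /* 1: sync to the panel TE pin */
	uint32_t sync_cfg_height;  /* rollover height for the TE counter */
	uint32_t vsync_init_val;
	uint32_t sync_threshold_start;
	uint32_t sync_threshold_continue;
};

static void tearcheck_config(struct tc_cfg *tc, uint32_t vsync_hz,
			     uint32_t vtotal, uint32_t vdisplay, uint32_t vrefresh)
{
	tc->vsync_count = vsync_hz / (vtotal * vrefresh);
	tc->hw_vsync_mode = 1;            /* external TE from the panel */
	tc->sync_cfg_height = vtotal * 2; /* survive one lost TE event */
	tc->vsync_init_val = vdisplay;
	tc->sync_threshold_start = 4;     /* placeholder threshold */
	tc->sync_threshold_continue = 4;  /* placeholder threshold */
}

int main(void)
{
	struct tc_cfg tc;

	/* Hypothetical 1080x2160@60 panel with a 19.2 MHz vsync clock. */
	tearcheck_config(&tc, 19200000, 2250, 2160, 60);
	printf("vsync_count=%u sync_cfg_height=%u\n", tc.vsync_count, tc.sync_cfg_height);
	return 0;
}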
+ * Set the sync_cfg_height to twice vtotal so that if we lose a + * TE event coming from the display TE pin we won't stall immediately */ - tc_cfg.sync_cfg_height = 0xFFF0; + tc_cfg.hw_vsync_mode = 1; + tc_cfg.sync_cfg_height = mode->vtotal * 2; tc_cfg.vsync_init_val = mode->vdisplay; tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START; tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE; @@ -580,6 +581,69 @@ static void dpu_encoder_phys_cmd_prepare_for_kickoff( atomic_read(&phys_enc->pending_kickoff_cnt)); } +static bool dpu_encoder_phys_cmd_is_ongoing_pptx( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_pp_vsync_info info; + + if (!phys_enc) + return false; + + phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info); + if (info.wr_ptr_line_count > 0 && + info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay) + return true; + + return false; +} + +static void dpu_encoder_phys_cmd_prepare_commit( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + int trial = 0; + + if (!phys_enc) + return; + if (!phys_enc->hw_pp) + return; + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return; + + /* If autorefresh is already disabled, we have nothing to do */ + if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL)) + return; + + /* + * If autorefresh is enabled, disable it and make sure it is safe to + * proceed with current frame commit/push. Sequence fallowed is, + * 1. Disable TE + * 2. Disable autorefresh config + * 4. Poll for frame transfer ongoing to be false + * 5. Enable TE back + */ + _dpu_encoder_phys_cmd_connect_te(phys_enc, false); + phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false); + + do { + udelay(DPU_ENC_MAX_POLL_TIMEOUT_US); + if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US) + > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) { + DPU_ERROR_CMDENC(cmd_enc, + "disable autorefresh failed\n"); + break; + } + + trial++; + } while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc)); + + _dpu_encoder_phys_cmd_connect_te(phys_enc, true); + + DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), + "disabled autorefresh\n"); +} + static int _dpu_encoder_phys_cmd_wait_for_ctl_start( struct dpu_encoder_phys *phys_enc) { @@ -621,20 +685,15 @@ static int dpu_encoder_phys_cmd_wait_for_tx_complete( static int dpu_encoder_phys_cmd_wait_for_commit_done( struct dpu_encoder_phys *phys_enc) { - int rc = 0; struct dpu_encoder_phys_cmd *cmd_enc; cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); /* only required for master controller */ - if (dpu_encoder_phys_cmd_is_master(phys_enc)) - rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc); - - /* required for both controllers */ - if (!rc && cmd_enc->serialize_wait4pp) - dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc); + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return 0; - return rc; + return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc); } static int dpu_encoder_phys_cmd_wait_for_vblank( @@ -681,6 +740,7 @@ static void dpu_encoder_phys_cmd_trigger_start( static void dpu_encoder_phys_cmd_init_ops( struct dpu_encoder_phys_ops *ops) { + ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit; ops->is_master = dpu_encoder_phys_cmd_is_master; ops->mode_set = dpu_encoder_phys_cmd_mode_set; ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 90393fe9e59c..189f3533525c 100644 --- 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -12,14 +12,17 @@ #define VIG_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ - BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\ + BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT)) #define VIG_SDM845_MASK \ - (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) + (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3)) #define VIG_SC7180_MASK \ - (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED4)) + (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4)) + +#define VIG_SM8250_MASK \ + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE)) #define DMA_SDM845_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\ @@ -185,7 +188,7 @@ static const struct dpu_caps sm8150_dpu_caps = { static const struct dpu_caps sm8250_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */ + .qseed_type = DPU_SSPP_SCALER_QSEED3LITE, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_40, .has_src_split = true, @@ -444,6 +447,34 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = { sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), }; +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 = + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 = + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 = + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 = + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE); + +static const struct dpu_sspp_cfg sm8250_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK, + sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK, + sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), + SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK, + sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), + SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK, + sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), +}; + /************************************************************* * MIXER sub blocks config *************************************************************/ @@ -532,23 +563,28 @@ static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = { .len = 0x90, .version = 0x40000}, }; -#define DSPP_BLK(_name, _id, _base, _sblk) \ +#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0x1800, \ - .features = DSPP_SC7180_MASK, \ + .features = _mask, \ .sblk = _sblk \ } static const struct dpu_dspp_cfg sc7180_dspp[] = { - DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk), + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK, + &sc7180_dspp_sblk), }; static const struct dpu_dspp_cfg sm8150_dspp[] = { 
- DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk), - DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk), - DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk), - DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk), + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), }; /************************************************************* @@ -624,33 +660,33 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = { /************************************************************* * INTF sub blocks config *************************************************************/ -#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _features) \ +#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0x280, \ .features = _features, \ .type = _type, \ .controller_id = _ctrl_id, \ - .prog_fetch_lines_worst_case = 24 \ + .prog_fetch_lines_worst_case = _progfetch \ } static const struct dpu_intf_cfg sdm845_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SDM845_MASK), - INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SDM845_MASK), - INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SDM845_MASK), - INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SDM845_MASK), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK), + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK), }; static const struct dpu_intf_cfg sc7180_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK), - INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK), }; static const struct dpu_intf_cfg sm8150_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, INTF_SC7180_MASK), - INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, INTF_SC7180_MASK), - INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, INTF_SC7180_MASK), - INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, INTF_SC7180_MASK), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK), + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK), }; /************************************************************* @@ -969,9 +1005,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg) .mdp = sm8250_mdp, .ctl_count = ARRAY_SIZE(sm8150_ctl), .ctl = sm8150_ctl, - /* TODO: sspp qseed version differs from 845 */ - .sspp_count = ARRAY_SIZE(sdm845_sspp), - .sspp = sdm845_sspp, + .sspp_count = ARRAY_SIZE(sm8250_sspp), + .sspp = sm8250_sspp, .mixer_count = ARRAY_SIZE(sm8150_lm), .mixer = sm8150_lm, .dspp_count = ARRAY_SIZE(sm8150_dspp), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index eaef99db2d2f..ea4647d21a20 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -95,6 
+95,7 @@ enum { * @DPU_SSPP_SRC Src and fetch part of the pipes, * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support + * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes * @DPU_SSPP_CSC, Support of Color space converion @@ -114,6 +115,7 @@ enum { DPU_SSPP_SRC = 0x1, DPU_SSPP_SCALER_QSEED2, DPU_SSPP_SCALER_QSEED3, + DPU_SSPP_SCALER_QSEED3LITE, DPU_SSPP_SCALER_QSEED4, DPU_SSPP_SCALER_RGB, DPU_SSPP_CSC, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c index bea4ab5c58c5..245a7a62b5c6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c @@ -23,6 +23,7 @@ #define PP_WR_PTR_IRQ 0x024 #define PP_OUT_LINE_COUNT 0x028 #define PP_LINE_COUNT 0x02C +#define PP_AUTOREFRESH_CONFIG 0x030 #define PP_FBC_MODE 0x034 #define PP_FBC_BUDGET_CTL 0x038 @@ -120,6 +121,29 @@ static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp, return 0; } +static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp, + u32 frame_count, bool enable) +{ + DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG, + enable ? (BIT(31) | frame_count) : 0); +} + +/* + * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW + * @pp: DPU pingpong structure + * @frame_count: Used to return the current frame count from hw + * + * Returns: True if autorefresh enabled, false if disabled. + */ +static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp, + u32 *frame_count) +{ + u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG); + if (frame_count != NULL) + *frame_count = val & 0xffff; + return !!((val & BIT(31)) >> 31); +} + static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp, u32 timeout_us) { @@ -228,6 +252,8 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c, c->ops.enable_tearcheck = dpu_hw_pp_enable_te; c->ops.connect_external_te = dpu_hw_pp_connect_external_te; c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info; + c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config; + c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config; c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr; c->ops.get_line_count = dpu_hw_pp_get_line_count; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index 6902b9b95c8e..845b9ce80e31 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -63,6 +63,8 @@ struct dpu_hw_dither_cfg { * @setup_tearcheck : program tear check values * @enable_tearcheck : enables tear check * @get_vsync_info : retries timing info of the panel + * @setup_autorefresh : configure and enable the autorefresh config + * @get_autorefresh : retrieve autorefresh config from hardware * @setup_dither : function to program the dither hw block * @get_line_count: obtain current vertical line counter */ @@ -95,6 +97,18 @@ struct dpu_hw_pingpong_ops { struct dpu_hw_pp_vsync_info *info); /** + * configure and enable the autorefresh config + */ + void (*setup_autorefresh)(struct dpu_hw_pingpong *pp, + u32 frame_count, bool enable); + + /** + * retrieve autorefresh config from hardware + */ + bool (*get_autorefresh)(struct dpu_hw_pingpong *pp, + u32 *frame_count); + + /** * poll until write pointer transmission starts * @Return: 0 on success, -ETIMEDOUT on 
timeout */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 2c2ca5335aa8..34d81aa16041 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -673,6 +673,7 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c, c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) || + test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) || test_bit(DPU_SSPP_SCALER_QSEED4, &features)) { c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index 85b018a9b03c..fdfd4b46e2c6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -28,6 +28,7 @@ struct dpu_hw_pipe; #define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \ (1UL << DPU_SSPP_SCALER_QSEED2) | \ (1UL << DPU_SSPP_SCALER_QSEED3) | \ + (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \ (1UL << DPU_SSPP_SCALER_QSEED4)) /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c index 84e9875994a8..f94584c982cd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c @@ -59,6 +59,19 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE; #define QSEED3_SEP_LUT_SIZE \ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32)) +/* DPU_SCALER_QSEED3LITE */ +#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4 +#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5 +#define QSEED3LITE_COEF_LUT_CTRL 0x4C +#define QSEED3LITE_COEF_LUT_SWAP_BIT 0 +#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60 +#define QSEED3LITE_FILTERS 2 +#define QSEED3LITE_SEPARABLE_LUTS 10 +#define QSEED3LITE_LUT_SIZE 33 +#define QSEED3LITE_SEP_LUT_SIZE \ + (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32)) + + void dpu_reg_write(struct dpu_hw_blk_reg_map *c, u32 reg_off, u32 val, @@ -156,6 +169,57 @@ static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c, } +static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c, + struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset) +{ + int j, filter; + int config_lut = 0x0; + unsigned long lut_flags; + u32 lut_addr, lut_offset; + u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL}; + static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 }; + + DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight); + + if (!scaler3_cfg->sep_lut) + return; + + lut_flags = (unsigned long) scaler3_cfg->lut_flag; + if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) && + (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) { + lut[0] = scaler3_cfg->sep_lut + + scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE; + config_lut = 1; + } + if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) && + (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) { + lut[1] = scaler3_cfg->sep_lut + + scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE; + config_lut = 1; + } + + if (config_lut) { + for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) { + if (!lut[filter]) + continue; + lut_offset = 0; + lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter]; + for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) { + DPU_REG_WRITE(c, + lut_addr, + (lut[filter])[lut_offset++]); + lut_addr += 4; + } + } + } + 
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags)) + DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0)); + +} + static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c, struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset) { @@ -242,9 +306,12 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, op_mode |= BIT(8); } - if (scaler3_cfg->lut_flag) - _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, - scaler_offset); + if (scaler3_cfg->lut_flag) { + if (scaler_version < 0x2004) + _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset); + else + _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset); + } if (scaler_version == 0x1002) { phase_init = diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index 234eb7d65753..ff3cffde84cd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -97,6 +97,7 @@ struct dpu_hw_scaler3_de_cfg { * @ cir_lut: pointer to circular filter LUT * @ sep_lut: pointer to separable filter LUT * @ de: detail enhancer configuration + * @ dir_weight: Directional weight */ struct dpu_hw_scaler3_cfg { u32 enable; @@ -137,6 +138,8 @@ struct dpu_hw_scaler3_cfg { * Detail enhancer settings */ struct dpu_hw_scaler3_de_cfg de; + + u32 dir_weight; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c index cf867f3f7c36..b757054e1c23 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -30,7 +30,7 @@ #define VBIF_XIN_HALT_CTRL0 0x0200 #define VBIF_XIN_HALT_CTRL1 0x0204 #define VBIF_XINL_QOS_RP_REMAP_000 0x0550 -#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590 +#define VBIF_XINL_QOS_LVL_REMAP_000(v) (v < DPU_HW_VER_400 ? 
0x570 : 0x0590) static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif, u32 *pnd_errors, u32 *src_errors) @@ -156,18 +156,19 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif, u32 xin_id, u32 level, u32 remap_level) { struct dpu_hw_blk_reg_map *c; - u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift; + u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift; if (!vbif) return; c = &vbif->hw; + reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(c->hwversion); reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8); reg_shift = (xin_id & 0x7) * 4; reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high); - reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high); + reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high); mask = 0x7 << reg_shift; @@ -178,7 +179,7 @@ static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif, reg_val_lvl |= (remap_level << reg_shift) & mask; DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val); - DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl); + DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl); } static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 374b0e8471e6..5a8e3e1fc48c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -749,7 +749,7 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, case DRM_MODE_ENCODER_TMDS: info.num_of_h_tiles = 1; break; - }; + } rc = dpu_encoder_setup(encoder->dev, encoder, &info); if (rc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index bc0231a50132..df7f3d3afd8b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -10,10 +10,11 @@ #include <linux/debugfs.h> #include <linux/dma-buf.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_damage_helper.h> #include <drm/drm_file.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include "msm_drv.h" #include "dpu_kms.h" @@ -892,7 +893,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane, * we can use msm_atomic_prepare_fb() instead of doing the * implicit fence and fb prepare by hand here. 
*/ - drm_gem_fb_prepare_fb(plane, new_state); + drm_gem_plane_helper_prepare_fb(plane, new_state); if (pstate->aspace) { ret = msm_framebuffer_prepare(new_state->fb, @@ -950,44 +951,47 @@ static bool dpu_plane_validate_src(struct drm_rect *src, } static int dpu_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); int ret = 0, min_scale; struct dpu_plane *pdpu = to_dpu_plane(plane); - struct dpu_plane_state *pstate = to_dpu_plane_state(state); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); const struct drm_crtc_state *crtc_state = NULL; const struct dpu_format *fmt; struct drm_rect src, dst, fb_rect = { 0 }; uint32_t min_src_size, max_linewidth; - if (state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state->state, - state->crtc); + if (new_plane_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale, - pdpu->pipe_sblk->maxdwnscale << 16, - true, true); + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + min_scale, + pdpu->pipe_sblk->maxdwnscale << 16, + true, true); if (ret) { DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); return ret; } - if (!state->visible) + if (!new_plane_state->visible) return 0; - src.x1 = state->src_x >> 16; - src.y1 = state->src_y >> 16; - src.x2 = src.x1 + (state->src_w >> 16); - src.y2 = src.y1 + (state->src_h >> 16); + src.x1 = new_plane_state->src_x >> 16; + src.y1 = new_plane_state->src_y >> 16; + src.x2 = src.x1 + (new_plane_state->src_w >> 16); + src.y2 = src.y1 + (new_plane_state->src_h >> 16); - dst = drm_plane_state_dest(state); + dst = drm_plane_state_dest(new_plane_state); - fb_rect.x2 = state->fb->width; - fb_rect.y2 = state->fb->height; + fb_rect.x2 = new_plane_state->fb->width; + fb_rect.y2 = new_plane_state->fb->height; max_linewidth = pdpu->catalog->caps->max_linewidth; - fmt = to_dpu_format(msm_framebuffer_format(state->fb)); + fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb)); min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 
2 : 1; @@ -1237,23 +1241,24 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane) } static void dpu_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct dpu_plane *pdpu = to_dpu_plane(plane); - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); pdpu->is_error = false; DPU_DEBUG_PLANE(pdpu, "\n"); - if (!state->visible) { + if (!new_state->visible) { _dpu_plane_atomic_disable(plane); } else { dpu_plane_sspp_atomic_update(plane); } } -void dpu_plane_restore(struct drm_plane *plane) +void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state) { struct dpu_plane *pdpu; @@ -1266,8 +1271,7 @@ void dpu_plane_restore(struct drm_plane *plane) DPU_DEBUG_PLANE(pdpu, "\n"); - /* last plane state is same as current state */ - dpu_plane_atomic_update(plane, plane->state); + dpu_plane_atomic_update(plane, state); } static void dpu_plane_destroy(struct drm_plane *plane) @@ -1465,6 +1469,7 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane) pdpu->debugfs_root, &pdpu->debugfs_src); if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) || cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) { dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index 13a983fa8213..03b6365a750c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -88,7 +88,7 @@ void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl, * dpu_plane_restore - restore hw state if previously power collapsed * @plane: Pointer to drm plane structure */ -void dpu_plane_restore(struct drm_plane *plane); +void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state); /** * dpu_plane_flush - final plane operations before commit flush diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c index da3cc1d8c331..9aecca919f24 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c @@ -4,6 +4,7 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <drm/drm_atomic.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> @@ -106,23 +107,24 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane, static int mdp4_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { return 0; } static void mdp4_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); int ret; ret = mdp4_plane_mode_set(plane, - state->crtc, state->fb, - state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - state->src_x, state->src_y, - state->src_w, state->src_h); + new_state->crtc, new_state->fb, + new_state->crtc_x, new_state->crtc_y, + new_state->crtc_w, new_state->crtc_h, + new_state->src_x, new_state->src_y, + new_state->src_w, new_state->src_h); /* atomic_check should have ensured that this doesn't fail */ WARN_ON(ret < 0); } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index df10c1ac7591..94ce62a26daf 100644 
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -177,7 +177,7 @@ static const struct mdp5_cfg_hw msm8x74v2_config = { [3] = INTF_HDMI, }, }, - .max_clk = 200000000, + .max_clk = 320000000, }; static const struct mdp5_cfg_hw apq8084_config = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 0c8f9f88301f..f5d71b274079 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, pp_done); - complete(&mdp5_crtc->pp_completion); + complete_all(&mdp5_crtc->pp_completion); } static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 83423092de2f..8c9f2f492178 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -5,6 +5,7 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <drm/drm_atomic.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_print.h> @@ -403,76 +404,84 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, } static int mdp5_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - crtc = state->crtc ? state->crtc : plane->state->crtc; + crtc = new_plane_state->crtc ? 
new_plane_state->crtc : old_plane_state->crtc; if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + crtc); if (WARN_ON(!crtc_state)) return -EINVAL; - return mdp5_plane_atomic_check_with_state(crtc_state, state); + return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state); } static void mdp5_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); DBG("%s: update", plane->name); - if (plane_enabled(state)) { + if (plane_enabled(new_state)) { int ret; ret = mdp5_plane_mode_set(plane, - state->crtc, state->fb, - &state->src, &state->dst); + new_state->crtc, new_state->fb, + &new_state->src, &new_state->dst); /* atomic_check should have ensured that this doesn't fail */ WARN_ON(ret < 0); } } static int mdp5_plane_atomic_async_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state); struct drm_crtc_state *crtc_state; int min_scale, max_scale; int ret; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_plane_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; if (!crtc_state->active) return -EINVAL; - mdp5_state = to_mdp5_plane_state(state); + mdp5_state = to_mdp5_plane_state(new_plane_state); /* don't use fast path if we don't have a hwpipe allocated yet */ if (!mdp5_state->hwpipe) return -EINVAL; /* only allow changing of position(crtc x/y or src x/y) in fast path */ - if (plane->state->crtc != state->crtc || - plane->state->src_w != state->src_w || - plane->state->src_h != state->src_h || - plane->state->crtc_w != state->crtc_w || - plane->state->crtc_h != state->crtc_h || + if (plane->state->crtc != new_plane_state->crtc || + plane->state->src_w != new_plane_state->src_w || + plane->state->src_h != new_plane_state->src_h || + plane->state->crtc_w != new_plane_state->crtc_w || + plane->state->crtc_h != new_plane_state->crtc_h || !plane->state->fb || - plane->state->fb != state->fb) + plane->state->fb != new_plane_state->fb) return -EINVAL; min_scale = FRAC_16_16(1, 8); max_scale = FRAC_16_16(8, 1); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, min_scale, max_scale, true, true); if (ret) @@ -485,15 +494,17 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane, * also assign/unassign the hwpipe(s) tied to the plane. We avoid * taking the fast path for both these reasons. 
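These mdp4/mdp5/dpu hunks all follow the same tree-wide conversion: plane helpers now receive the whole struct drm_atomic_state and look up their own old and new plane states instead of being handed a drm_plane_state directly. Below is a drastically simplified sketch of that calling convention; the struct definitions are tiny stand-ins so the example compiles on its own, not the real DRM types from <drm/drm_atomic.h>:

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the DRM objects involved. */
struct drm_plane_state { int src_w, src_h; };
struct drm_plane { const char *name; struct drm_plane_state *state; };
struct drm_atomic_state {
	struct drm_plane *plane;
	struct drm_plane_state *old_state, *new_state; /* real code tracks both */
};

static struct drm_plane_state *
get_new_plane_state(struct drm_atomic_state *s, struct drm_plane *p)
{
	return p == s->plane ? s->new_state : NULL;
}

/* New-style atomic_check: derive the plane state from the global atomic state. */
static int plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = get_new_plane_state(state, plane);

	if (!new_state)
		return -1;
	printf("%s: checking %dx%d\n", plane->name, new_state->src_w, new_state->src_h);
	return 0;
}

int main(void)
{
	struct drm_plane plane = { .name = "plane-A" };
	struct drm_plane_state ns = { .src_w = 1920, .src_h = 1080 };
	struct drm_atomic_state state = { .plane = &plane, .new_state = &ns };

	return plane_atomic_check(&plane, &state);
}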
*/ - if (state->visible != plane->state->visible) + if (new_plane_state->visible != plane->state->visible) return -EINVAL; return 0; } static void mdp5_plane_atomic_async_update(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_framebuffer *old_fb = plane->state->fb; plane->state->src_x = new_state->src_x; diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 19b35ae3e927..1c6e1d2b947c 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -336,7 +336,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ssize_t ret; int const aux_cmd_native_max = 16; int const aux_cmd_i2c_max = 128; - int const retry_count = 5; struct dp_aux_private *aux = container_of(dp_aux, struct dp_aux_private, dp_aux); @@ -378,12 +377,6 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, ret = dp_aux_cmd_fifo_tx(aux, msg); if (ret < 0) { - if (aux->native) { - aux->retry_cnt++; - if (!(aux->retry_cnt % retry_count)) - dp_catalog_aux_update_cfg(aux->catalog); - dp_catalog_aux_reset(aux->catalog); - } usleep_range(400, 500); /* at least 400us to next try */ goto unlock_exit; } diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 44f0c57798d0..b1a9b1b98f5f 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -190,6 +190,18 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) return 0; } +/** + * dp_catalog_aux_reset() - reset AUX controller + * + * @aux: DP catalog structure + * + * return: void + * + * This function reset AUX controller + * + * NOTE: reset AUX controller will also clear any pending HPD related interrupts + * + */ void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) { u32 aux_ctrl; @@ -483,6 +495,18 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, return 0; } +/** + * dp_catalog_ctrl_reset() - reset DP controller + * + * @dp_catalog: DP catalog structure + * + * return: void + * + * This function reset the DP controller + * + * NOTE: reset DP controller will also clear any pending HPD related interrupts + * + */ void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) { u32 sw_reset; diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 36b39c381b3f..1390f3547fde 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in, tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (!tu) - return + return; dp_panel_update_tu_timings(in, tu); @@ -1158,7 +1158,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) default: ret = -EINVAL; break; - }; + } if (!ret) DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate); @@ -1296,7 +1296,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, * transitioned to PUSH_IDLE. In order to start transmitting * a link training pattern, we have to first do soft reset. 
*/ - dp_catalog_ctrl_reset(ctrl->catalog); ret = dp_ctrl_link_train(ctrl, cr, training_step); @@ -1365,7 +1364,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl) return ret; } -int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset) { struct dp_ctrl_private *ctrl; struct dp_io *dp_io; @@ -1382,6 +1381,9 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) ctrl->dp_ctrl.orientation = flip; + if (reset) + dp_catalog_ctrl_reset(ctrl->catalog); + dp_catalog_ctrl_phy_reset(ctrl->catalog); phy_init(phy); dp_catalog_ctrl_enable_irq(ctrl->catalog, true); @@ -1496,7 +1498,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) int training_step = DP_TRAINING_NONE; dp_ctrl_push_idle(&ctrl->dp_ctrl); - dp_catalog_ctrl_reset(ctrl->catalog); ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; @@ -1785,14 +1786,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) * Set up transfer unit values and set controller state to send * video. */ + reinit_completion(&ctrl->video_comp); + dp_ctrl_configure_source_params(ctrl); dp_catalog_ctrl_config_msa(ctrl->catalog, ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl)); - reinit_completion(&ctrl->video_comp); - dp_ctrl_setup_tr_unit(ctrl); dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index f60ba93c8678..a836bd358447 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -19,7 +19,7 @@ struct dp_ctrl { u32 pixel_rate; }; -int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip); +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset); void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl); int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 3bc7ed21de28..5a39da6e1eaf 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -350,7 +350,7 @@ end: return rc; } -static void dp_display_host_init(struct dp_display_private *dp) +static void dp_display_host_init(struct dp_display_private *dp, int reset) { bool flip = false; @@ -365,7 +365,7 @@ static void dp_display_host_init(struct dp_display_private *dp) dp_display_set_encoder_mode(dp); dp_power_init(dp->power, flip); - dp_ctrl_host_init(dp->ctrl, flip); + dp_ctrl_host_init(dp->ctrl, flip, reset); dp_aux_init(dp->aux); dp->core_initialized = true; } @@ -403,7 +403,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev) goto end; } - dp_display_host_init(dp); + dp_display_host_init(dp, false); /* * set sink to normal operation mode -- D0 @@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); /* signal the disconnect event early to ensure proper teardown */ - dp_display_handle_plugged_change(g_dp_display, false); reinit_completion(&dp->audio_comp); + dp_display_handle_plugged_change(g_dp_display, false); dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK, true); @@ -700,6 +700,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) return 0; } + if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) { + /* wait until ST_CONNECTED */ + dp_add_event(dp, 
EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ + mutex_unlock(&dp->event_mutex); + return 0; + } + ret = dp_display_usbpd_attention_cb(&dp->pdev->dev); if (ret == -ECONNRESET) { /* cable unplugged */ dp->core_initialized = false; @@ -890,6 +897,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) /* wait only if audio was enabled */ if (dp_display->audio_enabled) { + /* signal the disconnect event */ + reinit_completion(&dp->audio_comp); + dp_display_handle_plugged_change(dp_display, false); if (!wait_for_completion_timeout(&dp->audio_comp, HZ * 5)) DRM_ERROR("audio comp timeout\n"); @@ -1002,7 +1012,7 @@ int dp_display_get_test_bpp(struct msm_dp *dp) static void dp_display_config_hpd(struct dp_display_private *dp) { - dp_display_host_init(dp); + dp_display_host_init(dp, true); dp_catalog_ctrl_hpd_config(dp->catalog); /* Enable interrupt first time @@ -1256,7 +1266,7 @@ static int dp_pm_resume(struct device *dev) dp->hpd_state = ST_DISCONNECTED; /* turn on dp ctrl/phy */ - dp_display_host_init(dp); + dp_display_host_init(dp, true); dp_catalog_ctrl_hpd_config(dp->catalog); @@ -1439,7 +1449,7 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) state = dp_display->hpd_state; if (state == ST_DISPLAY_OFF) - dp_display_host_init(dp_display); + dp_display_host_init(dp_display, true); dp_display_enable(dp_display, 0); diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index d1780bcac8cc..9cc816663668 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -409,7 +409,6 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel) int dp_panel_init_panel_info(struct dp_panel *dp_panel) { - int rc = 0; struct drm_display_mode *drm_mode; drm_mode = &dp_panel->dp_mode.drm_mode; @@ -436,7 +435,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel) min_t(u32, dp_panel->dp_mode.bpp, 30)); DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp); - return rc; + return 0; } struct dp_panel *dp_panel_get(struct dp_panel_in *in) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index 1afb7c579dbb..eca86bf448f7 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .disable = dsi_20nm_phy_disable, .init = msm_dsi_phy_init_common, }, - .io_start = { 0xfd998300, 0xfd9a0300 }, + .io_start = { 0xfd998500, 0xfd9a0500 }, .num_dsi_phy = 2, }; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index e4e9bf04b736..de3b802ccd3d 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -172,9 +172,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll) multiplier = 1 << config->frac_bits; dec_multiple = div_u64(pll_freq * multiplier, divider); - div_u64_rem(dec_multiple, multiplier, &frac); - - dec = div_u64(dec_multiple, multiplier); + dec = div_u64_rem(dec_multiple, multiplier, &frac); if (pll_freq <= 1900000000UL) regs->pll_prop_gain_rate = 8; @@ -306,7 +304,8 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll) reg->frac_div_start_mid); pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high); - pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, + reg->pll_lockdet_rate); pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); 
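The dsi_pll_calc_dec_frac() change above folds the separate div_u64() and div_u64_rem() calls into one call that returns the quotient while storing the remainder, and the recalc path a little further on derives the multiplier from frac_bits instead of hardcoding 2^18. A small userspace sketch of the round trip; the frequencies and divider are made-up, and plain / and % stand in for the kernel's div_u64 helpers:

#include <stdint.h>
#include <stdio.h>

/* Split a target PLL frequency into integer (dec) and fractional parts. */
static void calc_dec_frac(uint64_t pll_freq, uint64_t divider, unsigned int frac_bits,
			  uint64_t *dec, uint32_t *frac)
{
	uint64_t multiplier = 1ULL << frac_bits;
	uint64_t dec_multiple = pll_freq * multiplier / divider;

	*dec = dec_multiple / multiplier;              /* quotient, as div_u64_rem() returns */
	*frac = (uint32_t)(dec_multiple % multiplier); /* remainder it stores via the pointer */
}

/* Reconstruct the VCO rate the way recalc_rate() does, using 1 << frac_bits. */
static uint64_t recalc_rate(uint64_t ref_clk, uint64_t dec, uint32_t frac,
			    unsigned int frac_bits)
{
	uint64_t multiplier = 1ULL << frac_bits;

	return dec * (ref_clk * 2) + (ref_clk * 2 * frac) / multiplier;
}

int main(void)
{
	uint64_t dec, pll_freq = 1500000000ULL, ref_clk = 19200000ULL;
	uint32_t frac;

	calc_dec_frac(pll_freq, ref_clk * 2, 18, &dec, &frac);
	printf("dec=%llu frac=%u -> %llu Hz\n", (unsigned long long)dec, frac,
	       (unsigned long long)recalc_rate(ref_clk, dec, frac, 18));
	return 0;
}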
pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10); pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS, @@ -345,6 +344,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll) { + struct device *dev = &pll->pdev->dev; int rc; u32 status = 0; u32 const delay_us = 100; @@ -357,8 +357,8 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll) delay_us, timeout_us); if (rc) - pr_err("DSI PLL(%d) lock failed, status=0x%08x\n", - pll->id, status); + DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n", + pll->id, status); return rc; } @@ -405,6 +405,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct device *dev = &pll_10nm->pdev->dev; int rc; dsi_pll_enable_pll_bias(pll_10nm); @@ -413,7 +414,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) rc = dsi_pll_10nm_vco_set_rate(hw,pll_10nm->vco_current_rate, 0); if (rc) { - pr_err("vco_set_rate failed, rc=%d\n", rc); + DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc); return rc; } @@ -430,7 +431,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) /* Check for PLL lock */ rc = dsi_pll_10nm_lock_status(pll_10nm); if (rc) { - pr_err("PLL(%d) lock failed\n", pll_10nm->id); + DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id); goto error; } @@ -483,6 +484,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, { struct msm_dsi_pll *pll = hw_clk_to_pll(hw); struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct dsi_pll_config *config = &pll_10nm->pll_configuration; void __iomem *base = pll_10nm->mmio; u64 ref_clk = pll_10nm->vco_ref_clk_rate; u64 vco_rate = 0x0; @@ -503,9 +505,8 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, /* * TODO: * 1. Assumes prescaler is disabled - * 2. Multiplier is 2^18. 
it should be 2^(num_of_frac_bits) */ - multiplier = 1 << 18; + multiplier = 1 << config->frac_bits; pll_freq = dec * (ref_clk * 2); tmp64 = (ref_clk * 2 * frac); pll_freq += div_u64(tmp64, multiplier); diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 6a326761dc4a..e9c6544b6a01 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -5,7 +5,7 @@ */ #include <drm/drm_atomic_uapi.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_vblank.h> #include "msm_atomic_trace.h" @@ -22,7 +22,7 @@ int msm_atomic_prepare_fb(struct drm_plane *plane, if (!new_state->fb) return 0; - drm_gem_fb_prepare_fb(plane, new_state); + drm_gem_plane_helper_prepare_fb(plane, new_state); return msm_framebuffer_prepare(new_state->fb, kms->aspace); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 108c405e03dd..94525ac76d4e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *obj, uint64_t *iova) { + struct msm_drm_private *priv = dev->dev_private; struct msm_file_private *ctx = file->driver_priv; - if (!ctx->aspace) + if (!priv->gpu) return -EINVAL; /* diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index a588b0388d27..f091c1e164fa 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -987,8 +987,7 @@ void msm_gem_free_object(struct drm_gem_object *obj) /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: */ - if (msm_obj->pages) - kvfree(msm_obj->pages); + kvfree(msm_obj->pages); put_iova_vmas(obj); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index d04c349d8112..5480852bdeda 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, submit->cmd[i].idx = submit_cmd.submit_idx; submit->cmd[i].nr_relocs = submit_cmd.nr_relocs; + userptr = u64_to_user_ptr(submit_cmd.relocs); + sz = array_size(submit_cmd.nr_relocs, sizeof(struct drm_msm_gem_submit_reloc)); /* check for overflow: */ diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index d8151a89e163..4735251a394d 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -157,6 +157,7 @@ struct msm_kms { * from the crtc's pending_timer close to end of the frame: */ struct mutex commit_lock[MAX_CRTCS]; + struct lock_class_key commit_lock_keys[MAX_CRTCS]; unsigned pending_crtc_mask; struct msm_pending_timer pending_timers[MAX_CRTCS]; }; @@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms, { unsigned i, ret; - for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) - mutex_init(&kms->commit_lock[i]); + for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) { + lockdep_register_key(&kms->commit_lock_keys[i]); + __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]", + &kms->commit_lock_keys[i]); + } kms->funcs = funcs; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 3e1bb0aefb87..300e7bab0f43 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -21,8 +21,8 @@ #include <drm/drm_encoder.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include 
<drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> @@ -402,12 +402,14 @@ static const struct drm_encoder_funcs mxsfb_encoder_funcs = { */ static int mxsfb_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *plane_state) + struct drm_atomic_state *state) { + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, + plane); struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev); struct drm_crtc_state *crtc_state; - crtc_state = drm_atomic_get_new_crtc_state(plane_state->state, + crtc_state = drm_atomic_get_new_crtc_state(state, &mxsfb->crtc); return drm_atomic_helper_check_plane_state(plane_state, crtc_state, @@ -417,7 +419,7 @@ static int mxsfb_plane_atomic_check(struct drm_plane *plane, } static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_pstate) + struct drm_atomic_state *state) { struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev); dma_addr_t paddr; @@ -428,10 +430,13 @@ static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane, } static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_pstate) + struct drm_atomic_state *state) { + struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state, + plane); struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev); - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state, + plane); dma_addr_t paddr; u32 ctrl; @@ -460,7 +465,7 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane, ctrl = AS_CTRL_AS_ENABLE | AS_CTRL_ALPHA(255); - switch (state->fb->format->format) { + switch (new_pstate->fb->format->format) { case DRM_FORMAT_XRGB4444: ctrl |= AS_CTRL_FORMAT_RGB444 | AS_CTRL_ALPHA_CTRL_OVERRIDE; break; @@ -495,13 +500,13 @@ static bool mxsfb_format_mod_supported(struct drm_plane *plane, } static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_primary_atomic_update, }; static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_overlay_atomic_update, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c index 302d4e6fc52f..788db043a342 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c @@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) NVVAL(NV507C, SET_CONVERSION, OFS, 0x64)); } else { PUSH_MTHD(push, NV507C, SET_PROCESSING, - NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE)); + NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE), + + SET_CONVERSION, + NVVAL(NV507C, SET_CONVERSION, GAIN, 0) | + NVVAL(NV507C, SET_CONVERSION, OFS, 0)); } PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8); diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c index 18d34096f125..093d4ba6910e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c +++ 
b/drivers/gpu/drm/nouveau/dispnv50/base827c.c @@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) NVVAL(NV827C, SET_CONVERSION, OFS, 0x64)); } else { PUSH_MTHD(push, NV827C, SET_PROCESSING, - NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE)); + NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE), + + SET_CONVERSION, + NVVAL(NV827C, SET_CONVERSION, GAIN, 0) | + NVVAL(NV827C, SET_CONVERSION, OFS, 0)); } PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index c9e1575a06a2..196612addfd6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2693,6 +2693,14 @@ nv50_display_create(struct drm_device *dev) else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; + if (disp->disp->object.oclass >= GK104_DISP) { + dev->mode_config.cursor_width = 256; + dev->mode_config.cursor_height = 256; + } else { + dev->mode_config.cursor_width = 64; + dev->mode_config.cursor_height = 64; + } + /* create crtc objects to represent the hw heads */ if (disp->disp->object.oclass >= GV100_DISP) crtcs = nvif_rd32(&device->object, 0x610060) & 0xff; diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c index a5d827403660..ea9f8667305e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c @@ -22,6 +22,7 @@ #include "head.h" #include "core.h" +#include "nvif/push.h" #include <nvif/push507c.h> #include <nvhw/class/cl917d.h> @@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh) return 0; } +static int +head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 5); + if (ret) + return ret; + + PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i), + NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) | + NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND), + + HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8); + + PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle); + return 0; +} + int head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw, struct nv50_head_atom *asyh) @@ -101,7 +127,7 @@ head917d = { .core_clr = head907d_core_clr, .curs_layout = head917d_curs_layout, .curs_format = head507d_curs_format, - .curs_set = head907d_curs_set, + .curs_set = head917d_curs_set, .curs_clr = head907d_curs_clr, .base = head917d_base, .ovly = head907d_ovly, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index ce451242f79e..0cb1f9d848d3 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -30,6 +30,7 @@ #include <nvhw/class/cl507e.h> #include <nvhw/class/clc37e.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> @@ -434,12 +435,15 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, } static int -nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) 
+nv50_wndw_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct nouveau_drm *drm = nouveau_drm(plane->dev); struct nv50_wndw *wndw = nv50_wndw(plane); struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state); - struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); + struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); struct nv50_head_atom *harm = NULL, *asyh = NULL; bool modeset = false; int ret; @@ -702,6 +706,11 @@ nv50_wndw_init(struct nv50_wndw *wndw) nvif_notify_get(&wndw->notify); } +static const u64 nv50_cursor_format_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + int nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, enum drm_plane_type type, const char *name, int index, @@ -713,6 +722,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, struct nvif_mmu *mmu = &drm->client.mmu; struct nv50_disp *disp = nv50_disp(dev); struct nv50_wndw *wndw; + const u64 *format_modifiers; int nformat; int ret; @@ -728,10 +738,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, for (nformat = 0; format[nformat]; nformat++); - ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, - format, nformat, - nouveau_display(dev)->format_modifiers, - type, "%s-%d", name, index); + if (type == DRM_PLANE_TYPE_CURSOR) + format_modifiers = nv50_cursor_format_modifiers; + else + format_modifiers = nouveau_display(dev)->format_modifiers; + + ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat, + format_modifiers, type, "%s-%d", name, index); if (ret) { kfree(*pwndw); *pwndw = NULL; diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h index 2a2612d6e1e0..fb223723a38a 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h @@ -66,6 +66,10 @@ #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300) +#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 #define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index 57d4f457a7d4..0b86c44878e0 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -60,37 +60,33 @@ struct nv_device_time_v0 { #define NV_DEVICE_INFO_UNIT (0xffffffffULL << 32) #define NV_DEVICE_INFO(n) ((n) | (0x00000000ULL << 32)) -#define NV_DEVICE_FIFO(n) ((n) | (0x00000001ULL << 32)) +#define NV_DEVICE_HOST(n) ((n) | (0x00000001ULL << 32)) -/* This will be returned for unsupported queries. */ +/* This will be returned in the mthd field for unsupported queries. */ #define NV_DEVICE_INFO_INVALID ~0ULL -/* These return a mask of available engines of particular type. 
*/ -#define NV_DEVICE_INFO_ENGINE_SW NV_DEVICE_INFO(0x00000000) -#define NV_DEVICE_INFO_ENGINE_GR NV_DEVICE_INFO(0x00000001) -#define NV_DEVICE_INFO_ENGINE_MPEG NV_DEVICE_INFO(0x00000002) -#define NV_DEVICE_INFO_ENGINE_ME NV_DEVICE_INFO(0x00000003) -#define NV_DEVICE_INFO_ENGINE_CIPHER NV_DEVICE_INFO(0x00000004) -#define NV_DEVICE_INFO_ENGINE_BSP NV_DEVICE_INFO(0x00000005) -#define NV_DEVICE_INFO_ENGINE_VP NV_DEVICE_INFO(0x00000006) -#define NV_DEVICE_INFO_ENGINE_CE NV_DEVICE_INFO(0x00000007) -#define NV_DEVICE_INFO_ENGINE_SEC NV_DEVICE_INFO(0x00000008) -#define NV_DEVICE_INFO_ENGINE_MSVLD NV_DEVICE_INFO(0x00000009) -#define NV_DEVICE_INFO_ENGINE_MSPDEC NV_DEVICE_INFO(0x0000000a) -#define NV_DEVICE_INFO_ENGINE_MSPPP NV_DEVICE_INFO(0x0000000b) -#define NV_DEVICE_INFO_ENGINE_MSENC NV_DEVICE_INFO(0x0000000c) -#define NV_DEVICE_INFO_ENGINE_VIC NV_DEVICE_INFO(0x0000000d) -#define NV_DEVICE_INFO_ENGINE_SEC2 NV_DEVICE_INFO(0x0000000e) -#define NV_DEVICE_INFO_ENGINE_NVDEC NV_DEVICE_INFO(0x0000000f) -#define NV_DEVICE_INFO_ENGINE_NVENC NV_DEVICE_INFO(0x00000010) - +/* Returns the number of available runlists. */ +#define NV_DEVICE_HOST_RUNLISTS NV_DEVICE_HOST(0x00000000) /* Returns the number of available channels. */ -#define NV_DEVICE_FIFO_CHANNELS NV_DEVICE_FIFO(0x00000000) - -/* Returns a mask of available runlists. */ -#define NV_DEVICE_FIFO_RUNLISTS NV_DEVICE_FIFO(0x00000001) +#define NV_DEVICE_HOST_CHANNELS NV_DEVICE_HOST(0x00000001) -/* These return a mask of engines available on a particular runlist. */ -#define NV_DEVICE_FIFO_RUNLIST_ENGINES(n) ((n) + NV_DEVICE_FIFO(0x00000010)) -#define NV_DEVICE_FIFO_RUNLIST_ENGINES__SIZE 64 +/* Returns a mask of available engine types on runlist(data). */ +#define NV_DEVICE_HOST_RUNLIST_ENGINES NV_DEVICE_HOST(0x00000100) +#define NV_DEVICE_HOST_RUNLIST_ENGINES_SW 0x00000001 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_GR 0x00000002 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_MPEG 0x00000004 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_ME 0x00000008 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_CIPHER 0x00000010 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_BSP 0x00000020 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_VP 0x00000040 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_CE 0x00000080 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC 0x00000100 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD 0x00000200 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC 0x00000400 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP 0x00000800 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_MSENC 0x00001000 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_VIC 0x00002000 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC2 0x00004000 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVDEC 0x00008000 +#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVENC 0x00010000 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/fifo.h b/drivers/gpu/drm/nouveau/include/nvif/fifo.h index e9468c9f9abf..d351ac890ca1 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvif/fifo.h @@ -2,15 +2,15 @@ #define __NVIF_FIFO_H__ #include <nvif/device.h> -/* Returns mask of runlists that support a NV_DEVICE_INFO_ENGINE_* type. */ +/* Returns mask of runlists that support a NV_DEVICE_INFO_RUNLIST_ENGINES_* type. */ u64 nvif_fifo_runlist(struct nvif_device *, u64 engine); /* CE-supporting runlists (excluding GRCE, if others exist). 
*/ static inline u64 nvif_fifo_runlist_ce(struct nvif_device *device) { - u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR); - u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_CE); + u64 runmgr = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR); + u64 runmce = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_CE); if (runmce && !(runmce &= ~runmgr)) runmce = runmgr; return runmce; diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h index 168d7694ede5..6d3a8a3d2087 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/push.h +++ b/drivers/gpu/drm/nouveau/include/nvif/push.h @@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push) } while(0) #endif -#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \ - PUSH_##o##_HDR((p), s, mA, (c)+(n)); \ - PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ +#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \ + PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \ + PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ } while(0) -#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ - PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ + PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ - PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ + PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ - PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ + PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ - PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ + PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ - PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ + PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) 
do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ - PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ + PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ - PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ + PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ - PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ + PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ - PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ + PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_1D(X,o,p,s,mA,dA) \ - PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA)) -#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ - PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ - PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ - PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ - PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) +#define PUSH_1D(X,o,p,s,mA,dA) \ + PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA)) +#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ + PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ + PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ + PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ + PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \ - PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define 
PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \ - PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \ - PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \ - PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \ - X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \ + X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \ - PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \ - X##mI, (dI), \ - X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \ + X##mI, (dI), \ + X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) -#define PUSH_1P(X,o,p,s,mA,dp,ds) \ - PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp)) -#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ - PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \ - X##mA, (dA)) -#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ - PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \ - X##mB, (dB), \ - X##mA, (dA)) +#define PUSH_1P(X,o,p,s,mA,dp,ds) \ + PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp)) +#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ + PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \ + X##mA, (dA)) +#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ + PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL #define PUSH(A...) 
PUSH_(A, PUSH_10P, PUSH_10D, \ diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index c920939a1467..a18b6cfda07e 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -3,79 +3,7 @@ #define __NVKM_DEVICE_H__ #include <core/oclass.h> #include <core/event.h> - -enum nvkm_devidx { - NVKM_SUBDEV_PCI, - NVKM_SUBDEV_VBIOS, - NVKM_SUBDEV_DEVINIT, - NVKM_SUBDEV_TOP, - NVKM_SUBDEV_IBUS, - NVKM_SUBDEV_GPIO, - NVKM_SUBDEV_I2C, - NVKM_SUBDEV_FUSE, - NVKM_SUBDEV_MXM, - NVKM_SUBDEV_MC, - NVKM_SUBDEV_BUS, - NVKM_SUBDEV_TIMER, - NVKM_SUBDEV_INSTMEM, - NVKM_SUBDEV_FB, - NVKM_SUBDEV_LTC, - NVKM_SUBDEV_MMU, - NVKM_SUBDEV_BAR, - NVKM_SUBDEV_FAULT, - NVKM_SUBDEV_ACR, - NVKM_SUBDEV_PMU, - NVKM_SUBDEV_VOLT, - NVKM_SUBDEV_ICCSENSE, - NVKM_SUBDEV_THERM, - NVKM_SUBDEV_CLK, - NVKM_SUBDEV_GSP, - - NVKM_ENGINE_BSP, - - NVKM_ENGINE_CE0, - NVKM_ENGINE_CE1, - NVKM_ENGINE_CE2, - NVKM_ENGINE_CE3, - NVKM_ENGINE_CE4, - NVKM_ENGINE_CE5, - NVKM_ENGINE_CE6, - NVKM_ENGINE_CE7, - NVKM_ENGINE_CE8, - NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE8, - - NVKM_ENGINE_CIPHER, - NVKM_ENGINE_DISP, - NVKM_ENGINE_DMAOBJ, - NVKM_ENGINE_FIFO, - NVKM_ENGINE_GR, - NVKM_ENGINE_IFB, - NVKM_ENGINE_ME, - NVKM_ENGINE_MPEG, - NVKM_ENGINE_MSENC, - NVKM_ENGINE_MSPDEC, - NVKM_ENGINE_MSPPP, - NVKM_ENGINE_MSVLD, - - NVKM_ENGINE_NVENC0, - NVKM_ENGINE_NVENC1, - NVKM_ENGINE_NVENC2, - NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2, - - NVKM_ENGINE_NVDEC0, - NVKM_ENGINE_NVDEC1, - NVKM_ENGINE_NVDEC2, - NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2, - - NVKM_ENGINE_PM, - NVKM_ENGINE_SEC, - NVKM_ENGINE_SEC2, - NVKM_ENGINE_SW, - NVKM_ENGINE_VIC, - NVKM_ENGINE_VP, - - NVKM_SUBDEV_NR -}; +enum nvkm_subdev_type; enum nvkm_device_type { NVKM_DEVICE_PCI, @@ -102,7 +30,6 @@ struct nvkm_device { struct nvkm_event event; - u64 disable_mask; u32 debug; const struct nvkm_device_chip *chip; @@ -130,58 +57,16 @@ struct nvkm_device { struct notifier_block nb; } acpi; - struct nvkm_acr *acr; - struct nvkm_bar *bar; - struct nvkm_bios *bios; - struct nvkm_bus *bus; - struct nvkm_clk *clk; - struct nvkm_devinit *devinit; - struct nvkm_fault *fault; - struct nvkm_fb *fb; - struct nvkm_fuse *fuse; - struct nvkm_gpio *gpio; - struct nvkm_gsp *gsp; - struct nvkm_i2c *i2c; - struct nvkm_subdev *ibus; - struct nvkm_iccsense *iccsense; - struct nvkm_instmem *imem; - struct nvkm_ltc *ltc; - struct nvkm_mc *mc; - struct nvkm_mmu *mmu; - struct nvkm_subdev *mxm; - struct nvkm_pci *pci; - struct nvkm_pmu *pmu; - struct nvkm_therm *therm; - struct nvkm_timer *timer; - struct nvkm_top *top; - struct nvkm_volt *volt; - - struct nvkm_engine *bsp; - struct nvkm_engine *ce[9]; - struct nvkm_engine *cipher; - struct nvkm_disp *disp; - struct nvkm_dma *dma; - struct nvkm_fifo *fifo; - struct nvkm_gr *gr; - struct nvkm_engine *ifb; - struct nvkm_engine *me; - struct nvkm_engine *mpeg; - struct nvkm_engine *msenc; - struct nvkm_engine *mspdec; - struct nvkm_engine *msppp; - struct nvkm_engine *msvld; - struct nvkm_nvenc *nvenc[3]; - struct nvkm_nvdec *nvdec[3]; - struct nvkm_pm *pm; - struct nvkm_engine *sec; - struct nvkm_sec2 *sec2; - struct nvkm_sw *sw; - struct nvkm_engine *vic; - struct nvkm_engine *vp; +#define NVKM_LAYOUT_ONCE(type,data,ptr) data *ptr; +#define NVKM_LAYOUT_INST(type,data,ptr,cnt) data *ptr[cnt]; +#include <core/layout.h> +#undef NVKM_LAYOUT_INST +#undef NVKM_LAYOUT_ONCE + struct list_head subdev; }; -struct nvkm_subdev *nvkm_device_subdev(struct 
nvkm_device *, int index); -struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index); +struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst); +struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst); struct nvkm_device_func { struct nvkm_device_pci *(*pci)(struct nvkm_device *); @@ -202,55 +87,15 @@ struct nvkm_device_quirk { struct nvkm_device_chip { const char *name; - - int (*acr )(struct nvkm_device *, int idx, struct nvkm_acr **); - int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **); - int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **); - int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **); - int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **); - int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **); - int (*fault )(struct nvkm_device *, int idx, struct nvkm_fault **); - int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **); - int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **); - int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **); - int (*gsp )(struct nvkm_device *, int idx, struct nvkm_gsp **); - int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **); - int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **); - int (*iccsense)(struct nvkm_device *, int idx, struct nvkm_iccsense **); - int (*imem )(struct nvkm_device *, int idx, struct nvkm_instmem **); - int (*ltc )(struct nvkm_device *, int idx, struct nvkm_ltc **); - int (*mc )(struct nvkm_device *, int idx, struct nvkm_mc **); - int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **); - int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **); - int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **); - int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **); - int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **); - int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **); - int (*top )(struct nvkm_device *, int idx, struct nvkm_top **); - int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **); - - int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*ce[9] )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **); - int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **); - int (*fifo )(struct nvkm_device *, int idx, struct nvkm_fifo **); - int (*gr )(struct nvkm_device *, int idx, struct nvkm_gr **); - int (*ifb )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*me )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*mpeg )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*msenc )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_nvenc **); - int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **); - int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **); - int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **); - int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **); - int (*vic )(struct nvkm_device *, 
int idx, struct nvkm_engine **); - int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **); +#define NVKM_LAYOUT_ONCE(type,data,ptr,...) \ + struct { \ + u32 inst; \ + int (*ctor)(struct nvkm_device *, enum nvkm_subdev_type, int inst, data **); \ + } ptr; +#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A) +#include <core/layout.h> +#undef NVKM_LAYOUT_INST +#undef NVKM_LAYOUT_ONCE }; struct nvkm_device *nvkm_device_find(u64 name); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h index c6b401a6ea23..e58923b67d74 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h @@ -6,12 +6,18 @@ struct nvkm_fifo_chan; struct nvkm_fb_tile; +extern const struct nvkm_subdev_func nvkm_engine; + struct nvkm_engine { const struct nvkm_engine_func *func; struct nvkm_subdev subdev; spinlock_t lock; - int usecount; + struct { + refcount_t refcount; + struct mutex mutex; + bool enabled; + } use; }; struct nvkm_engine_func { @@ -42,9 +48,10 @@ struct nvkm_engine_func { }; int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *, - int index, bool enable, struct nvkm_engine *); + enum nvkm_subdev_type, int inst, bool enable, struct nvkm_engine *); int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *, - int index, bool enable, struct nvkm_engine **); + enum nvkm_subdev_type, int, bool enable, struct nvkm_engine **); + struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *); void nvkm_engine_unref(struct nvkm_engine **); void nvkm_engine_tile(struct nvkm_engine *, int region); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h index ce98efd4b209..070462be35d9 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h @@ -8,6 +8,7 @@ struct nvkm_enum { const char *name; const void *data; u32 data2; + int inst; }; const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h index 3981cb106aae..fd9a3f9a518e 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h @@ -21,11 +21,11 @@ void nvkm_falcon_v1_disable(struct nvkm_falcon *); void gp102_sec2_flcn_bind_context(struct nvkm_falcon *, struct nvkm_memory *); int gp102_sec2_flcn_enable(struct nvkm_falcon *); -#define FLCN_PRINTK(t,f,fmt,a...) do { \ - if (nvkm_subdev_name[(f)->owner->index] != (f)->name) \ - nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \ - else \ - nvkm_##t((f)->owner, fmt"\n", ##a); \ +#define FLCN_PRINTK(t,f,fmt,a...) do { \ + if ((f)->owner->name != (f)->name) \ + nvkm_##t((f)->owner, "%s: "fmt"\n", (f)->name, ##a); \ + else \ + nvkm_##t((f)->owner, fmt"\n", ##a); \ } while(0) #define FLCN_DBG(f,fmt,a...) FLCN_PRINTK(debug, (f), fmt, ##a) #define FLCN_ERR(f,fmt,a...) 
FLCN_PRINTK(error, (f), fmt, ##a) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h new file mode 100644 index 000000000000..7afe1579b20f --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI , struct nvkm_pci , pci) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VBIOS , struct nvkm_bios , bios) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_DEVINIT , struct nvkm_devinit , devinit) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PRIVRING, struct nvkm_subdev , privring) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GPIO , struct nvkm_gpio , gpio) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_I2C , struct nvkm_i2c , i2c) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FUSE , struct nvkm_fuse , fuse) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MXM , struct nvkm_subdev , mxm) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MC , struct nvkm_mc , mc) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BUS , struct nvkm_bus , bus) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TIMER , struct nvkm_timer , timer) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_INSTMEM , struct nvkm_instmem , imem) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FB , struct nvkm_fb , fb) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_LTC , struct nvkm_ltc , ltc) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_MMU , struct nvkm_mmu , mmu) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_BAR , struct nvkm_bar , bar) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FAULT , struct nvkm_fault , fault) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ACR , struct nvkm_acr , acr) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PMU , struct nvkm_pmu , pmu) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VOLT , struct nvkm_volt , volt) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_ICCSENSE, struct nvkm_iccsense, iccsense) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_THERM , struct nvkm_therm , therm) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_CLK , struct nvkm_clk , clk) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp) +NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3) +NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla) + +NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp) +NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_FIFO , struct nvkm_fifo , fifo) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_GR , struct nvkm_gr , gr) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_IFB , struct nvkm_engine , ifb) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_ME , struct nvkm_engine , me) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_MPEG , struct nvkm_engine , mpeg) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSENC , struct nvkm_engine , msenc) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld) +NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 5) +NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM , struct nvkm_pm , pm) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_VIC , struct nvkm_engine , vic) +NVKM_LAYOUT_ONCE(NVKM_ENGINE_VP , struct nvkm_engine , vp) diff --git 
a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h index 76288c682e9e..1665738948fb 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h @@ -3,13 +3,25 @@ #define __NVKM_SUBDEV_H__ #include <core/device.h> +enum nvkm_subdev_type { +#define NVKM_LAYOUT_ONCE(t,s,p,...) t, +#define NVKM_LAYOUT_INST NVKM_LAYOUT_ONCE +#include <core/layout.h> +#undef NVKM_LAYOUT_INST +#undef NVKM_LAYOUT_ONCE + NVKM_SUBDEV_NR +}; + struct nvkm_subdev { const struct nvkm_subdev_func *func; struct nvkm_device *device; - enum nvkm_devidx index; - struct mutex mutex; + enum nvkm_subdev_type type; + int inst; + char name[16]; u32 debug; + struct list_head head; + void **pself; bool oneinit; }; @@ -23,11 +35,12 @@ struct nvkm_subdev_func { void (*intr)(struct nvkm_subdev *); }; -extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR]; -int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *, - int index, struct nvkm_subdev **); +extern const char *nvkm_subdev_type[NVKM_SUBDEV_NR]; +int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *, enum nvkm_subdev_type, + int inst, struct nvkm_subdev **); void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *, - int index, struct nvkm_subdev *); + enum nvkm_subdev_type, int inst, struct nvkm_subdev *); +void nvkm_subdev_disable(struct nvkm_device *, enum nvkm_subdev_type, int inst); void nvkm_subdev_del(struct nvkm_subdev **); int nvkm_subdev_preinit(struct nvkm_subdev *); int nvkm_subdev_init(struct nvkm_subdev *); @@ -38,10 +51,8 @@ void nvkm_subdev_intr(struct nvkm_subdev *); /* subdev logging */ #define nvkm_printk_(s,l,p,f,a...) do { \ const struct nvkm_subdev *_subdev = (s); \ - if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) { \ - dev_##p(_subdev->device->dev, "%s: "f, \ - nvkm_subdev_name[_subdev->index], ##a); \ - } \ + if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) \ + dev_##p(_subdev->device->dev, "%s: "f, _subdev->name, ##a); \ } while(0) #define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a) #define nvkm_fatal(s,f,a...) 
nvkm_printk((s), FATAL, crit, f, ##a) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h index f938f024db81..d5530faf025e 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h @@ -2,5 +2,5 @@ #ifndef __NVKM_BSP_H__ #define __NVKM_BSP_H__ #include <engine/xtensa.h> -int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **); +int g84_bsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h index 86f420f4630b..cfd2da8e66fe 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h @@ -3,13 +3,13 @@ #define __NVKM_CE_H__ #include <engine/falcon.h> -int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); -int tu102_ce_new(struct nvkm_device *, int, struct nvkm_engine **); +int gt215_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gf100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gk104_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gm107_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gm200_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gp100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gp102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gv100_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int tu102_ce_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h index 66c5c5e27520..9da9176bbbbf 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h @@ -2,5 +2,5 @@ #ifndef __NVKM_CIPHER_H__ #define __NVKM_CIPHER_H__ #include <core/engine.h> -int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **); +int g84_cipher_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h index 0f6fa6631a19..d08d3337ba0d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h @@ -17,25 +17,28 @@ struct nvkm_disp { struct nvkm_event hpd; struct nvkm_event vblank; - struct nvkm_oproxy *client; + struct { + spinlock_t lock; + struct nvkm_oproxy *object; + } client; }; -int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int 
g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int mcp77_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int mcp89_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **); -int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **); +int nv04_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int nv50_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int g84_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gt200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int g94_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int mcp77_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gt215_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int mcp89_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gf119_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gk104_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gk110_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gm107_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gm200_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gp100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); +int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h index 2e12cdb6bb93..a003da39fd13 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h @@ -23,9 +23,9 @@ struct nvkm_dma { struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object); -int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **); -int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **); -int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **); -int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **); -int gv100_dma_new(struct nvkm_device *, int, struct nvkm_dma **); 
+int nv04_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **); +int nv50_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **); +int gf100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **); +int gf119_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **); +int gv100_dma_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_dma **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h index 27c1f868552c..306125d17ece 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h @@ -64,7 +64,7 @@ int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *); void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *); int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *, - int index, bool enable, u32 addr, struct nvkm_engine **); + enum nvkm_subdev_type, int inst, bool enable, u32 addr, struct nvkm_engine **); struct nvkm_falcon_func { struct { diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index b335f3a1e66d..54fab7cc36c1 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -7,6 +7,7 @@ struct nvkm_fault_data; #define NVKM_FIFO_CHID_NR 4096 +#define NVKM_FIFO_ENGN_NR 16 struct nvkm_fifo_engn { struct nvkm_object *object; @@ -17,7 +18,7 @@ struct nvkm_fifo_engn { struct nvkm_fifo_chan { const struct nvkm_fifo_chan_func *func; struct nvkm_fifo *fifo; - u64 engines; + u32 engm; struct nvkm_object object; struct list_head head; @@ -29,7 +30,7 @@ struct nvkm_fifo_chan { u64 addr; u32 size; - struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR]; + struct nvkm_fifo_engn engn[NVKM_FIFO_ENGN_NR]; }; struct nvkm_fifo { @@ -40,6 +41,7 @@ struct nvkm_fifo { int nr; struct list_head chan; spinlock_t lock; + struct mutex mutex; struct nvkm_event uevent; /* async user trigger */ struct nvkm_event cevent; /* channel creation event */ @@ -57,22 +59,22 @@ nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags); struct nvkm_fifo_chan * nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags); -int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gk110_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); -int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); 
-int tu102_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); +int nv04_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int nv10_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int nv17_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int nv40_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int nv50_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int g84_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gf100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gk104_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gk110_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gk208_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gk20a_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gm107_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gm200_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gm20b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h index 1530c81f86a2..b28b752ffaa2 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h @@ -14,44 +14,44 @@ int nvkm_gr_ctxsw_pause(struct nvkm_device *); int nvkm_gr_ctxsw_resume(struct nvkm_device *); u32 nvkm_gr_ctxsw_inst(struct nvkm_device *); -int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gf108_gr_new(struct 
nvkm_device *, int, struct nvkm_gr **); -int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gp104_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gp108_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int gv100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); -int tu102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int nv04_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv10_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv15_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv17_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv20_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv25_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv2a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv30_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv34_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv35_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv40_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv44_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int nv50_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int g84_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gt200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int mcp79_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gt215_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int mcp89_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gf100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gf104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gf108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gf110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gf117_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gf119_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gk104_gr_new(struct nvkm_device *, enum 
nvkm_subdev_type, int inst, struct nvkm_gr **); +int gk110_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gk110b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gk208_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gk20a_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gm107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gm200_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gm20b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gp100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gp102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gp104_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gp107_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gp108_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); +int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h index 8585a31f5943..f137f277935f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h @@ -2,9 +2,9 @@ #ifndef __NVKM_MPEG_H__ #define __NVKM_MPEG_H__ #include <core/engine.h> -int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **); -int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **); -int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **); -int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **); -int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **); +int nv31_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int nv40_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int nv44_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int nv50_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int g84_mpeg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h index 83bb2fcb2cbf..ac8f08ce183c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h @@ -2,8 +2,8 @@ #ifndef __NVKM_MSPDEC_H__ #define __NVKM_MSPDEC_H__ #include <engine/falcon.h> -int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **); -int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **); -int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **); -int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **); +int g98_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gt215_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int 
gf100_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gk104_mspdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h index 69e09fd96e0c..81c2b6f0ad84 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h @@ -2,7 +2,7 @@ #ifndef __NVKM_MSPPP_H__ #define __NVKM_MSPPP_H__ #include <engine/falcon.h> -int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **); -int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **); -int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **); +int g98_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gt215_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gf100_msppp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h index 9e11cefc9649..2d5fa961ba66 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h @@ -2,9 +2,9 @@ #ifndef __NVKM_MSVLD_H__ #define __NVKM_MSVLD_H__ #include <engine/falcon.h> -int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **); -int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **); -int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **); -int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **); -int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **); +int g98_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gt215_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int mcp89_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gf100_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); +int gk104_msvld_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h index 1b3183e31606..97bd3092f68a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h @@ -11,5 +11,5 @@ struct nvkm_nvdec { struct nvkm_falcon falcon; }; -int gm107_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **); +int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h index 33e6ba8adc8d..1a259c5c9a71 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h @@ -11,5 +11,5 @@ struct nvkm_nvenc { struct nvkm_falcon falcon; }; -int gm107_nvenc_new(struct nvkm_device *, int, struct nvkm_nvenc **); +int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h index 4d754e7650d9..af89d46ea360 100644 --- 
a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h @@ -7,20 +7,23 @@ struct nvkm_pm { const struct nvkm_pm_func *func; struct nvkm_engine engine; - struct nvkm_object *perfmon; + struct { + spinlock_t lock; + struct nvkm_object *object; + } client; struct list_head domains; struct list_head sources; u32 sequence; }; -int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **); -int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **); +int nv40_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int nv50_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int g84_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int gt200_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int gt215_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int gf100_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int gf108_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int gf117_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); +int gk104_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h index f14e98a8a0ca..37ed7ab8d050 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h @@ -2,5 +2,5 @@ #ifndef __NVKM_SEC_H__ #define __NVKM_SEC_H__ #include <engine/falcon.h> -int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **); +int g98_sec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h index 34dc765648d5..06264c840eae 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h @@ -18,7 +18,7 @@ struct nvkm_sec2 { bool initmsg_received; }; -int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); -int gp108_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); -int tu102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **); +int gp102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **); +int gp108_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **); +int tu102_sec2_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sec2 **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h index 2e91769e3ee2..b1a53ffbfdef 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h @@ -12,8 +12,8 @@ struct nvkm_sw { bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data); -int 
nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **); -int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **); -int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **); -int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **); +int nv04_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **); +int nv10_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **); +int nv50_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **); +int gf100_sw_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_sw **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h index 8984415b2a3d..1bab26858538 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h @@ -2,5 +2,5 @@ #ifndef __NVKM_VP_H__ #define __NVKM_VP_H__ #include <engine/xtensa.h> -int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **); +int g84_vp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h index fbf27b2293a9..3083a5866a55 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h @@ -13,7 +13,7 @@ struct nvkm_xtensa { }; int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *, - int index, bool enable, u32 addr, struct nvkm_engine **); + enum nvkm_subdev_type, int, bool enable, u32 addr, struct nvkm_engine **); struct nvkm_xtensa_func { u32 fifo_val; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h index 836d8b932822..c0b254f7f0b5 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/acr.h @@ -59,12 +59,12 @@ struct nvkm_acr { bool nvkm_acr_managed_falcon(struct nvkm_device *, enum nvkm_acr_lsf_id); int nvkm_acr_bootstrap_falcons(struct nvkm_device *, unsigned long mask); -int gm200_acr_new(struct nvkm_device *, int, struct nvkm_acr **); -int gm20b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); -int gp102_acr_new(struct nvkm_device *, int, struct nvkm_acr **); -int gp108_acr_new(struct nvkm_device *, int, struct nvkm_acr **); -int gp10b_acr_new(struct nvkm_device *, int, struct nvkm_acr **); -int tu102_acr_new(struct nvkm_device *, int, struct nvkm_acr **); +int gm200_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **); +int gm20b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **); +int gp102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **); +int gp108_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **); +int gp10b_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **); +int tu102_acr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_acr **); struct nvkm_acr_lsfw { const struct nvkm_acr_lsf_func *func; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h index 14b09f7e46a5..4f07836ab984 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h @@ -23,11 +23,11 @@ void nvkm_bar_bar2_reset(struct nvkm_device *); struct 
nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *); void nvkm_bar_flush(struct nvkm_bar *); -int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **); -int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **); -int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **); -int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **); -int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **); -int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **); -int tu102_bar_new(struct nvkm_device *, int, struct nvkm_bar **); +int nv50_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); +int g84_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); +int gf100_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); +int gk20a_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); +int gm107_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); +int gm20b_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); +int tu102_bar_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h index f2860f8e0c2e..b61cfb077533 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h @@ -30,5 +30,5 @@ u8 nvbios_rd08(struct nvkm_bios *, u32 addr); u16 nvbios_rd16(struct nvkm_bios *, u32 addr); u32 nvbios_rd32(struct nvkm_bios *, u32 addr); -int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **); +int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bios **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h index ae9ad6c034fb..2ac03bbc6133 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h @@ -18,9 +18,9 @@ void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data); void nvkm_hwsq_wait_vblank(struct nvkm_hwsq *); void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec); -int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **); -int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **); -int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **); -int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **); -int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **); +int nv04_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **); +int nv31_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **); +int nv50_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **); +int g94_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **); +int gf100_bus_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bus **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h index bf937e7dfd77..05b99c9e9a26 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h @@ -125,14 +125,14 @@ int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait); int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel); int nvkm_clk_tstate(struct nvkm_clk *, u8 temperature); -int 
nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **); -int gm20b_clk_new(struct nvkm_device *, int, struct nvkm_clk **); +int nv04_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int nv40_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int nv50_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int g84_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int mcp77_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int gt215_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); +int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h index 50cc7c05eac4..848b5d9ce705 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h @@ -14,23 +14,22 @@ struct nvkm_devinit { u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr); int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz); void nvkm_devinit_meminit(struct nvkm_devinit *); -u64 nvkm_devinit_disable(struct nvkm_devinit *); -int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable); +int nvkm_devinit_post(struct nvkm_devinit *); -int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); -int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **); +int nv04_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit 
**); +int nv05_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int nv10_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int nv1a_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int nv20_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int nv50_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int g84_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int g98_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int gt215_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int mcp89_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int gf100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int gm107_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int gm200_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int gv100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int tu102_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); +int ga100_devinit_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h index a513c16ab105..581458ad38e0 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h @@ -30,8 +30,8 @@ struct nvkm_fault_data { u8 reason; }; -int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); -int gp10b_fault_new(struct nvkm_device *, int, struct nvkm_fault **); -int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **); -int tu102_fault_new(struct nvkm_device *, int, struct nvkm_fault **); +int gp100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **); +int gp10b_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **); +int gv100_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **); +int tu102_fault_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fault **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 2ecd52aec1d1..ef6a6297148c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -36,7 +36,11 @@ struct nvkm_fb { struct nvkm_blob vpr_scrubber; struct nvkm_ram *ram; - struct nvkm_mm tags; + + struct { + struct mutex mutex; /* protects mm and nvkm_memory::tags */ + struct nvkm_mm mm; + } tags; struct { struct nvkm_fb_tile region[16]; @@ -54,40 +58,40 @@ void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size, void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *); void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *); -int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv25_fb_new(struct nvkm_device 
*, int, struct nvkm_fb **); -int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); -int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **); +int nv04_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv10_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv1a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv20_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv25_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv30_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv35_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv36_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv40_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv41_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv44_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv46_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv47_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv49_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv4e_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int nv50_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int g84_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gt215_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb 
**); +int mcp77_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int mcp89_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gf100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gf108_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gk104_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gk110_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gk20a_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gm107_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gm200_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gm20b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); +int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **); #include <subdev/bios.h> #include <subdev/bios/ramcfg.h> @@ -128,6 +132,7 @@ struct nvkm_ram { #define NVKM_RAM_MM_MIXED (NVKM_MM_HEAP_ANY + 3) struct nvkm_mm vram; u64 stolen; + struct mutex mutex; int ranks; int parts; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h index 00111c34311e..dabbef0ac96c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h @@ -11,7 +11,7 @@ struct nvkm_fuse { u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr); -int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **); -int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **); -int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **); +int nv50_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **); +int gf100_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **); +int gm107_fuse_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fuse **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h index cdcce5ece6ff..0e46ea1fe972 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h @@ -32,10 +32,10 @@ int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line, int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state); int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line); -int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **); -int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **); -int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **); -int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **); -int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **); -int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **); +int nv10_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int 
inst, struct nvkm_gpio **); +int nv50_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **); +int g94_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **); +int gf119_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **); +int gk104_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **); +int ga102_gpio_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gpio **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 06db67610a50..cf42a59d4e58 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -9,5 +9,5 @@ struct nvkm_gsp { struct nvkm_falcon falcon; }; -int gv100_gsp_new(struct nvkm_device *, int, struct nvkm_gsp **); +int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h index 640f649ce497..146e13292203 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h @@ -85,15 +85,15 @@ struct nvkm_i2c { struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int); struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int); -int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); -int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); +int nv04_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int nv4e_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int nv50_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int g94_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int gf117_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int gf119_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int gk104_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int gk110_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); +int gm200_i2c_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_i2c **); static inline int nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h deleted file mode 100644 index db791411eaa8..000000000000 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_IBUS_H__ -#define __NVKM_IBUS_H__ -#include <core/subdev.h> - -int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); -int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); -int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev 
**); -int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); -int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); -int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); -#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h index f483dcd7cd1c..7400d62dcbec 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h @@ -14,6 +14,6 @@ struct nvkm_iccsense { u32 power_w_crit; }; -int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **); +int gf100_iccsense_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **); int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index c74ab7c31d05..f967b97d163c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h @@ -13,6 +13,11 @@ struct nvkm_instmem { struct list_head boot; u32 reserved; + /* <=nv4x: protects NV_PRAMIN/BAR2 MM + * >=nv50: protects BAR2 MM & LRU + */ + struct mutex mutex; + struct nvkm_memory *vbios; struct nvkm_ramht *ramht; struct nvkm_memory *ramro; @@ -25,8 +30,8 @@ int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero, struct nvkm_memory **); -int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **); -int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **); -int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **); -int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **); +int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); +int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); +int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); +int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h index d76f60d7d29a..d32a326a9290 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h @@ -13,6 +13,7 @@ struct nvkm_ltc { u32 ltc_nr; u32 lts_nr; + struct mutex mutex; /* serialises CBC operations */ u32 num_tags; u32 tag_base; struct nvkm_memory *tag_ram; @@ -33,12 +34,11 @@ int nvkm_ltc_zbc_stencil_get(struct nvkm_ltc *, int index, const u32); void nvkm_ltc_invalidate(struct nvkm_ltc *); void nvkm_ltc_flush(struct nvkm_ltc *); -int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); -int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); -int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); -int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); -int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); -int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); -int gp102_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); -int gp10b_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); +int gf100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **); +int gk104_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **); +int gm107_ltc_new(struct 
nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **); +int gm200_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **); +int gp100_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **); +int gp102_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **); +int gp10b_ltc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_ltc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h index e45ca4583967..cb86a56e68d4 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h @@ -8,29 +8,29 @@ struct nvkm_mc { struct nvkm_subdev subdev; }; -void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx); -void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx); -bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx); -void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx); +void nvkm_mc_enable(struct nvkm_device *, enum nvkm_subdev_type, int); +void nvkm_mc_disable(struct nvkm_device *, enum nvkm_subdev_type, int); +bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_subdev_type, int); +void nvkm_mc_reset(struct nvkm_device *, enum nvkm_subdev_type, int); void nvkm_mc_intr(struct nvkm_device *, bool *handled); void nvkm_mc_intr_unarm(struct nvkm_device *); void nvkm_mc_intr_rearm(struct nvkm_device *); -void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable); +void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int, bool enable); void nvkm_mc_unk260(struct nvkm_device *, u32 data); -int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int nv17_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int g84_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **); -int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); +int nv04_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int nv11_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int nv17_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int nv44_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int nv50_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int g84_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int g98_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int gt215_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int gf100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int gk104_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int 
inst, struct nvkm_mc **); +int gk20a_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int gp100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int gp10b_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int tu102_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); +int ga100_mc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 54cdcb017518..0911e73f7424 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -117,22 +117,24 @@ struct nvkm_mmu { struct list_head list; } ptc, ptp; + struct mutex mutex; /* serialises mmu invalidations */ + struct nvkm_device_oclass user; }; -int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); -int tu102_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); +int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); +int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h index 78df1e9def05..7d4132a17d0f 100644 --- 
a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h @@ -3,5 +3,5 @@ #define __NVKM_MXM_H__ #include <core/subdev.h> -int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **); +int nv50_mxm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h index 4803a4fad4a2..74c19bdfb757 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h @@ -39,17 +39,17 @@ void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data); u32 nvkm_pci_mask(struct nvkm_pci *, u16 addr, u32 mask, u32 value); void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow); -int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **); -int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); +int nv04_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int nv40_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int nv46_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int nv4c_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int g84_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int g92_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int g94_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); +int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **); /* pcie functions */ int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h index 5ff6d1f8985a..f57a3a5a288d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h @@ -18,6 +18,7 @@ struct nvkm_pmu { struct completion wpr_ready; struct { + struct mutex mutex; u32 base; u32 size; } send; @@ -39,18 +40,18 @@ int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process, void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable); bool nvkm_pmu_fan_controlled(struct nvkm_device *); -int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gk110_pmu_new(struct 
nvkm_device *, int, struct nvkm_pmu **); -int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gm200_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); -int gp10b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); +int gt215_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gf100_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gf119_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gk104_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gk110_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gk208_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gk20a_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gm107_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gm200_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gm20b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gp102_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); +int gp10b_pmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pmu **); /* interface to MEMX process running on PMU */ struct nvkm_memx; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h new file mode 100644 index 000000000000..e1399f8a90ad --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/privring.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_PRIVRING_H__ +#define __NVKM_PRIVRING_H__ +#include <core/subdev.h> + +int gf100_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **); +int gf117_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **); +int gk104_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **); +int gk20a_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **); +int gm200_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **); +int gp10b_privring_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_subdev **); +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h index 62c34f98c930..bd04f49272a6 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h @@ -107,13 +107,13 @@ void nvkm_therm_clkgate_init(struct nvkm_therm *, void nvkm_therm_clkgate_enable(struct nvkm_therm *); void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool); -int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int 
gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **); -int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **); +int nv40_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int nv50_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int g84_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int gt215_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int gf119_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int gk104_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int gm107_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int gm200_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); +int gp100_therm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_therm **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h index d06dcbe1faa6..439a3f72b0d7 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h @@ -76,8 +76,8 @@ s64 nvkm_timer_wait_test(struct nvkm_timer_wait *); #define nvkm_wait_msec(d,m,addr,mask,data) \ nvkm_wait_usec((d), (m) * 1000, (addr), (mask), (data)) -int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **); -int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **); -int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **); -int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **); +int nv04_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **); +int nv40_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **); +int nv41_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **); +int gk20a_timer_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_timer **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h index 7be0e7e7bd77..ee75c5524c43 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h @@ -9,13 +9,24 @@ struct nvkm_top { struct list_head device; }; -u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_devidx); -u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx); -u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs); -u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx); -int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx); -enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault); -enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn); +struct nvkm_top_device { + enum nvkm_subdev_type type; + int inst; + u32 addr; + int fault; + int engine; + int runlist; + int reset; + int intr; + struct list_head head; +}; + +u32 nvkm_top_addr(struct nvkm_device *, enum nvkm_subdev_type, int); +u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_subdev_type, int); +u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_subdev_type, int); +int nvkm_top_fault_id(struct nvkm_device *, enum 
nvkm_subdev_type, int); +struct nvkm_subdev *nvkm_top_fault(struct nvkm_device *, int fault); -int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **); +int gk104_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **); +int ga100_top_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_top **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h index 45053a280930..0be86d5f0158 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h @@ -36,10 +36,10 @@ int nvkm_volt_get(struct nvkm_volt *); int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp, int condition); -int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **); -int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **); -int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **); -int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **); -int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **); -int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **); +int nv40_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **); +int gf100_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **); +int gf117_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **); +int gk104_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **); +int gk20a_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **); +int gm20b_volt_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_volt **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index f08b31d84d4d..0a9334deffe2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -269,19 +269,19 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { if (init->fb_ctxdma_handle == ~0) { switch (init->tt_ctxdma_handle) { - case 0x01: engine = NV_DEVICE_INFO_ENGINE_GR ; break; - case 0x02: engine = NV_DEVICE_INFO_ENGINE_MSPDEC; break; - case 0x04: engine = NV_DEVICE_INFO_ENGINE_MSPPP ; break; - case 0x08: engine = NV_DEVICE_INFO_ENGINE_MSVLD ; break; - case 0x30: engine = NV_DEVICE_INFO_ENGINE_CE ; break; + case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR ; break; + case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break; + case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break; + case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break; + case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE ; break; default: return nouveau_abi16_put(abi16, -ENOSYS); } } else { - engine = NV_DEVICE_INFO_ENGINE_GR; + engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR; } - if (engine != NV_DEVICE_INFO_ENGINE_CE) + if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE) engine = nvif_fifo_runlist(device, engine); else engine = nvif_fifo_runlist_ce(device); diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 7cc683b8dc7a..e8c445eb1100 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -2083,13 +2083,11 @@ nouveau_bios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; - struct pci_dev *pdev; int ret; 
/* only relevant for PCI devices */ if (!dev_is_pci(dev->dev)) return 0; - pdev = to_pci_dev(dev->dev); if (!NVInitVBIOS(dev)) return -ENODEV; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 33dc886d1d6d..281e9ed13989 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -43,9 +43,9 @@ #include <nvif/if500b.h> #include <nvif/if900b.h> -static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, +static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg); -static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm); /* * NV10-NV40 tiling helpers @@ -300,18 +300,15 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain, struct sg_table *sg, struct dma_resv *robj) { int type = sg ? ttm_bo_type_sg : ttm_bo_type_device; - size_t acc_size; int ret; - acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo)); - nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; nouveau_bo_placement_set(nvbo, domain, 0); INIT_LIST_HEAD(&nvbo->io_reserve_lru); ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, - &nvbo->placement, align >> PAGE_SHIFT, false, - acc_size, sg, robj, nouveau_bo_del_ttm); + &nvbo->placement, align >> PAGE_SHIFT, false, sg, + robj, nouveau_bo_del_ttm); if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. */ return ret; @@ -547,7 +544,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; - int i; + int i, j; if (!ttm_dma) return; @@ -556,10 +553,22 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; i++) + i = 0; + while (i < ttm_dma->num_pages) { + struct page *p = ttm_dma->pages[i]; + size_t num_pages = 1; + + for (j = i + 1; j < ttm_dma->num_pages; ++j) { + if (++p != ttm_dma->pages[j]) + break; + + ++num_pages; + } dma_sync_single_for_device(drm->dev->dev, ttm_dma->dma_address[i], - PAGE_SIZE, DMA_TO_DEVICE); + num_pages * PAGE_SIZE, DMA_TO_DEVICE); + i += num_pages; + } } void @@ -567,7 +576,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; - int i; + int i, j; if (!ttm_dma) return; @@ -576,9 +585,22 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; i++) + i = 0; + while (i < ttm_dma->num_pages) { + struct page *p = ttm_dma->pages[i]; + size_t num_pages = 1; + + for (j = i + 1; j < ttm_dma->num_pages; ++j) { + if (++p != ttm_dma->pages[j]) + break; + + ++num_pages; + } + dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], - PAGE_SIZE, DMA_FROM_DEVICE); + num_pages * PAGE_SIZE, DMA_FROM_DEVICE); + i += num_pages; + } } void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) @@ -674,7 +696,7 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) } static int -nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, +nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg) { #if IS_ENABLED(CONFIG_AGP) @@ -690,7 +712,7 @@ nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, } static void -nouveau_ttm_tt_unbind(struct 
ttm_bo_device *bdev, struct ttm_tt *ttm) +nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { #if IS_ENABLED(CONFIG_AGP) struct nouveau_drm *drm = nouveau_bdev(bdev); @@ -1055,7 +1077,7 @@ nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, } static int -nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) +nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); struct nvkm_device *device = nvxx_device(&drm->client.device); @@ -1163,7 +1185,7 @@ out: } static void -nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg) +nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); @@ -1223,7 +1245,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) } static int -nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, +nouveau_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct ttm_tt *ttm_dma = (void *)ttm; @@ -1247,7 +1269,7 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, } static void -nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, +nouveau_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct nouveau_drm *drm; @@ -1264,7 +1286,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, } static void -nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev, +nouveau_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { #if IS_ENABLED(CONFIG_AGP) @@ -1296,7 +1318,7 @@ nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo) nouveau_bo_move_ntfy(bo, false, NULL); } -struct ttm_bo_driver nouveau_bo_driver = { +struct ttm_device_funcs nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 6045b85a762a..c2d3f9c48eba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -68,7 +68,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) return 0; } -extern struct ttm_bo_driver nouveau_bo_driver; +extern struct ttm_device_funcs nouveau_bo_driver; void nouveau_bo_move_init(struct nouveau_drm *); struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index e48f1f7eb370..7cfac265fd45 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -556,7 +556,7 @@ nouveau_channels_init(struct nouveau_drm *drm) } args = { .m.version = 1, .m.count = sizeof(args.v) / sizeof(args.v.channels), - .v.channels.mthd = NV_DEVICE_FIFO_CHANNELS, + .v.channels.mthd = NV_DEVICE_HOST_CHANNELS, }; struct nvif_object *device = &drm->client.device.object; int ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 17831ee897ea..dac02c7be54d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -322,12 +322,9 @@ nouveau_framebuffer_new(struct drm_device *dev, mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */ (mode_cmd->pitches[1] && /* pitches for planes must match */ mode_cmd->pitches[0] != mode_cmd->pitches[1]))) { - struct drm_format_name_buf format_name; - DRM_DEBUG_KMS("Unsuitable framebuffer: 
format: %s; pitches: 0x%x\n 0x%x\n", - drm_get_format_name(mode_cmd->pixel_format, - &format_name), - mode_cmd->pitches[0], - mode_cmd->pitches[1]); + DRM_DEBUG_KMS("Unsuitable framebuffer: format: %p4cc; pitches: 0x%x\n 0x%x\n", + &mode_cmd->pixel_format, + mode_cmd->pitches[0], mode_cmd->pitches[1]); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 1b2169e9c295..885815ea917f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -344,7 +344,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm) /* Allocate channel that has access to the graphics engine. */ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { - arg0 = nvif_fifo_runlist(device, NV_DEVICE_INFO_ENGINE_GR); + arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR); arg1 = 1; } else { arg0 = NvDmaFB; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d28ee6844245..ba65f136cf48 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -54,7 +54,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_memory.h> #include <drm/drm_audio_component.h> @@ -151,7 +150,7 @@ struct nouveau_drm { /* TTM interface support */ struct { - struct ttm_bo_device bdev; + struct ttm_device bdev; atomic_t validate_sequence; int (*move)(struct nouveau_channel *, struct ttm_buffer_object *, diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 1cf52635ea74..256ec5b35473 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -16,7 +16,7 @@ struct nouveau_sgdma_be { }; void -nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; @@ -29,7 +29,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) } int -nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg) +nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; struct nouveau_drm *drm = nouveau_bdev(bdev); @@ -56,7 +56,7 @@ nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_re } void -nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; if (nvbe->mem) { diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 4f69e4c3dafd..1c3f890377d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data, struct drm_nouveau_svm_init *args = data; int ret; + /* We need to fail if svm is disabled */ + if (!cli->drm->svm) + return -ENOSYS; + /* Allocate tracking for SVM-enabled VMM. 
*/ if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index a37bc3d7b38b..b81ae90b8449 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -154,7 +154,7 @@ error_unlock: return ret; } -static struct vm_operations_struct nouveau_ttm_vm_ops = { +static const struct vm_operations_struct nouveau_ttm_vm_ops = { .fault = nouveau_ttm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, @@ -324,10 +324,10 @@ nouveau_ttm_init(struct nouveau_drm *drm) need_swiotlb = !!swiotlb_nr_tbl(); #endif - ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver, - drm->dev->dev, dev->anon_inode->i_mapping, - dev->vma_offset_manager, need_swiotlb, - drm->client.mmu.dmabits <= 32); + ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev, + dev->anon_inode->i_mapping, + dev->vma_offset_manager, need_swiotlb, + drm->client.mmu.dmabits <= 32); if (ret) { NV_ERROR(drm, "error initialising bo driver, %d\n", ret); return ret; @@ -377,7 +377,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm) nouveau_ttm_fini_vram(drm); nouveau_ttm_fini_gtt(drm); - ttm_bo_device_release(&drm->ttm.bdev); + ttm_device_fini(&drm->ttm.bdev); arch_phys_wc_del(drm->ttm.mtrr); drm->ttm.mtrr = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h index 69552049bb96..dbf6dc238efd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.h +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h @@ -3,7 +3,7 @@ #define __NOUVEAU_TTM_H__ static inline struct nouveau_drm * -nouveau_bdev(struct ttm_bo_device *bd) +nouveau_bdev(struct ttm_device *bd) { return container_of(bd, struct nouveau_drm, ttm.bdev); } @@ -22,7 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); int nouveau_ttm_global_init(struct nouveau_drm *); void nouveau_ttm_global_release(struct nouveau_drm *); -int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg); -void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm); -void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg); +void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm); +void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm); #endif diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c index e84a2e2ff043..a463289962b2 100644 --- a/drivers/gpu/drm/nouveau/nvif/fifo.c +++ b/drivers/gpu/drm/nouveau/nvif/fifo.c @@ -41,9 +41,11 @@ nvif_fifo_runlists(struct nvif_device *device) return -ENOMEM; a->m.version = 1; a->m.count = sizeof(a->v) / sizeof(a->v.runlists); - a->v.runlists.mthd = NV_DEVICE_FIFO_RUNLISTS; - for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++) - a->v.runlist[i].mthd = NV_DEVICE_FIFO_RUNLIST_ENGINES(i); + a->v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS; + for (i = 0; i < ARRAY_SIZE(a->v.runlist); i++) { + a->v.runlist[i].mthd = NV_DEVICE_HOST_RUNLIST_ENGINES; + a->v.runlist[i].data = i; + } ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, a, sizeof(*a)); if (ret) @@ -58,7 +60,7 @@ nvif_fifo_runlists(struct nvif_device *device) } for (i = 0; i < device->runlists; i++) { - if (a->v.runlists.data & BIT_ULL(i)) + if (a->v.runlist[i].mthd != NV_DEVICE_INFO_INVALID) device->runlist[i].engines = a->v.runlist[i].data; } @@ -70,29 +72,15 @@ done: u64 nvif_fifo_runlist(struct 
nvif_device *device, u64 engine) { - struct nvif_object *object = &device->object; - struct { - struct nv_device_info_v1 m; - struct { - struct nv_device_info_v1_data engine; - } v; - } a = { - .m.version = 1, - .m.count = sizeof(a.v) / sizeof(a.v.engine), - .v.engine.mthd = engine, - }; u64 runm = 0; int ret, i; if ((ret = nvif_fifo_runlists(device))) return runm; - ret = nvif_object_mthd(object, NV_DEVICE_V0_INFO, &a, sizeof(a)); - if (ret == 0) { - for (i = 0; i < device->runlists; i++) { - if (device->runlist[i].engines & a.v.engine.data) - runm |= BIT_ULL(i); - } + for (i = 0; i < device->runlists; i++) { + if (device->runlist[i].engines & engine) + runm |= BIT_ULL(i); } return runm; diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c index 1a47c40e171b..e41a39ae1597 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c @@ -40,10 +40,11 @@ nvkm_engine_unref(struct nvkm_engine **pengine) { struct nvkm_engine *engine = *pengine; if (engine) { - mutex_lock(&engine->subdev.mutex); - if (--engine->usecount == 0) + if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) { nvkm_subdev_fini(&engine->subdev, false); - mutex_unlock(&engine->subdev.mutex); + engine->use.enabled = false; + mutex_unlock(&engine->use.mutex); + } *pengine = NULL; } } @@ -51,17 +52,21 @@ nvkm_engine_unref(struct nvkm_engine **pengine) struct nvkm_engine * nvkm_engine_ref(struct nvkm_engine *engine) { + int ret; if (engine) { - mutex_lock(&engine->subdev.mutex); - if (++engine->usecount == 1) { - int ret = nvkm_subdev_init(&engine->subdev); - if (ret) { - engine->usecount--; - mutex_unlock(&engine->subdev.mutex); - return ERR_PTR(ret); + if (!refcount_inc_not_zero(&engine->use.refcount)) { + mutex_lock(&engine->use.mutex); + if (!refcount_inc_not_zero(&engine->use.refcount)) { + engine->use.enabled = true; + if ((ret = nvkm_subdev_init(&engine->subdev))) { + engine->use.enabled = false; + mutex_unlock(&engine->use.mutex); + return ERR_PTR(ret); + } + refcount_set(&engine->use.refcount, 1); } + mutex_unlock(&engine->use.mutex); } - mutex_unlock(&engine->subdev.mutex); } return engine; } @@ -114,7 +119,7 @@ nvkm_engine_init(struct nvkm_subdev *subdev) int ret = 0, i; s64 time; - if (!engine->usecount) { + if (!engine->use.enabled) { nvkm_trace(subdev, "init skipped, engine has no users\n"); return ret; } @@ -156,11 +161,12 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev) struct nvkm_engine *engine = nvkm_engine(subdev); if (engine->func->dtor) return engine->func->dtor(engine); + mutex_destroy(&engine->use.mutex); return engine; } -static const struct nvkm_subdev_func -nvkm_engine_func = { +const struct nvkm_subdev_func +nvkm_engine = { .dtor = nvkm_engine_dtor, .preinit = nvkm_engine_preinit, .init = nvkm_engine_init, @@ -170,14 +176,15 @@ nvkm_engine_func = { }; int -nvkm_engine_ctor(const struct nvkm_engine_func *func, - struct nvkm_device *device, int index, bool enable, - struct nvkm_engine *engine) +nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine) { - nvkm_subdev_ctor(&nvkm_engine_func, device, index, &engine->subdev); + nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev); engine->func = func; + refcount_set(&engine->use.refcount, 0); + mutex_init(&engine->use.mutex); - if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) { + if 
(!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) { nvkm_debug(&engine->subdev, "disabled\n"); return -ENODEV; } @@ -187,11 +194,11 @@ nvkm_engine_ctor(const struct nvkm_engine_func *func, } int -nvkm_engine_new_(const struct nvkm_engine_func *func, - struct nvkm_device *device, int index, bool enable, +nvkm_engine_new_(const struct nvkm_engine_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine **pengine) { if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL))) return -ENOMEM; - return nvkm_engine_ctor(func, device, index, enable, *pengine); + return nvkm_engine_ctor(func, device, type, inst, enable, *pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c index 38130ef272d6..c69daac9bac7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c @@ -33,13 +33,13 @@ nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device, struct nvkm_fb *fb = device->fb; struct nvkm_tags *tags = *ptags; if (tags) { - mutex_lock(&fb->subdev.mutex); + mutex_lock(&fb->tags.mutex); if (refcount_dec_and_test(&tags->refcount)) { - nvkm_mm_free(&fb->tags, &tags->mn); + nvkm_mm_free(&fb->tags.mm, &tags->mn); kfree(memory->tags); memory->tags = NULL; } - mutex_unlock(&fb->subdev.mutex); + mutex_unlock(&fb->tags.mutex); *ptags = NULL; } } @@ -52,29 +52,29 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, struct nvkm_fb *fb = device->fb; struct nvkm_tags *tags; - mutex_lock(&fb->subdev.mutex); + mutex_lock(&fb->tags.mutex); if ((tags = memory->tags)) { /* If comptags exist for the memory, but a different amount * than requested, the buffer is being mapped with settings * that are incompatible with existing mappings. 
*/ if (tags->mn && tags->mn->length != nr) { - mutex_unlock(&fb->subdev.mutex); + mutex_unlock(&fb->tags.mutex); return -EINVAL; } refcount_inc(&tags->refcount); - mutex_unlock(&fb->subdev.mutex); + mutex_unlock(&fb->tags.mutex); *ptags = tags; return 0; } if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) { - mutex_unlock(&fb->subdev.mutex); + mutex_unlock(&fb->tags.mutex); return -ENOMEM; } - if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) { + if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) { if (clr) clr(device, tags->mn->offset, tags->mn->length); } else { @@ -92,7 +92,7 @@ nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, refcount_set(&tags->refcount, 1); *ptags = memory->tags = tags; - mutex_unlock(&fb->subdev.mutex); + mutex_unlock(&fb->tags.mutex); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index 49d468b45d3f..a74b7acb6832 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -26,69 +26,13 @@ #include <core/option.h> #include <subdev/mc.h> -static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR]; - const char * -nvkm_subdev_name[NVKM_SUBDEV_NR] = { - [NVKM_SUBDEV_ACR ] = "acr", - [NVKM_SUBDEV_BAR ] = "bar", - [NVKM_SUBDEV_VBIOS ] = "bios", - [NVKM_SUBDEV_BUS ] = "bus", - [NVKM_SUBDEV_CLK ] = "clk", - [NVKM_SUBDEV_DEVINIT ] = "devinit", - [NVKM_SUBDEV_FAULT ] = "fault", - [NVKM_SUBDEV_FB ] = "fb", - [NVKM_SUBDEV_FUSE ] = "fuse", - [NVKM_SUBDEV_GPIO ] = "gpio", - [NVKM_SUBDEV_GSP ] = "gsp", - [NVKM_SUBDEV_I2C ] = "i2c", - [NVKM_SUBDEV_IBUS ] = "priv", - [NVKM_SUBDEV_ICCSENSE] = "iccsense", - [NVKM_SUBDEV_INSTMEM ] = "imem", - [NVKM_SUBDEV_LTC ] = "ltc", - [NVKM_SUBDEV_MC ] = "mc", - [NVKM_SUBDEV_MMU ] = "mmu", - [NVKM_SUBDEV_MXM ] = "mxm", - [NVKM_SUBDEV_PCI ] = "pci", - [NVKM_SUBDEV_PMU ] = "pmu", - [NVKM_SUBDEV_THERM ] = "therm", - [NVKM_SUBDEV_TIMER ] = "tmr", - [NVKM_SUBDEV_TOP ] = "top", - [NVKM_SUBDEV_VOLT ] = "volt", - [NVKM_ENGINE_BSP ] = "bsp", - [NVKM_ENGINE_CE0 ] = "ce0", - [NVKM_ENGINE_CE1 ] = "ce1", - [NVKM_ENGINE_CE2 ] = "ce2", - [NVKM_ENGINE_CE3 ] = "ce3", - [NVKM_ENGINE_CE4 ] = "ce4", - [NVKM_ENGINE_CE5 ] = "ce5", - [NVKM_ENGINE_CE6 ] = "ce6", - [NVKM_ENGINE_CE7 ] = "ce7", - [NVKM_ENGINE_CE8 ] = "ce8", - [NVKM_ENGINE_CIPHER ] = "cipher", - [NVKM_ENGINE_DISP ] = "disp", - [NVKM_ENGINE_DMAOBJ ] = "dma", - [NVKM_ENGINE_FIFO ] = "fifo", - [NVKM_ENGINE_GR ] = "gr", - [NVKM_ENGINE_IFB ] = "ifb", - [NVKM_ENGINE_ME ] = "me", - [NVKM_ENGINE_MPEG ] = "mpeg", - [NVKM_ENGINE_MSENC ] = "msenc", - [NVKM_ENGINE_MSPDEC ] = "mspdec", - [NVKM_ENGINE_MSPPP ] = "msppp", - [NVKM_ENGINE_MSVLD ] = "msvld", - [NVKM_ENGINE_NVENC0 ] = "nvenc0", - [NVKM_ENGINE_NVENC1 ] = "nvenc1", - [NVKM_ENGINE_NVENC2 ] = "nvenc2", - [NVKM_ENGINE_NVDEC0 ] = "nvdec0", - [NVKM_ENGINE_NVDEC1 ] = "nvdec1", - [NVKM_ENGINE_NVDEC2 ] = "nvdec2", - [NVKM_ENGINE_PM ] = "pm", - [NVKM_ENGINE_SEC ] = "sec", - [NVKM_ENGINE_SEC2 ] = "sec2", - [NVKM_ENGINE_SW ] = "sw", - [NVKM_ENGINE_VIC ] = "vic", - [NVKM_ENGINE_VP ] = "vp", +nvkm_subdev_type[NVKM_SUBDEV_NR] = { +#define NVKM_LAYOUT_ONCE(type,data,ptr,...) [type] = #ptr, +#define NVKM_LAYOUT_INST(A...) 
NVKM_LAYOUT_ONCE(A) +#include <core/layout.h> +#undef NVKM_LAYOUT_ONCE +#undef NVKM_LAYOUT_INST }; void @@ -125,7 +69,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) } } - nvkm_mc_reset(device, subdev->index); + nvkm_mc_reset(device, subdev->type, subdev->inst); time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "%s completed in %lldus\n", action, time); @@ -199,6 +143,7 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev) if (subdev && !WARN_ON(!subdev->func)) { nvkm_trace(subdev, "destroy running...\n"); time = ktime_to_us(ktime_get()); + list_del(&subdev->head); if (subdev->func->dtor) *psubdev = subdev->func->dtor(subdev); time = ktime_to_us(ktime_get()) - time; @@ -209,26 +154,41 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev) } void -nvkm_subdev_ctor(const struct nvkm_subdev_func *func, - struct nvkm_device *device, int index, - struct nvkm_subdev *subdev) +nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) +{ + struct nvkm_subdev *subdev; + list_for_each_entry(subdev, &device->subdev, head) { + if (subdev->type == type && subdev->inst == inst) { + *subdev->pself = NULL; + nvkm_subdev_del(&subdev); + break; + } + } +} + +void +nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev) { - const char *name = nvkm_subdev_name[index]; subdev->func = func; subdev->device = device; - subdev->index = index; - - __mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]); - subdev->debug = nvkm_dbgopt(device->dbgopt, name); + subdev->type = type; + subdev->inst = inst < 0 ? 0 : inst; + + if (inst >= 0) + snprintf(subdev->name, sizeof(subdev->name), "%s%d", nvkm_subdev_type[type], inst); + else + strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name)); + subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name); + list_add_tail(&subdev->head, &device->subdev); } int -nvkm_subdev_new_(const struct nvkm_subdev_func *func, - struct nvkm_device *device, int index, - struct nvkm_subdev **psubdev) +nvkm_subdev_new_(const struct nvkm_subdev_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_subdev **psubdev) { if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(func, device, index, *psubdev); + nvkm_subdev_ctor(func, device, type, inst, *psubdev); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c index 44e116f7880d..39f6db269c7a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c @@ -36,8 +36,9 @@ g84_bsp = { }; int -g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) +g84_bsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) { - return nvkm_xtensa_new_(&g84_bsp, device, index, + return nvkm_xtensa_new_(&g84_bsp, device, type, inst, device->chipset != 0x92, 0x103000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c index ad9f855c9a40..b9cc39565985 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c @@ -29,9 +29,7 @@ static void gf100_ce_init(struct nvkm_falcon *ce) { - struct nvkm_device *device = ce->engine.subdev.device; - const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0; - nvkm_wr32(device, ce->addr + 
0x084, index); + nvkm_wr32(ce->engine.subdev.device, ce->addr + 0x084, ce->engine.subdev.inst); } static const struct nvkm_falcon_func @@ -63,16 +61,9 @@ gf100_ce1 = { }; int -gf100_ce_new(struct nvkm_device *device, int index, +gf100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - if (index == NVKM_ENGINE_CE0) { - return nvkm_falcon_new_(&gf100_ce0, device, index, true, - 0x104000, pengine); - } else - if (index == NVKM_ENGINE_CE1) { - return nvkm_falcon_new_(&gf100_ce1, device, index, true, - 0x105000, pengine); - } - return -ENODEV; + return nvkm_falcon_new_(inst ? &gf100_ce1 : &gf100_ce0, device, type, inst, true, + 0x104000 + (inst * 0x1000), pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c index 9e0b53a10f77..27f29eb0494d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c @@ -58,9 +58,9 @@ gk104_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base) void gk104_ce_intr(struct nvkm_engine *ce) { - const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000; struct nvkm_subdev *subdev = &ce->subdev; struct nvkm_device *device = subdev->device; + const u32 base = subdev->inst * 0x1000; u32 mask = nvkm_rd32(device, 0x104904 + base); u32 intr = nvkm_rd32(device, 0x104908 + base) & mask; if (intr & 0x00000001) { @@ -94,8 +94,8 @@ gk104_ce = { }; int -gk104_ce_new(struct nvkm_device *device, int index, +gk104_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&gk104_ce, device, index, true, pengine); + return nvkm_engine_new_(&gk104_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c index c0df7daa85e2..c3c476592c43 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c @@ -36,8 +36,8 @@ gm107_ce = { }; int -gm107_ce_new(struct nvkm_device *device, int index, +gm107_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&gm107_ce, device, index, true, pengine); + return nvkm_engine_new_(&gm107_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c index c6fa8b20737e..d2db61865371 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c @@ -35,8 +35,8 @@ gm200_ce = { }; int -gm200_ce_new(struct nvkm_device *device, int index, +gm200_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&gm200_ce, device, index, true, pengine); + return nvkm_engine_new_(&gm200_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c index c7710456bc30..a4f08a4472c9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c @@ -59,9 +59,9 @@ gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base) void gp100_ce_intr(struct nvkm_engine *ce) { - const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80; struct nvkm_subdev *subdev = &ce->subdev; struct nvkm_device *device = subdev->device; + const u32 base = subdev->inst * 0x80; u32 
mask = nvkm_rd32(device, 0x10440c + base); u32 intr = nvkm_rd32(device, 0x104410 + base) & mask; if (intr & 0x00000001) { //XXX: guess @@ -95,8 +95,8 @@ gp100_ce = { }; int -gp100_ce_new(struct nvkm_device *device, int index, +gp100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&gp100_ce, device, index, true, pengine); + return nvkm_engine_new_(&gp100_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c index 985c8f653874..180d497a95eb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp102.c @@ -37,8 +37,8 @@ gp102_ce = { }; int -gp102_ce_new(struct nvkm_device *device, int index, +gp102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&gp102_ce, device, index, true, pengine); + return nvkm_engine_new_(&gp102_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c index 63ac51a54fd3..704df0f2d1f1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c @@ -44,7 +44,7 @@ gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan) { struct nvkm_subdev *subdev = &ce->engine.subdev; struct nvkm_device *device = subdev->device; - const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000; + const u32 base = subdev->inst * 0x1000; u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff; u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16; u32 mthd = (addr & 0x07ff) << 2; @@ -75,9 +75,9 @@ gt215_ce = { }; int -gt215_ce_new(struct nvkm_device *device, int index, +gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(&gt215_ce, device, index, + return nvkm_falcon_new_(&gt215_ce, device, type, inst, (device->chipset != 0xaf), 0x104000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c index fcda3de45857..cd5e9cdca1cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c @@ -33,8 +33,8 @@ gv100_ce = { }; int -gv100_ce_new(struct nvkm_device *device, int index, +gv100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&gv100_ce, device, index, true, pengine); + return nvkm_engine_new_(&gv100_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c index b4308e2d8c75..e5ff92d9364c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c @@ -33,8 +33,8 @@ tu102_ce = { }; int -tu102_ce_new(struct nvkm_device *device, int index, +tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&tu102_ce, device, index, true, pengine); + return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c index 68ffb520531e..be2a7181dc15 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c @@ -127,8 +127,8 @@ g84_cipher = { }; int -g84_cipher_new(struct nvkm_device *device, int index, +g84_cipher_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_engine_new_(&g84_cipher, device, index, true, pengine); + return nvkm_engine_new_(&g84_cipher, device, type, inst, true, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index cdcc851e06f9..b930f539feec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -71,2640 +71,2557 @@ nvkm_device_list(u64 *name, int size) static const struct nvkm_device_chip null_chipset = { .name = "NULL", - .bios = nvkm_bios_new, + .bios = { 0x00000001, nvkm_bios_new }, }; static const struct nvkm_device_chip nv4_chipset = { .name = "NV04", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv04_devinit_new, - .fb = nv04_fb_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv04_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv04_fifo_new, - .gr = nv04_gr_new, - .sw = nv04_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv04_devinit_new }, + .fb = { 0x00000001, nv04_fb_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv04_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv04_fifo_new }, + .gr = { 0x00000001, nv04_gr_new }, + .sw = { 0x00000001, nv04_sw_new }, }; static const struct nvkm_device_chip nv5_chipset = { .name = "NV05", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv05_devinit_new, - .fb = nv04_fb_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv04_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv04_fifo_new, - .gr = nv04_gr_new, - .sw = nv04_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv05_devinit_new }, + .fb = { 0x00000001, nv04_fb_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv04_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv04_fifo_new }, + .gr = { 0x00000001, nv04_gr_new }, + .sw = { 0x00000001, nv04_sw_new }, }; static const struct nvkm_device_chip nv10_chipset = { .name = "NV10", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv10_devinit_new, - .fb = nv10_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv04_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .gr = nv10_gr_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 
0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv10_devinit_new }, + .fb = { 0x00000001, nv10_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv04_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .gr = { 0x00000001, nv10_gr_new }, }; static const struct nvkm_device_chip nv11_chipset = { .name = "NV11", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv10_devinit_new, - .fb = nv10_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv11_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv10_fifo_new, - .gr = nv15_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv10_devinit_new }, + .fb = { 0x00000001, nv10_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv11_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv10_fifo_new }, + .gr = { 0x00000001, nv15_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv15_chipset = { .name = "NV15", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv10_devinit_new, - .fb = nv10_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv04_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv10_fifo_new, - .gr = nv15_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv10_devinit_new }, + .fb = { 0x00000001, nv10_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv04_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv10_fifo_new }, + .gr = { 0x00000001, nv15_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv17_chipset = { .name = "NV17", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv10_devinit_new, - .fb = nv10_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv17_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv10_devinit_new }, + .fb = { 0x00000001, nv10_fb_new 
}, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv17_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv18_chipset = { .name = "NV18", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv10_devinit_new, - .fb = nv10_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv17_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv10_devinit_new }, + .fb = { 0x00000001, nv10_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv17_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv1a_chipset = { .name = "nForce", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv1a_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv04_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv10_fifo_new, - .gr = nv15_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv1a_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv04_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv10_fifo_new }, + .gr = { 0x00000001, nv15_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv1f_chipset = { .name = "nForce2", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv1a_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv17_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv1a_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, 
nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv17_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv20_chipset = { .name = "NV20", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv20_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv20_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv20_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv20_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv25_chipset = { .name = "NV25", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv25_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv25_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv25_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv25_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv28_chipset = { .name = "NV28", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv25_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv25_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv25_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 
0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv25_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv2a_chipset = { .name = "NV2A", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv25_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv2a_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv25_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv2a_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv30_chipset = { .name = "NV30", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv30_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv30_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv30_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv30_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv31_chipset = { .name = "NV31", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv30_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv30_gr_new, - .mpeg = nv31_mpeg_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv30_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 
0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv30_gr_new }, + .mpeg = { 0x00000001, nv31_mpeg_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv34_chipset = { .name = "NV34", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv04_clk_new, - .devinit = nv10_devinit_new, - .fb = nv10_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv34_gr_new, - .mpeg = nv31_mpeg_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv10_devinit_new }, + .fb = { 0x00000001, nv10_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv34_gr_new }, + .mpeg = { 0x00000001, nv31_mpeg_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv35_chipset = { .name = "NV35", - .bios = nvkm_bios_new, - .bus = nv04_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv35_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv35_gr_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv04_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv35_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv35_gr_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv36_chipset = { .name = "NV36", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv04_clk_new, - .devinit = nv20_devinit_new, - .fb = nv36_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv04_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv04_pci_new, - .timer = nv04_timer_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv17_fifo_new, - .gr = nv35_gr_new, - .mpeg = nv31_mpeg_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv04_clk_new }, + .devinit = { 0x00000001, nv20_devinit_new }, + .fb = { 0x00000001, nv36_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, 
nv04_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv04_pci_new }, + .timer = { 0x00000001, nv04_timer_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv17_fifo_new }, + .gr = { 0x00000001, nv35_gr_new }, + .mpeg = { 0x00000001, nv31_mpeg_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv40_chipset = { .name = "NV40", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv40_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv40_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv40_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv40_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv40_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv40_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv41_chipset = { .name = "NV41", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv41_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - .mmu = nv41_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv40_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv41_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv41_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv40_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv42_chipset = { .name = "NV42", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv41_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - 
.mmu = nv41_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv40_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv41_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv41_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv40_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv43_chipset = { .name = "NV43", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv41_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - .mmu = nv41_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv40_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv41_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv41_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv40_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv44_chipset = { .name = "NV44", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv44_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu = nv44_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv44_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv44_mmu_new }, + .pci = { 0x00000001, 
nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv45_chipset = { .name = "NV45", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv40_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - .mmu = nv04_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv40_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv46_chipset = { .name = "G72", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv46_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu = nv44_mmu_new, - .pci = nv46_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv46_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv44_mmu_new }, + .pci = { 0x00000001, nv46_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv47_chipset = { .name = "G70", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv47_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - 
.mmu = nv41_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv47_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv41_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv49_chipset = { .name = "G71", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv49_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - .mmu = nv41_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv49_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv41_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv4a_chipset = { .name = "NV44A", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv44_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu = nv04_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv44_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv04_mmu_new }, + .pci = { 0x00000001, 
nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv4b_chipset = { .name = "G73", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv49_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv17_mc_new, - .mmu = nv41_mmu_new, - .pci = nv40_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv40_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv49_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv17_mc_new }, + .mmu = { 0x00000001, nv41_mmu_new }, + .pci = { 0x00000001, nv40_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv40_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv4c_chipset = { .name = "C61", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv46_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu = nv44_mmu_new, - .pci = nv4c_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv46_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv44_mmu_new }, + .pci = { 0x00000001, nv4c_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv4e_chipset = { .name = "C51", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv4e_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv4e_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu 
= nv44_mmu_new, - .pci = nv4c_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv4e_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv4e_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv44_mmu_new }, + .pci = { 0x00000001, nv4c_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv50_chipset = { .name = "G80", - .bar = nv50_bar_new, - .bios = nvkm_bios_new, - .bus = nv50_bus_new, - .clk = nv50_clk_new, - .devinit = nv50_devinit_new, - .fb = nv50_fb_new, - .fuse = nv50_fuse_new, - .gpio = nv50_gpio_new, - .i2c = nv50_i2c_new, - .imem = nv50_instmem_new, - .mc = nv50_mc_new, - .mmu = nv50_mmu_new, - .mxm = nv50_mxm_new, - .pci = nv46_pci_new, - .therm = nv50_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv50_disp_new, - .dma = nv50_dma_new, - .fifo = nv50_fifo_new, - .gr = nv50_gr_new, - .mpeg = nv50_mpeg_new, - .pm = nv50_pm_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, nv50_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv50_bus_new }, + .clk = { 0x00000001, nv50_clk_new }, + .devinit = { 0x00000001, nv50_devinit_new }, + .fb = { 0x00000001, nv50_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, nv50_gpio_new }, + .i2c = { 0x00000001, nv50_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, nv50_mc_new }, + .mmu = { 0x00000001, nv50_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, nv46_pci_new }, + .therm = { 0x00000001, nv50_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv50_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, nv50_fifo_new }, + .gr = { 0x00000001, nv50_gr_new }, + .mpeg = { 0x00000001, nv50_mpeg_new }, + .pm = { 0x00000001, nv50_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nv63_chipset = { .name = "C73", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv46_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu = nv44_mmu_new, - .pci = nv4c_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv46_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + 
.i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv44_mmu_new }, + .pci = { 0x00000001, nv4c_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv67_chipset = { .name = "C67", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv46_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu = nv44_mmu_new, - .pci = nv4c_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv46_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv44_mmu_new }, + .pci = { 0x00000001, nv4c_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv68_chipset = { .name = "C68", - .bios = nvkm_bios_new, - .bus = nv31_bus_new, - .clk = nv40_clk_new, - .devinit = nv1a_devinit_new, - .fb = nv46_fb_new, - .gpio = nv10_gpio_new, - .i2c = nv04_i2c_new, - .imem = nv40_instmem_new, - .mc = nv44_mc_new, - .mmu = nv44_mmu_new, - .pci = nv4c_pci_new, - .therm = nv40_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = nv04_disp_new, - .dma = nv04_dma_new, - .fifo = nv40_fifo_new, - .gr = nv44_gr_new, - .mpeg = nv44_mpeg_new, - .pm = nv40_pm_new, - .sw = nv10_sw_new, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv31_bus_new }, + .clk = { 0x00000001, nv40_clk_new }, + .devinit = { 0x00000001, nv1a_devinit_new }, + .fb = { 0x00000001, nv46_fb_new }, + .gpio = { 0x00000001, nv10_gpio_new }, + .i2c = { 0x00000001, nv04_i2c_new }, + .imem = { 0x00000001, nv40_instmem_new }, + .mc = { 0x00000001, nv44_mc_new }, + .mmu = { 0x00000001, nv44_mmu_new }, + .pci = { 0x00000001, nv4c_pci_new }, + .therm = { 0x00000001, nv40_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, nv04_disp_new }, + .dma = { 0x00000001, nv04_dma_new }, + .fifo = { 0x00000001, nv40_fifo_new }, + .gr = { 0x00000001, nv44_gr_new }, + .mpeg = { 0x00000001, nv44_mpeg_new }, + .pm = { 0x00000001, nv40_pm_new }, + .sw = { 0x00000001, nv10_sw_new }, }; static const struct nvkm_device_chip nv84_chipset = { .name = "G84", - .bar = g84_bar_new, - .bios = nvkm_bios_new, 
- .bus = nv50_bus_new, - .clk = g84_clk_new, - .devinit = g84_devinit_new, - .fb = g84_fb_new, - .fuse = nv50_fuse_new, - .gpio = nv50_gpio_new, - .i2c = nv50_i2c_new, - .imem = nv50_instmem_new, - .mc = g84_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g84_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .bsp = g84_bsp_new, - .cipher = g84_cipher_new, - .disp = g84_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = g84_gr_new, - .mpeg = g84_mpeg_new, - .pm = g84_pm_new, - .sw = nv50_sw_new, - .vp = g84_vp_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv50_bus_new }, + .clk = { 0x00000001, g84_clk_new }, + .devinit = { 0x00000001, g84_devinit_new }, + .fb = { 0x00000001, g84_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, nv50_gpio_new }, + .i2c = { 0x00000001, nv50_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g84_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g84_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .bsp = { 0x00000001, g84_bsp_new }, + .cipher = { 0x00000001, g84_cipher_new }, + .disp = { 0x00000001, g84_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, g84_gr_new }, + .mpeg = { 0x00000001, g84_mpeg_new }, + .pm = { 0x00000001, g84_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, + .vp = { 0x00000001, g84_vp_new }, }; static const struct nvkm_device_chip nv86_chipset = { .name = "G86", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = nv50_bus_new, - .clk = g84_clk_new, - .devinit = g84_devinit_new, - .fb = g84_fb_new, - .fuse = nv50_fuse_new, - .gpio = nv50_gpio_new, - .i2c = nv50_i2c_new, - .imem = nv50_instmem_new, - .mc = g84_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g84_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .bsp = g84_bsp_new, - .cipher = g84_cipher_new, - .disp = g84_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = g84_gr_new, - .mpeg = g84_mpeg_new, - .pm = g84_pm_new, - .sw = nv50_sw_new, - .vp = g84_vp_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv50_bus_new }, + .clk = { 0x00000001, g84_clk_new }, + .devinit = { 0x00000001, g84_devinit_new }, + .fb = { 0x00000001, g84_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, nv50_gpio_new }, + .i2c = { 0x00000001, nv50_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g84_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g84_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .bsp = { 0x00000001, g84_bsp_new }, + .cipher = { 0x00000001, g84_cipher_new }, + .disp = { 0x00000001, g84_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, g84_gr_new }, + .mpeg = { 0x00000001, g84_mpeg_new }, + .pm = { 0x00000001, g84_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, + .vp = { 0x00000001, g84_vp_new }, }; static const struct nvkm_device_chip nv92_chipset = { .name = "G92", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = 
nv50_bus_new, - .clk = g84_clk_new, - .devinit = g84_devinit_new, - .fb = g84_fb_new, - .fuse = nv50_fuse_new, - .gpio = nv50_gpio_new, - .i2c = nv50_i2c_new, - .imem = nv50_instmem_new, - .mc = g84_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g92_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .bsp = g84_bsp_new, - .cipher = g84_cipher_new, - .disp = g84_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = g84_gr_new, - .mpeg = g84_mpeg_new, - .pm = g84_pm_new, - .sw = nv50_sw_new, - .vp = g84_vp_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, nv50_bus_new }, + .clk = { 0x00000001, g84_clk_new }, + .devinit = { 0x00000001, g84_devinit_new }, + .fb = { 0x00000001, g84_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, nv50_gpio_new }, + .i2c = { 0x00000001, nv50_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g84_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g92_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .bsp = { 0x00000001, g84_bsp_new }, + .cipher = { 0x00000001, g84_cipher_new }, + .disp = { 0x00000001, g84_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, g84_gr_new }, + .mpeg = { 0x00000001, g84_mpeg_new }, + .pm = { 0x00000001, g84_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, + .vp = { 0x00000001, g84_vp_new }, }; static const struct nvkm_device_chip nv94_chipset = { .name = "G94", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = g84_clk_new, - .devinit = g84_devinit_new, - .fb = g84_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = g84_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .bsp = g84_bsp_new, - .cipher = g84_cipher_new, - .disp = g94_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = g84_gr_new, - .mpeg = g84_mpeg_new, - .pm = g84_pm_new, - .sw = nv50_sw_new, - .vp = g84_vp_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, g84_clk_new }, + .devinit = { 0x00000001, g84_devinit_new }, + .fb = { 0x00000001, g84_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g84_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .bsp = { 0x00000001, g84_bsp_new }, + .cipher = { 0x00000001, g84_cipher_new }, + .disp = { 0x00000001, g94_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, g84_gr_new }, + .mpeg = { 0x00000001, g84_mpeg_new }, + .pm = { 0x00000001, g84_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, + .vp = { 0x00000001, g84_vp_new }, }; static const struct nvkm_device_chip nv96_chipset = { .name = "G96", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - 
.clk = g84_clk_new, - .devinit = g84_devinit_new, - .fb = g84_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = g84_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .bsp = g84_bsp_new, - .cipher = g84_cipher_new, - .disp = g94_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = g84_gr_new, - .mpeg = g84_mpeg_new, - .pm = g84_pm_new, - .sw = nv50_sw_new, - .vp = g84_vp_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, g84_clk_new }, + .devinit = { 0x00000001, g84_devinit_new }, + .fb = { 0x00000001, g84_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g84_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .bsp = { 0x00000001, g84_bsp_new }, + .cipher = { 0x00000001, g84_cipher_new }, + .disp = { 0x00000001, g94_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, g84_gr_new }, + .mpeg = { 0x00000001, g84_mpeg_new }, + .pm = { 0x00000001, g84_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, + .vp = { 0x00000001, g84_vp_new }, }; static const struct nvkm_device_chip nv98_chipset = { .name = "G98", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = g84_clk_new, - .devinit = g98_devinit_new, - .fb = g84_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = g98_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = g94_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = g84_gr_new, - .mspdec = g98_mspdec_new, - .msppp = g98_msppp_new, - .msvld = g98_msvld_new, - .pm = g84_pm_new, - .sec = g98_sec_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, g84_clk_new }, + .devinit = { 0x00000001, g98_devinit_new }, + .fb = { 0x00000001, g84_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g98_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, g94_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, g84_gr_new }, + .mspdec = { 0x00000001, g98_mspdec_new }, + .msppp = { 0x00000001, g98_msppp_new }, + .msvld = { 0x00000001, g98_msvld_new }, + .pm = { 0x00000001, g84_pm_new }, + .sec = { 0x00000001, g98_sec_new }, + .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nva0_chipset = { .name = "GT200", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk 
= g84_clk_new, - .devinit = g84_devinit_new, - .fb = g84_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = nv50_i2c_new, - .imem = nv50_instmem_new, - .mc = g84_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .bsp = g84_bsp_new, - .cipher = g84_cipher_new, - .disp = gt200_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = gt200_gr_new, - .mpeg = g84_mpeg_new, - .pm = gt200_pm_new, - .sw = nv50_sw_new, - .vp = g84_vp_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, g84_clk_new }, + .devinit = { 0x00000001, g84_devinit_new }, + .fb = { 0x00000001, g84_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, nv50_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g84_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .bsp = { 0x00000001, g84_bsp_new }, + .cipher = { 0x00000001, g84_cipher_new }, + .disp = { 0x00000001, gt200_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, gt200_gr_new }, + .mpeg = { 0x00000001, g84_mpeg_new }, + .pm = { 0x00000001, gt200_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, + .vp = { 0x00000001, g84_vp_new }, }; static const struct nvkm_device_chip nva3_chipset = { .name = "GT215", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = gt215_clk_new, - .devinit = gt215_devinit_new, - .fb = gt215_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = gt215_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .pmu = gt215_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .ce[0] = gt215_ce_new, - .disp = gt215_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = gt215_gr_new, - .mpeg = g84_mpeg_new, - .mspdec = gt215_mspdec_new, - .msppp = gt215_msppp_new, - .msvld = gt215_msvld_new, - .pm = gt215_pm_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, gt215_clk_new }, + .devinit = { 0x00000001, gt215_devinit_new }, + .fb = { 0x00000001, gt215_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, gt215_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .pmu = { 0x00000001, gt215_pmu_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .ce = { 0x00000001, gt215_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, gt215_gr_new }, + .mpeg = { 0x00000001, g84_mpeg_new }, + .mspdec = { 0x00000001, gt215_mspdec_new }, + .msppp = { 0x00000001, gt215_msppp_new }, + .msvld = { 0x00000001, gt215_msvld_new }, + .pm = { 0x00000001, gt215_pm_new }, 
+ .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nva5_chipset = { .name = "GT216", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = gt215_clk_new, - .devinit = gt215_devinit_new, - .fb = gt215_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = gt215_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .pmu = gt215_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .ce[0] = gt215_ce_new, - .disp = gt215_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = gt215_gr_new, - .mspdec = gt215_mspdec_new, - .msppp = gt215_msppp_new, - .msvld = gt215_msvld_new, - .pm = gt215_pm_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, gt215_clk_new }, + .devinit = { 0x00000001, gt215_devinit_new }, + .fb = { 0x00000001, gt215_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, gt215_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .pmu = { 0x00000001, gt215_pmu_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .ce = { 0x00000001, gt215_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, gt215_gr_new }, + .mspdec = { 0x00000001, gt215_mspdec_new }, + .msppp = { 0x00000001, gt215_msppp_new }, + .msvld = { 0x00000001, gt215_msvld_new }, + .pm = { 0x00000001, gt215_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nva8_chipset = { .name = "GT218", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = gt215_clk_new, - .devinit = gt215_devinit_new, - .fb = gt215_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = gt215_mc_new, - .mmu = g84_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .pmu = gt215_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .ce[0] = gt215_ce_new, - .disp = gt215_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = gt215_gr_new, - .mspdec = gt215_mspdec_new, - .msppp = gt215_msppp_new, - .msvld = gt215_msvld_new, - .pm = gt215_pm_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, gt215_clk_new }, + .devinit = { 0x00000001, gt215_devinit_new }, + .fb = { 0x00000001, gt215_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, gt215_mc_new }, + .mmu = { 0x00000001, g84_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .pmu = { 0x00000001, gt215_pmu_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .ce = { 0x00000001, gt215_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 
0x00000001, g84_fifo_new }, + .gr = { 0x00000001, gt215_gr_new }, + .mspdec = { 0x00000001, gt215_mspdec_new }, + .msppp = { 0x00000001, gt215_msppp_new }, + .msvld = { 0x00000001, gt215_msvld_new }, + .pm = { 0x00000001, gt215_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nvaa_chipset = { .name = "MCP77/MCP78", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = mcp77_clk_new, - .devinit = g98_devinit_new, - .fb = mcp77_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = g98_mc_new, - .mmu = mcp77_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = mcp77_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = gt200_gr_new, - .mspdec = g98_mspdec_new, - .msppp = g98_msppp_new, - .msvld = g98_msvld_new, - .pm = g84_pm_new, - .sec = g98_sec_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, mcp77_clk_new }, + .devinit = { 0x00000001, g98_devinit_new }, + .fb = { 0x00000001, mcp77_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g98_mc_new }, + .mmu = { 0x00000001, mcp77_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, mcp77_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, gt200_gr_new }, + .mspdec = { 0x00000001, g98_mspdec_new }, + .msppp = { 0x00000001, g98_msppp_new }, + .msvld = { 0x00000001, g98_msvld_new }, + .pm = { 0x00000001, g84_pm_new }, + .sec = { 0x00000001, g98_sec_new }, + .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nvac_chipset = { .name = "MCP79/MCP7A", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = mcp77_clk_new, - .devinit = g98_devinit_new, - .fb = mcp77_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = g98_mc_new, - .mmu = mcp77_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .therm = g84_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .disp = mcp77_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = mcp79_gr_new, - .mspdec = g98_mspdec_new, - .msppp = g98_msppp_new, - .msvld = g98_msvld_new, - .pm = g84_pm_new, - .sec = g98_sec_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, mcp77_clk_new }, + .devinit = { 0x00000001, g98_devinit_new }, + .fb = { 0x00000001, mcp77_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, g98_mc_new }, + .mmu = { 0x00000001, mcp77_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .therm = { 0x00000001, g84_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .disp = { 0x00000001, mcp77_disp_new }, + .dma = { 
0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, mcp79_gr_new }, + .mspdec = { 0x00000001, g98_mspdec_new }, + .msppp = { 0x00000001, g98_msppp_new }, + .msvld = { 0x00000001, g98_msvld_new }, + .pm = { 0x00000001, g84_pm_new }, + .sec = { 0x00000001, g98_sec_new }, + .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nvaf_chipset = { .name = "MCP89", - .bar = g84_bar_new, - .bios = nvkm_bios_new, - .bus = g94_bus_new, - .clk = gt215_clk_new, - .devinit = mcp89_devinit_new, - .fb = mcp89_fb_new, - .fuse = nv50_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .imem = nv50_instmem_new, - .mc = gt215_mc_new, - .mmu = mcp77_mmu_new, - .mxm = nv50_mxm_new, - .pci = g94_pci_new, - .pmu = gt215_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = nv40_volt_new, - .ce[0] = gt215_ce_new, - .disp = mcp89_disp_new, - .dma = nv50_dma_new, - .fifo = g84_fifo_new, - .gr = mcp89_gr_new, - .mspdec = gt215_mspdec_new, - .msppp = gt215_msppp_new, - .msvld = mcp89_msvld_new, - .pm = gt215_pm_new, - .sw = nv50_sw_new, + .bar = { 0x00000001, g84_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, g94_bus_new }, + .clk = { 0x00000001, gt215_clk_new }, + .devinit = { 0x00000001, mcp89_devinit_new }, + .fb = { 0x00000001, mcp89_fb_new }, + .fuse = { 0x00000001, nv50_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, gt215_mc_new }, + .mmu = { 0x00000001, mcp77_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, g94_pci_new }, + .pmu = { 0x00000001, gt215_pmu_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, nv40_volt_new }, + .ce = { 0x00000001, gt215_ce_new }, + .disp = { 0x00000001, mcp89_disp_new }, + .dma = { 0x00000001, nv50_dma_new }, + .fifo = { 0x00000001, g84_fifo_new }, + .gr = { 0x00000001, mcp89_gr_new }, + .mspdec = { 0x00000001, gt215_mspdec_new }, + .msppp = { 0x00000001, gt215_msppp_new }, + .msvld = { 0x00000001, mcp89_msvld_new }, + .pm = { 0x00000001, gt215_pm_new }, + .sw = { 0x00000001, nv50_sw_new }, }; static const struct nvkm_device_chip nvc0_chipset = { .name = "GF100", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .ibus = gf100_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf100_pci_new, - .pmu = gf100_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .ce[1] = gf100_ce_new, - .disp = gt215_disp_new, - .dma = gf100_dma_new, - .fifo = gf100_fifo_new, - .gr = gf100_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf100_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem 
= { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf100_pci_new }, + .pmu = { 0x00000001, gf100_pmu_new }, + .privring = { 0x00000001, gf100_privring_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000003, gf100_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, gf100_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf100_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf100_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvc1_chipset = { .name = "GF108", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf108_fb_new, - .fuse = gf100_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .ibus = gf100_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf106_pci_new, - .pmu = gf100_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .disp = gt215_disp_new, - .dma = gf100_dma_new, - .fifo = gf100_fifo_new, - .gr = gf108_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf108_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf108_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf106_pci_new }, + .pmu = { 0x00000001, gf100_pmu_new }, + .privring = { 0x00000001, gf100_privring_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000001, gf100_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, gf100_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf108_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf108_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvc3_chipset = { .name = "GF106", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .ibus = gf100_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf106_pci_new, - .pmu = gf100_pmu_new, - .therm = 
gt215_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .disp = gt215_disp_new, - .dma = gf100_dma_new, - .fifo = gf100_fifo_new, - .gr = gf104_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf100_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf106_pci_new }, + .pmu = { 0x00000001, gf100_pmu_new }, + .privring = { 0x00000001, gf100_privring_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000001, gf100_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, gf100_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf104_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf100_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvc4_chipset = { .name = "GF104", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .ibus = gf100_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf100_pci_new, - .pmu = gf100_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .ce[1] = gf100_ce_new, - .disp = gt215_disp_new, - .dma = gf100_dma_new, - .fifo = gf100_fifo_new, - .gr = gf104_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf100_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf100_pci_new }, + .pmu = { 0x00000001, gf100_pmu_new }, + .privring = { 0x00000001, gf100_privring_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000003, gf100_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, gf100_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 
0x00000001, gf104_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf100_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvc8_chipset = { .name = "GF110", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .ibus = gf100_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf100_pci_new, - .pmu = gf100_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .ce[1] = gf100_ce_new, - .disp = gt215_disp_new, - .dma = gf100_dma_new, - .fifo = gf100_fifo_new, - .gr = gf110_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf100_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf100_pci_new }, + .pmu = { 0x00000001, gf100_pmu_new }, + .privring = { 0x00000001, gf100_privring_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000003, gf100_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, gf100_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf110_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf100_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvce_chipset = { .name = "GF114", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .ibus = gf100_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf100_pci_new, - .pmu = gf100_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .ce[1] = gf100_ce_new, - .disp = gt215_disp_new, - .dma = gf100_dma_new, - .fifo = gf100_fifo_new, - .gr = gf104_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf100_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 
0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf100_pci_new }, + .pmu = { 0x00000001, gf100_pmu_new }, + .privring = { 0x00000001, gf100_privring_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000003, gf100_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, gf100_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf104_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf100_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvcf_chipset = { .name = "GF116", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = g94_gpio_new, - .i2c = g94_i2c_new, - .ibus = gf100_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf106_pci_new, - .pmu = gf100_pmu_new, - .therm = gt215_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .disp = gt215_disp_new, - .dma = gf100_dma_new, - .fifo = gf100_fifo_new, - .gr = gf104_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf100_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, g94_gpio_new }, + .i2c = { 0x00000001, g94_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf106_pci_new }, + .pmu = { 0x00000001, gf100_pmu_new }, + .privring = { 0x00000001, gf100_privring_new }, + .therm = { 0x00000001, gt215_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000001, gf100_ce_new }, + .disp = { 0x00000001, gt215_disp_new }, + .dma = { 0x00000001, gf100_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf104_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf100_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvd7_chipset = { .name = "GF117", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = gf119_gpio_new, - .i2c = gf117_i2c_new, - .ibus = gf117_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = 
nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf106_pci_new, - .therm = gf119_therm_new, - .timer = nv41_timer_new, - .volt = gf117_volt_new, - .ce[0] = gf100_ce_new, - .disp = gf119_disp_new, - .dma = gf119_dma_new, - .fifo = gf100_fifo_new, - .gr = gf117_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf117_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gf119_gpio_new }, + .i2c = { 0x00000001, gf117_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf106_pci_new }, + .privring = { 0x00000001, gf117_privring_new }, + .therm = { 0x00000001, gf119_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf117_volt_new }, + .ce = { 0x00000001, gf100_ce_new }, + .disp = { 0x00000001, gf119_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf117_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf117_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvd9_chipset = { .name = "GF119", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gf100_clk_new, - .devinit = gf100_devinit_new, - .fb = gf100_fb_new, - .fuse = gf100_fuse_new, - .gpio = gf119_gpio_new, - .i2c = gf119_i2c_new, - .ibus = gf117_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gf100_ltc_new, - .mc = gf100_mc_new, - .mmu = gf100_mmu_new, - .mxm = nv50_mxm_new, - .pci = gf106_pci_new, - .pmu = gf119_pmu_new, - .therm = gf119_therm_new, - .timer = nv41_timer_new, - .volt = gf100_volt_new, - .ce[0] = gf100_ce_new, - .disp = gf119_disp_new, - .dma = gf119_dma_new, - .fifo = gf100_fifo_new, - .gr = gf119_gr_new, - .mspdec = gf100_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gf100_msvld_new, - .pm = gf117_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gf100_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gf100_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gf119_gpio_new }, + .i2c = { 0x00000001, gf119_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gf100_ltc_new }, + .mc = { 0x00000001, gf100_mc_new }, + .mmu = { 0x00000001, gf100_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gf106_pci_new }, + .pmu = { 0x00000001, gf119_pmu_new }, + .privring = { 0x00000001, gf117_privring_new }, + .therm = { 0x00000001, gf119_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .volt = { 0x00000001, gf100_volt_new }, + .ce = { 0x00000001, gf100_ce_new }, + .disp = { 0x00000001, gf119_disp_new }, + .dma = 
{ 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gf100_fifo_new }, + .gr = { 0x00000001, gf119_gr_new }, + .mspdec = { 0x00000001, gf100_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gf100_msvld_new }, + .pm = { 0x00000001, gf117_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nve4_chipset = { .name = "GK104", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gf100_devinit_new, - .fb = gk104_fb_new, - .fuse = gf100_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk104_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk104_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gk104_pmu_new, - .therm = gk104_therm_new, - .timer = nv41_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gk104_ce_new, - .ce[1] = gk104_ce_new, - .ce[2] = gk104_ce_new, - .disp = gk104_disp_new, - .dma = gf119_dma_new, - .fifo = gk104_fifo_new, - .gr = gk104_gr_new, - .mspdec = gk104_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gk104_msvld_new, - .pm = gk104_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gk104_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk104_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk104_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gk104_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gk104_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gk104_ce_new }, + .disp = { 0x00000001, gk104_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk104_fifo_new }, + .gr = { 0x00000001, gk104_gr_new }, + .mspdec = { 0x00000001, gk104_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gk104_msvld_new }, + .pm = { 0x00000001, gk104_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nve6_chipset = { .name = "GK106", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gf100_devinit_new, - .fb = gk104_fb_new, - .fuse = gf100_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk104_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk104_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gk104_pmu_new, - .therm = gk104_therm_new, - .timer = nv41_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gk104_ce_new, - .ce[1] = gk104_ce_new, - .ce[2] = gk104_ce_new, - .disp = gk104_disp_new, - .dma = gf119_dma_new, - .fifo = gk104_fifo_new, - .gr = gk104_gr_new, - .mspdec = gk104_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gk104_msvld_new, - .pm = gk104_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, 
gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gk104_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk104_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk104_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gk104_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gk104_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gk104_ce_new }, + .disp = { 0x00000001, gk104_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk104_fifo_new }, + .gr = { 0x00000001, gk104_gr_new }, + .mspdec = { 0x00000001, gk104_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gk104_msvld_new }, + .pm = { 0x00000001, gk104_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nve7_chipset = { .name = "GK107", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gf100_devinit_new, - .fb = gk104_fb_new, - .fuse = gf100_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk104_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk104_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gk104_pmu_new, - .therm = gk104_therm_new, - .timer = nv41_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gk104_ce_new, - .ce[1] = gk104_ce_new, - .ce[2] = gk104_ce_new, - .disp = gk104_disp_new, - .dma = gf119_dma_new, - .fifo = gk104_fifo_new, - .gr = gk104_gr_new, - .mspdec = gk104_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gk104_msvld_new, - .pm = gk104_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gk104_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk104_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk104_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gk104_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gk104_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gk104_ce_new }, + .disp = { 0x00000001, gk104_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk104_fifo_new }, + .gr = { 0x00000001, gk104_gr_new }, + .mspdec = { 0x00000001, gk104_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gk104_msvld_new }, + .pm = { 0x00000001, gk104_pm_new }, + .sw = { 
0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvea_chipset = { .name = "GK20A", - .bar = gk20a_bar_new, - .bus = gf100_bus_new, - .clk = gk20a_clk_new, - .fb = gk20a_fb_new, - .fuse = gf100_fuse_new, - .ibus = gk20a_ibus_new, - .imem = gk20a_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk20a_mc_new, - .mmu = gk20a_mmu_new, - .pmu = gk20a_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .volt = gk20a_volt_new, - .ce[2] = gk104_ce_new, - .dma = gf119_dma_new, - .fifo = gk20a_fifo_new, - .gr = gk20a_gr_new, - .pm = gk104_pm_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gk20a_bar_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk20a_clk_new }, + .fb = { 0x00000001, gk20a_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .imem = { 0x00000001, gk20a_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gk20a_mmu_new }, + .pmu = { 0x00000001, gk20a_pmu_new }, + .privring = { 0x00000001, gk20a_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk20a_volt_new }, + .ce = { 0x00000004, gk104_ce_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk20a_fifo_new }, + .gr = { 0x00000001, gk20a_gr_new }, + .pm = { 0x00000001, gk104_pm_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvf0_chipset = { .name = "GK110", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gf100_devinit_new, - .fb = gk110_fb_new, - .fuse = gf100_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk110_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk104_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gk110_pmu_new, - .therm = gk104_therm_new, - .timer = nv41_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gk104_ce_new, - .ce[1] = gk104_ce_new, - .ce[2] = gk104_ce_new, - .disp = gk110_disp_new, - .dma = gf119_dma_new, - .fifo = gk110_fifo_new, - .gr = gk110_gr_new, - .mspdec = gk104_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gk104_msvld_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gk110_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk110_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk104_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gk110_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gk104_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gk104_ce_new }, + .disp = { 0x00000001, gk110_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk110_fifo_new }, + .gr = { 0x00000001, gk110_gr_new }, + .mspdec = { 0x00000001, gk104_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, 
gk104_msvld_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nvf1_chipset = { .name = "GK110B", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gf100_devinit_new, - .fb = gk110_fb_new, - .fuse = gf100_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk110_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk104_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gk110_pmu_new, - .therm = gk104_therm_new, - .timer = nv41_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gk104_ce_new, - .ce[1] = gk104_ce_new, - .ce[2] = gk104_ce_new, - .disp = gk110_disp_new, - .dma = gf119_dma_new, - .fifo = gk110_fifo_new, - .gr = gk110b_gr_new, - .mspdec = gk104_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gk104_msvld_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gk110_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk110_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk104_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gk110_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gk104_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gk104_ce_new }, + .disp = { 0x00000001, gk110_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk110_fifo_new }, + .gr = { 0x00000001, gk110b_gr_new }, + .mspdec = { 0x00000001, gk104_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gk104_msvld_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv106_chipset = { .name = "GK208B", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gf100_devinit_new, - .fb = gk110_fb_new, - .fuse = gf100_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk110_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk20a_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gk208_pmu_new, - .therm = gk104_therm_new, - .timer = nv41_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gk104_ce_new, - .ce[1] = gk104_ce_new, - .ce[2] = gk104_ce_new, - .disp = gk110_disp_new, - .dma = gf119_dma_new, - .fifo = gk208_fifo_new, - .gr = gk208_gr_new, - .mspdec = gk104_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gk104_msvld_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gk110_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk110_i2c_new 
}, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gk208_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gk104_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gk104_ce_new }, + .disp = { 0x00000001, gk110_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk208_fifo_new }, + .gr = { 0x00000001, gk208_gr_new }, + .mspdec = { 0x00000001, gk104_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gk104_msvld_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv108_chipset = { .name = "GK208", - .bar = gf100_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gf100_devinit_new, - .fb = gk110_fb_new, - .fuse = gf100_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk110_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gk104_ltc_new, - .mc = gk20a_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gk208_pmu_new, - .therm = gk104_therm_new, - .timer = nv41_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gk104_ce_new, - .ce[1] = gk104_ce_new, - .ce[2] = gk104_ce_new, - .disp = gk110_disp_new, - .dma = gf119_dma_new, - .fifo = gk208_fifo_new, - .gr = gk208_gr_new, - .mspdec = gk104_mspdec_new, - .msppp = gf100_msppp_new, - .msvld = gk104_msvld_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gf100_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gf100_devinit_new }, + .fb = { 0x00000001, gk110_fb_new }, + .fuse = { 0x00000001, gf100_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk110_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gk104_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gk208_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gk104_therm_new }, + .timer = { 0x00000001, nv41_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gk104_ce_new }, + .disp = { 0x00000001, gk110_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gk208_fifo_new }, + .gr = { 0x00000001, gk208_gr_new }, + .mspdec = { 0x00000001, gk104_mspdec_new }, + .msppp = { 0x00000001, gf100_msppp_new }, + .msvld = { 0x00000001, gk104_msvld_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv117_chipset = { .name = "GM107", - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gm107_devinit_new, - .fb = gm107_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk110_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gm107_ltc_new, - .mc 
= gk20a_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gm107_pmu_new, - .therm = gm107_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gm107_ce_new, - .ce[2] = gm107_ce_new, - .disp = gm107_disp_new, - .dma = gf119_dma_new, - .fifo = gm107_fifo_new, - .gr = gm107_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gm107_devinit_new }, + .fb = { 0x00000001, gm107_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk110_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gm107_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gm107_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gm107_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000005, gm107_ce_new }, + .disp = { 0x00000001, gm107_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gm107_fifo_new }, + .gr = { 0x00000001, gm107_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000001, gm107_nvenc_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv118_chipset = { .name = "GM108", - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .clk = gk104_clk_new, - .devinit = gm107_devinit_new, - .fb = gm107_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gk110_i2c_new, - .ibus = gk104_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gm107_ltc_new, - .mc = gk20a_mc_new, - .mmu = gk104_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gm107_pmu_new, - .therm = gm107_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gm107_ce_new, - .ce[2] = gm107_ce_new, - .disp = gm107_disp_new, - .dma = gf119_dma_new, - .fifo = gm107_fifo_new, - .gr = gm107_gr_new, - .sw = gf100_sw_new, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gk104_clk_new }, + .devinit = { 0x00000001, gm107_devinit_new }, + .fb = { 0x00000001, gm107_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gk110_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gm107_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gk104_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gm107_pmu_new }, + .privring = { 0x00000001, gk104_privring_new }, + .therm = { 0x00000001, gm107_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000005, gm107_ce_new }, + .disp = { 0x00000001, gm107_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo 
= { 0x00000001, gm107_fifo_new }, + .gr = { 0x00000001, gm107_gr_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv120_chipset = { .name = "GM200", - .acr = gm200_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fb = gm200_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gm200_ltc_new, - .mc = gk20a_mc_new, - .mmu = gm200_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gm200_pmu_new, - .therm = gm200_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gm200_ce_new, - .ce[1] = gm200_ce_new, - .ce[2] = gm200_ce_new, - .disp = gm200_disp_new, - .dma = gf119_dma_new, - .fifo = gm200_fifo_new, - .gr = gm200_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .nvenc[1] = gm107_nvenc_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gm200_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fb = { 0x00000001, gm200_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gm200_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gm200_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gm200_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gm200_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gm200_ce_new }, + .disp = { 0x00000001, gm200_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gm200_fifo_new }, + .gr = { 0x00000001, gm200_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000003, gm107_nvenc_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv124_chipset = { .name = "GM204", - .acr = gm200_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fb = gm200_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gm200_ltc_new, - .mc = gk20a_mc_new, - .mmu = gm200_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gm200_pmu_new, - .therm = gm200_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gm200_ce_new, - .ce[1] = gm200_ce_new, - .ce[2] = gm200_ce_new, - .disp = gm200_disp_new, - .dma = gf119_dma_new, - .fifo = gm200_fifo_new, - .gr = gm200_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .nvenc[1] = gm107_nvenc_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gm200_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fb = { 0x00000001, gm200_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 
0x00000001, gm200_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gm200_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gm200_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gm200_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gm200_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gm200_ce_new }, + .disp = { 0x00000001, gm200_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gm200_fifo_new }, + .gr = { 0x00000001, gm200_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000003, gm107_nvenc_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv126_chipset = { .name = "GM206", - .acr = gm200_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fb = gm200_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .iccsense = gf100_iccsense_new, - .imem = nv50_instmem_new, - .ltc = gm200_ltc_new, - .mc = gk20a_mc_new, - .mmu = gm200_mmu_new, - .mxm = nv50_mxm_new, - .pci = gk104_pci_new, - .pmu = gm200_pmu_new, - .therm = gm200_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .volt = gk104_volt_new, - .ce[0] = gm200_ce_new, - .ce[1] = gm200_ce_new, - .ce[2] = gm200_ce_new, - .disp = gm200_disp_new, - .dma = gf119_dma_new, - .fifo = gm200_fifo_new, - .gr = gm200_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gm200_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fb = { 0x00000001, gm200_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .iccsense = { 0x00000001, gf100_iccsense_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gm200_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gm200_mmu_new }, + .mxm = { 0x00000001, nv50_mxm_new }, + .pci = { 0x00000001, gk104_pci_new }, + .pmu = { 0x00000001, gm200_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gm200_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gk104_volt_new }, + .ce = { 0x00000007, gm200_ce_new }, + .disp = { 0x00000001, gm200_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gm200_fifo_new }, + .gr = { 0x00000001, gm200_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000001, gm107_nvenc_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv12b_chipset = { .name = "GM20B", - .acr = gm20b_acr_new, - .bar = gm20b_bar_new, - .bus = gf100_bus_new, - .clk = gm20b_clk_new, - .fb = gm20b_fb_new, - .fuse = gm107_fuse_new, - .ibus = gk20a_ibus_new, - .imem = gk20a_instmem_new, - .ltc = gm200_ltc_new, - .mc = gk20a_mc_new, - .mmu = gm20b_mmu_new, - .pmu = gm20b_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[2] = gm200_ce_new, - .volt = gm20b_volt_new, - .dma = gf119_dma_new, - .fifo = 
gm20b_fifo_new, - .gr = gm20b_gr_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gm20b_acr_new }, + .bar = { 0x00000001, gm20b_bar_new }, + .bus = { 0x00000001, gf100_bus_new }, + .clk = { 0x00000001, gm20b_clk_new }, + .fb = { 0x00000001, gm20b_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .imem = { 0x00000001, gk20a_instmem_new }, + .ltc = { 0x00000001, gm200_ltc_new }, + .mc = { 0x00000001, gk20a_mc_new }, + .mmu = { 0x00000001, gm20b_mmu_new }, + .pmu = { 0x00000001, gm20b_pmu_new }, + .privring = { 0x00000001, gk20a_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .volt = { 0x00000001, gm20b_volt_new }, + .ce = { 0x00000004, gm200_ce_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gm20b_fifo_new }, + .gr = { 0x00000001, gm20b_gr_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv130_chipset = { .name = "GP100", - .acr = gm200_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fault = gp100_fault_new, - .fb = gp100_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp100_ltc_new, - .mc = gp100_mc_new, - .mmu = gp100_mmu_new, - .therm = gp100_therm_new, - .pci = gp100_pci_new, - .pmu = gm200_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = gp100_ce_new, - .ce[1] = gp100_ce_new, - .ce[2] = gp100_ce_new, - .ce[3] = gp100_ce_new, - .ce[4] = gp100_ce_new, - .ce[5] = gp100_ce_new, - .dma = gf119_dma_new, - .disp = gp100_disp_new, - .fifo = gp100_fifo_new, - .gr = gp100_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .nvenc[1] = gm107_nvenc_new, - .nvenc[2] = gm107_nvenc_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gm200_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fault = { 0x00000001, gp100_fault_new }, + .fb = { 0x00000001, gp100_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp100_ltc_new }, + .mc = { 0x00000001, gp100_mc_new }, + .mmu = { 0x00000001, gp100_mmu_new }, + .therm = { 0x00000001, gp100_therm_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gm200_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000003f, gp100_ce_new }, + .dma = { 0x00000001, gf119_dma_new }, + .disp = { 0x00000001, gp100_disp_new }, + .fifo = { 0x00000001, gp100_fifo_new }, + .gr = { 0x00000001, gp100_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000007, gm107_nvenc_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv132_chipset = { .name = "GP102", - .acr = gp102_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fault = gp100_fault_new, - .fb = gp102_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = gp100_mc_new, - .mmu = gp100_mmu_new, - .therm = gp100_therm_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .timer = 
gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = gp102_ce_new, - .ce[1] = gp102_ce_new, - .ce[2] = gp102_ce_new, - .ce[3] = gp102_ce_new, - .disp = gp102_disp_new, - .dma = gf119_dma_new, - .fifo = gp100_fifo_new, - .gr = gp102_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .nvenc[1] = gm107_nvenc_new, - .sec2 = gp102_sec2_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gp102_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fault = { 0x00000001, gp100_fault_new }, + .fb = { 0x00000001, gp102_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, gp100_mc_new }, + .mmu = { 0x00000001, gp100_mmu_new }, + .therm = { 0x00000001, gp100_therm_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000000f, gp102_ce_new }, + .disp = { 0x00000001, gp102_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gp100_fifo_new }, + .gr = { 0x00000001, gp102_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000003, gm107_nvenc_new }, + .sec2 = { 0x00000001, gp102_sec2_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv134_chipset = { .name = "GP104", - .acr = gp102_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fault = gp100_fault_new, - .fb = gp102_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = gp100_mc_new, - .mmu = gp100_mmu_new, - .therm = gp100_therm_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = gp102_ce_new, - .ce[1] = gp102_ce_new, - .ce[2] = gp102_ce_new, - .ce[3] = gp102_ce_new, - .disp = gp102_disp_new, - .dma = gf119_dma_new, - .fifo = gp100_fifo_new, - .gr = gp104_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .nvenc[1] = gm107_nvenc_new, - .sec2 = gp102_sec2_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gp102_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fault = { 0x00000001, gp100_fault_new }, + .fb = { 0x00000001, gp102_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, gp100_mc_new }, + .mmu = { 0x00000001, gp100_mmu_new }, + .therm = { 0x00000001, gp100_therm_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000000f, gp102_ce_new }, + .disp = { 0x00000001, gp102_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gp100_fifo_new }, + .gr = { 0x00000001, gp104_gr_new }, + .nvdec = { 
0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000003, gm107_nvenc_new }, + .sec2 = { 0x00000001, gp102_sec2_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv136_chipset = { .name = "GP106", - .acr = gp102_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fault = gp100_fault_new, - .fb = gp102_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = gp100_mc_new, - .mmu = gp100_mmu_new, - .therm = gp100_therm_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = gp102_ce_new, - .ce[1] = gp102_ce_new, - .ce[2] = gp102_ce_new, - .ce[3] = gp102_ce_new, - .disp = gp102_disp_new, - .dma = gf119_dma_new, - .fifo = gp100_fifo_new, - .gr = gp104_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sec2 = gp102_sec2_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gp102_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fault = { 0x00000001, gp100_fault_new }, + .fb = { 0x00000001, gp102_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, gp100_mc_new }, + .mmu = { 0x00000001, gp100_mmu_new }, + .therm = { 0x00000001, gp100_therm_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000000f, gp102_ce_new }, + .disp = { 0x00000001, gp102_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gp100_fifo_new }, + .gr = { 0x00000001, gp104_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000001, gm107_nvenc_new }, + .sec2 = { 0x00000001, gp102_sec2_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv137_chipset = { .name = "GP107", - .acr = gp102_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fault = gp100_fault_new, - .fb = gp102_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = gp100_mc_new, - .mmu = gp100_mmu_new, - .therm = gp100_therm_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = gp102_ce_new, - .ce[1] = gp102_ce_new, - .ce[2] = gp102_ce_new, - .ce[3] = gp102_ce_new, - .disp = gp102_disp_new, - .dma = gf119_dma_new, - .fifo = gp100_fifo_new, - .gr = gp107_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .nvenc[1] = gm107_nvenc_new, - .sec2 = gp102_sec2_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gp102_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fault = { 0x00000001, gp100_fault_new }, + .fb = { 0x00000001, gp102_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 
0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, gp100_mc_new }, + .mmu = { 0x00000001, gp100_mmu_new }, + .therm = { 0x00000001, gp100_therm_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000000f, gp102_ce_new }, + .disp = { 0x00000001, gp102_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gp100_fifo_new }, + .gr = { 0x00000001, gp107_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000003, gm107_nvenc_new }, + .sec2 = { 0x00000001, gp102_sec2_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv138_chipset = { .name = "GP108", - .acr = gp108_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gm200_devinit_new, - .fault = gp100_fault_new, - .fb = gp102_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = gp100_mc_new, - .mmu = gp100_mmu_new, - .therm = gp100_therm_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = gp102_ce_new, - .ce[1] = gp102_ce_new, - .ce[2] = gp102_ce_new, - .ce[3] = gp102_ce_new, - .disp = gp102_disp_new, - .dma = gf119_dma_new, - .fifo = gp100_fifo_new, - .gr = gp108_gr_new, - .nvdec[0] = gm107_nvdec_new, - .sec2 = gp108_sec2_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gp108_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gm200_devinit_new }, + .fault = { 0x00000001, gp100_fault_new }, + .fb = { 0x00000001, gp102_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, gp100_mc_new }, + .mmu = { 0x00000001, gp100_mmu_new }, + .therm = { 0x00000001, gp100_therm_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000000f, gp102_ce_new }, + .disp = { 0x00000001, gp102_disp_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gp100_fifo_new }, + .gr = { 0x00000001, gp108_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .sec2 = { 0x00000001, gp108_sec2_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv13b_chipset = { .name = "GP10B", - .acr = gp10b_acr_new, - .bar = gm20b_bar_new, - .bus = gf100_bus_new, - .fault = gp10b_fault_new, - .fb = gp10b_fb_new, - .fuse = gm107_fuse_new, - .ibus = gp10b_ibus_new, - .imem = gk20a_instmem_new, - .ltc = gp10b_ltc_new, - .mc = gp10b_mc_new, - .mmu = gp10b_mmu_new, - .pmu = gp10b_pmu_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = gp100_ce_new, - .dma = gf119_dma_new, - .fifo = gp10b_fifo_new, - .gr = gp10b_gr_new, - .sw = gf100_sw_new, + .acr = { 0x00000001, gp10b_acr_new }, + .bar = { 0x00000001, gm20b_bar_new }, + .bus = { 0x00000001, gf100_bus_new }, + .fault = { 0x00000001, gp10b_fault_new }, + .fb = { 
0x00000001, gp10b_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .imem = { 0x00000001, gk20a_instmem_new }, + .ltc = { 0x00000001, gp10b_ltc_new }, + .mc = { 0x00000001, gp10b_mc_new }, + .mmu = { 0x00000001, gp10b_mmu_new }, + .pmu = { 0x00000001, gp10b_pmu_new }, + .privring = { 0x00000001, gp10b_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x00000001, gp100_ce_new }, + .dma = { 0x00000001, gf119_dma_new }, + .fifo = { 0x00000001, gp10b_fifo_new }, + .gr = { 0x00000001, gp10b_gr_new }, + .sw = { 0x00000001, gf100_sw_new }, }; static const struct nvkm_device_chip nv140_chipset = { .name = "GV100", - .acr = gp108_acr_new, - .bar = gm107_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = gv100_devinit_new, - .fault = gv100_fault_new, - .fb = gv100_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .gsp = gv100_gsp_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = gp100_mc_new, - .mmu = gv100_mmu_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .therm = gp100_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .disp = gv100_disp_new, - .ce[0] = gv100_ce_new, - .ce[1] = gv100_ce_new, - .ce[2] = gv100_ce_new, - .ce[3] = gv100_ce_new, - .ce[4] = gv100_ce_new, - .ce[5] = gv100_ce_new, - .ce[6] = gv100_ce_new, - .ce[7] = gv100_ce_new, - .ce[8] = gv100_ce_new, - .dma = gv100_dma_new, - .fifo = gv100_fifo_new, - .gr = gv100_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .nvenc[1] = gm107_nvenc_new, - .nvenc[2] = gm107_nvenc_new, - .sec2 = gp108_sec2_new, + .acr = { 0x00000001, gp108_acr_new }, + .bar = { 0x00000001, gm107_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, gv100_devinit_new }, + .fault = { 0x00000001, gv100_fault_new }, + .fb = { 0x00000001, gv100_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .gsp = { 0x00000001, gv100_gsp_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, gp100_mc_new }, + .mmu = { 0x00000001, gv100_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gp100_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x000001ff, gv100_ce_new }, + .disp = { 0x00000001, gv100_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, gv100_fifo_new }, + .gr = { 0x00000001, gv100_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000007, gm107_nvenc_new }, + .sec2 = { 0x00000001, gp108_sec2_new }, }; static const struct nvkm_device_chip nv162_chipset = { .name = "TU102", - .acr = tu102_acr_new, - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = tu102_devinit_new, - .fault = tu102_fault_new, - .fb = gv100_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .gsp = gv100_gsp_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = tu102_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .therm = gp100_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = tu102_ce_new, - .ce[1] = tu102_ce_new, - .ce[2] = 
tu102_ce_new, - .ce[3] = tu102_ce_new, - .ce[4] = tu102_ce_new, - .disp = tu102_disp_new, - .dma = gv100_dma_new, - .fifo = tu102_fifo_new, - .gr = tu102_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sec2 = tu102_sec2_new, + .acr = { 0x00000001, tu102_acr_new }, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, tu102_devinit_new }, + .fault = { 0x00000001, tu102_fault_new }, + .fb = { 0x00000001, gv100_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .gsp = { 0x00000001, gv100_gsp_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, tu102_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gp100_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000001f, tu102_ce_new }, + .disp = { 0x00000001, tu102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, tu102_fifo_new }, + .gr = { 0x00000001, tu102_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000001, gm107_nvenc_new }, + .sec2 = { 0x00000001, tu102_sec2_new }, }; static const struct nvkm_device_chip nv164_chipset = { .name = "TU104", - .acr = tu102_acr_new, - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = tu102_devinit_new, - .fault = tu102_fault_new, - .fb = gv100_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .gsp = gv100_gsp_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = tu102_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .therm = gp100_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = tu102_ce_new, - .ce[1] = tu102_ce_new, - .ce[2] = tu102_ce_new, - .ce[3] = tu102_ce_new, - .ce[4] = tu102_ce_new, - .disp = tu102_disp_new, - .dma = gv100_dma_new, - .fifo = tu102_fifo_new, - .gr = tu102_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvdec[1] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sec2 = tu102_sec2_new, + .acr = { 0x00000001, tu102_acr_new }, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, tu102_devinit_new }, + .fault = { 0x00000001, tu102_fault_new }, + .fb = { 0x00000001, gv100_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .gsp = { 0x00000001, gv100_gsp_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, tu102_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gp100_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000001f, tu102_ce_new }, + .disp = { 0x00000001, tu102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, tu102_fifo_new }, + .gr = { 0x00000001, tu102_gr_new }, + .nvdec = { 0x00000003, gm107_nvdec_new }, + .nvenc = { 0x00000001, 
gm107_nvenc_new }, + .sec2 = { 0x00000001, tu102_sec2_new }, }; static const struct nvkm_device_chip nv166_chipset = { .name = "TU106", - .acr = tu102_acr_new, - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = tu102_devinit_new, - .fault = tu102_fault_new, - .fb = gv100_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .gsp = gv100_gsp_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = tu102_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .therm = gp100_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = tu102_ce_new, - .ce[1] = tu102_ce_new, - .ce[2] = tu102_ce_new, - .ce[3] = tu102_ce_new, - .ce[4] = tu102_ce_new, - .disp = tu102_disp_new, - .dma = gv100_dma_new, - .fifo = tu102_fifo_new, - .gr = tu102_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvdec[1] = gm107_nvdec_new, - .nvdec[2] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sec2 = tu102_sec2_new, + .acr = { 0x00000001, tu102_acr_new }, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, tu102_devinit_new }, + .fault = { 0x00000001, tu102_fault_new }, + .fb = { 0x00000001, gv100_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .gsp = { 0x00000001, gv100_gsp_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, tu102_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gp100_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000001f, tu102_ce_new }, + .disp = { 0x00000001, tu102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, tu102_fifo_new }, + .gr = { 0x00000001, tu102_gr_new }, + .nvdec = { 0x00000007, gm107_nvdec_new }, + .nvenc = { 0x00000001, gm107_nvenc_new }, + .sec2 = { 0x00000001, tu102_sec2_new }, }; static const struct nvkm_device_chip nv167_chipset = { .name = "TU117", - .acr = tu102_acr_new, - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = tu102_devinit_new, - .fault = tu102_fault_new, - .fb = gv100_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .gsp = gv100_gsp_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = tu102_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .therm = gp100_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = tu102_ce_new, - .ce[1] = tu102_ce_new, - .ce[2] = tu102_ce_new, - .ce[3] = tu102_ce_new, - .ce[4] = tu102_ce_new, - .disp = tu102_disp_new, - .dma = gv100_dma_new, - .fifo = tu102_fifo_new, - .gr = tu102_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sec2 = tu102_sec2_new, + .acr = { 0x00000001, tu102_acr_new }, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, tu102_devinit_new }, + .fault = { 0x00000001, tu102_fault_new }, + .fb = { 0x00000001, gv100_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .gsp = { 
0x00000001, gv100_gsp_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, tu102_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gp100_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000001f, tu102_ce_new }, + .disp = { 0x00000001, tu102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, tu102_fifo_new }, + .gr = { 0x00000001, tu102_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000001, gm107_nvenc_new }, + .sec2 = { 0x00000001, tu102_sec2_new }, }; static const struct nvkm_device_chip nv168_chipset = { .name = "TU116", - .acr = tu102_acr_new, - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .bus = gf100_bus_new, - .devinit = tu102_devinit_new, - .fault = tu102_fault_new, - .fb = gv100_fb_new, - .fuse = gm107_fuse_new, - .gpio = gk104_gpio_new, - .gsp = gv100_gsp_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .ltc = gp102_ltc_new, - .mc = tu102_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .pmu = gp102_pmu_new, - .therm = gp100_therm_new, - .timer = gk20a_timer_new, - .top = gk104_top_new, - .ce[0] = tu102_ce_new, - .ce[1] = tu102_ce_new, - .ce[2] = tu102_ce_new, - .ce[3] = tu102_ce_new, - .ce[4] = tu102_ce_new, - .disp = tu102_disp_new, - .dma = gv100_dma_new, - .fifo = tu102_fifo_new, - .gr = tu102_gr_new, - .nvdec[0] = gm107_nvdec_new, - .nvenc[0] = gm107_nvenc_new, - .sec2 = tu102_sec2_new, + .acr = { 0x00000001, tu102_acr_new }, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .bus = { 0x00000001, gf100_bus_new }, + .devinit = { 0x00000001, tu102_devinit_new }, + .fault = { 0x00000001, tu102_fault_new }, + .fb = { 0x00000001, gv100_fb_new }, + .fuse = { 0x00000001, gm107_fuse_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .gsp = { 0x00000001, gv100_gsp_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .ltc = { 0x00000001, gp102_ltc_new }, + .mc = { 0x00000001, tu102_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .pmu = { 0x00000001, gp102_pmu_new }, + .privring = { 0x00000001, gm200_privring_new }, + .therm = { 0x00000001, gp100_therm_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, gk104_top_new }, + .ce = { 0x0000001f, tu102_ce_new }, + .disp = { 0x00000001, tu102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, tu102_fifo_new }, + .gr = { 0x00000001, tu102_gr_new }, + .nvdec = { 0x00000001, gm107_nvdec_new }, + .nvenc = { 0x00000001, gm107_nvenc_new }, + .sec2 = { 0x00000001, tu102_sec2_new }, }; static const struct nvkm_device_chip nv170_chipset = { .name = "GA100", - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .devinit = ga100_devinit_new, - .fb = ga100_fb_new, - .gpio = gk104_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .mc = ga100_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .timer = gk20a_timer_new, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga100_fb_new }, + .gpio = { 0x00000001, gk104_gpio_new }, + .i2c = { 0x00000001, 
gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, }; static const struct nvkm_device_chip nv172_chipset = { .name = "GA102", - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .devinit = ga100_devinit_new, - .fb = ga102_fb_new, - .gpio = ga102_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .mc = ga100_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .timer = gk20a_timer_new, - .disp = ga102_disp_new, - .dma = gv100_dma_new, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, }; static const struct nvkm_device_chip nv174_chipset = { .name = "GA104", - .bar = tu102_bar_new, - .bios = nvkm_bios_new, - .devinit = ga100_devinit_new, - .fb = ga102_fb_new, - .gpio = ga102_gpio_new, - .i2c = gm200_i2c_new, - .ibus = gm200_ibus_new, - .imem = nv50_instmem_new, - .mc = ga100_mc_new, - .mmu = tu102_mmu_new, - .pci = gp100_pci_new, - .timer = gk20a_timer_new, - .disp = ga102_disp_new, - .dma = gv100_dma_new, + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, }; static int @@ -2726,97 +2643,24 @@ nvkm_device_event_func = { }; struct nvkm_subdev * -nvkm_device_subdev(struct nvkm_device *device, int index) +nvkm_device_subdev(struct nvkm_device *device, int type, int inst) { - struct nvkm_engine *engine; - - if (device->disable_mask & (1ULL << index)) - return NULL; - - switch (index) { -#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break - _(ACR , device->acr , &device->acr->subdev); - _(BAR , device->bar , &device->bar->subdev); - _(VBIOS , device->bios , &device->bios->subdev); - _(BUS , device->bus , &device->bus->subdev); - _(CLK , device->clk , &device->clk->subdev); - _(DEVINIT , device->devinit , &device->devinit->subdev); - _(FAULT , device->fault , &device->fault->subdev); - _(FB , device->fb , &device->fb->subdev); - _(FUSE , device->fuse , &device->fuse->subdev); - _(GPIO , device->gpio , &device->gpio->subdev); - _(GSP , device->gsp , &device->gsp->subdev); - _(I2C , device->i2c , &device->i2c->subdev); - _(IBUS , device->ibus , device->ibus); - _(ICCSENSE, device->iccsense, &device->iccsense->subdev); - _(INSTMEM , device->imem , 
&device->imem->subdev); - _(LTC , device->ltc , &device->ltc->subdev); - _(MC , device->mc , &device->mc->subdev); - _(MMU , device->mmu , &device->mmu->subdev); - _(MXM , device->mxm , device->mxm); - _(PCI , device->pci , &device->pci->subdev); - _(PMU , device->pmu , &device->pmu->subdev); - _(THERM , device->therm , &device->therm->subdev); - _(TIMER , device->timer , &device->timer->subdev); - _(TOP , device->top , &device->top->subdev); - _(VOLT , device->volt , &device->volt->subdev); -#undef _ - default: - engine = nvkm_device_engine(device, index); - if (engine) - return &engine->subdev; - break; + struct nvkm_subdev *subdev; + + list_for_each_entry(subdev, &device->subdev, head) { + if (subdev->type == type && subdev->inst == inst) + return subdev; } + return NULL; } struct nvkm_engine * -nvkm_device_engine(struct nvkm_device *device, int index) +nvkm_device_engine(struct nvkm_device *device, int type, int inst) { - if (device->disable_mask & (1ULL << index)) - return NULL; - - switch (index) { -#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break - _(BSP , device->bsp , device->bsp); - _(CE0 , device->ce[0] , device->ce[0]); - _(CE1 , device->ce[1] , device->ce[1]); - _(CE2 , device->ce[2] , device->ce[2]); - _(CE3 , device->ce[3] , device->ce[3]); - _(CE4 , device->ce[4] , device->ce[4]); - _(CE5 , device->ce[5] , device->ce[5]); - _(CE6 , device->ce[6] , device->ce[6]); - _(CE7 , device->ce[7] , device->ce[7]); - _(CE8 , device->ce[8] , device->ce[8]); - _(CIPHER , device->cipher , device->cipher); - _(DISP , device->disp , &device->disp->engine); - _(DMAOBJ , device->dma , &device->dma->engine); - _(FIFO , device->fifo , &device->fifo->engine); - _(GR , device->gr , &device->gr->engine); - _(IFB , device->ifb , device->ifb); - _(ME , device->me , device->me); - _(MPEG , device->mpeg , device->mpeg); - _(MSENC , device->msenc , device->msenc); - _(MSPDEC , device->mspdec , device->mspdec); - _(MSPPP , device->msppp , device->msppp); - _(MSVLD , device->msvld , device->msvld); - _(NVENC0 , device->nvenc[0], &device->nvenc[0]->engine); - _(NVENC1 , device->nvenc[1], &device->nvenc[1]->engine); - _(NVENC2 , device->nvenc[2], &device->nvenc[2]->engine); - _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine); - _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine); - _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine); - _(PM , device->pm , &device->pm->engine); - _(SEC , device->sec , device->sec); - _(SEC2 , device->sec2 , &device->sec2->engine); - _(SW , device->sw , &device->sw->engine); - _(VIC , device->vic , device->vic); - _(VP , device->vp , device->vp); -#undef _ - default: - WARN_ON(1); - break; - } + struct nvkm_subdev *subdev = nvkm_device_subdev(device, type, inst); + if (subdev && subdev->func == &nvkm_engine) + return container_of(subdev, struct nvkm_engine, subdev); return NULL; } @@ -2825,7 +2669,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend) { const char *action = suspend ? 
"suspend" : "fini"; struct nvkm_subdev *subdev; - int ret, i; + int ret; s64 time; nvdev_trace(device, "%s running...\n", action); @@ -2833,12 +2677,10 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend) nvkm_acpi_fini(device); - for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) { - if ((subdev = nvkm_device_subdev(device, i))) { - ret = nvkm_subdev_fini(subdev, suspend); - if (ret && suspend) - goto fail; - } + list_for_each_entry_reverse(subdev, &device->subdev, head) { + ret = nvkm_subdev_fini(subdev, suspend); + if (ret && suspend) + goto fail; } nvkm_therm_clkgate_fini(device->therm, suspend); @@ -2851,13 +2693,11 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend) return 0; fail: - do { - if ((subdev = nvkm_device_subdev(device, i))) { - int rret = nvkm_subdev_init(subdev); - if (rret) - nvkm_fatal(subdev, "failed restart, %d\n", ret); - } - } while (++i < NVKM_SUBDEV_NR); + list_for_each_entry_from(subdev, &device->subdev, head) { + int rret = nvkm_subdev_init(subdev); + if (rret) + nvkm_fatal(subdev, "failed restart, %d\n", ret); + } nvdev_trace(device, "%s failed with %d\n", action, ret); return ret; @@ -2867,7 +2707,7 @@ static int nvkm_device_preinit(struct nvkm_device *device) { struct nvkm_subdev *subdev; - int ret, i; + int ret; s64 time; nvdev_trace(device, "preinit running...\n"); @@ -2879,15 +2719,13 @@ nvkm_device_preinit(struct nvkm_device *device) goto fail; } - for (i = 0; i < NVKM_SUBDEV_NR; i++) { - if ((subdev = nvkm_device_subdev(device, i))) { - ret = nvkm_subdev_preinit(subdev); - if (ret) - goto fail; - } + list_for_each_entry(subdev, &device->subdev, head) { + ret = nvkm_subdev_preinit(subdev); + if (ret) + goto fail; } - ret = nvkm_devinit_post(device->devinit, &device->disable_mask); + ret = nvkm_devinit_post(device->devinit); if (ret) goto fail; @@ -2904,7 +2742,7 @@ int nvkm_device_init(struct nvkm_device *device) { struct nvkm_subdev *subdev; - int ret, i; + int ret; s64 time; ret = nvkm_device_preinit(device); @@ -2922,12 +2760,10 @@ nvkm_device_init(struct nvkm_device *device) goto fail; } - for (i = 0; i < NVKM_SUBDEV_NR; i++) { - if ((subdev = nvkm_device_subdev(device, i))) { - ret = nvkm_subdev_init(subdev); - if (ret) - goto fail_subdev; - } + list_for_each_entry(subdev, &device->subdev, head) { + ret = nvkm_subdev_init(subdev); + if (ret) + goto fail_subdev; } nvkm_acpi_init(device); @@ -2938,11 +2774,8 @@ nvkm_device_init(struct nvkm_device *device) return 0; fail_subdev: - do { - if ((subdev = nvkm_device_subdev(device, i))) - nvkm_subdev_fini(subdev, false); - } while (--i >= 0); - + list_for_each_entry_from(subdev, &device->subdev, head) + nvkm_subdev_fini(subdev, false); fail: nvkm_device_fini(device, false); @@ -2954,15 +2787,12 @@ void nvkm_device_del(struct nvkm_device **pdevice) { struct nvkm_device *device = *pdevice; - int i; + struct nvkm_subdev *subdev, *subtmp; if (device) { mutex_lock(&nv_devices_mutex); - device->disable_mask = 0; - for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) { - struct nvkm_subdev *subdev = - nvkm_device_subdev(device, i); + + list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head) nvkm_subdev_del(&subdev); - } nvkm_event_fini(&device->event); @@ -3021,7 +2851,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, struct nvkm_subdev *subdev; u64 mmio_base, mmio_size; u32 boot0, boot1, strap; - int ret = -EEXIST, i; + int ret = -EEXIST, j; unsigned chipset; mutex_lock(&nv_devices_mutex); @@ -3038,6 +2868,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, device->name = name; 
list_add_tail(&device->head, &nv_devices); device->debug = nvkm_dbgopt(device->dbgopt, "device"); + INIT_LIST_HEAD(&device->subdev); ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event); if (ret) @@ -3271,88 +3102,46 @@ nvkm_device_ctor(const struct nvkm_device_func *func, mutex_init(&device->mutex); - for (i = 0; i < NVKM_SUBDEV_NR; i++) { -#define _(s,m) case s: \ - if (device->chip->m && (subdev_mask & (1ULL << (s)))) { \ - ret = device->chip->m(device, (s), &device->m); \ - if (ret) { \ - subdev = nvkm_device_subdev(device, (s)); \ - nvkm_subdev_del(&subdev); \ - device->m = NULL; \ - if (ret != -ENODEV) { \ - nvdev_error(device, "%s ctor failed, %d\n", \ - nvkm_subdev_name[s], ret); \ - goto done; \ - } \ - } \ - } \ - break - switch (i) { - _(NVKM_SUBDEV_ACR , acr); - _(NVKM_SUBDEV_BAR , bar); - _(NVKM_SUBDEV_VBIOS , bios); - _(NVKM_SUBDEV_BUS , bus); - _(NVKM_SUBDEV_CLK , clk); - _(NVKM_SUBDEV_DEVINIT , devinit); - _(NVKM_SUBDEV_FAULT , fault); - _(NVKM_SUBDEV_FB , fb); - _(NVKM_SUBDEV_FUSE , fuse); - _(NVKM_SUBDEV_GPIO , gpio); - _(NVKM_SUBDEV_GSP , gsp); - _(NVKM_SUBDEV_I2C , i2c); - _(NVKM_SUBDEV_IBUS , ibus); - _(NVKM_SUBDEV_ICCSENSE, iccsense); - _(NVKM_SUBDEV_INSTMEM , imem); - _(NVKM_SUBDEV_LTC , ltc); - _(NVKM_SUBDEV_MC , mc); - _(NVKM_SUBDEV_MMU , mmu); - _(NVKM_SUBDEV_MXM , mxm); - _(NVKM_SUBDEV_PCI , pci); - _(NVKM_SUBDEV_PMU , pmu); - _(NVKM_SUBDEV_THERM , therm); - _(NVKM_SUBDEV_TIMER , timer); - _(NVKM_SUBDEV_TOP , top); - _(NVKM_SUBDEV_VOLT , volt); - _(NVKM_ENGINE_BSP , bsp); - _(NVKM_ENGINE_CE0 , ce[0]); - _(NVKM_ENGINE_CE1 , ce[1]); - _(NVKM_ENGINE_CE2 , ce[2]); - _(NVKM_ENGINE_CE3 , ce[3]); - _(NVKM_ENGINE_CE4 , ce[4]); - _(NVKM_ENGINE_CE5 , ce[5]); - _(NVKM_ENGINE_CE6 , ce[6]); - _(NVKM_ENGINE_CE7 , ce[7]); - _(NVKM_ENGINE_CE8 , ce[8]); - _(NVKM_ENGINE_CIPHER , cipher); - _(NVKM_ENGINE_DISP , disp); - _(NVKM_ENGINE_DMAOBJ , dma); - _(NVKM_ENGINE_FIFO , fifo); - _(NVKM_ENGINE_GR , gr); - _(NVKM_ENGINE_IFB , ifb); - _(NVKM_ENGINE_ME , me); - _(NVKM_ENGINE_MPEG , mpeg); - _(NVKM_ENGINE_MSENC , msenc); - _(NVKM_ENGINE_MSPDEC , mspdec); - _(NVKM_ENGINE_MSPPP , msppp); - _(NVKM_ENGINE_MSVLD , msvld); - _(NVKM_ENGINE_NVENC0 , nvenc[0]); - _(NVKM_ENGINE_NVENC1 , nvenc[1]); - _(NVKM_ENGINE_NVENC2 , nvenc[2]); - _(NVKM_ENGINE_NVDEC0 , nvdec[0]); - _(NVKM_ENGINE_NVDEC1 , nvdec[1]); - _(NVKM_ENGINE_NVDEC2 , nvdec[2]); - _(NVKM_ENGINE_PM , pm); - _(NVKM_ENGINE_SEC , sec); - _(NVKM_ENGINE_SEC2 , sec2); - _(NVKM_ENGINE_SW , sw); - _(NVKM_ENGINE_VIC , vic); - _(NVKM_ENGINE_VP , vp); - default: - WARN_ON(1); - continue; - } -#undef _ +#define NVKM_LAYOUT_ONCE(type,data,ptr) \ + if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \ + WARN_ON(device->chip->ptr.inst != 0x00000001); \ + ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr); \ + subdev = nvkm_device_subdev(device, (type), 0); \ + if (ret) { \ + nvkm_subdev_del(&subdev); \ + device->ptr = NULL; \ + if (ret != -ENODEV) { \ + nvdev_error(device, "%s ctor failed: %d\n", \ + nvkm_subdev_type[(type)], ret); \ + goto done; \ + } \ + } else { \ + subdev->pself = (void **)&device->ptr; \ + } \ + } +#define NVKM_LAYOUT_INST(type,data,ptr,cnt) \ + WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \ + for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \ + if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \ + int inst = (device->chip->ptr.inst == 1) ? 
-1 : (j); \ + ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \ + subdev = nvkm_device_subdev(device, (type), (j)); \ + if (ret) { \ + nvkm_subdev_del(&subdev); \ + device->ptr[j] = NULL; \ + if (ret != -ENODEV) { \ + nvdev_error(device, "%s%d ctor failed: %d\n", \ + nvkm_subdev_type[(type)], (j), ret); \ + goto done; \ + } \ + } else { \ + subdev->pself = (void **)&device->ptr[j]; \ + } \ + } \ } +#include <core/layout.h> +#undef NVKM_LAYOUT_INST +#undef NVKM_LAYOUT_ONCE ret = 0; done: diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h index 54eab5e04230..93949b3c7214 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h @@ -15,7 +15,6 @@ #include <subdev/gpio.h> #include <subdev/gsp.h> #include <subdev/i2c.h> -#include <subdev/ibus.h> #include <subdev/iccsense.h> #include <subdev/instmem.h> #include <subdev/ltc.h> @@ -24,6 +23,7 @@ #include <subdev/mxm.h> #include <subdev/pci.h> #include <subdev/pmu.h> +#include <subdev/privring.h> #include <subdev/therm.h> #include <subdev/timer.h> #include <subdev/top.h> diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index 147894798786..fea9d8f2b10c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -43,15 +43,15 @@ static int nvkm_udevice_info_subdev(struct nvkm_device *device, u64 mthd, u64 *data) { struct nvkm_subdev *subdev; - enum nvkm_devidx subidx; + enum nvkm_subdev_type type; switch (mthd & NV_DEVICE_INFO_UNIT) { - case NV_DEVICE_FIFO(0): subidx = NVKM_ENGINE_FIFO; break; + case NV_DEVICE_HOST(0): type = NVKM_ENGINE_FIFO; break; default: return -EINVAL; } - subdev = nvkm_device_subdev(device, subidx); + subdev = nvkm_device_subdev(device, type, 0); if (subdev) return nvkm_subdev_info(subdev, mthd, data); return -ENODEV; @@ -66,37 +66,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device, args->mthd = NV_DEVICE_INFO_INVALID; return; } - - switch (args->mthd) { -#define ENGINE__(A,B,C) NV_DEVICE_INFO_ENGINE_##A: { int _i; \ - for (_i = (B), args->data = 0ULL; _i <= (C); _i++) { \ - if (nvkm_device_engine(device, _i)) \ - args->data |= BIT_ULL(_i); \ - } \ -} -#define ENGINE_A(A) ENGINE__(A, NVKM_ENGINE_##A , NVKM_ENGINE_##A) -#define ENGINE_B(A) ENGINE__(A, NVKM_ENGINE_##A##0, NVKM_ENGINE_##A##_LAST) - case ENGINE_A(SW ); break; - case ENGINE_A(GR ); break; - case ENGINE_A(MPEG ); break; - case ENGINE_A(ME ); break; - case ENGINE_A(CIPHER); break; - case ENGINE_A(BSP ); break; - case ENGINE_A(VP ); break; - case ENGINE_B(CE ); break; - case ENGINE_A(SEC ); break; - case ENGINE_A(MSVLD ); break; - case ENGINE_A(MSPDEC); break; - case ENGINE_A(MSPPP ); break; - case ENGINE_A(MSENC ); break; - case ENGINE_A(VIC ); break; - case ENGINE_A(SEC2 ); break; - case ENGINE_B(NVDEC ); break; - case ENGINE_B(NVENC ); break; - default: - args->mthd = NV_DEVICE_INFO_INVALID; - break; - } + args->mthd = NV_DEVICE_INFO_INVALID; } static int @@ -357,7 +327,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index, int i; for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) { - if (!(engine = nvkm_device_engine(device, i)) || + if (!(engine = nvkm_device_engine(device, i, 0)) || !(engine->func->base.sclass)) continue; oclass->engine = engine; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index 
cbd33e87b799..5daa77755276 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c @@ -149,10 +149,10 @@ static void nvkm_disp_class_del(struct nvkm_oproxy *oproxy) { struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine); - mutex_lock(&disp->engine.subdev.mutex); - if (disp->client == oproxy) - disp->client = NULL; - mutex_unlock(&disp->engine.subdev.mutex); + spin_lock(&disp->client.lock); + if (disp->client.object == oproxy) + disp->client.object = NULL; + spin_unlock(&disp->client.lock); } static const struct nvkm_oproxy_func @@ -175,13 +175,13 @@ nvkm_disp_class_new(struct nvkm_device *device, return ret; *pobject = &oproxy->base; - mutex_lock(&disp->engine.subdev.mutex); - if (disp->client) { - mutex_unlock(&disp->engine.subdev.mutex); + spin_lock(&disp->client.lock); + if (disp->client.object) { + spin_unlock(&disp->client.lock); return -EBUSY; } - disp->client = oproxy; - mutex_unlock(&disp->engine.subdev.mutex); + disp->client.object = oproxy; + spin_unlock(&disp->client.lock); return sclass->ctor(disp, oclass, data, size, &oproxy->object); } @@ -473,21 +473,22 @@ nvkm_disp = { int nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device, - int index, struct nvkm_disp *disp) + enum nvkm_subdev_type type, int inst, struct nvkm_disp *disp) { disp->func = func; INIT_LIST_HEAD(&disp->head); INIT_LIST_HEAD(&disp->ior); INIT_LIST_HEAD(&disp->outp); INIT_LIST_HEAD(&disp->conn); - return nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine); + spin_lock_init(&disp->client.lock); + return nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine); } int nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device, - int index, struct nvkm_disp **pdisp) + enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) { if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL))) return -ENOMEM; - return nvkm_disp_ctor(func, device, index, *pdisp); + return nvkm_disp_ctor(func, device, type, inst, *pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c index 50e3539f33d2..a7a7eb041515 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c @@ -278,7 +278,7 @@ nv50_disp_chan_child_get(struct nvkm_object *object, int index, const struct nvkm_device_oclass *oclass = NULL; if (chan->func->bind) - sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ); + sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ, 0); else sclass->engine = NULL; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c index 731f188fc1ee..156bbe8b2de3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c @@ -41,7 +41,8 @@ g84_disp = { }; int -g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +g84_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&g84_disp, device, index, pdisp); + return nv50_disp_new_(&g84_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c index def54fe1951e..3425b5d3bc72 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c @@ -41,7 +41,8 @@ g94_disp = { }; int 
-g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +g94_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&g94_disp, device, index, pdisp); + return nv50_disp_new_(&g94_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c index aa2e5645fe36..68aa52588d92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c @@ -40,7 +40,8 @@ ga102_disp = { }; int -ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&ga102_disp, device, index, pdisp); + return nv50_disp_new_(&ga102_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c index e675d9b9d5d7..a6bafe7fea1f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c @@ -266,7 +266,8 @@ gf119_disp = { }; int -gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gf119_disp, device, index, pdisp); + return nv50_disp_new_(&gf119_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c index 4c3439b1a62d..3b79cf233ac5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c @@ -41,7 +41,8 @@ gk104_disp = { }; int -gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gk104_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gk104_disp, device, index, pdisp); + return nv50_disp_new_(&gk104_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c index bc6f4750c942..988eb12237a6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c @@ -41,7 +41,8 @@ gk110_disp = { }; int -gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gk110_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gk110_disp, device, index, pdisp); + return nv50_disp_new_(&gk110_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c index 031cf6b03a76..5d8108feeacd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c @@ -41,7 +41,8 @@ gm107_disp = { }; int -gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gm107_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gm107_disp, device, index, pdisp); + return nv50_disp_new_(&gm107_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c index 
ec9c33a5162d..f7bb66087476 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c @@ -41,7 +41,8 @@ gm200_disp = { }; int -gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gm200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gm200_disp, device, index, pdisp); + return nv50_disp_new_(&gm200_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c index 8471de3f3b61..af0ca812a394 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c @@ -40,7 +40,8 @@ gp100_disp = { }; int -gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gp100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gp100_disp, device, index, pdisp); + return nv50_disp_new_(&gp100_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c index a3779c5046ea..065fea1bdfd1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c @@ -67,7 +67,8 @@ gp102_disp = { }; int -gp102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gp102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gp102_disp, device, index, pdisp); + return nv50_disp_new_(&gp102_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c index f80183701f44..22bc269df64a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c @@ -41,7 +41,8 @@ gt200_disp = { }; int -gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gt200_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gt200_disp, device, index, pdisp); + return nv50_disp_new_(&gt200_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c index 7581efc1357e..63a912b174d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c @@ -41,7 +41,8 @@ gt215_disp = { }; int -gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gt215_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gt215_disp, device, index, pdisp); + return nv50_disp_new_(&gt215_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c index c1032527f791..53879d5271cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c @@ -441,7 +441,8 @@ gv100_disp = { }; int -gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +gv100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&gv100_disp, device, index, pdisp); + return
nv50_disp_new_(&gv100_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c index cfdce23ab83a..762a59f24bbb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c @@ -39,7 +39,8 @@ mcp77_disp = { }; int -mcp77_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +mcp77_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&mcp77_disp, device, index, pdisp); + return nv50_disp_new_(&mcp77_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c index 85d9329cfa0e..e5c58aae15de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c @@ -39,7 +39,8 @@ mcp89_disp = { }; int -mcp89_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +mcp89_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&mcp89_disp, device, index, pdisp); + return nv50_disp_new_(&mcp89_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c index b780ba1a3bc7..a12097db2c2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c @@ -64,11 +64,12 @@ nv04_disp = { }; int -nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +nv04_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { int ret, i; - ret = nvkm_disp_new_(&nv04_disp, device, index, pdisp); + ret = nvkm_disp_new_(&nv04_disp, device, type, inst, pdisp); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index e21556bf2cb1..3f20e49070ce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c @@ -154,7 +154,7 @@ nv50_disp_ = { int nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device, - int index, struct nvkm_disp **pdisp) + enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp) { struct nv50_disp *disp; int ret; @@ -164,7 +164,7 @@ nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device, disp->func = func; *pdisp = &disp->base; - ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base); + ret = nvkm_disp_ctor(&nv50_disp_, device, type, inst, &disp->base); if (ret) return ret; @@ -769,7 +769,8 @@ nv50_disp = { }; int -nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&nv50_disp, device, index, pdisp); + return nv50_disp_new_(&nv50_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index db31b37752a2..025cacd7c3b0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h @@ -47,8 +47,8 @@ void nv50_disp_super_2_1(struct nv50_disp *, struct nvkm_head *); void nv50_disp_super_2_2(struct nv50_disp *, struct nvkm_head *); void 
nv50_disp_super_3_0(struct nv50_disp *, struct nvkm_head *); -int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *, - int index, struct nvkm_disp **); +int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_disp **); struct nv50_disp_func { int (*init)(struct nv50_disp *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h index f815a5342880..ec57d8b6bce9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h @@ -4,10 +4,10 @@ #include <engine/disp.h> #include "outp.h" -int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, - int index, struct nvkm_disp *); -int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, - int index, struct nvkm_disp **); +int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_disp *); +int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_disp **); void nvkm_disp_vblank(struct nvkm_disp *, int head); struct nvkm_disp_func { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c index 4c85d1d4fbd4..f5f8dc8e8f35 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c @@ -146,7 +146,8 @@ tu102_disp = { }; int -tu102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_disp **pdisp) { - return nv50_disp_new_(&tu102_disp, device, index, pdisp); + return nv50_disp_new_(&tu102_disp, device, type, inst, pdisp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c index 11b7b8fd5dda..425cde35f128 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c @@ -104,7 +104,7 @@ nvkm_dma = { int nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device, - int index, struct nvkm_dma **pdma) + enum nvkm_subdev_type type, int inst, struct nvkm_dma **pdma) { struct nvkm_dma *dma; @@ -112,5 +112,5 @@ nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device, return -ENOMEM; dma->func = func; - return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine); + return nvkm_engine_ctor(&nvkm_dma, device, type, inst, true, &dma->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c index efec5d322179..99a1e07fa204 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c @@ -30,7 +30,8 @@ gf100_dma = { }; int -gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma) +gf100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_dma **pdma) { - return nvkm_dma_new_(&gf100_dma, device, index, pdma); + return nvkm_dma_new_(&gf100_dma, device, type, inst, pdma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c index 34c766039aed..fd1d1fc22dc6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c @@ -30,7 +30,8 @@ gf119_dma = { }; int -gf119_dma_new(struct nvkm_device 
*device, int index, struct nvkm_dma **pdma) +gf119_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_dma **pdma) { - return nvkm_dma_new_(&gf119_dma, device, index, pdma); + return nvkm_dma_new_(&gf119_dma, device, type, inst, pdma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c index c65a4c2ea93d..a5af0df30663 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gv100.c @@ -28,7 +28,8 @@ gv100_dma = { }; int -gv100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma) +gv100_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_dma **pdma) { - return nvkm_dma_new_(&gv100_dma, device, index, pdma); + return nvkm_dma_new_(&gv100_dma, device, type, inst, pdma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c index 30747a0ce488..ea5a889f60c2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c @@ -30,7 +30,8 @@ nv04_dma = { }; int -nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma) +nv04_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_dma **pdma) { - return nvkm_dma_new_(&nv04_dma, device, index, pdma); + return nvkm_dma_new_(&nv04_dma, device, type, inst, pdma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c index 77aca7b71c83..6e8f79660014 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c @@ -30,7 +30,8 @@ nv50_dma = { }; int -nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma) +nv50_dma_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_dma **pdma) { - return nvkm_dma_new_(&nv50_dma, device, index, pdma); + return nvkm_dma_new_(&nv50_dma, device, type, inst, pdma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h index 0c9d9640a59d..d403bedb485a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h @@ -9,8 +9,8 @@ struct nvkm_dmaobj_func { struct nvkm_gpuobj **); }; -int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *, - int index, struct nvkm_dma **); +int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_dma **); struct nvkm_dma_func { int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c index 8675613e142b..43b7dec45179 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c @@ -108,7 +108,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) } } - if (nvkm_mc_enabled(device, engine->subdev.index)) { + if (nvkm_mc_enabled(device, engine->subdev.type, engine->subdev.inst)) { nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); nvkm_wr32(device, base + 0x014, 0xffffffff); } @@ -335,9 +335,9 @@ nvkm_falcon = { }; int -nvkm_falcon_new_(const struct nvkm_falcon_func *func, - struct nvkm_device *device, int index, bool enable, - u32 addr, struct nvkm_engine **pengine) +nvkm_falcon_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device, + 
enum nvkm_subdev_type type, int inst, bool enable, u32 addr, + struct nvkm_engine **pengine) { struct nvkm_falcon *falcon; @@ -351,6 +351,5 @@ nvkm_falcon_new_(const struct nvkm_falcon_func *func, falcon->data.size = func->data.size; *pengine = &falcon->engine; - return nvkm_engine_ctor(&nvkm_falcon, device, index, - enable, &falcon->engine); + return nvkm_engine_ctor(&nvkm_falcon, device, type, inst, enable, &falcon->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index c773caf21f6b..2ed4ff05d207 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c @@ -292,7 +292,7 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data) { struct nvkm_fifo *fifo = nvkm_fifo(engine); switch (mthd) { - case NV_DEVICE_FIFO_CHANNELS: *data = fifo->nr; return 0; + case NV_DEVICE_HOST_CHANNELS: *data = fifo->nr; return 0; default: if (fifo->func->info) return fifo->func->info(fifo, mthd, data); @@ -313,7 +313,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine) static void nvkm_fifo_preinit(struct nvkm_engine *engine) { - nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO); + nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0); } static int @@ -334,6 +334,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine) nvkm_event_fini(&fifo->kevent); nvkm_event_fini(&fifo->cevent); nvkm_event_fini(&fifo->uevent); + mutex_destroy(&fifo->mutex); return data; } @@ -351,13 +352,14 @@ nvkm_fifo = { int nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device, - int index, int nr, struct nvkm_fifo *fifo) + enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo *fifo) { int ret; fifo->func = func; INIT_LIST_HEAD(&fifo->chan); spin_lock_init(&fifo->lock); + mutex_init(&fifo->mutex); if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR)) fifo->nr = NVKM_FIFO_CHID_NR; @@ -365,7 +367,7 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device, fifo->nr = nr; bitmap_clear(fifo->mask, 0, fifo->nr); - ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine); + ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c index d83485385934..8d957643940a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c @@ -35,6 +35,15 @@ struct nvkm_fifo_chan_object { int hash; }; +static struct nvkm_fifo_engn * +nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine) +{ + int engi = chan->fifo->func->engine_id(chan->fifo, engine); + if (engi >= 0) + return &chan->engn[engi]; + return NULL; +} + static int nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend) { @@ -42,8 +51,8 @@ nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend) container_of(base, typeof(*object), oproxy); struct nvkm_engine *engine = object->oproxy.object->engine; struct nvkm_fifo_chan *chan = object->chan; - struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index]; - const char *name = nvkm_subdev_name[engine->subdev.index]; + struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine); + const char *name = engine->subdev.name; int ret = 0; if (--engn->usecount) @@ -75,8 +84,8 @@ nvkm_fifo_chan_child_init(struct nvkm_oproxy *base) container_of(base, typeof(*object), oproxy); struct nvkm_engine *engine = 
object->oproxy.object->engine; struct nvkm_fifo_chan *chan = object->chan; - struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index]; - const char *name = nvkm_subdev_name[engine->subdev.index]; + struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine); + const char *name = engine->subdev.name; int ret; if (engn->usecount++) @@ -108,7 +117,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base) container_of(base, typeof(*object), oproxy); struct nvkm_engine *engine = object->oproxy.base.engine; struct nvkm_fifo_chan *chan = object->chan; - struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index]; + struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine); if (chan->func->object_dtor) chan->func->object_dtor(chan, object->hash); @@ -118,7 +127,7 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base) chan->func->engine_dtor(chan, engine); nvkm_object_del(&engn->object); if (chan->vmm) - atomic_dec(&chan->vmm->engref[engine->subdev.index]); + atomic_dec(&chan->vmm->engref[engine->subdev.type]); } } @@ -135,7 +144,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size, { struct nvkm_engine *engine = oclass->engine; struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent); - struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index]; + struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine); struct nvkm_fifo_chan_object *object; int ret = 0; @@ -152,7 +161,7 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size, }; if (chan->vmm) - atomic_inc(&chan->vmm->engref[engine->subdev.index]); + atomic_inc(&chan->vmm->engref[engine->subdev.type]); if (engine->func->fifo.cclass) { ret = engine->func->fifo.cclass(chan, &cclass, @@ -203,13 +212,12 @@ nvkm_fifo_chan_child_get(struct nvkm_object *object, int index, { struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object); struct nvkm_fifo *fifo = chan->fifo; - struct nvkm_device *device = fifo->engine.subdev.device; struct nvkm_engine *engine; - u64 mask = chan->engines; - int ret, i, c; + u32 engm = chan->engm; + int engi, ret, c; - for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) { - if (!(engine = nvkm_device_engine(device, i))) + for (; c = 0, engi = __ffs(engm), engm; engm &= ~(1ULL << engi)) { + if (!(engine = fifo->func->id_engine(fifo, engi))) continue; oclass->engine = engine; oclass->base.oclass = 0; @@ -352,7 +360,7 @@ nvkm_fifo_chan_func = { int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func, struct nvkm_fifo *fifo, u32 size, u32 align, bool zero, - u64 hvmm, u64 push, u64 engines, int bar, u32 base, + u64 hvmm, u64 push, u32 engm, int bar, u32 base, u32 user, const struct nvkm_oclass *oclass, struct nvkm_fifo_chan *chan) { @@ -365,7 +373,7 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func, nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object); chan->func = func; chan->fifo = fifo; - chan->engines = engines; + chan->engm = engm; INIT_LIST_HEAD(&chan->head); /* instance memory */ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h index 177e10562600..e53504354841 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h @@ -22,7 +22,7 @@ struct nvkm_fifo_chan_func { int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *, u32 size, u32 align, bool zero, u64 vm, u64 push, - u64 engines, int bar, u32 base, u32 user, + u32 engm, int bar, u32 base, u32 user, const struct 
nvkm_oclass *, struct nvkm_fifo_chan *); struct nvkm_fifo_chan_oclass { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c index a5c998fe4485..353b77d9b3dc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c @@ -45,29 +45,9 @@ g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type, } static int -g84_fifo_chan_engine(struct nvkm_engine *engine) -{ - switch (engine->subdev.index) { - case NVKM_ENGINE_GR : return 0; - case NVKM_ENGINE_MPEG : - case NVKM_ENGINE_MSPPP : return 1; - case NVKM_ENGINE_CE0 : return 2; - case NVKM_ENGINE_VP : - case NVKM_ENGINE_MSPDEC: return 3; - case NVKM_ENGINE_CIPHER: - case NVKM_ENGINE_SEC : return 4; - case NVKM_ENGINE_BSP : - case NVKM_ENGINE_MSVLD : return 5; - default: - WARN_ON(1); - return 0; - } -} - -static int g84_fifo_chan_engine_addr(struct nvkm_engine *engine) { - switch (engine->subdev.index) { + switch (engine->subdev.type) { case NVKM_ENGINE_DMAOBJ: case NVKM_ENGINE_SW : return -1; case NVKM_ENGINE_GR : return 0x0020; @@ -79,7 +59,7 @@ g84_fifo_chan_engine_addr(struct nvkm_engine *engine) case NVKM_ENGINE_MSVLD : return 0x0080; case NVKM_ENGINE_CIPHER: case NVKM_ENGINE_SEC : return 0x00a0; - case NVKM_ENGINE_CE0 : return 0x00c0; + case NVKM_ENGINE_CE : return 0x00c0; default: WARN_ON(1); return -1; @@ -102,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base, if (offset < 0) return 0; - engn = g84_fifo_chan_engine(engine); + engn = fifo->base.func->engine_id(&fifo->base, engine); save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn); nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12); done = nvkm_msec(device, 2000, @@ -134,7 +114,7 @@ g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct nv50_fifo_chan *chan = nv50_fifo_chan(base); - struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index]; + struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine); u64 limit, start; int offset; @@ -162,12 +142,11 @@ g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base, struct nvkm_object *object) { struct nv50_fifo_chan *chan = nv50_fifo_chan(base); - int engn = engine->subdev.index; if (g84_fifo_chan_engine_addr(engine) < 0) return 0; - return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]); + return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine)); } static int @@ -178,14 +157,14 @@ g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base, u32 handle = object->handle; u32 context; - switch (object->engine->subdev.index) { + switch (object->engine->subdev.type) { case NVKM_ENGINE_DMAOBJ: case NVKM_ENGINE_SW : context = 0x00000000; break; case NVKM_ENGINE_GR : context = 0x00100000; break; case NVKM_ENGINE_MPEG : case NVKM_ENGINE_MSPPP : context = 0x00200000; break; case NVKM_ENGINE_ME : - case NVKM_ENGINE_CE0 : context = 0x00300000; break; + case NVKM_ENGINE_CE : context = 0x00300000; break; case NVKM_ENGINE_VP : case NVKM_ENGINE_MSPDEC: context = 0x00400000; break; case NVKM_ENGINE_CIPHER: @@ -241,20 +220,20 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push, ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base, 0x10000, 0x1000, false, vmm, push, - (1ULL << NVKM_ENGINE_BSP) | - (1ULL << NVKM_ENGINE_CE0) | - (1ULL << NVKM_ENGINE_CIPHER) | - (1ULL << NVKM_ENGINE_DMAOBJ) | - (1ULL << NVKM_ENGINE_GR) | - (1ULL << NVKM_ENGINE_ME) | - (1ULL << NVKM_ENGINE_MPEG) | - (1ULL << NVKM_ENGINE_MSPDEC) | - (1ULL << 
NVKM_ENGINE_MSPPP) | - (1ULL << NVKM_ENGINE_MSVLD) | - (1ULL << NVKM_ENGINE_SEC) | - (1ULL << NVKM_ENGINE_SW) | - (1ULL << NVKM_ENGINE_VIC) | - (1ULL << NVKM_ENGINE_VP), + BIT(G84_FIFO_ENGN_SW) | + BIT(G84_FIFO_ENGN_GR) | + BIT(G84_FIFO_ENGN_MPEG) | + BIT(G84_FIFO_ENGN_MSPPP) | + BIT(G84_FIFO_ENGN_ME) | + BIT(G84_FIFO_ENGN_CE0) | + BIT(G84_FIFO_ENGN_VP) | + BIT(G84_FIFO_ENGN_MSPDEC) | + BIT(G84_FIFO_ENGN_CIPHER) | + BIT(G84_FIFO_ENGN_SEC) | + BIT(G84_FIFO_ENGN_VIC) | + BIT(G84_FIFO_ENGN_BSP) | + BIT(G84_FIFO_ENGN_MSVLD) | + BIT(G84_FIFO_ENGN_DMA), 0, 0xc00000, 0x2000, oclass, &chan->base); chan->fifo = fifo; if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h index 7c125a15f963..f7ac1061fa84 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h @@ -12,10 +12,17 @@ struct gf100_fifo_chan { struct list_head head; bool killed; - struct { +#define GF100_FIFO_ENGN_GR 0 +#define GF100_FIFO_ENGN_MSPDEC 1 +#define GF100_FIFO_ENGN_MSPPP 2 +#define GF100_FIFO_ENGN_MSVLD 3 +#define GF100_FIFO_ENGN_CE0 4 +#define GF100_FIFO_ENGN_CE1 5 +#define GF100_FIFO_ENGN_SW 15 + struct gf100_fifo_engn { struct nvkm_gpuobj *inst; struct nvkm_vma *vma; - } engn[NVKM_SUBDEV_NR]; + } engn[NVKM_FIFO_ENGN_NR]; }; extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h index 22698661aa85..cfbe096e604f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h @@ -16,10 +16,11 @@ struct gk104_fifo_chan { struct nvkm_memory *mthd; - struct { +#define GK104_FIFO_ENGN_SW 15 + struct gk104_fifo_engn { struct nvkm_gpuobj *inst; struct nvkm_vma *vma; - } engn[NVKM_SUBDEV_NR]; + } engn[NVKM_FIFO_ENGN_NR]; }; extern const struct nvkm_fifo_chan_func gk104_fifo_gpfifo_func; @@ -29,6 +30,7 @@ int gk104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *, void *gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *); void gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *); void gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *); +struct gk104_fifo_engn *gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *, struct nvkm_engine *); int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *, struct nvkm_object *); void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h index 60ca79465aff..727bc8976b40 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h @@ -9,7 +9,11 @@ struct nv04_fifo_chan { struct nvkm_fifo_chan base; struct nv04_fifo *fifo; u32 ramfc; - struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR]; +#define NV04_FIFO_ENGN_SW 0 +#define NV04_FIFO_ENGN_GR 1 +#define NV04_FIFO_ENGN_MPEG 2 +#define NV04_FIFO_ENGN_DMA 3 + struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR]; }; extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c index 85f7dbf53c99..c44d7c81dd52 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c @@ -31,7 +31,7 @@ static int nv50_fifo_chan_engine_addr(struct nvkm_engine *engine) { - switch 
(engine->subdev.index) { + switch (engine->subdev.type) { case NVKM_ENGINE_DMAOBJ: case NVKM_ENGINE_SW : return -1; case NVKM_ENGINE_GR : return 0x0000; @@ -42,6 +42,15 @@ nv50_fifo_chan_engine_addr(struct nvkm_engine *engine) } } +struct nvkm_gpuobj ** +nv50_fifo_chan_engine(struct nv50_fifo_chan *chan, struct nvkm_engine *engine) +{ + int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine); + if (engi >= 0) + return &chan->engn[engi]; + return NULL; +} + static int nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base, struct nvkm_engine *engine, bool suspend) @@ -103,7 +112,7 @@ nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct nv50_fifo_chan *chan = nv50_fifo_chan(base); - struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index]; + struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine); u64 limit, start; int offset; @@ -130,7 +139,7 @@ nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct nv50_fifo_chan *chan = nv50_fifo_chan(base); - nvkm_gpuobj_del(&chan->engn[engine->subdev.index]); + nvkm_gpuobj_del(nv50_fifo_chan_engine(chan, engine)); } static int @@ -139,12 +148,11 @@ nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base, struct nvkm_object *object) { struct nv50_fifo_chan *chan = nv50_fifo_chan(base); - int engn = engine->subdev.index; if (nv50_fifo_chan_engine_addr(engine) < 0) return 0; - return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]); + return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine)); } void @@ -162,7 +170,7 @@ nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base, u32 handle = object->handle; u32 context; - switch (object->engine->subdev.index) { + switch (object->engine->subdev.type) { case NVKM_ENGINE_DMAOBJ: case NVKM_ENGINE_SW : context = 0x00000000; break; case NVKM_ENGINE_GR : context = 0x00100000; break; @@ -240,10 +248,10 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push, ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base, 0x10000, 0x1000, false, vmm, push, - (1ULL << NVKM_ENGINE_DMAOBJ) | - (1ULL << NVKM_ENGINE_SW) | - (1ULL << NVKM_ENGINE_GR) | - (1ULL << NVKM_ENGINE_MPEG), + BIT(NV50_FIFO_ENGN_SW) | + BIT(NV50_FIFO_ENGN_GR) | + BIT(NV50_FIFO_ENGN_MPEG) | + BIT(NV50_FIFO_ENGN_DMA), 0, 0xc00000, 0x2000, oclass, &chan->base); chan->fifo = fifo; if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h index 5735ff72a9d1..af8bdf275552 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h @@ -15,13 +15,33 @@ struct nv50_fifo_chan { struct nvkm_gpuobj *pgd; struct nvkm_ramht *ramht; - struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR]; +#define NV50_FIFO_ENGN_SW 0 +#define NV50_FIFO_ENGN_GR 1 +#define NV50_FIFO_ENGN_MPEG 2 +#define NV50_FIFO_ENGN_DMA 3 + +#define G84_FIFO_ENGN_SW 0 +#define G84_FIFO_ENGN_GR 1 +#define G84_FIFO_ENGN_MPEG 2 +#define G84_FIFO_ENGN_MSPPP 2 +#define G84_FIFO_ENGN_ME 3 +#define G84_FIFO_ENGN_CE0 3 +#define G84_FIFO_ENGN_VP 4 +#define G84_FIFO_ENGN_MSPDEC 4 +#define G84_FIFO_ENGN_CIPHER 5 +#define G84_FIFO_ENGN_SEC 5 +#define G84_FIFO_ENGN_VIC 5 +#define G84_FIFO_ENGN_BSP 6 +#define G84_FIFO_ENGN_MSVLD 6 +#define G84_FIFO_ENGN_DMA 7 + struct nvkm_gpuobj *engn[NVKM_FIFO_ENGN_NR]; }; int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push, const struct nvkm_oclass *, struct nv50_fifo_chan *); void *nv50_fifo_chan_dtor(struct 
nvkm_fifo_chan *); void nv50_fifo_chan_fini(struct nvkm_fifo_chan *); +struct nvkm_gpuobj **nv50_fifo_chan_engine(struct nv50_fifo_chan *, struct nvkm_engine *); void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *); void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c index c213122cf088..dbcdc5fab990 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c @@ -38,9 +38,9 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie) struct nv04_fifo_chan *chan = nv04_fifo_chan(base); struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; - mutex_lock(&chan->fifo->base.engine.subdev.mutex); + mutex_lock(&chan->fifo->base.mutex); nvkm_ramht_remove(imem->ramht, cookie); - mutex_unlock(&chan->fifo->base.engine.subdev.mutex); + mutex_unlock(&chan->fifo->base.mutex); } static int @@ -53,7 +53,7 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, u32 handle = object->handle; int hash; - switch (object->engine->subdev.index) { + switch (object->engine->subdev.type) { case NVKM_ENGINE_DMAOBJ: case NVKM_ENGINE_SW : context |= 0x00000000; break; case NVKM_ENGINE_GR : context |= 0x00010000; break; @@ -63,10 +63,10 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, return -EINVAL; } - mutex_lock(&chan->fifo->base.engine.subdev.mutex); + mutex_lock(&chan->fifo->base.mutex); hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4, handle, context); - mutex_unlock(&chan->fifo->base.engine.subdev.mutex); + mutex_unlock(&chan->fifo->base.mutex); return hash; } @@ -191,9 +191,9 @@ nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base, 0x1000, 0x1000, false, 0, args->v0.pushbuf, - (1ULL << NVKM_ENGINE_DMAOBJ) | - (1ULL << NVKM_ENGINE_GR) | - (1ULL << NVKM_ENGINE_SW), + BIT(NV04_FIFO_ENGN_SW) | + BIT(NV04_FIFO_ENGN_GR) | + BIT(NV04_FIFO_ENGN_DMA), 0, 0x800000, 0x10000, oclass, &chan->base); chan->fifo = fifo; if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c index f5f355ff005d..07d80d54a07c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c @@ -62,9 +62,9 @@ nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base, 0x1000, 0x1000, false, 0, args->v0.pushbuf, - (1ULL << NVKM_ENGINE_DMAOBJ) | - (1ULL << NVKM_ENGINE_GR) | - (1ULL << NVKM_ENGINE_SW), + BIT(NV04_FIFO_ENGN_SW) | + BIT(NV04_FIFO_ENGN_GR) | + BIT(NV04_FIFO_ENGN_DMA), 0, 0x800000, 0x10000, oclass, &chan->base); chan->fifo = fifo; if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c index 7edc6a564b5d..edd70a114218 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c @@ -62,10 +62,10 @@ nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base, 0x1000, 0x1000, false, 0, args->v0.pushbuf, - (1ULL << NVKM_ENGINE_DMAOBJ) | - (1ULL << NVKM_ENGINE_GR) | - (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */ - (1ULL << NVKM_ENGINE_SW), + BIT(NV04_FIFO_ENGN_SW) | + BIT(NV04_FIFO_ENGN_GR) | + 
BIT(NV04_FIFO_ENGN_MPEG) | /* NV31- */ + BIT(NV04_FIFO_ENGN_DMA), 0, 0x800000, 0x10000, oclass, &chan->base); chan->fifo = fifo; if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c index 5f722c6e8a2f..0411fb908457 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c @@ -35,7 +35,7 @@ static bool nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx) { - switch (engine->subdev.index) { + switch (engine->subdev.type) { case NVKM_ENGINE_DMAOBJ: case NVKM_ENGINE_SW: return false; @@ -55,6 +55,15 @@ nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx) } } +static struct nvkm_gpuobj ** +nv40_fifo_dma_engn(struct nv04_fifo_chan *chan, struct nvkm_engine *engine) +{ + int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine); + if (engi >= 0) + return &chan->engn[engi]; + return NULL; +} + static int nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base, struct nvkm_engine *engine, bool suspend) @@ -99,7 +108,7 @@ nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base, if (!nv40_fifo_dma_engine(engine, &reg, &ctx)) return 0; - inst = chan->engn[engine->subdev.index]->addr >> 4; + inst = (*nv40_fifo_dma_engn(chan, engine))->addr >> 4; spin_lock_irqsave(&fifo->base.lock, flags); nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); @@ -121,7 +130,7 @@ nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct nv04_fifo_chan *chan = nv04_fifo_chan(base); - nvkm_gpuobj_del(&chan->engn[engine->subdev.index]); + nvkm_gpuobj_del(nv40_fifo_dma_engn(chan, engine)); } static int @@ -130,13 +139,12 @@ nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base, struct nvkm_object *object) { struct nv04_fifo_chan *chan = nv04_fifo_chan(base); - const int engn = engine->subdev.index; u32 reg, ctx; if (!nv40_fifo_dma_engine(engine, &reg, &ctx)) return 0; - return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]); + return nvkm_object_bind(object, NULL, 0, nv40_fifo_dma_engn(chan, engine)); } static int @@ -149,7 +157,7 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, u32 handle = object->handle; int hash; - switch (object->engine->subdev.index) { + switch (object->engine->subdev.type) { case NVKM_ENGINE_DMAOBJ: case NVKM_ENGINE_SW : context |= 0x00000000; break; case NVKM_ENGINE_GR : context |= 0x00100000; break; @@ -159,10 +167,10 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, return -EINVAL; } - mutex_lock(&chan->fifo->base.engine.subdev.mutex); + mutex_lock(&chan->fifo->base.mutex); hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4, handle, context); - mutex_unlock(&chan->fifo->base.engine.subdev.mutex); + mutex_unlock(&chan->fifo->base.mutex); return hash; } @@ -209,10 +217,10 @@ nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base, 0x1000, 0x1000, false, 0, args->v0.pushbuf, - (1ULL << NVKM_ENGINE_DMAOBJ) | - (1ULL << NVKM_ENGINE_GR) | - (1ULL << NVKM_ENGINE_MPEG) | - (1ULL << NVKM_ENGINE_SW), + BIT(NV04_FIFO_ENGN_SW) | + BIT(NV04_FIFO_ENGN_GR) | + BIT(NV04_FIFO_ENGN_MPEG) | + BIT(NV04_FIFO_ENGN_DMA), 0, 0xc00000, 0x1000, oclass, &chan->base); chan->fifo = fifo; if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c index ff7b529764fe..c0a7d0f21dac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c @@ -38,12 +38,82 @@ g84_fifo_uevent_init(struct nvkm_fifo *fifo) nvkm_mask(device, 0x002140, 0x40000000, 0x40000000); } +static struct nvkm_engine * +g84_fifo_id_engine(struct nvkm_fifo *fifo, int engi) +{ + struct nvkm_device *device = fifo->engine.subdev.device; + struct nvkm_engine *engine; + enum nvkm_subdev_type type; + + switch (engi) { + case G84_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break; + case G84_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break; + case G84_FIFO_ENGN_MPEG : + if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPPP, 0))) + return engine; + type = NVKM_ENGINE_MPEG; + break; + case G84_FIFO_ENGN_ME : + if ((engine = nvkm_device_engine(device, NVKM_ENGINE_CE, 0))) + return engine; + type = NVKM_ENGINE_ME; + break; + case G84_FIFO_ENGN_VP : + if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSPDEC, 0))) + return engine; + type = NVKM_ENGINE_VP; + break; + case G84_FIFO_ENGN_CIPHER: + if ((engine = nvkm_device_engine(device, NVKM_ENGINE_VIC, 0))) + return engine; + if ((engine = nvkm_device_engine(device, NVKM_ENGINE_SEC, 0))) + return engine; + type = NVKM_ENGINE_CIPHER; + break; + case G84_FIFO_ENGN_BSP : + if ((engine = nvkm_device_engine(device, NVKM_ENGINE_MSVLD, 0))) + return engine; + type = NVKM_ENGINE_BSP; + break; + case G84_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break; + default: + WARN_ON(1); + return NULL; + } + + return nvkm_device_engine(fifo->engine.subdev.device, type, 0); +} + +static int +g84_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine) +{ + switch (engine->subdev.type) { + case NVKM_ENGINE_SW : return G84_FIFO_ENGN_SW; + case NVKM_ENGINE_GR : return G84_FIFO_ENGN_GR; + case NVKM_ENGINE_MPEG : + case NVKM_ENGINE_MSPPP : return G84_FIFO_ENGN_MPEG; + case NVKM_ENGINE_CE : return G84_FIFO_ENGN_CE0; + case NVKM_ENGINE_VP : + case NVKM_ENGINE_MSPDEC: return G84_FIFO_ENGN_VP; + case NVKM_ENGINE_CIPHER: + case NVKM_ENGINE_SEC : return G84_FIFO_ENGN_CIPHER; + case NVKM_ENGINE_BSP : + case NVKM_ENGINE_MSVLD : return G84_FIFO_ENGN_BSP; + case NVKM_ENGINE_DMAOBJ: return G84_FIFO_ENGN_DMA; + default: + WARN_ON(1); + return -1; + } +} + static const struct nvkm_fifo_func g84_fifo = { .dtor = nv50_fifo_dtor, .oneinit = nv50_fifo_oneinit, .init = nv50_fifo_init, .intr = nv04_fifo_intr, + .engine_id = g84_fifo_engine_id, + .id_engine = g84_fifo_id_engine, .pause = nv04_fifo_pause, .start = nv04_fifo_start, .uevent_init = g84_fifo_uevent_init, @@ -56,7 +126,8 @@ g84_fifo = { }; int -g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +g84_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return nv50_fifo_new_(&g84_fifo, device, index, pfifo); + return nv50_fifo_new_(&g84_fifo, device, type, inst, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c index 5a39e51d42d7..8b4f36b3e34b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c @@ -57,7 +57,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo) int nr = 0; int target; - mutex_lock(&subdev->mutex); + mutex_lock(&fifo->base.mutex); cur = fifo->runlist.mem[fifo->runlist.active]; fifo->runlist.active = !fifo->runlist.active; @@ -73,7 +73,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo) case NVKM_MEM_TARGET_VRAM: target = 0; break; case NVKM_MEM_TARGET_NCOH: target = 3; break; default: - mutex_unlock(&subdev->mutex); + 
mutex_unlock(&fifo->base.mutex); WARN_ON(1); return; } @@ -86,59 +86,61 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo) !(nvkm_rd32(device, 0x00227c) & 0x00100000), msecs_to_jiffies(2000)) == 0) nvkm_error(subdev, "runlist update timeout\n"); - mutex_unlock(&subdev->mutex); + mutex_unlock(&fifo->base.mutex); } void gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan) { - mutex_lock(&fifo->base.engine.subdev.mutex); + mutex_lock(&fifo->base.mutex); list_del_init(&chan->head); - mutex_unlock(&fifo->base.engine.subdev.mutex); + mutex_unlock(&fifo->base.mutex); } void gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan) { - mutex_lock(&fifo->base.engine.subdev.mutex); + mutex_lock(&fifo->base.mutex); list_add_tail(&chan->head, &fifo->chan); - mutex_unlock(&fifo->base.engine.subdev.mutex); + mutex_unlock(&fifo->base.mutex); } -static inline int -gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn) +static struct nvkm_engine * +gf100_fifo_id_engine(struct nvkm_fifo *fifo, int engi) { - switch (engn) { - case NVKM_ENGINE_GR : engn = 0; break; - case NVKM_ENGINE_MSVLD : engn = 1; break; - case NVKM_ENGINE_MSPPP : engn = 2; break; - case NVKM_ENGINE_MSPDEC: engn = 3; break; - case NVKM_ENGINE_CE0 : engn = 4; break; - case NVKM_ENGINE_CE1 : engn = 5; break; + enum nvkm_subdev_type type; + int inst; + + switch (engi) { + case GF100_FIFO_ENGN_GR : type = NVKM_ENGINE_GR ; inst = 0; break; + case GF100_FIFO_ENGN_MSPDEC: type = NVKM_ENGINE_MSPDEC; inst = 0; break; + case GF100_FIFO_ENGN_MSPPP : type = NVKM_ENGINE_MSPPP ; inst = 0; break; + case GF100_FIFO_ENGN_MSVLD : type = NVKM_ENGINE_MSVLD ; inst = 0; break; + case GF100_FIFO_ENGN_CE0 : type = NVKM_ENGINE_CE ; inst = 0; break; + case GF100_FIFO_ENGN_CE1 : type = NVKM_ENGINE_CE ; inst = 1; break; + case GF100_FIFO_ENGN_SW : type = NVKM_ENGINE_SW ; inst = 0; break; default: - return -1; + WARN_ON(1); + return NULL; } - return engn; + return nvkm_device_engine(fifo->engine.subdev.device, type, inst); } -static inline struct nvkm_engine * -gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn) +static int +gf100_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine) { - struct nvkm_device *device = fifo->base.engine.subdev.device; - - switch (engn) { - case 0: engn = NVKM_ENGINE_GR; break; - case 1: engn = NVKM_ENGINE_MSVLD; break; - case 2: engn = NVKM_ENGINE_MSPPP; break; - case 3: engn = NVKM_ENGINE_MSPDEC; break; - case 4: engn = NVKM_ENGINE_CE0; break; - case 5: engn = NVKM_ENGINE_CE1; break; + switch (engine->subdev.type) { + case NVKM_ENGINE_GR : return GF100_FIFO_ENGN_GR; + case NVKM_ENGINE_MSPDEC: return GF100_FIFO_ENGN_MSPDEC; + case NVKM_ENGINE_MSPPP : return GF100_FIFO_ENGN_MSPPP; + case NVKM_ENGINE_MSVLD : return GF100_FIFO_ENGN_MSVLD; + case NVKM_ENGINE_CE : return GF100_FIFO_ENGN_CE0 + engine->subdev.inst; + case NVKM_ENGINE_SW : return GF100_FIFO_ENGN_SW; default: - return NULL; + WARN_ON(1); + return -1; } - - return nvkm_device_engine(device, engn); } static void @@ -148,20 +150,17 @@ gf100_fifo_recover_work(struct work_struct *w) struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_engine *engine; unsigned long flags; - u32 engn, engm = 0; - u64 mask, todo; + u32 engm, engn, todo; spin_lock_irqsave(&fifo->base.lock, flags); - mask = fifo->recover.mask; + engm = fifo->recover.mask; fifo->recover.mask = 0ULL; spin_unlock_irqrestore(&fifo->base.lock, flags); - for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) - engm |= 1 << 
gf100_fifo_engidx(fifo, engn); nvkm_mask(device, 0x002630, engm, engm); - for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) { - if ((engine = nvkm_device_engine(device, engn))) { + for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT_ULL(engn)) { + if ((engine = gf100_fifo_id_engine(&fifo->base, engn))) { nvkm_subdev_fini(&engine->subdev, false); WARN_ON(nvkm_subdev_init(&engine->subdev)); } @@ -179,17 +178,18 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine, struct nvkm_subdev *subdev = &fifo->base.engine.subdev; struct nvkm_device *device = subdev->device; u32 chid = chan->base.chid; + int engi = gf100_fifo_engine_id(&fifo->base, engine); nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n", - nvkm_subdev_name[engine->subdev.index], chid); + engine->subdev.name, chid); assert_spin_locked(&fifo->base.lock); nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); list_del_init(&chan->head); chan->killed = true; - if (engine != &fifo->base.engine) - fifo->recover.mask |= 1ULL << engine->subdev.index; + if (engi >= 0 && engi != GF100_FIFO_ENGN_SW) + fifo->recover.mask |= BIT(engi); schedule_work(&fifo->recover.work); nvkm_fifo_kevent(&fifo->base, chid); } @@ -205,8 +205,8 @@ gf100_fifo_fault_engine[] = { { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP }, { 0x13, "PCOUNTER" }, { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC }, - { 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 }, - { 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 }, + { 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 }, + { 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 }, { 0x17, "PMU" }, {} }; @@ -286,7 +286,7 @@ gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); break; default: - engine = nvkm_device_engine(device, eu->data2); + engine = nvkm_device_engine(device, eu->data2, eu->inst); break; } } @@ -335,7 +335,7 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo) if (busy && unk0 && unk1) { list_for_each_entry(chan, &fifo->chan, head) { if (chan->base.chid == chid) { - engine = gf100_fifo_engine(fifo, engn); + engine = gf100_fifo_id_engine(&fifo->base, engn); if (!engine) break; gf100_fifo_recover(fifo, engine, chan); @@ -673,6 +673,8 @@ gf100_fifo = { .fini = gf100_fifo_fini, .intr = gf100_fifo_intr, .fault = gf100_fifo_fault, + .engine_id = gf100_fifo_engine_id, + .id_engine = gf100_fifo_id_engine, .uevent_init = gf100_fifo_uevent_init, .uevent_fini = gf100_fifo_uevent_fini, .chan = { @@ -682,7 +684,8 @@ gf100_fifo = { }; int -gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { struct gf100_fifo *fifo; @@ -692,5 +695,5 @@ gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work); *pfifo = &fifo->base; - return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base); + return nvkm_fifo_ctor(&gf100_fifo, device, type, inst, 128, &fifo->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index c73b7eab776e..e771bd519ee2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -168,12 +168,11 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl) { const struct gk104_fifo_runlist_func *func = fifo->func->runlist; struct gk104_fifo_chan *chan; - struct nvkm_subdev 
*subdev = &fifo->base.engine.subdev; struct nvkm_memory *mem; struct nvkm_fifo_cgrp *cgrp; int nr = 0; - mutex_lock(&subdev->mutex); + mutex_lock(&fifo->base.mutex); mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; fifo->runlist[runl].next = !fifo->runlist[runl].next; @@ -191,27 +190,27 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl) nvkm_done(mem); func->commit(fifo, runl, mem, nr); - mutex_unlock(&subdev->mutex); + mutex_unlock(&fifo->base.mutex); } void gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) { struct nvkm_fifo_cgrp *cgrp = chan->cgrp; - mutex_lock(&fifo->base.engine.subdev.mutex); + mutex_lock(&fifo->base.mutex); if (!list_empty(&chan->head)) { list_del_init(&chan->head); if (cgrp && !--cgrp->chan_nr) list_del_init(&cgrp->head); } - mutex_unlock(&fifo->base.engine.subdev.mutex); + mutex_unlock(&fifo->base.mutex); } void gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) { struct nvkm_fifo_cgrp *cgrp = chan->cgrp; - mutex_lock(&fifo->base.engine.subdev.mutex); + mutex_lock(&fifo->base.mutex); if (cgrp) { if (!cgrp->chan_nr++) list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp); @@ -219,7 +218,7 @@ gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) } else { list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); } - mutex_unlock(&fifo->base.engine.subdev.mutex); + mutex_unlock(&fifo->base.mutex); } void @@ -259,6 +258,33 @@ gk104_fifo_pbdma = { .init = gk104_fifo_pbdma_init, }; +struct nvkm_engine * +gk104_fifo_id_engine(struct nvkm_fifo *base, int engi) +{ + if (engi == GK104_FIFO_ENGN_SW) + return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0); + + return gk104_fifo(base)->engine[engi].engine; +} + +int +gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine) +{ + struct gk104_fifo *fifo = gk104_fifo(base); + int engn; + + if (engine->subdev.type == NVKM_ENGINE_SW) + return GK104_FIFO_ENGN_SW; + + for (engn = 0; engn < fifo->engine_nr && engine; engn++) { + if (fifo->engine[engn].engine == engine) + return engn; + } + + WARN_ON(1); + return -1; +} + static void gk104_fifo_recover_work(struct work_struct *w) { @@ -410,11 +436,12 @@ gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn) * called from the fault handler already. 
*/ if (!status.faulted && engine) { - mmui = nvkm_top_fault_id(device, engine->subdev.index); + mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst); if (mmui < 0) { const struct nvkm_enum *en = fifo->func->fault.engine; for (; en && en->name; en++) { - if (en->data2 == engine->subdev.index) { + if (en->data2 == engine->subdev.type && + en->inst == engine->subdev.inst) { mmui = en->value; break; } @@ -459,8 +486,8 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) struct nvkm_engine *engine = NULL; struct nvkm_fifo_chan *chan; unsigned long flags; - char ct[8] = "HUB/", en[16] = ""; - int engn; + const char *en = ""; + char ct[8] = "HUB/"; er = nvkm_enum_find(fifo->func->fault.reason, info->reason); ee = nvkm_enum_find(fifo->func->fault.engine, info->engine); @@ -484,23 +511,20 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); break; default: - engine = nvkm_device_engine(device, ee->data2); + engine = nvkm_device_engine(device, ee->data2, 0); break; } } if (ee == NULL) { - enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine); - if (engidx < NVKM_SUBDEV_NR) { - const char *src = nvkm_subdev_name[engidx]; - char *dst = en; - do { - *dst++ = toupper(*src++); - } while(*src); - engine = nvkm_device_engine(device, engidx); + struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine); + if (subdev) { + if (subdev->func == &nvkm_engine) + engine = container_of(subdev, typeof(*engine), subdev); + en = engine->subdev.name; } } else { - snprintf(en, sizeof(en), "%s", ee->name); + en = ee->name; } spin_lock_irqsave(&fifo->base.lock, flags); @@ -523,11 +547,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) * correct engine(s), but just in case we can't find the channel * information... */ - for (engn = 0; engn < fifo->engine_nr && engine; engn++) { - if (fifo->engine[engn].engine == engine) { + if (engine) { + int engn = fifo->base.func->engine_id(&fifo->base, engine); + if (engn >= 0 && engn != GK104_FIFO_ENGN_SW) gk104_fifo_recover_engn(fifo, engn); - break; - } } spin_unlock_irqrestore(&fifo->base.lock, flags); @@ -864,19 +887,41 @@ gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data) { struct gk104_fifo *fifo = gk104_fifo(base); switch (mthd) { - case NV_DEVICE_FIFO_RUNLISTS: + case NV_DEVICE_HOST_RUNLISTS: *data = (1ULL << fifo->runlist_nr) - 1; return 0; - case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)... 
- NV_DEVICE_FIFO_RUNLIST_ENGINES(63): { - int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn; - if (runl < fifo->runlist_nr) { - unsigned long engm = fifo->runlist[runl].engm; + case NV_DEVICE_HOST_RUNLIST_ENGINES: { + if (*data < fifo->runlist_nr) { + unsigned long engm = fifo->runlist[*data].engm; struct nvkm_engine *engine; + int engn; *data = 0; for_each_set_bit(engn, &engm, fifo->engine_nr) { - if ((engine = fifo->engine[engn].engine)) - *data |= BIT_ULL(engine->subdev.index); + if ((engine = fifo->engine[engn].engine)) { +#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break + switch (engine->subdev.type) { + CASE(SW ); + CASE(GR ); + CASE(MPEG ); + CASE(ME ); + CASE(CIPHER); + CASE(BSP ); + CASE(VP ); + CASE(CE ); + CASE(SEC ); + CASE(MSVLD ); + CASE(MSPDEC); + CASE(MSPPP ); + CASE(MSENC ); + CASE(VIC ); + CASE(SEC2 ); + CASE(NVDEC ); + CASE(NVENC ); + default: + WARN_ON(1); + break; + } + } } return 0; } @@ -894,8 +939,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) struct nvkm_subdev *subdev = &fifo->base.engine.subdev; struct nvkm_device *device = subdev->device; struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device); - int engn, runl, pbid, ret, i, j; - enum nvkm_devidx engidx; + struct nvkm_top_device *tdev; + int pbid, ret, i, j; u32 *map; fifo->pbdma_nr = fifo->func->pbdma->nr(fifo); @@ -909,25 +954,41 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04)); /* Determine runlist configuration from topology device info. */ - i = 0; - while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) { + list_for_each_entry(tdev, &device->top->device, head) { + const int engn = tdev->engine; + char _en[16], *en; + + if (engn < 0) + continue; + /* Determine which PBDMA handles requests for this engine. 
*/ for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { - if (map[j] & (1 << runl)) { + if (map[j] & BIT(tdev->runlist)) { pbid = j; break; } } + fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst); + if (!fifo->engine[engn].engine) { + snprintf(_en, sizeof(_en), "%s, %d", + nvkm_subdev_type[tdev->type], tdev->inst); + en = _en; + } else { + en = fifo->engine[engn].engine->subdev.name; + } + nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n", - engn, runl, pbid, nvkm_subdev_name[engidx]); + tdev->engine, tdev->runlist, pbid, en); - fifo->engine[engn].engine = nvkm_device_engine(device, engidx); - fifo->engine[engn].runl = runl; + fifo->engine[engn].runl = tdev->runlist; fifo->engine[engn].pbid = pbid; fifo->engine_nr = max(fifo->engine_nr, engn + 1); - fifo->runlist[runl].engm |= 1 << engn; - fifo->runlist_nr = max(fifo->runlist_nr, runl + 1); + fifo->runlist[tdev->runlist].engm |= BIT(engn); + fifo->runlist[tdev->runlist].engm_sw |= BIT(engn); + if (tdev->type == NVKM_ENGINE_GR) + fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW); + fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1); } kfree(map); @@ -1021,6 +1082,8 @@ gk104_fifo_ = { .fini = gk104_fifo_fini, .intr = gk104_fifo_intr, .fault = gk104_fifo_fault, + .engine_id = gk104_fifo_engine_id, + .id_engine = gk104_fifo_id_engine, .uevent_init = gk104_fifo_uevent_init, .uevent_fini = gk104_fifo_uevent_fini, .recover_chan = gk104_fifo_recover_chan, @@ -1030,7 +1093,7 @@ gk104_fifo_ = { int gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device, - int index, int nr, struct nvkm_fifo **pfifo) + enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo) { struct gk104_fifo *fifo; @@ -1040,7 +1103,7 @@ gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device, INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work); *pfifo = &fifo->base; - return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base); + return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base); } const struct nvkm_enum @@ -1072,12 +1135,12 @@ gk104_fifo_fault_engine[] = { { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP }, { 0x13, "PERF" }, { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC }, - { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 }, - { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 }, + { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 }, + { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 }, { 0x17, "PMU" }, { 0x18, "PTP" }, { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC }, - { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 }, + { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 }, {} }; @@ -1179,7 +1242,8 @@ gk104_fifo = { }; int -gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo); + return gk104_fifo_new_(&gk104_fifo, device, type, inst, 4096, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h index 4398b340e514..f2d12ae73944 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h @@ -35,6 +35,7 @@ struct gk104_fifo { struct list_head cgrp; struct list_head chan; u32 engm; + u32 engm_sw; } runlist[16]; int runlist_nr; @@ -99,7 +100,7 @@ struct gk104_fifo_engine_status { } prev, next, *chan; }; -int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, +int 
gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int index, int nr, struct nvkm_fifo **); void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *); void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c index f820969e4405..915278c7e012 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c @@ -60,7 +60,8 @@ gk110_fifo = { }; int -gk110_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gk110_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gk110_fifo, device, index, 4096, pfifo); + return gk104_fifo_new_(&gk110_fifo, device, type, inst, 4096, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c index 2f54787b5fd0..cb703693de52 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c @@ -57,7 +57,8 @@ gk208_fifo = { }; int -gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gk208_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo); + return gk104_fifo_new_(&gk208_fifo, device, type, inst, 1024, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c index a814c4e0ed3e..6e35cf44c640 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c @@ -38,7 +38,8 @@ gk20a_fifo = { }; int -gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gk20a_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo); + return gk104_fifo_new_(&gk20a_fifo, device, type, inst, 128, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c index c2a2e4572f6c..7af6e687d474 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c @@ -106,7 +106,8 @@ gm107_fifo = { }; int -gm107_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gm107_fifo, device, index, 2048, pfifo); + return gk104_fifo_new_(&gm107_fifo, device, type, inst, 2048, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c index b8cfe3b28c4f..573658cb6c73 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c @@ -54,7 +54,8 @@ gm200_fifo = { }; int -gm200_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gm200_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gm200_fifo, device, index, 4096, pfifo); + return gk104_fifo_new_(&gm200_fifo, device, type, inst, 4096, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c index 70b4feebc1fa..556c97e54f14 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c @@ -38,7 +38,8 @@ gm20b_fifo = { }; int -gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gm20b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo); + return gk104_fifo_new_(&gm20b_fifo, device, type, inst, 512, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c index 2c7a0176b3c8..6b46b6b65b87 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c @@ -91,7 +91,8 @@ gp100_fifo = { }; int -gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gp100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo); + return gk104_fifo_new_(&gp100_fifo, device, type, inst, 4096, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c index 8c65ad4feedb..7a5929cb4d29 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c @@ -39,7 +39,8 @@ gp10b_fifo = { }; int -gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gp10b_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo); + return gk104_fifo_new_(&gp10b_fifo, device, type, inst, 512, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c index 75f9632789b3..4e78bbe3b94b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c @@ -52,11 +52,10 @@ gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type, static u32 gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) { - switch (engine->subdev.index) { + switch (engine->subdev.type) { case NVKM_ENGINE_SW : return 0; case NVKM_ENGINE_GR : return 0x0210; - case NVKM_ENGINE_CE0 : return 0x0230; - case NVKM_ENGINE_CE1 : return 0x0240; + case NVKM_ENGINE_CE : return 0x0230 + (engine->subdev.inst * 0x10); case NVKM_ENGINE_MSPDEC: return 0x0250; case NVKM_ENGINE_MSPPP : return 0x0260; case NVKM_ENGINE_MSVLD : return 0x0270; @@ -66,6 +65,15 @@ gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) } } +static struct gf100_fifo_engn * +gf100_fifo_gpfifo_engine(struct gf100_fifo_chan *chan, struct nvkm_engine *engine) +{ + int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine); + if (engi >= 0) + return &chan->engn[engi]; + return NULL; +} + static int gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, struct nvkm_engine *engine, bool suspend) @@ -77,7 +85,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, struct nvkm_gpuobj *inst = chan->base.inst; int ret = 0; - mutex_lock(&subdev->mutex); + mutex_lock(&chan->fifo->base.mutex); nvkm_wr32(device, 0x002634, chan->base.chid); if (nvkm_msec(device, 2000, if (nvkm_rd32(device, 0x002634) == chan->base.chid) @@ -87,7 +95,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, 
chan->base.chid, chan->base.object.client->name); ret = -ETIMEDOUT; } - mutex_unlock(&subdev->mutex); + mutex_unlock(&chan->fifo->base.mutex); if (ret && suspend) return ret; @@ -108,13 +116,13 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base, { const u32 offset = gf100_fifo_gpfifo_engine_addr(engine); struct gf100_fifo_chan *chan = gf100_fifo_chan(base); + struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine); struct nvkm_gpuobj *inst = chan->base.inst; if (offset) { - u64 addr = chan->engn[engine->subdev.index].vma->addr; nvkm_kmap(inst); - nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4); - nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr)); + nvkm_wo32(inst, offset + 0x00, lower_32_bits(engn->vma->addr) | 4); + nvkm_wo32(inst, offset + 0x04, upper_32_bits(engn->vma->addr)); nvkm_done(inst); } @@ -126,8 +134,9 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct gf100_fifo_chan *chan = gf100_fifo_chan(base); - nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma); - nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst); + struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine); + nvkm_vmm_put(chan->base.vmm, &engn->vma); + nvkm_gpuobj_del(&engn->inst); } static int @@ -136,23 +145,21 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base, struct nvkm_object *object) { struct gf100_fifo_chan *chan = gf100_fifo_chan(base); - int engn = engine->subdev.index; + struct gf100_fifo_engn *engn = gf100_fifo_gpfifo_engine(chan, engine); int ret; if (!gf100_fifo_gpfifo_engine_addr(engine)) return 0; - ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst); + ret = nvkm_object_bind(object, NULL, 0, &engn->inst); if (ret) return ret; - ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size, - &chan->engn[engn].vma); + ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma); if (ret) return ret; - return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm, - chan->engn[engn].vma, NULL, 0); + return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0); } static void @@ -243,13 +250,13 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base, 0x1000, 0x1000, true, args->v0.vmm, 0, - (1ULL << NVKM_ENGINE_CE0) | - (1ULL << NVKM_ENGINE_CE1) | - (1ULL << NVKM_ENGINE_GR) | - (1ULL << NVKM_ENGINE_MSPDEC) | - (1ULL << NVKM_ENGINE_MSPPP) | - (1ULL << NVKM_ENGINE_MSVLD) | - (1ULL << NVKM_ENGINE_SW), + BIT(GF100_FIFO_ENGN_GR) | + BIT(GF100_FIFO_ENGN_MSPDEC) | + BIT(GF100_FIFO_ENGN_MSPPP) | + BIT(GF100_FIFO_ENGN_MSVLD) | + BIT(GF100_FIFO_ENGN_CE0) | + BIT(GF100_FIFO_ENGN_CE1) | + BIT(GF100_FIFO_ENGN_SW), 1, fifo->user.bar->addr, 0x1000, oclass, &chan->base); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c index 728a1edbf98c..b6900a52bcce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c @@ -65,19 +65,18 @@ int gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan) { int ret; - mutex_lock(&chan->base.fifo->engine.subdev.mutex); + mutex_lock(&chan->base.fifo->mutex); ret = gk104_fifo_gpfifo_kick_locked(chan); - mutex_unlock(&chan->base.fifo->engine.subdev.mutex); + mutex_unlock(&chan->base.fifo->mutex); return ret; } static u32 gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) { - switch 
(engine->subdev.index) { + switch (engine->subdev.type) { case NVKM_ENGINE_SW : - case NVKM_ENGINE_CE0...NVKM_ENGINE_CE_LAST: - return 0; + case NVKM_ENGINE_CE : return 0; case NVKM_ENGINE_GR : return 0x0210; case NVKM_ENGINE_SEC : return 0x0220; case NVKM_ENGINE_MSPDEC: return 0x0250; @@ -85,15 +84,26 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine) case NVKM_ENGINE_MSVLD : return 0x0270; case NVKM_ENGINE_VIC : return 0x0280; case NVKM_ENGINE_MSENC : return 0x0290; - case NVKM_ENGINE_NVDEC0: return 0x02100270; - case NVKM_ENGINE_NVENC0: return 0x02100290; - case NVKM_ENGINE_NVENC1: return 0x0210; + case NVKM_ENGINE_NVDEC : return 0x02100270; + case NVKM_ENGINE_NVENC : + if (engine->subdev.inst) + return 0x0210; + return 0x02100290; default: WARN_ON(1); return 0; } } +struct gk104_fifo_engn * +gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *chan, struct nvkm_engine *engine) +{ + int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine); + if (engi >= 0) + return &chan->engn[engi]; + return NULL; +} + static int gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, struct nvkm_engine *engine, bool suspend) @@ -126,13 +136,13 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct gk104_fifo_chan *chan = gk104_fifo_chan(base); + struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine); struct nvkm_gpuobj *inst = chan->base.inst; u32 offset = gk104_fifo_gpfifo_engine_addr(engine); if (offset) { - u64 addr = chan->engn[engine->subdev.index].vma->addr; - u32 datalo = lower_32_bits(addr) | 0x00000004; - u32 datahi = upper_32_bits(addr); + u32 datalo = lower_32_bits(engn->vma->addr) | 0x00000004; + u32 datahi = upper_32_bits(engn->vma->addr); nvkm_kmap(inst); nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo); nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi); @@ -151,8 +161,9 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct gk104_fifo_chan *chan = gk104_fifo_chan(base); - nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma); - nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst); + struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine); + nvkm_vmm_put(chan->base.vmm, &engn->vma); + nvkm_gpuobj_del(&engn->inst); } int @@ -161,23 +172,21 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base, struct nvkm_object *object) { struct gk104_fifo_chan *chan = gk104_fifo_chan(base); - int engn = engine->subdev.index; + struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine); int ret; if (!gk104_fifo_gpfifo_engine_addr(engine)) return 0; - ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst); + ret = nvkm_object_bind(object, NULL, 0, &engn->inst); if (ret) return ret; - ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size, - &chan->engn[engn].vma); + ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma); if (ret) return ret; - return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm, - chan->engn[engn].vma, NULL, 0); + return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0); } void @@ -247,23 +256,12 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid, { struct gk104_fifo_chan *chan; int runlist = ffs(*runlists) -1, ret, i; - unsigned long engm; - u64 subdevs = 0; u64 usermem; if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr) return -EINVAL; *runlists = BIT_ULL(runlist); - engm = fifo->runlist[runlist].engm; - for_each_set_bit(i, 
&engm, fifo->engine_nr) { - if (fifo->engine[i].engine) - subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index); - } - - if (subdevs & BIT_ULL(NVKM_ENGINE_GR)) - subdevs |= BIT_ULL(NVKM_ENGINE_SW); - /* Allocate the channel. */ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) return -ENOMEM; @@ -273,7 +271,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid, INIT_LIST_HEAD(&chan->head); ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base, - 0x1000, 0x1000, true, vmm, 0, subdevs, + 0x1000, 0x1000, true, vmm, 0, fifo->runlist[runlist].engm_sw, 1, fifo->user.bar->addr, 0x200, oclass, &chan->base); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c index a7462cf59d65..ee4967b706a7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c @@ -44,7 +44,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid int ret; /* Block runlist to prevent the channel from being rescheduled. */ - mutex_lock(&subdev->mutex); + mutex_lock(&chan->fifo->base.mutex); nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl)); /* Preempt the channel. */ @@ -58,7 +58,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid /* Resume runlist. */ nvkm_mask(device, 0x002630, BIT(chan->runl), 0); - mutex_unlock(&subdev->mutex); + mutex_unlock(&chan->fifo->base.mutex); return ret; } @@ -70,8 +70,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, struct nvkm_gpuobj *inst = chan->base.inst; int ret; - if (engine->subdev.index >= NVKM_ENGINE_CE0 && - engine->subdev.index <= NVKM_ENGINE_CE_LAST) + if (engine->subdev.type == NVKM_ENGINE_CE) return gk104_fifo_gpfifo_kick(chan); ret = gv100_fifo_gpfifo_engine_valid(chan, false, false); @@ -90,17 +89,15 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct gk104_fifo_chan *chan = gk104_fifo_chan(base); + struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine); struct nvkm_gpuobj *inst = chan->base.inst; - u64 addr; - if (engine->subdev.index >= NVKM_ENGINE_CE0 && - engine->subdev.index <= NVKM_ENGINE_CE_LAST) + if (engine->subdev.type == NVKM_ENGINE_CE) return 0; - addr = chan->engn[engine->subdev.index].vma->addr; nvkm_kmap(inst); - nvkm_wo32(inst, 0x210, lower_32_bits(addr) | 0x00000004); - nvkm_wo32(inst, 0x214, upper_32_bits(addr)); + nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004); + nvkm_wo32(inst, 0x214, upper_32_bits(engn->vma->addr)); nvkm_done(inst); return gv100_fifo_gpfifo_engine_valid(chan, false, true); @@ -129,8 +126,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func, struct nvkm_device *device = fifo->base.engine.subdev.device; struct gk104_fifo_chan *chan; int runlist = ffs(*runlists) -1, ret, i; - unsigned long engm; - u64 subdevs = 0; u64 usermem, mthd; u32 size; @@ -138,12 +133,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func, return -EINVAL; *runlists = BIT_ULL(runlist); - engm = fifo->runlist[runlist].engm; - for_each_set_bit(i, &engm, fifo->engine_nr) { - if (fifo->engine[i].engine) - subdevs |= BIT_ULL(fifo->engine[i].engine->subdev.index); - } - /* Allocate the channel. 
*/ if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) return -ENOMEM; @@ -153,7 +142,7 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func, INIT_LIST_HEAD(&chan->head); ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm, - 0, subdevs, 1, fifo->user.bar->addr, 0x200, + 0, fifo->runlist[runlist].engm, 1, fifo->user.bar->addr, 0x200, oclass, &chan->base); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c index 6ee1bb32a071..70e16a91ac12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c @@ -301,7 +301,8 @@ gv100_fifo = { }; int -gv100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +gv100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return gk104_fifo_new_(&gv100_fifo, device, index, 4096, pfifo); + return gk104_fifo_new_(&gv100_fifo, device, type, inst, 4096, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index c1d1b1aa5bc6..c6730c124769 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c @@ -94,6 +94,38 @@ __releases(fifo->base.lock) spin_unlock_irqrestore(&fifo->base.lock, flags); } +struct nvkm_engine * +nv04_fifo_id_engine(struct nvkm_fifo *fifo, int engi) +{ + enum nvkm_subdev_type type; + + switch (engi) { + case NV04_FIFO_ENGN_SW : type = NVKM_ENGINE_SW; break; + case NV04_FIFO_ENGN_GR : type = NVKM_ENGINE_GR; break; + case NV04_FIFO_ENGN_MPEG: type = NVKM_ENGINE_MPEG; break; + case NV04_FIFO_ENGN_DMA : type = NVKM_ENGINE_DMAOBJ; break; + default: + WARN_ON(1); + return NULL; + } + + return nvkm_device_engine(fifo->engine.subdev.device, type, 0); +} + +int +nv04_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine) +{ + switch (engine->subdev.type) { + case NVKM_ENGINE_SW : return NV04_FIFO_ENGN_SW; + case NVKM_ENGINE_GR : return NV04_FIFO_ENGN_GR; + case NVKM_ENGINE_MPEG : return NV04_FIFO_ENGN_MPEG; + case NVKM_ENGINE_DMAOBJ: return NV04_FIFO_ENGN_DMA; + default: + WARN_ON(1); + return 0; + } +} + static const char * nv_dma_state_err(u32 state) { @@ -326,7 +358,7 @@ nv04_fifo_init(struct nvkm_fifo *base) int nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, - int index, int nr, const struct nv04_fifo_ramfc *ramfc, + enum nvkm_subdev_type type, int inst, int nr, const struct nv04_fifo_ramfc *ramfc, struct nvkm_fifo **pfifo) { struct nv04_fifo *fifo; @@ -337,7 +369,7 @@ nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, fifo->ramfc = ramfc; *pfifo = &fifo->base; - ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base); + ret = nvkm_fifo_ctor(func, device, type, inst, nr, &fifo->base); if (ret) return ret; @@ -349,6 +381,8 @@ static const struct nvkm_fifo_func nv04_fifo = { .init = nv04_fifo_init, .intr = nv04_fifo_intr, + .engine_id = nv04_fifo_engine_id, + .id_engine = nv04_fifo_id_engine, .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { @@ -358,8 +392,8 @@ nv04_fifo = { }; int -nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return nv04_fifo_new_(&nv04_fifo, device, index, 16, - nv04_fifo_ramfc, pfifo); + return nv04_fifo_new_(&nv04_fifo, device, type, inst, 
16, nv04_fifo_ramfc, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h index e5ecceee77ae..3f23bcde4a54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h @@ -17,8 +17,7 @@ struct nv04_fifo { const struct nv04_fifo_ramfc *ramfc; }; -int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, - int index, int nr, const struct nv04_fifo_ramfc *, - struct nvkm_fifo **); +int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + int nr, const struct nv04_fifo_ramfc *, struct nvkm_fifo **); void nv04_fifo_init(struct nvkm_fifo *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c index f9a87deb2b3d..f8887f0f2f82 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c @@ -43,6 +43,8 @@ static const struct nvkm_fifo_func nv10_fifo = { .init = nv04_fifo_init, .intr = nv04_fifo_intr, + .engine_id = nv04_fifo_engine_id, + .id_engine = nv04_fifo_id_engine, .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { @@ -52,8 +54,8 @@ nv10_fifo = { }; int -nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return nv04_fifo_new_(&nv10_fifo, device, index, 32, - nv10_fifo_ramfc, pfifo); + return nv04_fifo_new_(&nv10_fifo, device, type, inst, 32, nv10_fifo_ramfc, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c index f6d383a21222..3f94c7b5b054 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c @@ -81,6 +81,8 @@ static const struct nvkm_fifo_func nv17_fifo = { .init = nv17_fifo_init, .intr = nv04_fifo_intr, + .engine_id = nv04_fifo_engine_id, + .id_engine = nv04_fifo_id_engine, .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { @@ -90,8 +92,8 @@ nv17_fifo = { }; int -nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return nv04_fifo_new_(&nv17_fifo, device, index, 32, - nv17_fifo_ramfc, pfifo); + return nv04_fifo_new_(&nv17_fifo, device, type, inst, 32, nv17_fifo_ramfc, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c index 2d61fd832ddb..f9ea46809bc0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c @@ -112,6 +112,8 @@ static const struct nvkm_fifo_func nv40_fifo = { .init = nv40_fifo_init, .intr = nv04_fifo_intr, + .engine_id = nv04_fifo_engine_id, + .id_engine = nv04_fifo_id_engine, .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { @@ -121,8 +123,8 @@ nv40_fifo = { }; int -nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return nv04_fifo_new_(&nv40_fifo, device, index, 32, - nv40_fifo_ramfc, pfifo); + return nv04_fifo_new_(&nv40_fifo, device, type, inst, 32, nv40_fifo_ramfc, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c index fa6e094d8068..be94156ea248 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c @@ -51,9 +51,9 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo) void nv50_fifo_runlist_update(struct nv50_fifo *fifo) { - mutex_lock(&fifo->base.engine.subdev.mutex); + mutex_lock(&fifo->base.mutex); nv50_fifo_runlist_update_locked(fifo); - mutex_unlock(&fifo->base.engine.subdev.mutex); + mutex_unlock(&fifo->base.mutex); } int @@ -107,7 +107,7 @@ nv50_fifo_dtor(struct nvkm_fifo *base) int nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, - int index, struct nvkm_fifo **pfifo) + enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo) { struct nv50_fifo *fifo; int ret; @@ -116,7 +116,7 @@ nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device, return -ENOMEM; *pfifo = &fifo->base; - ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base); + ret = nvkm_fifo_ctor(func, device, type, inst, 128, &fifo->base); if (ret) return ret; @@ -131,6 +131,8 @@ nv50_fifo = { .oneinit = nv50_fifo_oneinit, .init = nv50_fifo_init, .intr = nv04_fifo_intr, + .engine_id = nv04_fifo_engine_id, + .id_engine = nv04_fifo_id_engine, .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { @@ -141,7 +143,8 @@ nv50_fifo = { }; int -nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { - return nv50_fifo_new_(&nv50_fifo, device, index, pfifo); + return nv50_fifo_new_(&nv50_fifo, device, type, inst, pfifo); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h index 87d30b6bd2ea..0111e7e5a4e3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h @@ -10,8 +10,8 @@ struct nv50_fifo { int cur_runlist; }; -int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, - int index, struct nvkm_fifo **); +int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_fifo **); void *nv50_fifo_dtor(struct nvkm_fifo *); int nv50_fifo_oneinit(struct nvkm_fifo *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h index 0ef8baab513e..899272801a8b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h @@ -4,8 +4,8 @@ #define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine) #include <engine/fifo.h> -int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, - int index, int nr, struct nvkm_fifo *); +int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + int nr, struct nvkm_fifo *); void nvkm_fifo_uevent(struct nvkm_fifo *); void nvkm_fifo_cevent(struct nvkm_fifo *); void nvkm_fifo_kevent(struct nvkm_fifo *, int chid); @@ -23,6 +23,8 @@ struct nvkm_fifo_func { void (*fini)(struct nvkm_fifo *); void (*intr)(struct nvkm_fifo *); void (*fault)(struct nvkm_fifo *, struct nvkm_fault_data *); + int (*engine_id)(struct nvkm_fifo *, struct nvkm_engine *); + struct nvkm_engine *(*id_engine)(struct nvkm_fifo *, int engi); void (*pause)(struct nvkm_fifo *, unsigned long *); void (*start)(struct nvkm_fifo *, unsigned long *); void (*uevent_init)(struct nvkm_fifo *); @@ -35,8 +37,13 
@@ struct nvkm_fifo_func { }; void nv04_fifo_intr(struct nvkm_fifo *); +int nv04_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *); +struct nvkm_engine *nv04_fifo_id_engine(struct nvkm_fifo *, int); void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *); void nv04_fifo_start(struct nvkm_fifo *, unsigned long *); void gf100_fifo_intr_fault(struct nvkm_fifo *, int); + +int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *); +struct nvkm_engine *gk104_fifo_id_engine(struct nvkm_fifo *, int); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c index 14e5b70e0255..e417044cc347 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c @@ -278,7 +278,8 @@ tu102_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) struct nvkm_engine *engine = NULL; struct nvkm_fifo_chan *chan; unsigned long flags; - char ct[8] = "HUB/", en[16] = ""; + const char *en = ""; + char ct[8] = "HUB/"; int engn; er = nvkm_enum_find(fifo->func->fault.reason, info->reason); @@ -303,25 +304,20 @@ tu102_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) nvkm_mask(device, 0x001718, 0x00000000, 0x00000000); break; default: - engine = nvkm_device_engine(device, ee->data2); + engine = nvkm_device_engine(device, ee->data2, 0); break; } } if (ee == NULL) { - enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine); - - if (engidx < NVKM_SUBDEV_NR) { - const char *src = nvkm_subdev_name[engidx]; - char *dst = en; - - do { - *dst++ = toupper(*src++); - } while (*src); - engine = nvkm_device_engine(device, engidx); + struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine); + if (subdev) { + if (subdev->func == &nvkm_engine) + engine = container_of(subdev, typeof(*engine), subdev); + en = engine->subdev.name; } } else { - snprintf(en, sizeof(en), "%s", ee->name); + en = ee->name; } spin_lock_irqsave(&fifo->base.lock, flags); @@ -456,6 +452,8 @@ tu102_fifo_ = { .fini = gk104_fifo_fini, .intr = tu102_fifo_intr, .fault = tu102_fifo_fault, + .engine_id = gk104_fifo_engine_id, + .id_engine = gk104_fifo_id_engine, .uevent_init = gk104_fifo_uevent_init, .uevent_fini = gk104_fifo_uevent_fini, .recover_chan = tu102_fifo_recover_chan, @@ -464,7 +462,8 @@ tu102_fifo_ = { }; int -tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) { struct gk104_fifo *fifo; @@ -474,5 +473,5 @@ tu102_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) INIT_WORK(&fifo->recover.work, tu102_fifo_recover_work); *pfifo = &fifo->base; - return nvkm_fifo_ctor(&tu102_fifo_, device, index, 4096, &fifo->base); + return nvkm_fifo_ctor(&tu102_fifo_, device, type, inst, 4096, &fifo->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c index d41fb94524e9..61759f54406e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c @@ -175,8 +175,8 @@ nvkm_gr = { int nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device, - int index, bool enable, struct nvkm_gr *gr) + enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_gr *gr) { gr->func = func; - return nvkm_engine_ctor(&nvkm_gr, device, index, enable, &gr->engine); + return nvkm_engine_ctor(&nvkm_gr, device, type, inst, enable, &gr->engine); } 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c index da1ba74682b4..65c332118fd6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c @@ -192,7 +192,7 @@ g84_gr = { }; int -g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +g84_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv50_gr_new_(&g84_gr, device, index, pgr); + return nv50_gr_new_(&g84_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 749f73fc45a8..397ff4fe9df8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -2087,8 +2087,8 @@ gf100_gr_flcn = { }; int -gf100_gr_new_(const struct gf100_gr_fwif *fwif, - struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gf100_gr_new_(const struct gf100_gr_fwif *fwif, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { struct gf100_gr *gr; int ret; @@ -2097,7 +2097,7 @@ gf100_gr_new_(const struct gf100_gr_fwif *fwif, return -ENOMEM; *pgr = &gr->base; - ret = nvkm_gr_ctor(&gf100_gr_, device, index, true, &gr->base); + ret = nvkm_gr_ctor(&gf100_gr_, device, type, inst, true, &gr->base); if (ret) return ret; @@ -2483,7 +2483,7 @@ gf100_gr_fwif[] = { }; int -gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gf100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gf100_gr_fwif, device, index, pgr); + return gf100_gr_new_(gf100_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index dfd5dd74f0d5..c0038f906135 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -416,6 +416,6 @@ void gm20b_gr_acr_bld_patch(struct nvkm_acr *, u32, s64); extern const struct nvkm_acr_lsf_func gp108_gr_gpccs_acr; extern const struct nvkm_acr_lsf_func gp108_gr_fecs_acr; -int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, int, +int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c index 0536fe8b2b92..3acd99c306f2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c @@ -152,7 +152,7 @@ gf104_gr_fwif[] = { }; int -gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gf104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gf104_gr_fwif, device, index, pgr); + return gf100_gr_new_(gf104_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c index 14284b06112f..030640bb3dca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c @@ -151,7 +151,7 @@ gf108_gr_fwif[] = { }; int -gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gf108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gf108_gr_fwif, device, 
index, pgr); + return gf100_gr_new_(gf108_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c index 280752551a3a..616e2def1865 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c @@ -127,7 +127,7 @@ gf110_gr_fwif[] = { }; int -gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gf110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gf110_gr_fwif, device, index, pgr); + return gf100_gr_new_(gf110_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c index 235c3fbe4b95..669e7536970e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c @@ -192,7 +192,7 @@ gf117_gr_fwif[] = { }; int -gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gf117_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gf117_gr_fwif, device, index, pgr); + return gf100_gr_new_(gf117_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c index 7eac385ece97..5b09bda8110c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c @@ -218,7 +218,7 @@ gf119_gr_fwif[] = { }; int -gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gf119_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gf119_gr_fwif, device, index, pgr); + return gf100_gr_new_(gf119_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c index 89f51d76082b..b680eaa0f350 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c @@ -497,7 +497,7 @@ gk104_gr_fwif[] = { }; int -gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gk104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gk104_gr_fwif, device, index, pgr); + return gf100_gr_new_(gk104_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c index 735f05e54d62..103e06a77e65 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c @@ -393,7 +393,7 @@ gk110_gr_fwif[] = { }; int -gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gk110_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gk110_gr_fwif, device, index, pgr); + return gf100_gr_new_(gk110_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c index adc971be8f3b..034d0b11a17d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c @@ -144,7 +144,8 @@ gk110b_gr_fwif[] = { }; int -gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gk110b_gr_new(struct nvkm_device *device, enum 
nvkm_subdev_type type, int inst, + struct nvkm_gr **pgr) { - return gf100_gr_new_(gk110b_gr_fwif, device, index, pgr); + return gf100_gr_new_(gk110b_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c index aa0eff6795ac..116d682f9f96 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c @@ -202,7 +202,7 @@ gk208_gr_fwif[] = { }; int -gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gk208_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gk208_gr_fwif, device, index, pgr); + return gf100_gr_new_(gk208_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c index 6d4d72851610..be0b2cefd8e8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c @@ -357,7 +357,7 @@ gk20a_gr_fwif[] = { }; int -gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gk20a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gk20a_gr_fwif, device, index, pgr); + return gf100_gr_new_(gk20a_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c index 09bb78ba9d00..310987174cb5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c @@ -437,7 +437,7 @@ gm107_gr_fwif[] = { }; int -gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gm107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gm107_gr_fwif, device, index, pgr); + return gf100_gr_new_(gm107_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c index 815137047518..5c38ff0fe7f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c @@ -288,7 +288,7 @@ gm200_gr_fwif[] = { }; int -gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gm200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gm200_gr_fwif, device, index, pgr); + return gf100_gr_new_(gm200_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c index 1aab691fa71c..ec1c46e47e00 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c @@ -181,7 +181,7 @@ gm20b_gr_fwif[] = { }; int -gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gm20b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gm20b_gr_fwif, device, index, pgr); + return gf100_gr_new_(gm20b_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c index ddba7ce937c7..0550dd6f46f1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c @@ -156,7 +156,7 @@ gp100_gr_fwif[] = { }; int -gp100_gr_new(struct nvkm_device 
*device, int index, struct nvkm_gr **pgr) +gp100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gp100_gr_fwif, device, index, pgr); + return gf100_gr_new_(gp100_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c index c083f3757ff7..5b001f374be0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c @@ -152,7 +152,7 @@ gp102_gr_fwif[] = { }; int -gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gp102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gp102_gr_fwif, device, index, pgr); + return gf100_gr_new_(gp102_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c index f6a31e9a8cc8..2655574ec63b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp104.c @@ -93,7 +93,7 @@ gp104_gr_fwif[] = { }; int -gp104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gp104_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gp104_gr_fwif, device, index, pgr); + return gf100_gr_new_(gp104_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c index 2c80c6a75b56..adabc04d4f3a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c @@ -82,7 +82,7 @@ gp107_gr_fwif[] = { }; int -gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gp107_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gp107_gr_fwif, device, index, pgr); + return gf100_gr_new_(gp107_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c index 2be8f416dd6f..7310f0466bb7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp108.c @@ -92,7 +92,7 @@ gp108_gr_fwif[] = { }; int -gp108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gp108_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gp108_gr_fwif, device, index, pgr); + return gf100_gr_new_(gp108_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c index 6edc4bc7ed44..e13683b6e7b1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c @@ -94,7 +94,7 @@ gp10b_gr_fwif[] = { }; int -gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gp10b_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gp10b_gr_fwif, device, index, pgr); + return gf100_gr_new_(gp10b_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c index c711a55ce392..1dfc65d45b52 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c @@ 
-43,7 +43,7 @@ gt200_gr = { }; int -gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gt200_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv50_gr_new_(&gt200_gr, device, index, pgr); + return nv50_gr_new_(&gt200_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c index fa103df32ec7..fcb5ead345a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c @@ -44,7 +44,7 @@ gt215_gr = { }; int -gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gt215_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv50_gr_new_(&gt215_gr, device, index, pgr); + return nv50_gr_new_(&gt215_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c index 2189a8f4e644..4d043c1173ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c @@ -141,7 +141,7 @@ gv100_gr_fwif[] = { }; int -gv100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +gv100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(gv100_gr_fwif, device, index, pgr); + return gf100_gr_new_(gv100_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c index eb1a90644752..cf782b64f62e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c @@ -42,7 +42,7 @@ mcp79_gr = { }; int -mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +mcp79_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv50_gr_new_(&mcp79_gr, device, index, pgr); + return nv50_gr_new_(&mcp79_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c index c91eb56e9327..6f90a6395453 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c @@ -44,7 +44,7 @@ mcp89_gr = { }; int -mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +mcp89_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv50_gr_new_(&mcp89_gr, device, index, pgr); + return nv50_gr_new_(&mcp89_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c index 9c2e985dc079..0bc1a238de43 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c @@ -1413,7 +1413,7 @@ nv04_gr = { }; int -nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv04_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { struct nv04_gr *gr; @@ -1422,5 +1422,5 @@ nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) spin_lock_init(&gr->lock); *pgr = &gr->base; - return nvkm_gr_ctor(&nv04_gr, device, index, true, &gr->base); + return nvkm_gr_ctor(&nv04_gr, device, type, inst, true, &gr->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c index 4ebbfbdd8240..942450b33bc6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c @@ -1173,7 +1173,7 @@ nv10_gr_init(struct nvkm_gr *base) int nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) + enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { struct nv10_gr *gr; @@ -1182,7 +1182,7 @@ nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, spin_lock_init(&gr->lock); *pgr = &gr->base; - return nvkm_gr_ctor(func, device, index, true, &gr->base); + return nvkm_gr_ctor(func, device, type, inst, true, &gr->base); } static const struct nvkm_gr_func @@ -1215,7 +1215,7 @@ nv10_gr = { }; int -nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv10_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv10_gr_new_(&nv10_gr, device, index, pgr); + return nv10_gr_new_(&nv10_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h index 4327baea02af..5cfe927c9123 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h @@ -3,7 +3,7 @@ #define __NV10_GR_H__ #include "priv.h" -int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index, +int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gr **); int nv10_gr_init(struct nvkm_gr *); void nv10_gr_intr(struct nvkm_gr *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c index 3e2c6856b4c4..69ece259df86 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c @@ -53,7 +53,7 @@ nv15_gr = { }; int -nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv15_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv10_gr_new_(&nv15_gr, device, index, pgr); + return nv10_gr_new_(&nv15_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c index 12437d085a73..e39dfc7d4077 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c @@ -53,7 +53,7 @@ nv17_gr = { }; int -nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv17_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv10_gr_new_(&nv17_gr, device, index, pgr); + return nv10_gr_new_(&nv17_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c index d837630a3625..6bff10cee71b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c @@ -330,7 +330,7 @@ nv20_gr_dtor(struct nvkm_gr *base) int nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) + enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { struct nv20_gr *gr; @@ -338,7 +338,7 @@ nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, return -ENOMEM; *pgr = &gr->base; - return nvkm_gr_ctor(func, device, index, true, &gr->base); + return nvkm_gr_ctor(func, device, type, inst, 
true, &gr->base); } static const struct nvkm_gr_func @@ -370,7 +370,7 @@ nv20_gr = { }; int -nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv20_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv20_gr_new_(&nv20_gr, device, index, pgr); + return nv20_gr_new_(&nv20_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h index e57407a8a7c3..c0d2be53413e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h @@ -9,8 +9,8 @@ struct nv20_gr { struct nvkm_memory *ctxtab; }; -int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, - int, struct nvkm_gr **); +int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_gr **); void *nv20_gr_dtor(struct nvkm_gr *); int nv20_gr_oneinit(struct nvkm_gr *); int nv20_gr_init(struct nvkm_gr *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c index 32d29d3faee0..f3a56f17d94a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c @@ -129,7 +129,7 @@ nv25_gr = { }; int -nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv25_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv20_gr_new_(&nv25_gr, device, index, pgr); + return nv20_gr_new_(&nv25_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c index f941062c66f0..f268d2642d29 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c @@ -120,7 +120,7 @@ nv2a_gr = { }; int -nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv2a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv20_gr_new_(&nv2a_gr, device, index, pgr); + return nv20_gr_new_(&nv2a_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c index 785ec956df0f..e5737cdf2fa1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c @@ -194,7 +194,7 @@ nv30_gr = { }; int -nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv30_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv20_gr_new_(&nv30_gr, device, index, pgr); + return nv20_gr_new_(&nv30_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c index bd610d75c677..1ab2da8ebf4e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c @@ -131,7 +131,7 @@ nv34_gr = { }; int -nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv34_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv20_gr_new_(&nv34_gr, device, index, pgr); + return nv20_gr_new_(&nv34_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c index 89db7f523037..591260f5676b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c @@ -131,7 +131,7 @@ nv35_gr = { }; int -nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv35_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv20_gr_new_(&nv35_gr, device, index, pgr); + return nv20_gr_new_(&nv35_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c index 5f1ad8344ea9..67f3535ff97e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c @@ -429,7 +429,7 @@ nv40_gr_init(struct nvkm_gr *base) int nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) + enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { struct nv40_gr *gr; @@ -438,7 +438,7 @@ nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, *pgr = &gr->base; INIT_LIST_HEAD(&gr->chan); - return nvkm_gr_ctor(func, device, index, true, &gr->base); + return nvkm_gr_ctor(func, device, type, inst, true, &gr->base); } static const struct nvkm_gr_func @@ -470,7 +470,7 @@ nv40_gr = { }; int -nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv40_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv40_gr_new_(&nv40_gr, device, index, pgr); + return nv40_gr_new_(&nv40_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h index e6128791b2d2..f3d3d3a5ae5b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h @@ -10,7 +10,7 @@ struct nv40_gr { struct list_head chan; }; -int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index, +int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gr **); int nv40_gr_init(struct nvkm_gr *); void nv40_gr_intr(struct nvkm_gr *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c index 45ff80254eb4..22b6a38a7031 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c @@ -102,7 +102,7 @@ nv44_gr = { }; int -nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +nv44_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv40_gr_new_(&nv44_gr, device, index, pgr); + return nv40_gr_new_(&nv44_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c index df16ffda1749..563a10097e95 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c @@ -761,7 +761,7 @@ nv50_gr_init(struct nvkm_gr *base) int nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, - int index, struct nvkm_gr **pgr) + enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { struct nv50_gr *gr; @@ -770,7 +770,7 @@ nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device, spin_lock_init(&gr->lock); *pgr = &gr->base; - return nvkm_gr_ctor(func, device, index, true, &gr->base); + return nvkm_gr_ctor(func, device, type, inst, true, &gr->base); } static const struct nvkm_gr_func @@ -790,7 +790,7 @@ nv50_gr = { }; int -nv50_gr_new(struct nvkm_device 
*device, int index, struct nvkm_gr **pgr) +nv50_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return nv50_gr_new_(&nv50_gr, device, index, pgr); + return nv50_gr_new_(&nv50_gr, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h index 465f4da0ddfc..84388c42e5c6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h @@ -11,7 +11,7 @@ struct nv50_gr { u32 size; }; -int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index, +int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gr **); int nv50_gr_init(struct nvkm_gr *); void nv50_gr_intr(struct nvkm_gr *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h index 3b30f24032cc..9b2c66e8be90 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h @@ -7,8 +7,8 @@ struct nvkm_fb_tile; struct nvkm_fifo_chan; -int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *, - int index, bool enable, struct nvkm_gr *); +int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + bool enable, struct nvkm_gr *); bool nv04_gr_idle(struct nvkm_gr *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c index 6039f9948aa2..1a8a21844e12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c @@ -198,7 +198,7 @@ tu102_gr_fwif[] = { }; int -tu102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr) { - return gf100_gr_new_(tu102_gr_fwif, device, index, pgr); + return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c index c0e11a071843..0fcc0ffa1e40 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c @@ -37,7 +37,8 @@ g84_mpeg = { }; int -g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) +g84_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pmpeg) { - return nvkm_engine_new_(&g84_mpeg, device, index, true, pmpeg); + return nvkm_engine_new_(&g84_mpeg, device, type, inst, true, pmpeg); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c index 7fea7d45202f..b1054db4c1b8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c @@ -274,7 +274,7 @@ nv31_mpeg_ = { int nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device, - int index, struct nvkm_engine **pmpeg) + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pmpeg) { struct nv31_mpeg *mpeg; @@ -283,8 +283,7 @@ nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device, mpeg->func = func; *pmpeg = &mpeg->engine; - return nvkm_engine_ctor(&nv31_mpeg_, device, index, - true, &mpeg->engine); + return nvkm_engine_ctor(&nv31_mpeg_, device, type, inst, true, &mpeg->engine); } static const struct nv31_mpeg_func @@ -293,7 +292,8 @@ nv31_mpeg = { }; int 
-nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) +nv31_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pmpeg) { - return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg); + return nv31_mpeg_new_(&nv31_mpeg, device, type, inst, pmpeg); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h index b3e131538858..9f30aaaf809e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h @@ -11,8 +11,8 @@ struct nv31_mpeg { struct nv31_mpeg_chan *chan; }; -int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *, - int index, struct nvkm_engine **); +int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_engine **); struct nv31_mpeg_func { bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c index b5ec7c504dc6..179167484ef1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c @@ -76,7 +76,8 @@ nv40_mpeg = { }; int -nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) +nv40_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pmpeg) { - return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg); + return nv31_mpeg_new_(&nv40_mpeg, device, type, inst, pmpeg); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c index c3cf02ed468e..521ce43a2871 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c @@ -203,7 +203,8 @@ nv44_mpeg = { }; int -nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) +nv44_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pmpeg) { struct nv44_mpeg *mpeg; @@ -212,5 +213,5 @@ nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) INIT_LIST_HEAD(&mpeg->chan); *pmpeg = &mpeg->engine; - return nvkm_engine_ctor(&nv44_mpeg, device, index, true, &mpeg->engine); + return nvkm_engine_ctor(&nv44_mpeg, device, type, inst, true, &mpeg->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c index 6df880a39019..e6374f36961c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c @@ -129,7 +129,8 @@ nv50_mpeg = { }; int -nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg) +nv50_mpeg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pmpeg) { - return nvkm_engine_new_(&nv50_mpeg, device, index, true, pmpeg); + return nvkm_engine_new_(&nv50_mpeg, device, type, inst, true, pmpeg); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c index 80211f76093b..842fcfbd28b8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c @@ -24,9 +24,8 @@ #include "priv.h" int -nvkm_mspdec_new_(const struct nvkm_falcon_func *func, - struct nvkm_device *device, int index, - struct nvkm_engine **pengine) +nvkm_mspdec_new_(const struct nvkm_falcon_func *func, struct 
nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine); + return nvkm_falcon_new_(func, device, type, inst, true, 0x085000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c index f30cf1dcfb30..ecb06d68f544 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c @@ -43,8 +43,8 @@ g98_mspdec = { }; int -g98_mspdec_new(struct nvkm_device *device, int index, - struct nvkm_engine **pengine) +g98_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) { - return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine); + return nvkm_mspdec_new_(&g98_mspdec, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c index cfe1aa81bd14..0a69bd767d69 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c @@ -43,8 +43,8 @@ gf100_mspdec = { }; int -gf100_mspdec_new(struct nvkm_device *device, int index, +gf100_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine); + return nvkm_mspdec_new_(&gf100_mspdec, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c index 24272b4927bc..a08991dca428 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c @@ -35,8 +35,8 @@ gk104_mspdec = { }; int -gk104_mspdec_new(struct nvkm_device *device, int index, +gk104_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine); + return nvkm_mspdec_new_(&gk104_mspdec, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c index cf6e59ad6ee2..791fb03a32ad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c @@ -35,8 +35,8 @@ gt215_mspdec = { }; int -gt215_mspdec_new(struct nvkm_device *device, int index, - struct nvkm_engine **pengine) +gt215_mspdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) { - return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine); + return nvkm_mspdec_new_(&gt215_mspdec, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h index 86445a2600d0..2bc5537d40a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h @@ -3,8 +3,8 @@ #define __NVKM_MSPDEC_PRIV_H__ #include <engine/mspdec.h> -int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *, - int index, struct nvkm_engine **); +int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type, + int, struct nvkm_engine **); void g98_mspdec_init(struct nvkm_falcon *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c index
bfae5e60e925..45a9411ab2e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c @@ -25,7 +25,7 @@ int nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device, - int index, struct nvkm_engine **pengine) + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine); + return nvkm_falcon_new_(func, device, type, inst, true, 0x086000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c index c45dbf79d1f9..160120b9bd64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c @@ -43,8 +43,8 @@ g98_msppp = { }; int -g98_msppp_new(struct nvkm_device *device, int index, +g98_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_msppp_new_(&g98_msppp, device, index, pengine); + return nvkm_msppp_new_(&g98_msppp, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c index 803c62ab516e..debed9ae8731 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c @@ -43,8 +43,8 @@ gf100_msppp = { }; int -gf100_msppp_new(struct nvkm_device *device, int index, +gf100_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_msppp_new_(&gf100_msppp, device, index, pengine); + return nvkm_msppp_new_(&gf100_msppp, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c index 49cbf72cee4b..a2fd736fef94 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c @@ -35,8 +35,8 @@ gt215_msppp = { }; int -gt215_msppp_new(struct nvkm_device *device, int index, - struct nvkm_engine **pengine) +gt215_msppp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) { - return nvkm_msppp_new_(&gt215_msppp, device, index, pengine); + return nvkm_msppp_new_(&gt215_msppp, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h index f20b10915db2..582ab8ce1425 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h @@ -3,8 +3,8 @@ #define __NVKM_MSPPP_PRIV_H__ #include <engine/msppp.h> -int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *, - int index, struct nvkm_engine **); +int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type, + int, struct nvkm_engine **); void g98_msppp_init(struct nvkm_falcon *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c index 745bbb653dc0..7be42b980e57 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c @@ -25,7 +25,7 @@ int nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device, - int index, struct nvkm_engine **pengine) + enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine); +
return nvkm_falcon_new_(func, device, type, inst, true, 0x084000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c index 4a2a9f0494af..cfa2065319a6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c @@ -43,8 +43,8 @@ g98_msvld = { }; int -g98_msvld_new(struct nvkm_device *device, int index, +g98_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_msvld_new_(&g98_msvld, device, index, pengine); + return nvkm_msvld_new_(&g98_msvld, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c index 1695e532c081..8d58ad8e04d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c @@ -43,8 +43,8 @@ gf100_msvld = { }; int -gf100_msvld_new(struct nvkm_device *device, int index, +gf100_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_msvld_new_(&gf100_msvld, device, index, pengine); + return nvkm_msvld_new_(&gf100_msvld, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c index b640cd63ebe8..b28be28046f1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c @@ -35,8 +35,8 @@ gk104_msvld = { }; int -gk104_msvld_new(struct nvkm_device *device, int index, +gk104_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_msvld_new_(&gk104_msvld, device, index, pengine); + return nvkm_msvld_new_(&gk104_msvld, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c index 201e8ef3519e..d7489f972c99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c @@ -35,8 +35,8 @@ gt215_msvld = { }; int -gt215_msvld_new(struct nvkm_device *device, int index, - struct nvkm_engine **pengine) +gt215_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) { - return nvkm_msvld_new_(&gt215_msvld, device, index, pengine); + return nvkm_msvld_new_(&gt215_msvld, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c index a0f540ef257b..16c30b62ab09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c @@ -35,8 +35,8 @@ mcp89_msvld = { }; int -mcp89_msvld_new(struct nvkm_device *device, int index, - struct nvkm_engine **pengine) +mcp89_msvld_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) { - return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine); + return nvkm_msvld_new_(&mcp89_msvld, device, type, inst, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h index 5cd1e83badbb..f729d919b054 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h @@ -3,8 +3,8 @@ #define __NVKM_MSVLD_PRIV_H__ #include <engine/msvld.h> -int
nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *, - int index, struct nvkm_engine **); +int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *, enum nvkm_subdev_type, + int, struct nvkm_engine **); void g98_msvld_init(struct nvkm_falcon *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c index 9b23c1b70ebf..b0181cc5953b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c @@ -37,7 +37,7 @@ nvkm_nvdec = { int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device, - int index, struct nvkm_nvdec **pnvdec) + enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) { struct nvkm_nvdec *nvdec; int ret; @@ -45,7 +45,7 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device, if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL))) return -ENOMEM; - ret = nvkm_engine_ctor(&nvkm_nvdec, device, index, true, + ret = nvkm_engine_ctor(&nvkm_nvdec, device, type, inst, true, &nvdec->engine); if (ret) return ret; @@ -57,5 +57,5 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device, nvdec->func = fwif->func; return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev, - nvkm_subdev_name[index], 0, &nvdec->falcon); + nvdec->engine.subdev.name, 0, &nvdec->falcon); }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c index 0ab27ab4d8ee..8c44ce44a6d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c @@ -56,8 +56,8 @@ gm107_nvdec_fwif[] = { }; int -gm107_nvdec_new(struct nvkm_device *device, int index, +gm107_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec) { - return nvkm_nvdec_new_(gm107_nvdec_fwif, device, index, pnvdec); + return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, pnvdec); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h index e14da8b000d0..0920f6a887e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h @@ -14,6 +14,6 @@ struct nvkm_nvdec_fwif { const struct nvkm_nvdec_func *func; }; -int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, - struct nvkm_device *, int, struct nvkm_nvdec **); +int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *, + enum nvkm_subdev_type, int, struct nvkm_nvdec **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c index 484100e15668..c39e797dc7c9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c @@ -39,7 +39,7 @@ nvkm_nvenc = { int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device, - int index, struct nvkm_nvenc **pnvenc) + enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) { struct nvkm_nvenc *nvenc; int ret; @@ -47,7 +47,7 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device, if (!(nvenc = *pnvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL))) return -ENOMEM; - ret = nvkm_engine_ctor(&nvkm_nvenc, device, index, true, + ret = nvkm_engine_ctor(&nvkm_nvenc, device, type, inst, true, &nvenc->engine); if (ret) return ret; @@ -59,5 +59,5 @@ nvkm_nvenc_new_(const 
struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device, nvenc->func = fwif->func; return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev, - nvkm_subdev_name[index], 0, &nvenc->falcon); + nvenc->engine.subdev.name, 0, &nvenc->falcon); }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c index d249c8ffb2d5..f44d41bf2034 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c @@ -56,8 +56,8 @@ gm107_nvenc_fwif[] = { }; int -gm107_nvenc_new(struct nvkm_device *device, int index, +gm107_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc) { - return nvkm_nvenc_new_(gm107_nvenc_fwif, device, index, pnvenc); + return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h index 100fa5ebbeef..4130a2bfbb4f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h @@ -14,6 +14,6 @@ struct nvkm_nvenc_fwif { const struct nvkm_nvenc_func *func; }; -int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, +int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_nvenc **pnvenc); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c index b2785bee418e..8fe0444f761e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c @@ -628,10 +628,10 @@ nvkm_perfmon_dtor(struct nvkm_object *object) { struct nvkm_perfmon *perfmon = nvkm_perfmon(object); struct nvkm_pm *pm = perfmon->pm; - mutex_lock(&pm->engine.subdev.mutex); - if (pm->perfmon == &perfmon->object) - pm->perfmon = NULL; - mutex_unlock(&pm->engine.subdev.mutex); + spin_lock(&pm->client.lock); + if (pm->client.object == &perfmon->object) + pm->client.object = NULL; + spin_unlock(&pm->client.lock); return perfmon; } @@ -671,11 +671,11 @@ nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass, if (ret) return ret; - mutex_lock(&pm->engine.subdev.mutex); - if (pm->perfmon == NULL) - pm->perfmon = *pobject; - ret = (pm->perfmon == *pobject) ? 0 : -EBUSY; - mutex_unlock(&pm->engine.subdev.mutex); + spin_lock(&pm->client.lock); + if (pm->client.object == NULL) + pm->client.object = *pobject; + ret = (pm->client.object == *pobject) ? 
0 : -EBUSY; + spin_unlock(&pm->client.lock); return ret; } @@ -858,10 +858,11 @@ nvkm_pm = { int nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device, - int index, struct nvkm_pm *pm) + enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm) { pm->func = func; INIT_LIST_HEAD(&pm->domains); INIT_LIST_HEAD(&pm->sources); - return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine); + spin_lock_init(&pm->client.lock); + return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true, &pm->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c index 6e441ddafd86..0086d00eb162 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c @@ -159,7 +159,7 @@ g84_pm[] = { }; int -g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return nv40_pm_new_(g84_pm, device, index, ppm); + return nv40_pm_new_(g84_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c index fe2532ee4145..8e02701def8e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c @@ -187,7 +187,7 @@ gf100_pm_ = { int gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device, - int index, struct nvkm_pm **ppm) + enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { struct nvkm_pm *pm; u32 mask; @@ -196,7 +196,7 @@ gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device, if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL))) return -ENOMEM; - ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm); + ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm); if (ret) return ret; @@ -237,7 +237,7 @@ gf100_pm = { }; int -gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return gf100_pm_new_(&gf100_pm, device, index, ppm); + return gf100_pm_new_(&gf100_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h index 461bb219b1c0..bc4b014c4e8e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h @@ -9,8 +9,8 @@ struct gf100_pm_func { const struct nvkm_specdom *doms_part; }; -int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, - int index, struct nvkm_pm **); +int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_pm **); extern const struct nvkm_funcdom gf100_perfctr_func; extern const struct nvkm_specdom gf100_pm_gpc[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c index 49b24c98a7f7..505565866b59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c @@ -60,7 +60,7 @@ gf108_pm = { }; int -gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return gf100_pm_new_(&gf108_pm, device, index, ppm); + return gf100_pm_new_(&gf108_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c 
b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c index 9170025fc988..c61e8c010bb3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c @@ -74,7 +74,7 @@ gf117_pm = { }; int -gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return gf100_pm_new_(&gf117_pm, device, index, ppm); + return gf100_pm_new_(&gf117_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c index 07f946d26ac6..75bf3df1cb18 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c @@ -178,7 +178,7 @@ gk104_pm = { }; int -gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return gf100_pm_new_(&gk104_pm, device, index, ppm); + return gf100_pm_new_(&gk104_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c index 5cf5dd536fd0..25874c541486 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c @@ -151,7 +151,7 @@ gt200_pm[] = { }; int -gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return nv40_pm_new_(gt200_pm, device, index, ppm); + return nv40_pm_new_(gt200_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c index c9227ad41b04..54c23e2b6645 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c @@ -132,7 +132,7 @@ gt215_pm[] = { }; int -gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return nv40_pm_new_(gt215_pm, device, index, ppm); + return nv40_pm_new_(gt215_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c index 3fda594700e0..eba5b3b79340 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c @@ -80,7 +80,7 @@ nv40_pm_ = { int nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device, - int index, struct nvkm_pm **ppm) + enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { struct nv40_pm *pm; int ret; @@ -89,7 +89,7 @@ nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device, return -ENOMEM; *ppm = &pm->base; - ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base); + ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base); if (ret) return ret; @@ -117,7 +117,7 @@ nv40_pm[] = { }; int -nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return nv40_pm_new_(nv40_pm, device, index, ppm); + return nv40_pm_new_(nv40_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h index 8ed19320fda1..afb79843723d 100644 ---
a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h @@ -9,7 +9,7 @@ struct nv40_pm { u32 sequence; }; -int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, - int index, struct nvkm_pm **); +int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_pm **); extern const struct nvkm_funcdom nv40_perfctr_func; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c index cc5a41d4c6f2..bbd3404901f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c @@ -169,7 +169,7 @@ nv50_pm[] = { }; int -nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm) +nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm) { - return nv40_pm_new_(nv50_pm, device, index, ppm); + return nv40_pm_new_(nv50_pm, device, type, inst, ppm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h index cd6f8f79b235..6ae25d3e7f45 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h @@ -4,8 +4,8 @@ #define nvkm_pm(p) container_of((p), struct nvkm_pm, engine) #include <engine/pm.h> -int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, - int index, struct nvkm_pm *); +int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_pm *); struct nvkm_pm_func { void (*fini)(struct nvkm_pm *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c index 6d2a7f0afbb5..1b87df03c823 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c @@ -74,9 +74,8 @@ g98_sec = { }; int -g98_sec_new(struct nvkm_device *device, int index, +g98_sec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(&g98_sec, device, index, - true, 0x087000, pengine); + return nvkm_falcon_new_(&g98_sec, device, type, inst, true, 0x087000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c index 41318aa0d481..092c6d0b8e01 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c @@ -85,7 +85,7 @@ nvkm_sec2 = { int nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device, - int index, u32 addr, struct nvkm_sec2 **psec2) + enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2) { struct nvkm_sec2 *sec2; int ret; @@ -93,7 +93,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device, if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL))) return -ENOMEM; - ret = nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine); + ret = nvkm_engine_ctor(&nvkm_sec2, device, type, inst, true, &sec2->engine); if (ret) return ret; @@ -104,7 +104,7 @@ nvkm_sec2_new_(const struct nvkm_sec2_fwif *fwif, struct nvkm_device *device, sec2->func = fwif->func; ret = nvkm_falcon_ctor(sec2->func->flcn, &sec2->engine.subdev, - nvkm_subdev_name[index], addr, &sec2->falcon); + sec2->engine.subdev.name, addr, &sec2->falcon); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c index 
bccf7acb7f98..44e39f5743d5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c @@ -343,7 +343,8 @@ gp102_sec2_fwif[] = { }; int -gp102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) +gp102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_sec2 **psec2) { - return nvkm_sec2_new_(gp102_sec2_fwif, device, index, 0, psec2); + return nvkm_sec2_new_(gp102_sec2_fwif, device, type, inst, 0, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c index e770c9497871..3e9f5c842f3c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c @@ -36,7 +36,8 @@ gp108_sec2_fwif[] = { }; int -gp108_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) +gp108_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_sec2 **psec2) { - return nvkm_sec2_new_(gp108_sec2_fwif, device, index, 0, psec2); + return nvkm_sec2_new_(gp108_sec2_fwif, device, type, inst, 0, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h index 8cbc0b7d0a27..af19229e885d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h @@ -25,6 +25,6 @@ int gp102_sec2_load(struct nvkm_sec2 *, int, const struct nvkm_sec2_fwif *); extern const struct nvkm_sec2_func gp102_sec2; extern const struct nvkm_acr_lsf_func gp102_sec2_acr_1; -int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, +int nvkm_sec2_new_(const struct nvkm_sec2_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_sec2 **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c index a231c1c6c0a5..f3faeb705575 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c @@ -72,10 +72,11 @@ tu102_sec2_fwif[] = { }; int -tu102_sec2_new(struct nvkm_device *device, int index, struct nvkm_sec2 **psec2) +tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_sec2 **psec2) { /* TOP info wasn't updated on Turing to reflect the PRI * address change for some reason. We override it here. 
*/ - return nvkm_sec2_new_(tu102_sec2_fwif, device, index, 0x840000, psec2); + return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c index 7be3198e11de..14871d0bd746 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c @@ -97,7 +97,7 @@ nvkm_sw = { int nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device, - int index, struct nvkm_sw **psw) + enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw) { struct nvkm_sw *sw; @@ -106,5 +106,5 @@ nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device, INIT_LIST_HEAD(&sw->chan); sw->func = func; - return nvkm_engine_ctor(&nvkm_sw, device, index, true, &sw->engine); + return nvkm_engine_ctor(&nvkm_sw, device, type, inst, true, &sw->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c index ea8f4247b628..55abf839f29d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c @@ -149,7 +149,7 @@ gf100_sw = { }; int -gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw) +gf100_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw) { - return nvkm_sw_new_(&gf100_sw, device, index, psw); + return nvkm_sw_new_(&gf100_sw, device, type, inst, psw); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c index b6675fe1b0ce..4aa57573869c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c @@ -133,7 +133,7 @@ nv04_sw = { }; int -nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw) +nv04_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw) { - return nvkm_sw_new_(&nv04_sw, device, index, psw); + return nvkm_sw_new_(&nv04_sw, device, type, inst, psw); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c index 09d22fcd194c..e79e640ae535 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c @@ -62,7 +62,7 @@ nv10_sw = { }; int -nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw) +nv10_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw) { - return nvkm_sw_new_(&nv10_sw, device, index, psw); + return nvkm_sw_new_(&nv10_sw, device, type, inst, psw); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c index 01573d187f2c..1fdd094c8b7e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c @@ -142,7 +142,7 @@ nv50_sw = { }; int -nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw) +nv50_sw_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_sw **psw) { - return nvkm_sw_new_(&nv50_sw, device, index, psw); + return nvkm_sw_new_(&nv50_sw, device, type, inst, psw); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h index 6d18fc6180f2..d9d83b1b8849 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h @@ -5,8 +5,8 @@ #include <engine/sw.h> struct 
nvkm_sw_chan; -int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *, - int index, struct nvkm_sw **); +int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_sw **); struct nvkm_sw_chan_sclass { int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c index 7a96178786c4..b502266c76fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c @@ -36,8 +36,8 @@ g84_vp = { }; int -g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) +g84_vp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_engine **pengine) { - return nvkm_xtensa_new_(&g84_vp, device, index, - true, 0x00f000, pengine); + return nvkm_xtensa_new_(&g84_vp, device, type, inst, true, 0x00f000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c index 70549381e082..f7d3ba0afb55 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c @@ -175,9 +175,9 @@ nvkm_xtensa = { }; int -nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, - struct nvkm_device *device, int index, bool enable, - u32 addr, struct nvkm_engine **pengine) +nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool enable, u32 addr, + struct nvkm_engine **pengine) { struct nvkm_xtensa *xtensa; @@ -187,6 +187,5 @@ nvkm_xtensa_new_(const struct nvkm_xtensa_func *func, xtensa->addr = addr; *pengine = &xtensa->engine; - return nvkm_engine_ctor(&nvkm_xtensa, device, index, - enable, &xtensa->engine); + return nvkm_engine_ctor(&nvkm_xtensa, device, type, inst, enable, &xtensa->engine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index c6a3448180d6..262641a014b0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -88,13 +88,12 @@ int nvkm_falcon_enable(struct nvkm_falcon *falcon) { struct nvkm_device *device = falcon->owner->device; - enum nvkm_devidx id = falcon->owner->index; int ret; - nvkm_mc_enable(device, id); + nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst); ret = falcon->func->enable(falcon); if (ret) { - nvkm_mc_disable(device, id); + nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst); return ret; } @@ -105,15 +104,14 @@ void nvkm_falcon_disable(struct nvkm_falcon *falcon) { struct nvkm_device *device = falcon->owner->device; - enum nvkm_devidx id = falcon->owner->index; /* already disabled, return or wait_idle will timeout */ - if (!nvkm_mc_enabled(device, id)) + if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst)) return; falcon->func->disable(falcon); - nvkm_mc_disable(device, id); + nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst); } int @@ -143,7 +141,7 @@ nvkm_falcon_oneinit(struct nvkm_falcon *falcon) u32 reg; if (!falcon->addr) { - falcon->addr = nvkm_top_addr(subdev->device, subdev->index); + falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst); if (WARN_ON(!falcon->addr)) return -ENODEV; } @@ -188,7 +186,7 @@ nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) mutex_lock(&falcon->mutex); if (falcon->user) { nvkm_error(user, "%s falcon already acquired by %s!\n", 
- falcon->name, nvkm_subdev_name[falcon->user->index]); + falcon->name, falcon->user->name); mutex_unlock(&falcon->mutex); return -EBUSY; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild index fb4fff1222af..2cb24fff7e32 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild @@ -11,7 +11,6 @@ include $(src)/nvkm/subdev/fuse/Kbuild include $(src)/nvkm/subdev/gpio/Kbuild include $(src)/nvkm/subdev/gsp/Kbuild include $(src)/nvkm/subdev/i2c/Kbuild -include $(src)/nvkm/subdev/ibus/Kbuild include $(src)/nvkm/subdev/iccsense/Kbuild include $(src)/nvkm/subdev/instmem/Kbuild include $(src)/nvkm/subdev/ltc/Kbuild @@ -20,6 +19,7 @@ include $(src)/nvkm/subdev/mmu/Kbuild include $(src)/nvkm/subdev/mxm/Kbuild include $(src)/nvkm/subdev/pci/Kbuild include $(src)/nvkm/subdev/pmu/Kbuild +include $(src)/nvkm/subdev/privring/Kbuild include $(src)/nvkm/subdev/therm/Kbuild include $(src)/nvkm/subdev/timer/Kbuild include $(src)/nvkm/subdev/top/Kbuild diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c index c962df9910dd..af6cac696d43 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c @@ -410,14 +410,14 @@ nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver) int nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device, - int index, struct nvkm_acr **pacr) + enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr) { struct nvkm_acr *acr; long wprfw; if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_acr, device, index, &acr->subdev); + nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev); INIT_LIST_HEAD(&acr->hsfw); INIT_LIST_HEAD(&acr->lsfw); INIT_LIST_HEAD(&acr->hsf); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c index cd41b2e6cc87..cdb1ead26d84 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -262,7 +262,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf, hsf->func->bld(acr, hsf); /* Boot the falcon. 
*/ - nvkm_mc_intr_mask(device, falcon->owner->index, false); + nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, false); nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5); nvkm_falcon_set_start_addr(falcon, hsf->imem_tag << 8); @@ -279,7 +279,7 @@ gm200_acr_hsfw_boot(struct nvkm_acr *acr, struct nvkm_acr_hsf *hsf, return -EIO; nvkm_falcon_clear_interrupt(falcon, intr_clear); - nvkm_mc_intr_mask(device, falcon->owner->index, true); + nvkm_mc_intr_mask(device, falcon->owner->type, falcon->owner->inst, true); return ret; } @@ -478,7 +478,8 @@ gm200_acr_fwif[] = { }; int -gm200_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_acr **pacr) { - return nvkm_acr_new_(gm200_acr_fwif, device, index, pacr); + return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c index b1ecc58152cc..54e996f2f630 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm20b.c @@ -129,7 +129,8 @@ gm20b_acr_fwif[] = { }; int -gm20b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +gm20b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_acr **pacr) { - return nvkm_acr_new_(gm20b_acr_fwif, device, index, pacr); + return nvkm_acr_new_(gm20b_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c index 80eb9d8dbc80..fb9132a39bb1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -276,7 +276,8 @@ gp102_acr_fwif[] = { }; int -gp102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +gp102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_acr **pacr) { - return nvkm_acr_new_(gp102_acr_fwif, device, index, pacr); + return nvkm_acr_new_(gp102_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c index 67a7c141004b..373d638a2177 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp108.c @@ -106,7 +106,8 @@ gp108_acr_fwif[] = { }; int -gp108_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +gp108_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_acr **pacr) { - return nvkm_acr_new_(gp108_acr_fwif, device, index, pacr); + return nvkm_acr_new_(gp108_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c index 8249f0d2d81d..f03ba028867b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp10b.c @@ -52,7 +52,8 @@ gp10b_acr_fwif[] = { }; int -gp10b_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +gp10b_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_acr **pacr) { - return nvkm_acr_new_(gp10b_acr_fwif, device, index, pacr); + return nvkm_acr_new_(gp10b_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h index d71af17a169a..c30b841c9d35 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/priv.h @@ -135,8 +135,8 @@ int gp102_acr_load_load(struct nvkm_acr *, struct nvkm_acr_hsfw *); extern const struct nvkm_acr_hsf_func gp108_acr_unload_0; void gp108_acr_hsfw_bld(struct nvkm_acr *, struct nvkm_acr_hsf *); -int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, int, - struct nvkm_acr **); +int nvkm_acr_new_(const struct nvkm_acr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, + int inst, struct nvkm_acr **); int nvkm_acr_hsf_boot(struct nvkm_acr *, const char *name); struct nvkm_acr_lsf { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c index c4981bce9a2b..05a87e77525f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c @@ -224,7 +224,8 @@ tu102_acr_fwif[] = { }; int -tu102_acr_new(struct nvkm_device *device, int index, struct nvkm_acr **pacr) +tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_acr **pacr) { - return nvkm_acr_new_(tu102_acr_fwif, device, index, pacr); + return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c index 209a6a40834a..d017a1b5e5dd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c @@ -134,9 +134,9 @@ nvkm_bar = { void nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device, - int index, struct nvkm_bar *bar) + enum nvkm_subdev_type type, int inst, struct nvkm_bar *bar) { - nvkm_subdev_ctor(&nvkm_bar, device, index, &bar->subdev); + nvkm_subdev_ctor(&nvkm_bar, device, type, inst, &bar->subdev); bar->func = func; spin_lock_init(&bar->lock); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c index 87f26f54b481..77a41bcf860e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c @@ -56,7 +56,8 @@ g84_bar_func = { }; int -g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar) +g84_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bar **pbar) { - return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar); + return nv50_bar_new_(&g84_bar_func, device, type, inst, 0x200, pbar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index a3dcb09a40ee..51070b7dda85 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -162,12 +162,12 @@ gf100_bar_dtor(struct nvkm_bar *base) int gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device, - int index, struct nvkm_bar **pbar) + enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar) { struct gf100_bar *bar; if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL))) return -ENOMEM; - nvkm_bar_ctor(func, device, index, &bar->base); + nvkm_bar_ctor(func, device, type, inst, &bar->base); bar->bar2_halve = nvkm_boolopt(device->cfgopt, "NvBar2Halve", false); *pbar = &bar->base; return 0; @@ -189,7 +189,8 @@ gf100_bar_func = { }; int -gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar) +gf100_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bar **pbar) { - return 
gf100_bar_new_(&gf100_bar_func, device, index, pbar); + return gf100_bar_new_(&gf100_bar_func, device, type, inst, pbar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h index 4ae4c7145712..328a68b418d9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h @@ -15,7 +15,7 @@ struct gf100_bar { struct gf100_barN bar[2]; }; -int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, +int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bar **); void *gf100_bar_dtor(struct nvkm_bar *); int gf100_bar_oneinit(struct nvkm_bar *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c index 35878fb538f2..eead8ab88393 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c @@ -32,9 +32,10 @@ gk20a_bar_func = { }; int -gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar) +gk20a_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bar **pbar) { - int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar); + int ret = gf100_bar_new_(&gk20a_bar_func, device, type, inst, pbar); if (ret == 0) (*pbar)->iomap_uncached = true; return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c index 3ddf9222d935..da95307a7912 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c @@ -59,7 +59,8 @@ gm107_bar_func = { }; int -gm107_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar) +gm107_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bar **pbar) { - return gf100_bar_new_(&gm107_bar_func, device, index, pbar); + return gf100_bar_new_(&gm107_bar_func, device, type, inst, pbar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c index 1ed6170891c4..4acdb4fb0107 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c @@ -32,9 +32,10 @@ gm20b_bar_func = { }; int -gm20b_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar) +gm20b_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bar **pbar) { - int ret = gf100_bar_new_(&gm20b_bar_func, device, index, pbar); + int ret = gf100_bar_new_(&gm20b_bar_func, device, type, inst, pbar); if (ret == 0) (*pbar)->iomap_uncached = true; return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c index f23a0ccc2bec..27d8a1be43e4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c @@ -220,12 +220,12 @@ nv50_bar_dtor(struct nvkm_bar *base) int nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device, - int index, u32 pgd_addr, struct nvkm_bar **pbar) + enum nvkm_subdev_type type, int inst, u32 pgd_addr, struct nvkm_bar **pbar) { struct nv50_bar *bar; if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL))) return -ENOMEM; - nvkm_bar_ctor(func, device, index, &bar->base); + nvkm_bar_ctor(func, device, type, inst, &bar->base); bar->pgd_addr = pgd_addr; *pbar = &bar->base; return 0; @@ -248,7 +248,8 @@ nv50_bar_func = { }; int 
-nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar) +nv50_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bar **pbar) { - return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar); + return nv50_bar_new_(&nv50_bar_func, device, type, inst, 0x1400, pbar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h index e4193deb2e51..dedee9394079 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h @@ -16,7 +16,7 @@ struct nv50_bar { struct nvkm_gpuobj *bar2; }; -int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, +int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *, enum nvkm_subdev_type, int, u32 pgd_addr, struct nvkm_bar **); void *nv50_bar_dtor(struct nvkm_bar *); int nv50_bar_oneinit(struct nvkm_bar *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h index 869ad184f923..daebfc991c76 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h @@ -5,7 +5,7 @@ #include <subdev/bar.h> void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *, - int, struct nvkm_bar *); + enum nvkm_subdev_type, int, struct nvkm_bar *); struct nvkm_bar_func { void *(*dtor)(struct nvkm_bar *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c index 798f65ec3a86..c25ab407b85d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c @@ -92,7 +92,8 @@ tu102_bar = { }; int -tu102_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar) +tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bar **pbar) { - return gf100_bar_new_(&tu102_bar, device, index, pbar); + return gf100_bar_new_(&tu102_bar, device, type, inst, pbar); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index f3c30b2a788e..d0f52d59fc2f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -140,7 +140,8 @@ nvkm_bios = { }; int -nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios) +nvkm_bios_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bios **pbios) { struct nvkm_bios *bios; struct nvbios_image image; @@ -149,7 +150,7 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios) if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_bios, device, index, &bios->subdev); + nvkm_subdev_ctor(&nvkm_bios, device, type, inst, &bios->subdev); ret = nvbios_shadow(bios); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c index 52ad73bce5fe..0e5a46db52ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c @@ -53,12 +53,12 @@ nvkm_bus = { int nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device, - int index, struct nvkm_bus **pbus) + enum nvkm_subdev_type type, int inst, struct nvkm_bus **pbus) { struct nvkm_bus *bus; if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_bus, device, index, &bus->subdev); + 
nvkm_subdev_ctor(&nvkm_bus, device, type, inst, &bus->subdev); bus->func = func; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c index 9700b5c01cc6..a0d6e2d3f804 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c @@ -58,7 +58,8 @@ g94_bus = { }; int -g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) +g94_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bus **pbus) { - return nvkm_bus_new_(&g94_bus, device, index, pbus); + return nvkm_bus_new_(&g94_bus, device, type, inst, pbus); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c index e0930d5fdfb1..53a6651ac225 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c @@ -40,7 +40,7 @@ gf100_bus_intr(struct nvkm_bus *bus) (addr & 0x00000002) ? "write" : "read", data, (addr & 0x00fffffc), (stat & 0x00000002) ? "!ENGINE " : "", - (stat & 0x00000004) ? "IBUS " : "", + (stat & 0x00000004) ? "PRIVRING " : "", (stat & 0x00000008) ? "TIMEOUT " : ""); nvkm_wr32(device, 0x009084, 0x00000000); @@ -69,7 +69,8 @@ gf100_bus = { }; int -gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) +gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bus **pbus) { - return nvkm_bus_new_(&gf100_bus, device, index, pbus); + return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c index 2b44ba5cf4b0..cfed17c062ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c @@ -68,7 +68,8 @@ nv04_bus = { }; int -nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) +nv04_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bus **pbus) { - return nvkm_bus_new_(&nv04_bus, device, index, pbus); + return nvkm_bus_new_(&nv04_bus, device, type, inst, pbus); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c index 5153d89e1f0b..ad8da523bb22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c @@ -82,7 +82,8 @@ nv31_bus = { }; int -nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) +nv31_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bus **pbus) { - return nvkm_bus_new_(&nv31_bus, device, index, pbus); + return nvkm_bus_new_(&nv31_bus, device, type, inst, pbus); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c index 19e10fdc9291..3a1e45adeedc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c @@ -99,7 +99,8 @@ nv50_bus = { }; int -nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus) +nv50_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_bus **pbus) { - return nvkm_bus_new_(&nv50_bus, device, index, pbus); + return nvkm_bus_new_(&nv50_bus, device, type, inst, pbus); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h index 76f7ba1c6494..2e9345b17cf8 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h @@ -11,7 +11,7 @@ struct nvkm_bus_func { u32 hwsq_size; }; -int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int, +int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bus **); void nv50_bus_init(struct nvkm_bus *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c index dc184e857f85..57199be082fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c @@ -649,7 +649,7 @@ nvkm_clk = { int nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device, - int index, bool allow_reclock, struct nvkm_clk *clk) + enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk *clk) { struct nvkm_subdev *subdev = &clk->subdev; struct nvkm_bios *bios = device->bios; @@ -657,7 +657,7 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device, const char *mode; struct nvbios_vpstate_header h; - nvkm_subdev_ctor(&nvkm_clk, device, index, subdev); + nvkm_subdev_ctor(&nvkm_clk, device, type, inst, subdev); if (bios && !nvbios_vpstate_parse(bios, &h)) { struct nvbios_vpstate_entry base, boost; @@ -716,9 +716,9 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device, int nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device, - int index, bool allow_reclock, struct nvkm_clk **pclk) + enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk) { if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL))) return -ENOMEM; - return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk); + return nvkm_clk_ctor(func, device, type, inst, allow_reclock, *pclk); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c index f97e3ec196bb..07157cf53c9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c @@ -41,8 +41,8 @@ g84_clk = { }; int -g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +g84_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { - return nv50_clk_new_(&g84_clk, device, index, - (device->chipset >= 0x94), pclk); + return nv50_clk_new_(&g84_clk, device, type, inst, (device->chipset >= 0x94), pclk); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c index 7f67f9f5a550..6eea11aefb70 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c @@ -468,7 +468,8 @@ gf100_clk = { }; int -gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +gf100_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { struct gf100_clk *clk; @@ -476,5 +477,5 @@ gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base); + return nvkm_clk_ctor(&gf100_clk, device, type, inst, false, &clk->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c index 0b37e3da7feb..0d8e2ddcc5ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c 
@@ -504,7 +504,8 @@ gk104_clk = { }; int -gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +gk104_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { struct gk104_clk *clk; @@ -512,5 +513,5 @@ gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base); + return nvkm_clk_ctor(&gk104_clk, device, type, inst, true, &clk->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c index 218893e3e5f9..d573fb0917fc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c @@ -610,10 +610,9 @@ gk20a_clk = { }; int -gk20a_clk_ctor(struct nvkm_device *device, int index, - const struct nvkm_clk_func *func, - const struct gk20a_clk_pllg_params *params, - struct gk20a_clk *clk) +gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params, + struct gk20a_clk *clk) { struct nvkm_device_tegra *tdev = device->func->tegra(device); int ret; @@ -628,7 +627,7 @@ gk20a_clk_ctor(struct nvkm_device *device, int index, clk->params = params; clk->parent_rate = clk_get_rate(tdev->clk); - ret = nvkm_clk_ctor(func, device, index, true, &clk->base); + ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base); if (ret) return ret; @@ -639,7 +638,8 @@ gk20a_clk_ctor(struct nvkm_device *device, int index, } int -gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { struct gk20a_clk *clk; int ret; @@ -649,11 +649,9 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params, - clk); + ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk, &gk20a_pllg_params, clk); clk->pl_to_div = pl_to_div; clk->div_to_pl = div_to_pl; - return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h index 0d1450972162..286413ff4a9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h @@ -146,8 +146,8 @@ gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll) clk->parent_rate / KHZ); } -int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *, - const struct gk20a_clk_pllg_params *, struct gk20a_clk *); +int gk20a_clk_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, const struct nvkm_clk_func *, + const struct gk20a_clk_pllg_params *, struct gk20a_clk *); void gk20a_clk_fini(struct nvkm_clk *); int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src); int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c index b284e949f732..a139dafffe06 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c @@ -908,7 +908,7 @@ gm20b_clk = { }; static int -gm20b_clk_new_speedo0(struct nvkm_device *device, int index, +gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_clk **pclk) { struct gk20a_clk *clk; @@ -919,12 +919,9 
@@ gm20b_clk_new_speedo0(struct nvkm_device *device, int index, return -ENOMEM; *pclk = &clk->base; - ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0, - &gm20b_pllg_params, clk); - + ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0, &gm20b_pllg_params, clk); clk->pl_to_div = pl_to_div; clk->div_to_pl = div_to_pl; - return ret; } @@ -1014,7 +1011,8 @@ gm20b_clk_init_safe_fmax(struct gm20b_clk *clk) } int -gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { struct nvkm_device_tegra *tdev = device->func->tegra(device); struct gm20b_clk *clk; @@ -1024,7 +1022,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) /* Speedo 0 GPUs cannot use noise-aware PLL */ if (tdev->gpu_speedo_id == 0) - return gm20b_clk_new_speedo0(device, index, pclk); + return gm20b_clk_new_speedo0(device, type, inst, pclk); /* Speedo >= 1, use NAPLL */ clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL); @@ -1036,8 +1034,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) /* duplicate the clock parameters since we will patch them below */ clk_params = (void *) (clk + 1); *clk_params = gm20b_pllg_params; - ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params, - &clk->base); + ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base); if (ret) return ret; @@ -1050,7 +1047,7 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) if (clk_params->max_m == 0) { nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n"); kfree(clk); - return gm20b_clk_new_speedo0(device, index, pclk); + return gm20b_clk_new_speedo0(device, type, inst, pclk); } clk->base.pl_to_div = pl_to_div; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c index f0a26881d9b9..b5f3969727a2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c @@ -537,7 +537,8 @@ gt215_clk = { }; int -gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +gt215_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { struct gt215_clk *clk; @@ -545,5 +546,5 @@ gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base); + return nvkm_clk_ctor(&gt215_clk, device, type, inst, true, &clk->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c index 4884eb4a9221..81f103f88dc8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c @@ -409,7 +409,8 @@ mcp77_clk = { }; int -mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +mcp77_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { struct mcp77_clk *clk; @@ -417,5 +418,5 @@ mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base); + return nvkm_clk_ctor(&mcp77_clk, device, type, inst, true, &clk->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c index b280f85e8827..ca13598c2caa
100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c @@ -72,9 +72,10 @@ nv04_clk = { }; int -nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +nv04_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { - int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk); + int ret = nvkm_clk_new_(&nv04_clk, device, type, inst, false, pclk); if (ret == 0) { (*pclk)->pll_calc = nv04_clk_pll_calc; (*pclk)->pll_prog = nv04_clk_pll_prog; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c index 2ab9b9b84018..7ddd8cecb805 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c @@ -218,7 +218,8 @@ nv40_clk = { }; int -nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +nv40_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { struct nv40_clk *clk; @@ -228,5 +229,5 @@ nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) clk->base.pll_prog = nv04_clk_pll_prog; *pclk = &clk->base; - return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base); + return nvkm_clk_ctor(&nv40_clk, device, type, inst, true, &clk->base); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c index da1770e47490..83067763c0ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c @@ -507,14 +507,14 @@ nv50_clk_tidy(struct nvkm_clk *base) int nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device, - int index, bool allow_reclock, struct nvkm_clk **pclk) + enum nvkm_subdev_type type, int inst, bool allow_reclock, struct nvkm_clk **pclk) { struct nv50_clk *clk; int ret; if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL))) return -ENOMEM; - ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base); + ret = nvkm_clk_ctor(func, device, type, inst, allow_reclock, &clk->base); *pclk = &clk->base; if (ret) return ret; @@ -555,7 +555,8 @@ nv50_clk = { }; int -nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +nv50_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_clk **pclk) { - return nv50_clk_new_(&nv50_clk, device, index, false, pclk); + return nv50_clk_new_(&nv50_clk, device, type, inst, false, pclk); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h index 7c7713238ec4..5b4cb7e5cff6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h @@ -20,7 +20,7 @@ struct nv50_clk { struct nv50_clk_hwsq hwsq; }; -int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int, +int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int, bool, struct nvkm_clk **); int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src); int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h index 81dfb37480ae..810cc572cd30 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h @@ -16,9 +16,9 @@ struct nvkm_clk_func { struct nvkm_domain domains[]; }; -int nvkm_clk_ctor(const struct 
nvkm_clk_func *, struct nvkm_device *, int, +int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int, bool allow_reclock, struct nvkm_clk *); -int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int, +int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, enum nvkm_subdev_type, int, bool allow_reclock, struct nvkm_clk **); int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c index 4756019ddf3f..dd4981708fe4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c @@ -56,12 +56,12 @@ nvkm_devinit_disable(struct nvkm_devinit *init) } int -nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable) +nvkm_devinit_post(struct nvkm_devinit *init) { int ret = 0; if (init && init->func->post) ret = init->func->post(init, init->post); - *disable = nvkm_devinit_disable(init); + nvkm_devinit_disable(init); return ret; } @@ -126,11 +126,10 @@ nvkm_devinit = { }; void -nvkm_devinit_ctor(const struct nvkm_devinit_func *func, - struct nvkm_device *device, int index, - struct nvkm_devinit *init) +nvkm_devinit_ctor(const struct nvkm_devinit_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_devinit *init) { - nvkm_subdev_ctor(&nvkm_devinit, device, index, &init->subdev); + nvkm_subdev_ctor(&nvkm_devinit, device, type, inst, &init->subdev); init->func = func; init->force_post = nvkm_boolopt(device->cfgopt, "NvForcePost", false); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c index e895289bf3c1..c224702b7bed 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c @@ -35,18 +35,18 @@ g84_devinit_disable(struct nvkm_devinit *init) u64 disable = 0ULL; if (!(r001540 & 0x40000000)) { - disable |= (1ULL << NVKM_ENGINE_MPEG); - disable |= (1ULL << NVKM_ENGINE_VP); - disable |= (1ULL << NVKM_ENGINE_BSP); - disable |= (1ULL << NVKM_ENGINE_CIPHER); + nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_VP, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0); } if (!(r00154c & 0x00000004)) - disable |= (1ULL << NVKM_ENGINE_DISP); + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); if (!(r00154c & 0x00000020)) - disable |= (1ULL << NVKM_ENGINE_BSP); + nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0); if (!(r00154c & 0x00000040)) - disable |= (1ULL << NVKM_ENGINE_CIPHER); + nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0); return disable; } @@ -61,8 +61,8 @@ g84_devinit = { }; int -g84_devinit_new(struct nvkm_device *device, int index, +g84_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&g84_devinit, device, index, pinit); + return nv50_devinit_new_(&g84_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c index a9d45844df5a..05729ca19e9a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c @@ -35,17 +35,17 @@ g98_devinit_disable(struct nvkm_devinit *init) u64 disable = 0ULL; if (!(r001540 & 0x40000000)) { - disable |= (1ULL << 
NVKM_ENGINE_MSPDEC); - disable |= (1ULL << NVKM_ENGINE_MSVLD); - disable |= (1ULL << NVKM_ENGINE_MSPPP); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0); } if (!(r00154c & 0x00000004)) - disable |= (1ULL << NVKM_ENGINE_DISP); + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); if (!(r00154c & 0x00000020)) - disable |= (1ULL << NVKM_ENGINE_MSVLD); + nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0); if (!(r00154c & 0x00000040)) - disable |= (1ULL << NVKM_ENGINE_SEC); + nvkm_subdev_disable(device, NVKM_ENGINE_SEC, 0); return disable; } @@ -60,8 +60,8 @@ g98_devinit = { }; int -g98_devinit_new(struct nvkm_device *device, int index, +g98_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&g98_devinit, device, index, pinit); + return nv50_devinit_new_(&g98_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c index 636a92128f6c..6b280b05c4ca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c @@ -70,7 +70,8 @@ ga100_devinit = { }; int -ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit) +ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&ga100_devinit, device, index, pinit); + return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c index 8b1b34c3ad26..051cfd6a5caf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c @@ -71,21 +71,21 @@ gf100_devinit_disable(struct nvkm_devinit *init) u64 disable = 0ULL; if (r022500 & 0x00000001) - disable |= (1ULL << NVKM_ENGINE_DISP); + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); if (r022500 & 0x00000002) { - disable |= (1ULL << NVKM_ENGINE_MSPDEC); - disable |= (1ULL << NVKM_ENGINE_MSPPP); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0); } if (r022500 & 0x00000004) - disable |= (1ULL << NVKM_ENGINE_MSVLD); + nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0); if (r022500 & 0x00000008) - disable |= (1ULL << NVKM_ENGINE_MSENC); + nvkm_subdev_disable(device, NVKM_ENGINE_MSENC, 0); if (r022500 & 0x00000100) - disable |= (1ULL << NVKM_ENGINE_CE0); + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); if (r022500 & 0x00000200) - disable |= (1ULL << NVKM_ENGINE_CE1); + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 1); return disable; } @@ -114,8 +114,8 @@ gf100_devinit = { }; int -gf100_devinit_new(struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +gf100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&gf100_devinit, device, index, pinit); + return nv50_devinit_new_(&gf100_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c index 28ca01be3d38..4323732a3cb2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c @@ -35,11 +35,11 @@ 
gm107_devinit_disable(struct nvkm_devinit *init) u64 disable = 0ULL; if (r021c00 & 0x00000001) - disable |= (1ULL << NVKM_ENGINE_CE0); + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); if (r021c00 & 0x00000004) - disable |= (1ULL << NVKM_ENGINE_CE2); + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2); if (r021c04 & 0x00000001) - disable |= (1ULL << NVKM_ENGINE_DISP); + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); return disable; } @@ -54,8 +54,8 @@ gm107_devinit = { }; int -gm107_devinit_new(struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +gm107_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&gm107_devinit, device, index, pinit); + return nv50_devinit_new_(&gm107_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c index 59940dacc2ba..a308b9bde449 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c @@ -179,8 +179,8 @@ gm200_devinit = { }; int -gm200_devinit_new(struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +gm200_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&gm200_devinit, device, index, pinit); + return nv50_devinit_new_(&gm200_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c index 9a8522fa9c65..dc026ac1b595 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c @@ -71,16 +71,16 @@ gt215_devinit_disable(struct nvkm_devinit *init) u64 disable = 0ULL; if (!(r001540 & 0x40000000)) { - disable |= (1ULL << NVKM_ENGINE_MSPDEC); - disable |= (1ULL << NVKM_ENGINE_MSPPP); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0); } if (!(r00154c & 0x00000004)) - disable |= (1ULL << NVKM_ENGINE_DISP); + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); if (!(r00154c & 0x00000020)) - disable |= (1ULL << NVKM_ENGINE_MSVLD); + nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0); if (!(r00154c & 0x00000200)) - disable |= (1ULL << NVKM_ENGINE_CE0); + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); return disable; } @@ -146,8 +146,8 @@ gt215_devinit = { }; int -gt215_devinit_new(struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +gt215_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&gt215_devinit, device, index, pinit); + return nv50_devinit_new_(&gt215_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c index fbde6828bd38..b4d1688517d5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c @@ -72,8 +72,8 @@ gv100_devinit = { }; int -gv100_devinit_new(struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +gv100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&gv100_devinit, device, index, pinit); + return nv50_devinit_new_(&gv100_devinit, device, type, inst, pinit); } diff --git
a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c index ce4f718e98a1..fb90d47e1225 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c @@ -35,18 +35,18 @@ mcp89_devinit_disable(struct nvkm_devinit *init) u64 disable = 0; if (!(r001540 & 0x40000000)) { - disable |= (1ULL << NVKM_ENGINE_MSPDEC); - disable |= (1ULL << NVKM_ENGINE_MSPPP); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0); + nvkm_subdev_disable(device, NVKM_ENGINE_MSPPP, 0); } if (!(r00154c & 0x00000004)) - disable |= (1ULL << NVKM_ENGINE_DISP); + nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); if (!(r00154c & 0x00000020)) - disable |= (1ULL << NVKM_ENGINE_MSVLD); + nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0); if (!(r00154c & 0x00000040)) - disable |= (1ULL << NVKM_ENGINE_VIC); + nvkm_subdev_disable(device, NVKM_ENGINE_VIC, 0); if (!(r00154c & 0x00000200)) - disable |= (1ULL << NVKM_ENGINE_CE0); + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); return disable; } @@ -61,8 +61,8 @@ mcp89_devinit = { }; int -mcp89_devinit_new(struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +mcp89_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&mcp89_devinit, device, index, pinit); + return nv50_devinit_new_(&mcp89_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c index 317ce9fb8225..88bc890f89a2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c @@ -434,9 +434,8 @@ nv04_devinit_dtor(struct nvkm_devinit *base) } int -nv04_devinit_new_(const struct nvkm_devinit_func *func, - struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +nv04_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { struct nv04_devinit *init; @@ -444,7 +443,7 @@ nv04_devinit_new_(const struct nvkm_devinit_func *func, return -ENOMEM; *pinit = &init->base; - nvkm_devinit_ctor(func, device, index, &init->base); + nvkm_devinit_ctor(func, device, type, inst, &init->base); init->owner = -1; return 0; } @@ -459,8 +458,8 @@ nv04_devinit = { }; int -nv04_devinit_new(struct nvkm_device *device, int index, +nv04_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv04_devinit_new_(&nv04_devinit, device, index, pinit); + return nv04_devinit_new_(&nv04_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h index 15b029ddf6df..06ad8a606bb8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h @@ -11,7 +11,7 @@ struct nv04_devinit { }; int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *, - int, struct nvkm_devinit **); + enum nvkm_subdev_type, int, struct nvkm_devinit **); void *nv04_devinit_dtor(struct nvkm_devinit *); void nv04_devinit_preinit(struct nvkm_devinit *); void nv04_devinit_fini(struct nvkm_devinit *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c index 9891eadca1ce..1410befd2285 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c @@ -136,8 +136,8 @@ nv05_devinit = { }; int -nv05_devinit_new(struct nvkm_device *device, int index, +nv05_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv04_devinit_new_(&nv05_devinit, device, index, pinit); + return nv04_devinit_new_(&nv05_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c index 570822f83acf..a6aa8786d610 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c @@ -106,8 +106,8 @@ nv10_devinit = { }; int -nv10_devinit_new(struct nvkm_device *device, int index, +nv10_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv04_devinit_new_(&nv10_devinit, device, index, pinit); + return nv04_devinit_new_(&nv10_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c index fefafec7e2a7..4cc5ef9a5a63 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c @@ -35,8 +35,8 @@ nv1a_devinit = { }; int -nv1a_devinit_new(struct nvkm_device *device, int index, +nv1a_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv04_devinit_new_(&nv1a_devinit, device, index, pinit); + return nv04_devinit_new_(&nv1a_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c index 4ef04e0d8826..67f46df723e4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c @@ -72,8 +72,8 @@ nv20_devinit = { }; int -nv20_devinit_new(struct nvkm_device *device, int index, +nv20_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv04_devinit_new_(&nv20_devinit, device, index, pinit); + return nv04_devinit_new_(&nv20_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c index d7947c4391dc..380995d398b1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c @@ -85,7 +85,7 @@ nv50_devinit_disable(struct nvkm_devinit *init) u64 disable = 0ULL; if (!(r001540 & 0x40000000)) - disable |= (1ULL << NVKM_ENGINE_MPEG); + nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0); return disable; } @@ -101,8 +101,8 @@ nv50_devinit_preinit(struct nvkm_devinit *base) * missing, assume it's a secondary gpu which requires post */ if (!base->post) { - u64 disable = nvkm_devinit_disable(base); - if (disable & (1ULL << NVKM_ENGINE_DISP)) + nvkm_devinit_disable(base); + if (!device->disp) base->post = true; } @@ -148,9 +148,8 @@ nv50_devinit_init(struct nvkm_devinit *base) } int -nv50_devinit_new_(const struct nvkm_devinit_func *func, - struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +nv50_devinit_new_(const struct nvkm_devinit_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { struct nv50_devinit *init; @@ -158,7 +157,7 @@ 
nv50_devinit_new_(const struct nvkm_devinit_func *func, return -ENOMEM; *pinit = &init->base; - nvkm_devinit_ctor(func, device, index, &init->base); + nvkm_devinit_ctor(func, device, type, inst, &init->base); return 0; } @@ -172,8 +171,8 @@ nv50_devinit = { }; int -nv50_devinit_new(struct nvkm_device *device, int index, +nv50_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&nv50_devinit, device, index, pinit); + return nv50_devinit_new_(&nv50_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h index e8d37a6145a2..987a7f478b84 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h @@ -9,7 +9,7 @@ struct nv50_devinit { u32 r001540; }; -int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *, +int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **); void nv50_devinit_preinit(struct nvkm_devinit *); void nv50_devinit_init(struct nvkm_devinit *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h index 05961e624264..dd8b038a8cee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h @@ -16,7 +16,8 @@ struct nvkm_devinit_func { }; void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *, - int index, struct nvkm_devinit *); + enum nvkm_subdev_type, int inst, struct nvkm_devinit *); +u64 nvkm_devinit_disable(struct nvkm_devinit *); int nv04_devinit_post(struct nvkm_devinit *, bool); int tu102_devinit_post(struct nvkm_devinit *, bool); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c index 9a469bf482f2..634f64f88fc8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c @@ -82,8 +82,8 @@ tu102_devinit = { }; int -tu102_devinit_new(struct nvkm_device *device, int index, - struct nvkm_devinit **pinit) +tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_devinit **pinit) { - return nv50_devinit_new_(&tu102_devinit, device, index, pinit); + return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c index f6dca97140d6..fd54fa504efa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c @@ -170,12 +170,12 @@ nvkm_fault = { int nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device, - int index, struct nvkm_fault **pfault) + enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault) { struct nvkm_fault *fault; if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev); + nvkm_subdev_ctor(&nvkm_fault, device, type, inst, &fault->subdev); fault->func = func; fault->user.ctor = nvkm_ufault_new; fault->user.base = func->user.base; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c index f6b189cc4330..6af7959e02ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c 
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c @@ -30,7 +30,7 @@ void gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable) { struct nvkm_device *device = buffer->fault->subdev.device; - nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable); + nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable); } void @@ -82,8 +82,8 @@ gp100_fault = { }; int -gp100_fault_new(struct nvkm_device *device, int index, +gp100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault) { - return nvkm_fault_new_(&gp100_fault, device, index, pfault); + return nvkm_fault_new_(&gp100_fault, device, type, inst, pfault); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c index 9e66d1f7654d..89e0bc96fb92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp10b.c @@ -46,8 +46,8 @@ gp10b_fault = { }; int -gp10b_fault_new(struct nvkm_device *device, int index, +gp10b_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault) { - return nvkm_fault_new_(&gp10b_fault, device, index, pfault); + return nvkm_fault_new_(&gp10b_fault, device, type, inst, pfault); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c index 2707be4ffabc..cd9d2ade5ac7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c @@ -228,8 +228,8 @@ gv100_fault = { }; int -gv100_fault_new(struct nvkm_device *device, int index, +gv100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault) { - return nvkm_fault_new_(&gv100_fault, device, index, pfault); + return nvkm_fault_new_(&gv100_fault, device, type, inst, pfault); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h index f6f1dd7eee1f..36681c347fb5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h @@ -18,8 +18,8 @@ struct nvkm_fault_buffer { u64 addr; }; -int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *, - int index, struct nvkm_fault **); +int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *, enum nvkm_subdev_type, + int inst, struct nvkm_fault **); struct nvkm_fault_func { int (*oneinit)(struct nvkm_fault *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c index f080051b0c65..91eb6729c84d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c @@ -37,7 +37,7 @@ tu102_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable) */ struct nvkm_device *device = buffer->fault->subdev.device; - nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable); + nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, 0, enable); } static void @@ -181,8 +181,8 @@ tu102_fault = { }; int -tu102_fault_new(struct nvkm_device *device, int index, +tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fault **pfault) { - return nvkm_fault_new_(&tu102_fault, device, index, pfault); + return nvkm_fault_new_(&tu102_fault, device, type, inst, pfault); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 
5940e0dea2f8..6faaea948fc4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -122,7 +122,7 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev) nvkm_debug(subdev, "%d comptags\n", tags); } - return nvkm_mm_init(&fb->tags, 0, 0, tags, 1); + return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1); } static int @@ -205,7 +205,9 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev) for (i = 0; i < fb->tile.regions; i++) fb->func->tile.fini(fb, i, &fb->tile.region[i]); - nvkm_mm_fini(&fb->tags); + nvkm_mm_fini(&fb->tags.mm); + mutex_destroy(&fb->tags.mutex); + nvkm_ram_del(&fb->ram); nvkm_blob_dtor(&fb->vpr_scrubber); @@ -225,21 +227,21 @@ nvkm_fb = { void nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device, - int index, struct nvkm_fb *fb) + enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb) { - nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev); + nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev); fb->func = func; fb->tile.regions = fb->func->tile.regions; - fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", - fb->func->default_bigpage); + fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage); + mutex_init(&fb->tags.mutex); } int nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, - int index, struct nvkm_fb **pfb) + enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL))) return -ENOMEM; - nvkm_fb_ctor(func, device, index, *pfb); + nvkm_fb_ctor(func, device, type, inst, *pfb); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c index 06bf95c0c549..770a4ad39122 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c @@ -32,7 +32,7 @@ g84_fb = { }; int -g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +g84_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nv50_fb_new_(&g84_fb, device, index, pfb); + return nv50_fb_new_(&g84_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c index bf82686851cd..b47bebfbc26f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c @@ -34,7 +34,7 @@ ga100_fb = { }; int -ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&ga100_fb, device, index, pfb); + return gp102_fb_new_(&ga100_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c index bcecf84a6e67..6ea7908f0563 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c @@ -34,7 +34,7 @@ ga102_fb = { }; int -ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&ga102_fb, device, index, pfb); + return gp102_fb_new_(&ga102_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c index e8dc4e913494..9dcc40f9ef79 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c @@ -117,13 +117,13 @@ gf100_fb_dtor(struct nvkm_fb *base) int gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, - int index, struct nvkm_fb **pfb) + enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { struct gf100_fb *fb; if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL))) return -ENOMEM; - nvkm_fb_ctor(func, device, index, &fb->base); + nvkm_fb_ctor(func, device, type, inst, &fb->base); *pfb = &fb->base; return 0; @@ -141,7 +141,7 @@ gf100_fb = { }; int -gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gf100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gf100_fb, device, index, pfb); + return gf100_fb_new_(&gf100_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h index 2ed7cdaab37c..0cac7b06acc8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h @@ -10,8 +10,8 @@ struct gf100_fb { dma_addr_t r100c10; }; -int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, - int index, struct nvkm_fb **); +int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_fb **); void *gf100_fb_dtor(struct nvkm_fb *); void gf100_fb_init(struct nvkm_fb *); void gf100_fb_intr(struct nvkm_fb *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c index 4a9f463745b5..76678dd60f93 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c @@ -36,7 +36,7 @@ gf108_fb = { }; int -gf108_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gf108_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gf108_fb, device, index, pfb); + return gf100_fb_new_(&gf108_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c index 48fd98e08baa..f73442ccb424 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c @@ -83,7 +83,7 @@ gk104_fb = { }; int -gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gk104_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gk104_fb, device, index, pfb); + return gf100_fb_new_(&gk104_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c index 0695e5dd360e..45d6cdffafee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c @@ -65,7 +65,7 @@ gk110_fb = { }; int -gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gk110_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gk110_fb, device, index, pfb); + return gf100_fb_new_(&gk110_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c index a7e29b125094..6bc42f89d8c4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c @@ -34,7 +34,7 @@ gk20a_fb = { }; int 
-gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gk20a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gk20a_fb, device, index, pfb); + return gf100_fb_new_(&gk20a_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c index 69c876d5d1c1..de52462a92bf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c @@ -36,7 +36,7 @@ gm107_fb = { }; int -gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gm107_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gm107_fb, device, index, pfb); + return gf100_fb_new_(&gm107_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c index d3b8c3367152..5acf8d15d06f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c @@ -67,7 +67,7 @@ gm200_fb = { }; int -gm200_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gm200_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gm200_fb, device, index, pfb); + return gf100_fb_new_(&gm200_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c index 12db61e31128..86f61a3f2fea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c @@ -34,7 +34,7 @@ gm20b_fb = { }; int -gm20b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gm20b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gm20b_fb, device, index, pfb); + return gf100_fb_new_(&gm20b_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c index 8205ce436b3e..09e943edc362 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c @@ -71,7 +71,7 @@ gp100_fb = { }; int -gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gp100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gp100_fb, device, index, pfb); + return gf100_fb_new_(&gp100_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index fc8c93aa3da5..0e78b3d734a0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -114,9 +114,9 @@ gp102_fb = { int gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, - int index, struct nvkm_fb **pfb) + enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - int ret = gf100_fb_new_(func, device, index, pfb); + int ret = gf100_fb_new_(func, device, type, inst, pfb); if (ret) return ret; @@ -126,9 +126,9 @@ gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, } int -gp102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return 
gp102_fb_new_(&gp102_fb, device, index, pfb); + return gp102_fb_new_(&gp102_fb, device, type, inst, pfb); } MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c index af8e43979dc1..84c9815a6d48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c @@ -31,7 +31,7 @@ gp10b_fb = { }; int -gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gp10b_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gf100_fb_new_(&gp10b_fb, device, index, pfb); + return gf100_fb_new_(&gp10b_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c index 9266559b45f9..c1ec9758617c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c @@ -32,7 +32,7 @@ gt215_fb = { }; int -gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gt215_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nv50_fb_new_(&gt215_fb, device, index, pfb); + return nv50_fb_new_(&gt215_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c index feda86a5fba8..63daa83ae12d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c @@ -42,9 +42,9 @@ gv100_fb = { }; int -gv100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&gv100_fb, device, index, pfb); + return gp102_fb_new_(&gv100_fb, device, type, inst, pfb); } MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c index 73b3b86a2826..70c7b08ee0a6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c @@ -31,7 +31,7 @@ mcp77_fb = { }; int -mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +mcp77_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nv50_fb_new_(&mcp77_fb, device, index, pfb); + return nv50_fb_new_(&mcp77_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c index 6d11e32ec7ad..308d955168e8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c @@ -31,7 +31,7 @@ mcp89_fb = { }; int -mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +mcp89_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nv50_fb_new_(&mcp89_fb, device, index, pfb); + return nv50_fb_new_(&mcp89_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c index c886664533c8..8d5a007ecc47 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c @@ -44,7 +44,7 @@ nv04_fb = { }; int -nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv04_fb_new(struct nvkm_device *device, enum 
nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv04_fb, device, index, pfb); + return nvkm_fb_new_(&nv04_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c index c998b7e96aa3..7d2c16b27032 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c @@ -64,7 +64,7 @@ nv10_fb = { }; int -nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv10_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv10_fb, device, index, pfb); + return nvkm_fb_new_(&nv10_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c index 7b9f04f44af8..4bdad2abd56f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c @@ -36,7 +36,7 @@ nv1a_fb = { }; int -nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv1a_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv1a_fb, device, index, pfb); + return nvkm_fb_new_(&nv1a_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c index a021d21ff153..d254f27f9b37 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c @@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags, { u32 tiles = DIV_ROUND_UP(size, 0x40); u32 tags = round_up(tiles / fb->ram->parts, 0x40); - if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) { + if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */ else tile->zcomp = 0x04000000; /* Z24S8 */ tile->zcomp |= tile->tag->offset; @@ -63,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile) tile->limit = 0; tile->pitch = 0; tile->zcomp = 0; - nvkm_mm_free(&fb->tags, &tile->tag); + nvkm_mm_free(&fb->tags.mm, &tile->tag); } void @@ -96,7 +96,7 @@ nv20_fb = { }; int -nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv20_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv20_fb, device, index, pfb); + return nvkm_fb_new_(&nv20_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c index 7709f5fe9a45..47da66dea6e6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c @@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags, { u32 tiles = DIV_ROUND_UP(size, 0x40); u32 tags = round_up(tiles / fb->ram->parts, 0x40); - if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) { + if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */ else tile->zcomp = 0x00200000; /* Z24S8 */ tile->zcomp |= tile->tag->offset; @@ -54,7 +54,7 @@ nv25_fb = { }; int -nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv25_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv25_fb, device, index, pfb); + return 
nvkm_fb_new_(&nv25_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c index 8aa782666507..0f87efb636d5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c @@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags, { u32 tiles = DIV_ROUND_UP(size, 0x40); u32 tags = round_up(tiles / fb->ram->parts, 0x40); - if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) { + if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */ else tile->zcomp |= 0x02000000; /* Z24S8 */ tile->zcomp |= ((tile->tag->offset ) >> 6); @@ -127,7 +127,7 @@ nv30_fb = { }; int -nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv30_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv30_fb, device, index, pfb); + return nvkm_fb_new_(&nv30_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c index 6e83dcff72e0..0694dcfd107e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c @@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags, { u32 tiles = DIV_ROUND_UP(size, 0x40); u32 tags = round_up(tiles / fb->ram->parts, 0x40); - if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) { + if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */ else tile->zcomp |= 0x08000000; /* Z24S8 */ tile->zcomp |= ((tile->tag->offset ) >> 6); @@ -56,7 +56,7 @@ nv35_fb = { }; int -nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv35_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv35_fb, device, index, pfb); + return nvkm_fb_new_(&nv35_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c index 2a07617bb44c..1a39770372f1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c @@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags, { u32 tiles = DIV_ROUND_UP(size, 0x40); u32 tags = round_up(tiles / fb->ram->parts, 0x40); - if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) { + if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */ else tile->zcomp |= 0x20000000; /* Z24S8 */ tile->zcomp |= ((tile->tag->offset ) >> 6); @@ -56,7 +56,7 @@ nv36_fb = { }; int -nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv36_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv36_fb, device, index, pfb); + return nvkm_fb_new_(&nv36_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c index 955160778b5b..77dbb9d6ba48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c @@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags, u32 tiles = DIV_ROUND_UP(size, 0x80); u32 tags = round_up(tiles / 
fb->ram->parts, 0x100); if ( (flags & 2) && - !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) { + !nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) { tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */ tile->zcomp |= ((tile->tag->offset ) >> 8); tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13; @@ -62,7 +62,7 @@ nv40_fb = { }; int -nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv40_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv40_fb, device, index, pfb); + return nvkm_fb_new_(&nv40_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c index b77f08d34cc3..0f9d9e48e7ad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c @@ -56,7 +56,7 @@ nv41_fb = { }; int -nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv41_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv41_fb, device, index, pfb); + return nvkm_fb_new_(&nv41_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c index b59dc486083d..b1046ee9f0ea 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c @@ -65,7 +65,7 @@ nv44_fb = { }; int -nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv44_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv44_fb, device, index, pfb); + return nvkm_fb_new_(&nv44_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c index cab7d20fa039..0d78de422dfa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c @@ -51,7 +51,7 @@ nv46_fb = { }; int -nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv46_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv46_fb, device, index, pfb); + return nvkm_fb_new_(&nv46_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c index a8b0ad4c871d..5cedde29c8ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c @@ -39,7 +39,7 @@ nv47_fb = { }; int -nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv47_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv47_fb, device, index, pfb); + return nvkm_fb_new_(&nv47_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c index d0b317bb0252..95cc099603d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c @@ -39,7 +39,7 @@ nv49_fb = { }; int -nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv49_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv49_fb, device, index, pfb); + return nvkm_fb_new_(&nv49_fb, device, type, inst, pfb); } diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c index 6a6f0c086071..c9f3148f4e75 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c @@ -37,7 +37,7 @@ nv4e_fb = { }; int -nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv4e_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nvkm_fb_new_(&nv4e_fb, device, index, pfb); + return nvkm_fb_new_(&nv4e_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c index b2f5bf8144ea..95fd8f834010 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c @@ -262,16 +262,15 @@ nv50_fb_ = { int nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device, - int index, struct nvkm_fb **pfb) + enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { struct nv50_fb *fb; if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL))) return -ENOMEM; - nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base); + nvkm_fb_ctor(&nv50_fb_, device, type, inst, &fb->base); fb->func = func; *pfb = &fb->base; - return 0; } @@ -283,7 +282,7 @@ nv50_fb = { }; int -nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +nv50_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return nv50_fb_new_(&nv50_fb, device, index, pfb); + return nv50_fb_new_(&nv50_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h index 5e2b0c9539ed..a5e673859a90 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h @@ -17,6 +17,6 @@ struct nv50_fb_func { u32 trap; }; -int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index, +int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_fb **pfb); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index 66932ac10d15..3f1be9780c65 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -38,9 +38,9 @@ struct nvkm_fb_func { }; void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device, - int index, struct nvkm_fb *); + enum nvkm_subdev_type type, int inst, struct nvkm_fb *); int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device, - int index, struct nvkm_fb **); + enum nvkm_subdev_type type, int inst, struct nvkm_fb **); int nvkm_fb_bios_memtype(struct nvkm_bios *); void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size, @@ -78,7 +78,7 @@ int gm200_fb_init_page(struct nvkm_fb *); void gp100_fb_init_remapper(struct nvkm_fb *); void gp100_fb_init_unkn(struct nvkm_fb *); -int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int, +int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_fb **); bool gp102_fb_vpr_scrub_required(struct nvkm_fb *); int gp102_fb_vpr_scrub(struct nvkm_fb *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c index b11867f682cb..03b1bdb27770 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c @@ -81,12 +81,12 
@@ nvkm_vram_dtor(struct nvkm_memory *memory) struct nvkm_vram *vram = nvkm_vram(memory); struct nvkm_mm_node *next = vram->mn; struct nvkm_mm_node *node; - mutex_lock(&vram->ram->fb->subdev.mutex); + mutex_lock(&vram->ram->mutex); while ((node = next)) { next = node->next; nvkm_mm_free(&vram->ram->vram, &node); } - mutex_unlock(&vram->ram->fb->subdev.mutex); + mutex_unlock(&vram->ram->mutex); return vram; } @@ -126,7 +126,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size, vram->page = page; *pmemory = &vram->memory; - mutex_lock(&ram->fb->subdev.mutex); + mutex_lock(&ram->mutex); node = &vram->mn; do { if (back) @@ -134,7 +134,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size, else ret = nvkm_mm_head(mm, heap, type, max, min, align, &r); if (ret) { - mutex_unlock(&ram->fb->subdev.mutex); + mutex_unlock(&ram->mutex); nvkm_memory_unref(pmemory); return ret; } @@ -143,7 +143,7 @@ nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size, node = &r->next; max -= r->length; } while (max); - mutex_unlock(&ram->fb->subdev.mutex); + mutex_unlock(&ram->mutex); return 0; } @@ -163,6 +163,7 @@ nvkm_ram_del(struct nvkm_ram **pram) if (ram->func->dtor) *pram = ram->func->dtor(ram); nvkm_mm_fini(&ram->vram); + mutex_destroy(&ram->mutex); kfree(*pram); *pram = NULL; } @@ -196,6 +197,7 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb, ram->fb = fb; ram->type = type; ram->size = size; + mutex_init(&ram->mutex); if (!nvkm_mm_initialised(&ram->vram)) { ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c index d350d92852d2..2b678b60b4d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c @@ -260,7 +260,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq) ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); ram_block(fuc); - if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) + if (ram->base.fb->subdev.device->disp) ram_wr32(fuc, 0x62c000, 0x0f0f0000); /* MR1: turn termination on early, for some reason.. 
*/ @@ -661,7 +661,7 @@ gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq) ram_unblock(fuc); - if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) + if (ram->base.fb->subdev.device->disp) ram_wr32(fuc, 0x62c000, 0x0f0f0f00); if (next->bios.rammap_11_08_01) @@ -711,7 +711,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq) ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000); ram_block(fuc); - if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) + if (ram->base.fb->subdev.device->disp) ram_wr32(fuc, 0x62c000, 0x0f0f0000); if (vc == 1 && ram_have(fuc, gpio2E)) { @@ -943,7 +943,7 @@ gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq) ram_unblock(fuc); - if (nvkm_device_engine(ram->base.fb->subdev.device, NVKM_ENGINE_DISP)) + if (ram->base.fb->subdev.device->disp) ram_wr32(fuc, 0x62c000, 0x0f0f0f00); if (next->bios.rammap_11_08_01) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c index 1c3c18ea8ced..375dfce09f84 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c @@ -42,12 +42,12 @@ nvkm_fuse = { int nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device, - int index, struct nvkm_fuse **pfuse) + enum nvkm_subdev_type type, int inst, struct nvkm_fuse **pfuse) { struct nvkm_fuse *fuse; if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_fuse, device, index, &fuse->subdev); + nvkm_subdev_ctor(&nvkm_fuse, device, type, inst, &fuse->subdev); fuse->func = func; spin_lock_init(&fuse->lock); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c index 13671fedc805..01f770654b1d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c @@ -47,7 +47,8 @@ gf100_fuse = { }; int -gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse) +gf100_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fuse **pfuse) { - return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse); + return nvkm_fuse_new_(&gf100_fuse, device, type, inst, pfuse); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c index 9aff4ea04506..7dc99492f536 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c @@ -36,7 +36,8 @@ gm107_fuse = { }; int -gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse) +gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fuse **pfuse) { - return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse); + return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c index 514c193db25d..2505e8e1c1d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c @@ -45,7 +45,8 @@ nv50_fuse = { }; int -nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse) +nv50_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fuse **pfuse) { - return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse); + return nvkm_fuse_new_(&nv50_fuse, device, type, inst, pfuse); } diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h index 2edc612408dd..e83d0c30dff6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h @@ -8,6 +8,6 @@ struct nvkm_fuse_func { u32 (*read)(struct nvkm_fuse *, u32 addr); }; -int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *, - int index, struct nvkm_fuse **); +int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_fuse **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c index 914276410ef8..048bcc70c3f4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c @@ -241,14 +241,14 @@ nvkm_gpio = { int nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device, - int index, struct nvkm_gpio **pgpio) + enum nvkm_subdev_type type, int inst, struct nvkm_gpio **pgpio) { struct nvkm_gpio *gpio; if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_gpio, device, index, &gpio->subdev); + nvkm_subdev_ctor(&nvkm_gpio, device, type, inst, &gpio->subdev); gpio->func = func; return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c index 6dcda55fb865..114728ccdf8e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c @@ -68,7 +68,8 @@ g94_gpio = { }; int -g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) +g94_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gpio **pgpio) { - return nvkm_gpio_new_(&g94_gpio, device, index, pgpio); + return nvkm_gpio_new_(&g94_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c index 62c791baf400..4a96f926b66d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c @@ -112,7 +112,8 @@ ga102_gpio = { }; int -ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) +ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gpio **pgpio) { - return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio); + return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c index bb7400dfaef8..ecb19e4f5c48 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c @@ -80,7 +80,8 @@ gf119_gpio = { }; int -gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) +gf119_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gpio **pgpio) { - return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio); + return nvkm_gpio_new_(&gf119_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c index 2ead515b8530..c0e4cdb45520 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c @@ -68,7 +68,8 @@ gk104_gpio = { }; int -gk104_gpio_new(struct 
nvkm_device *device, int index, struct nvkm_gpio **pgpio) +gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gpio **pgpio) { - return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio); + return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c index ae3499b48330..48ad29b5638f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c @@ -112,7 +112,8 @@ nv10_gpio = { }; int -nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) +nv10_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gpio **pgpio) { - return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio); + return nvkm_gpio_new_(&nv10_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c index 73923fd5f7f2..b86c49762f11 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c @@ -126,7 +126,8 @@ nv50_gpio = { }; int -nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio) +nv50_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gpio **pgpio) { - return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio); + return nvkm_gpio_new_(&nv50_gpio, device, type, inst, pgpio); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h index 59e39affe2a0..6590d81164e7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h @@ -28,8 +28,8 @@ struct nvkm_gpio_func { void (*reset)(struct nvkm_gpio *, u8); }; -int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *, - int index, struct nvkm_gpio **); +int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_gpio **); void nv50_gpio_reset(struct nvkm_gpio *, u8); int nv50_gpio_drive(struct nvkm_gpio *, int, int, int); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c index 5a32df0f9992..22574886b819 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c @@ -40,20 +40,18 @@ nvkm_gsp = { int nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device, - int index, struct nvkm_gsp **pgsp) + enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp) { struct nvkm_gsp *gsp; if (!(gsp = *pgsp = kzalloc(sizeof(*gsp), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_gsp, device, index, &gsp->subdev); + nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev); fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp); if (IS_ERR(fwif)) return PTR_ERR(fwif); - return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, - nvkm_subdev_name[gsp->subdev.index], 0, - &gsp->falcon); + return nvkm_falcon_ctor(fwif->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c index 2114f9b00a28..2ac7fc934c09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c @@ -49,7 +49,8 @@ gv100_gsp[] = { }; int -gv100_gsp_new(struct nvkm_device *device, int index, 
struct nvkm_gsp **pgsp) +gv100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_gsp **pgsp) { - return nvkm_gsp_new_(gv100_gsp, device, index, pgsp); + return nvkm_gsp_new_(gv100_gsp, device, type, inst, pgsp); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index 92820fb997c1..19381ddd38d4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -10,6 +10,6 @@ struct nvkm_gsp_fwif { const struct nvkm_falcon_func *flcn; }; -int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, int, +int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c index 719345074711..cb5cb533d91c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c @@ -277,7 +277,7 @@ nvkm_i2c_drv[] = { int nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device, - int index, struct nvkm_i2c **pi2c) + enum nvkm_subdev_type type, int inst, struct nvkm_i2c **pi2c) { struct nvkm_bios *bios = device->bios; struct nvkm_i2c *i2c; @@ -289,7 +289,7 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device, if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_i2c, device, index, &i2c->subdev); + nvkm_subdev_ctor(&nvkm_i2c, device, type, inst, &i2c->subdev); i2c->func = func; INIT_LIST_HEAD(&i2c->pad); INIT_LIST_HEAD(&i2c->bus); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c index bb2a31d88161..e5bad085c06f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c @@ -66,7 +66,8 @@ g94_i2c = { }; int -g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +g94_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&g94_i2c, device, index, pi2c); + return nvkm_i2c_new_(&g94_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c index ae4aad3fcd2e..cda30ee6767d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c @@ -30,7 +30,8 @@ gf117_i2c = { }; int -gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +gf117_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c); + return nvkm_i2c_new_(&gf117_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c index 6f2b02af42c8..e9c6a6cca09d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c @@ -34,7 +34,8 @@ gf119_i2c = { }; int -gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +gf119_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c); + return nvkm_i2c_new_(&gf119_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c 
b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c index f9f6bf4b66c9..d35aa6fe3015 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c @@ -66,7 +66,8 @@ gk104_i2c = { }; int -gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +gk104_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c); + return nvkm_i2c_new_(&gk104_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c index 8e3bfa1af52a..9fec6af56e07 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c @@ -39,7 +39,8 @@ gk110_i2c = { }; int -gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +gk110_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c); + return nvkm_i2c_new_(&gk110_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c index 7b2375bff8a9..46917eb600f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c @@ -41,7 +41,8 @@ gm200_i2c = { }; int -gm200_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&gm200_i2c, device, index, pi2c); + return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c index 18776f49355c..ecfcf147c789 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c @@ -30,7 +30,8 @@ nv04_i2c = { }; int -nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +nv04_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c); + return nvkm_i2c_new_(&nv04_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c index 6b762f7cee9e..ad1d3fd2bcbc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c @@ -30,7 +30,8 @@ nv4e_i2c = { }; int -nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +nv4e_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c); + return nvkm_i2c_new_(&nv4e_i2c, device, type, inst, pi2c); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c index 75640ab97d6a..2f94bed2c056 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c @@ -30,7 +30,8 @@ nv50_i2c = { }; int -nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) +nv50_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_i2c **pi2c) { - return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c); + return nvkm_i2c_new_(&nv50_i2c, device, type, inst, pi2c); } diff 
--git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h index e35f6036fcfc..f9d79f72f7e7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h @@ -4,8 +4,8 @@ #define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev) #include <subdev/i2c.h> -int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *, - int index, struct nvkm_i2c **); +int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_i2c **); struct nvkm_i2c_func { int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild deleted file mode 100644 index 127efb51f67d..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: MIT -nvkm-y += nvkm/subdev/ibus/gf100.o -nvkm-y += nvkm/subdev/ibus/gf117.o -nvkm-y += nvkm/subdev/ibus/gk104.o -nvkm-y += nvkm/subdev/ibus/gk20a.o -nvkm-y += nvkm/subdev/ibus/gm200.o -nvkm-y += nvkm/subdev/ibus/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h deleted file mode 100644 index 302d69e384d8..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -#ifndef __NVKM_IBUS_PRIV_H__ -#define __NVKM_IBUS_PRIV_H__ - -#include <subdev/ibus.h> - -void gf100_ibus_intr(struct nvkm_subdev *); -void gk104_ibus_intr(struct nvkm_subdev *); -#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c index fecfa6afcf54..8f0ccd3664eb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c @@ -312,20 +312,20 @@ iccsense_func = { }; void -nvkm_iccsense_ctor(struct nvkm_device *device, int index, +nvkm_iccsense_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_iccsense *iccsense) { - nvkm_subdev_ctor(&iccsense_func, device, index, &iccsense->subdev); + nvkm_subdev_ctor(&iccsense_func, device, type, inst, &iccsense->subdev); } int -nvkm_iccsense_new_(struct nvkm_device *device, int index, +nvkm_iccsense_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_iccsense **iccsense) { if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL))) return -ENOMEM; INIT_LIST_HEAD(&(*iccsense)->sensors); INIT_LIST_HEAD(&(*iccsense)->rails); - nvkm_iccsense_ctor(device, index, *iccsense); + nvkm_iccsense_ctor(device, type, inst, *iccsense); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c index cccff1c8a409..3eabf4944395 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c @@ -24,8 +24,8 @@ #include "priv.h" int -gf100_iccsense_new(struct nvkm_device *device, int index, +gf100_iccsense_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_iccsense **piccsense) { - return nvkm_iccsense_new_(device, index, piccsense); + return nvkm_iccsense_new_(device, type, inst, piccsense); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h index cc09c6c504af..c33441124241 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h @@ -22,6 +22,6 @@ struct nvkm_iccsense_rail { u8 mohm; }; -void nvkm_iccsense_ctor(struct nvkm_device *, int, struct nvkm_iccsense *); -int nvkm_iccsense_new_(struct nvkm_device *, int, struct nvkm_iccsense **); +void nvkm_iccsense_ctor(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense *); +int nvkm_iccsense_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_iccsense **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index 364ea4492acc..cd8163a52bb6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c @@ -218,9 +218,11 @@ static void * nvkm_instmem_dtor(struct nvkm_subdev *subdev) { struct nvkm_instmem *imem = nvkm_instmem(subdev); + void *data = imem; if (imem->func->dtor) - return imem->func->dtor(imem); - return imem; + data = imem->func->dtor(imem); + mutex_destroy(&imem->mutex); + return data; } static const struct nvkm_subdev_func @@ -232,13 +234,13 @@ nvkm_instmem = { }; void -nvkm_instmem_ctor(const struct nvkm_instmem_func *func, - struct nvkm_device *device, int index, - struct nvkm_instmem *imem) +nvkm_instmem_ctor(const struct nvkm_instmem_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_instmem *imem) { - nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev); + nvkm_subdev_ctor(&nvkm_instmem, device, type, inst, &imem->subdev); imem->func = func; spin_lock_init(&imem->lock); INIT_LIST_HEAD(&imem->list); INIT_LIST_HEAD(&imem->boot); + mutex_init(&imem->mutex); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index 13d4d7ac0697..648ecf5a8fbc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -568,7 +568,7 @@ gk20a_instmem = { }; int -gk20a_instmem_new(struct nvkm_device *device, int index, +gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_instmem **pimem) { struct nvkm_device_tegra *tdev = device->func->tegra(device); @@ -576,7 +576,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index, if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; - nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base); + nvkm_instmem_ctor(&gk20a_instmem, device, type, inst, &imem->base); mutex_init(&imem->lock); *pimem = &imem->base; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c index 6bf0dad46919..25603b01d6f8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c @@ -99,9 +99,9 @@ static void * nv04_instobj_dtor(struct nvkm_memory *memory) { struct nv04_instobj *iobj = nv04_instobj(memory); - mutex_lock(&iobj->imem->base.subdev.mutex); + mutex_lock(&iobj->imem->base.mutex); nvkm_mm_free(&iobj->imem->heap, &iobj->node); - mutex_unlock(&iobj->imem->base.subdev.mutex); + mutex_unlock(&iobj->imem->base.mutex); nvkm_instobj_dtor(&iobj->imem->base, &iobj->base); return iobj; } @@ -132,10 +132,9 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero, iobj->base.memory.ptrs = &nv04_instobj_ptrs; iobj->imem = imem; - mutex_lock(&imem->base.subdev.mutex); - ret = nvkm_mm_head(&imem->heap, 0, 1, 
size, size, - align ? align : 1, &iobj->node); - mutex_unlock(&imem->base.subdev.mutex); + mutex_lock(&imem->base.mutex); + ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node); + mutex_unlock(&imem->base.mutex); return ret; } @@ -218,14 +217,14 @@ nv04_instmem = { }; int -nv04_instmem_new(struct nvkm_device *device, int index, +nv04_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_instmem **pimem) { struct nv04_instmem *imem; if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; - nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base); + nvkm_instmem_ctor(&nv04_instmem, device, type, inst, &imem->base); *pimem = &imem->base; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c index 086c118488ef..6b462f960922 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c @@ -99,9 +99,9 @@ static void * nv40_instobj_dtor(struct nvkm_memory *memory) { struct nv40_instobj *iobj = nv40_instobj(memory); - mutex_lock(&iobj->imem->base.subdev.mutex); + mutex_lock(&iobj->imem->base.mutex); nvkm_mm_free(&iobj->imem->heap, &iobj->node); - mutex_unlock(&iobj->imem->base.subdev.mutex); + mutex_unlock(&iobj->imem->base.mutex); nvkm_instobj_dtor(&iobj->imem->base, &iobj->base); return iobj; } @@ -132,10 +132,9 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero, iobj->base.memory.ptrs = &nv40_instobj_ptrs; iobj->imem = imem; - mutex_lock(&imem->base.subdev.mutex); - ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, - align ? align : 1, &iobj->node); - mutex_unlock(&imem->base.subdev.mutex); + mutex_lock(&imem->base.mutex); + ret = nvkm_mm_head(&imem->heap, 0, 1, size, size, align ? align : 1, &iobj->node); + mutex_unlock(&imem->base.mutex); return ret; } @@ -236,7 +235,7 @@ nv40_instmem = { }; int -nv40_instmem_new(struct nvkm_device *device, int index, +nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_instmem **pimem) { struct nv40_instmem *imem; @@ -244,7 +243,7 @@ nv40_instmem_new(struct nvkm_device *device, int index, if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; - nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base); + nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base); *pimem = &imem->base; /* map bar */ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index 02c4eb28cef4..96aca0edfa3c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -133,12 +133,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm) * into it. The lock has to be dropped while doing this due * to the possibility of recursion for page table allocation. */ - mutex_unlock(&subdev->mutex); + mutex_unlock(&imem->base.mutex); while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) { /* Evict unused mappings, and keep retrying until we either * succeed,or there's no more objects left on the LRU. 
*/ - mutex_lock(&subdev->mutex); + mutex_lock(&imem->base.mutex); eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru); if (eobj) { nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n", @@ -151,7 +151,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm) emap = eobj->map; eobj->map = NULL; } - mutex_unlock(&subdev->mutex); + mutex_unlock(&imem->base.mutex); if (!eobj) break; iounmap(emap); @@ -160,12 +160,12 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm) if (ret == 0) ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0); - mutex_lock(&subdev->mutex); + mutex_lock(&imem->base.mutex); if (ret || iobj->bar) { /* We either failed, or another thread beat us. */ - mutex_unlock(&subdev->mutex); + mutex_unlock(&imem->base.mutex); nvkm_vmm_put(vmm, &bar); - mutex_lock(&subdev->mutex); + mutex_lock(&imem->base.mutex); return; } @@ -197,7 +197,7 @@ nv50_instobj_release(struct nvkm_memory *memory) wmb(); nvkm_bar_flush(subdev->device->bar); - if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) { + if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) { /* Add the now-unused mapping to the LRU instead of directly * unmapping it here, in case we need to map it again later. */ @@ -208,7 +208,7 @@ nv50_instobj_release(struct nvkm_memory *memory) /* Switch back to NULL accessors when last map is gone. */ iobj->base.memory.ptrs = NULL; - mutex_unlock(&subdev->mutex); + mutex_unlock(&imem->base.mutex); } } @@ -227,9 +227,9 @@ nv50_instobj_acquire(struct nvkm_memory *memory) /* Take the lock, and re-check that another thread hasn't * already mapped the object in the meantime. */ - mutex_lock(&imem->subdev.mutex); + mutex_lock(&imem->mutex); if (refcount_inc_not_zero(&iobj->maps)) { - mutex_unlock(&imem->subdev.mutex); + mutex_unlock(&imem->mutex); return iobj->map; } @@ -252,7 +252,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory) refcount_set(&iobj->maps, 1); } - mutex_unlock(&imem->subdev.mutex); + mutex_unlock(&imem->mutex); return map; } @@ -265,7 +265,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm) /* Exclude bootstrapped objects (ie. the page tables for the * instmem BAR itself) from eviction. 
*/ - mutex_lock(&imem->subdev.mutex); + mutex_lock(&imem->mutex); if (likely(iobj->lru.next)) { list_del_init(&iobj->lru); iobj->lru.next = NULL; @@ -273,7 +273,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm) nv50_instobj_kmap(iobj, vmm); nvkm_instmem_boot(imem); - mutex_unlock(&imem->subdev.mutex); + mutex_unlock(&imem->mutex); } static u64 @@ -315,12 +315,12 @@ nv50_instobj_dtor(struct nvkm_memory *memory) struct nvkm_vma *bar; void *map = map; - mutex_lock(&imem->subdev.mutex); + mutex_lock(&imem->mutex); if (likely(iobj->lru.next)) list_del(&iobj->lru); map = iobj->map; bar = iobj->bar; - mutex_unlock(&imem->subdev.mutex); + mutex_unlock(&imem->mutex); if (map) { struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device); @@ -386,14 +386,14 @@ nv50_instmem = { }; int -nv50_instmem_new(struct nvkm_device *device, int index, +nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_instmem **pimem) { struct nv50_instmem *imem; if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; - nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base); + nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base); INIT_LIST_HEAD(&imem->lru); *pimem = &imem->base; return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h index f5da8fcbdde3..56c15e30a5dd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h @@ -16,7 +16,7 @@ struct nvkm_instmem_func { }; void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *, - int index, struct nvkm_instmem *); + enum nvkm_subdev_type, int, struct nvkm_instmem *); void nvkm_instmem_boot(struct nvkm_instmem *); #include <core/memory.h> diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c index 23242179e600..fa683c190795 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c @@ -33,10 +33,10 @@ nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count) BUG_ON((first > limit) || (limit >= ltc->num_tags)); - mutex_lock(&ltc->subdev.mutex); + mutex_lock(&ltc->mutex); ltc->func->cbc_clear(ltc, first, limit); ltc->func->cbc_wait(ltc); - mutex_unlock(&ltc->subdev.mutex); + mutex_unlock(&ltc->mutex); } int @@ -113,6 +113,7 @@ nvkm_ltc_dtor(struct nvkm_subdev *subdev) { struct nvkm_ltc *ltc = nvkm_ltc(subdev); nvkm_memory_unref(&ltc->tag_ram); + mutex_destroy(&ltc->mutex); return ltc; } @@ -126,15 +127,16 @@ nvkm_ltc = { }; int nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device, - int index, struct nvkm_ltc **pltc) + enum nvkm_subdev_type type, int inst, struct nvkm_ltc **pltc) { struct nvkm_ltc *ltc; if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_ltc, device, index, &ltc->subdev); + nvkm_subdev_ctor(&nvkm_ltc, device, type, inst, &ltc->subdev); ltc->func = func; + mutex_init(&ltc->mutex); ltc->zbc_min = 1; /* reserve 0 for disabled */ ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1; return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c index a21ef45b8572..fd8aeafc812d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c @@ -200,8 +200,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc) } mm_init: - nvkm_mm_fini(&fb->tags); -
return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1); + nvkm_mm_fini(&fb->tags.mm); + return nvkm_mm_init(&fb->tags.mm, 0, 0, ltc->num_tags, 1); } int @@ -249,7 +249,8 @@ gf100_ltc = { }; int -gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +gf100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_ltc **pltc) { - return nvkm_ltc_new_(&gf100_ltc, device, index, pltc); + return nvkm_ltc_new_(&gf100_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c index b4f6e0034d58..94aa09244d67 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c @@ -50,7 +50,8 @@ gk104_ltc = { }; int -gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +gk104_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_ltc **pltc) { - return nvkm_ltc_new_(&gk104_ltc, device, index, pltc); + return nvkm_ltc_new_(&gk104_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c index ec0a3844b2d1..54d1d65d5a85 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c @@ -145,7 +145,8 @@ gm107_ltc = { }; int -gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +gm107_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_ltc **pltc) { - return nvkm_ltc_new_(&gm107_ltc, device, index, pltc); + return nvkm_ltc_new_(&gm107_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c index e18e0dc19ec8..8cfdbbdd8e8d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c @@ -57,7 +57,8 @@ gm200_ltc = { }; int -gm200_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +gm200_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_ltc **pltc) { - return nvkm_ltc_new_(&gm200_ltc, device, index, pltc); + return nvkm_ltc_new_(&gm200_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c index e923ed76d37a..a4a6cd9b435a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c @@ -69,7 +69,8 @@ gp100_ltc = { }; int -gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +gp100_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_ltc **pltc) { - return nvkm_ltc_new_(&gp100_ltc, device, index, pltc); + return nvkm_ltc_new_(&gp100_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c index 601747ada655..ff05d617e7f4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c @@ -45,7 +45,8 @@ gp102_ltc = { }; int -gp102_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_ltc **pltc) { - return nvkm_ltc_new_(&gp102_ltc, device, index, pltc); + return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc); } diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c index c0063c7caa50..dfebd796cb4b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c @@ -59,7 +59,8 @@ gp10b_ltc = { }; int -gp10b_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +gp10b_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_ltc **pltc) { - return nvkm_ltc_new_(&gp10b_ltc, device, index, pltc); + return nvkm_ltc_new_(&gp10b_ltc, device, type, inst, pltc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h index eca5a711b1b8..2bebe139005d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h @@ -5,8 +5,8 @@ #include <subdev/ltc.h> #include <core/enum.h> -int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, - int index, struct nvkm_ltc **); +int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_ltc **); struct nvkm_ltc_func { int (*oneinit)(struct nvkm_ltc *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c index 09f669ac6630..21c4af3f81d5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c @@ -35,14 +35,14 @@ nvkm_mc_unk260(struct nvkm_device *device, u32 data) } void -nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en) +nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, bool en) { struct nvkm_mc *mc = device->mc; const struct nvkm_mc_map *map; if (likely(mc) && mc->func->intr_mask) { - u32 mask = nvkm_top_intr_mask(device, devidx); + u32 mask = nvkm_top_intr_mask(device, type, inst); for (map = mc->func->intr; !mask && map->stat; map++) { - if (map->unit == devidx) + if (map->type == type && map->inst == inst) mask = map->stat; } mc->func->intr_mask(mc, mask, en ? 
mask : 0); @@ -78,27 +78,34 @@ void nvkm_mc_intr(struct nvkm_device *device, bool *handled) { struct nvkm_mc *mc = device->mc; + struct nvkm_top *top = device->top; + struct nvkm_top_device *tdev; struct nvkm_subdev *subdev; const struct nvkm_mc_map *map; u32 stat, intr; - u64 subdevs; if (unlikely(!mc)) return; - intr = nvkm_mc_intr_stat(mc); - stat = nvkm_top_intr(device, intr, &subdevs); - while (subdevs) { - enum nvkm_devidx subidx = __ffs64(subdevs); - subdev = nvkm_device_subdev(device, subidx); - if (subdev) - nvkm_subdev_intr(subdev); - subdevs &= ~BIT_ULL(subidx); + stat = intr = nvkm_mc_intr_stat(mc); + + if (top) { + list_for_each_entry(tdev, &top->device, head) { + if (tdev->intr >= 0 && (stat & BIT(tdev->intr))) { + subdev = nvkm_device_subdev(device, tdev->type, tdev->inst); + if (subdev) { + nvkm_subdev_intr(subdev); + stat &= ~BIT(tdev->intr); + if (!stat) + break; + } + } + } } for (map = mc->func->intr; map->stat; map++) { if (intr & map->stat) { - subdev = nvkm_device_subdev(device, map->unit); + subdev = nvkm_device_subdev(device, map->type, map->inst); if (subdev) nvkm_subdev_intr(subdev); stat &= ~map->stat; @@ -111,17 +118,16 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled) } static u32 -nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, - enum nvkm_devidx devidx) +nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, enum nvkm_subdev_type type, int inst) { struct nvkm_mc *mc = device->mc; const struct nvkm_mc_map *map; u64 pmc_enable = 0; if (likely(mc)) { - if (!(pmc_enable = nvkm_top_reset(device, devidx))) { + if (!(pmc_enable = nvkm_top_reset(device, type, inst))) { for (map = mc->func->reset; map && map->stat; map++) { if (!isauto || !map->noauto) { - if (map->unit == devidx) { + if (map->type == type && map->inst == inst) { pmc_enable = map->stat; break; } @@ -133,9 +139,9 @@ nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, } void -nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx) +nvkm_mc_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { - u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx); + u64 pmc_enable = nvkm_mc_reset_mask(device, true, type, inst); if (pmc_enable) { nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); @@ -144,17 +150,17 @@ nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx) } void -nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx) +nvkm_mc_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { - u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); + u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst); if (pmc_enable) nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); } void -nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx) +nvkm_mc_enable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { - u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); + u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst); if (pmc_enable) { nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); nvkm_rd32(device, 0x000200); @@ -162,9 +168,9 @@ nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx) } bool -nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_devidx devidx) +nvkm_mc_enabled(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { - u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); + u64 pmc_enable = nvkm_mc_reset_mask(device, false, type, inst); return 
(pmc_enable != 0) && ((nvkm_rd32(device, 0x000200) & pmc_enable) == pmc_enable); @@ -203,19 +209,19 @@ nvkm_mc = { void nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device, - int index, struct nvkm_mc *mc) + enum nvkm_subdev_type type, int inst, struct nvkm_mc *mc) { - nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev); + nvkm_subdev_ctor(&nvkm_mc, device, type, inst, &mc->subdev); mc->func = func; } int nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, - int index, struct nvkm_mc **pmc) + enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { struct nvkm_mc *mc; if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL))) return -ENOMEM; - nvkm_mc_ctor(func, device, index, *pmc); + nvkm_mc_ctor(func, device, type, inst, *pmc); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c index 430a61c3df44..4cfc1c984006 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c @@ -62,7 +62,7 @@ g84_mc = { }; int -g84_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +g84_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&g84_mc, device, index, pmc); + return nvkm_mc_new_(&g84_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c index 93ad4982ce5f..b7e58d75d894 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c @@ -62,7 +62,7 @@ g98_mc = { }; int -g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +g98_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&g98_mc, device, index, pmc); + return nvkm_mc_new_(&g98_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c index 967eb3af11eb..4105175dfccd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c @@ -68,7 +68,7 @@ ga100_mc = { }; int -ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&ga100_mc, device, index, pmc); + return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c index f93766418056..3a589c6f7fad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c @@ -27,11 +27,11 @@ static const struct nvkm_mc_map gf100_mc_reset[] = { { 0x00020000, NVKM_ENGINE_MSPDEC }, { 0x00008000, NVKM_ENGINE_MSVLD }, - { 0x00002000, NVKM_SUBDEV_PMU, true }, + { 0x00002000, NVKM_SUBDEV_PMU, 0, true }, { 0x00001000, NVKM_ENGINE_GR }, { 0x00000100, NVKM_ENGINE_FIFO }, - { 0x00000080, NVKM_ENGINE_CE1 }, - { 0x00000040, NVKM_ENGINE_CE0 }, + { 0x00000080, NVKM_ENGINE_CE, 1 }, + { 0x00000040, NVKM_ENGINE_CE, 0 }, { 0x00000002, NVKM_ENGINE_MSPPP }, {} }; @@ -43,10 +43,10 @@ gf100_mc_intr[] = { { 0x00008000, NVKM_ENGINE_MSVLD }, { 0x00001000, NVKM_ENGINE_GR }, { 0x00000100, NVKM_ENGINE_FIFO }, - { 0x00000040, NVKM_ENGINE_CE1 }, - { 0x00000020, NVKM_ENGINE_CE0 }, + { 0x00000040, NVKM_ENGINE_CE, 1 }, + { 0x00000020, NVKM_ENGINE_CE, 0 }, { 
0x00000001, NVKM_ENGINE_MSPPP }, - { 0x40000000, NVKM_SUBDEV_IBUS }, + { 0x40000000, NVKM_SUBDEV_PRIVRING }, { 0x10000000, NVKM_SUBDEV_BUS }, { 0x08000000, NVKM_SUBDEV_FB }, { 0x02000000, NVKM_SUBDEV_LTC }, @@ -112,7 +112,7 @@ gf100_mc = { }; int -gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +gf100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&gf100_mc, device, index, pmc); + return nvkm_mc_new_(&gf100_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c index 7b8c6ecad1a5..d9b9067fa93f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c @@ -26,7 +26,7 @@ const struct nvkm_mc_map gk104_mc_reset[] = { { 0x00000100, NVKM_ENGINE_FIFO }, - { 0x00002000, NVKM_SUBDEV_PMU, true }, + { 0x00002000, NVKM_SUBDEV_PMU, 0, true }, {} }; @@ -34,7 +34,7 @@ const struct nvkm_mc_map gk104_mc_intr[] = { { 0x04000000, NVKM_ENGINE_DISP }, { 0x00000100, NVKM_ENGINE_FIFO }, - { 0x40000000, NVKM_SUBDEV_IBUS }, + { 0x40000000, NVKM_SUBDEV_PRIVRING }, { 0x10000000, NVKM_SUBDEV_BUS }, { 0x08000000, NVKM_SUBDEV_FB }, { 0x02000000, NVKM_SUBDEV_LTC }, @@ -60,7 +60,7 @@ gk104_mc = { }; int -gk104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +gk104_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&gk104_mc, device, index, pmc); + return nvkm_mc_new_(&gk104_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c index ca1bf3279dbe..03590292749a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c @@ -35,7 +35,7 @@ gk20a_mc = { }; int -gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +gk20a_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&gk20a_mc, device, index, pmc); + return nvkm_mc_new_(&gk20a_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c index 43db245eec9a..5fd1a0595c33 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c @@ -80,7 +80,7 @@ gp100_mc_intr[] = { { 0x04000000, NVKM_ENGINE_DISP }, { 0x00000100, NVKM_ENGINE_FIFO }, { 0x00000200, NVKM_SUBDEV_FAULT }, - { 0x40000000, NVKM_SUBDEV_IBUS }, + { 0x40000000, NVKM_SUBDEV_PRIVRING }, { 0x10000000, NVKM_SUBDEV_BUS }, { 0x08000000, NVKM_SUBDEV_FB }, { 0x02000000, NVKM_SUBDEV_LTC }, @@ -106,13 +106,13 @@ gp100_mc = { int gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, - int index, struct nvkm_mc **pmc) + enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { struct gp100_mc *mc; if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL))) return -ENOMEM; - nvkm_mc_ctor(func, device, index, &mc->base); + nvkm_mc_ctor(func, device, type, inst, &mc->base); *pmc = &mc->base; spin_lock_init(&mc->lock); @@ -122,7 +122,7 @@ gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, } int -gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return gp100_mc_new_(&gp100_mc, device, index, 
pmc); + return gp100_mc_new_(&gp100_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c index 45c62f5ef782..dd581d030ced 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c @@ -43,7 +43,7 @@ gp10b_mc = { }; int -gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +gp10b_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return gp100_mc_new_(&gp10b_mc, device, index, pmc); + return gp100_mc_new_(&gp10b_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c index 99d50a3d956f..1b4d43531dba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c @@ -27,7 +27,7 @@ static const struct nvkm_mc_map gt215_mc_reset[] = { { 0x04008000, NVKM_ENGINE_MSVLD }, { 0x01020000, NVKM_ENGINE_MSPDEC }, - { 0x00802000, NVKM_ENGINE_CE0 }, + { 0x00802000, NVKM_ENGINE_CE, 0 }, { 0x00400002, NVKM_ENGINE_MSPPP }, { 0x00201000, NVKM_ENGINE_GR }, { 0x00000100, NVKM_ENGINE_FIFO }, @@ -37,7 +37,7 @@ gt215_mc_reset[] = { static const struct nvkm_mc_map gt215_mc_intr[] = { { 0x04000000, NVKM_ENGINE_DISP }, - { 0x00400000, NVKM_ENGINE_CE0 }, + { 0x00400000, NVKM_ENGINE_CE, 0 }, { 0x00020000, NVKM_ENGINE_MSPDEC }, { 0x00008000, NVKM_ENGINE_MSVLD }, { 0x00001000, NVKM_ENGINE_GR }, @@ -71,7 +71,7 @@ gt215_mc = { }; int -gt215_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +gt215_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&gt215_mc, device, index, pmc); + return nvkm_mc_new_(&gt215_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c index 6509defd1460..bc0d09bafa99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c @@ -80,7 +80,7 @@ nv04_mc = { }; int -nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +nv04_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&nv04_mc, device, index, pmc); + return nvkm_mc_new_(&nv04_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c index 9213107901e6..ab59ca1ee068 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c @@ -44,7 +44,7 @@ nv11_mc = { }; int -nv11_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +nv11_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&nv11_mc, device, index, pmc); + return nvkm_mc_new_(&nv11_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c index 64bf5bbf8146..03d756e26e57 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c @@ -53,7 +53,7 @@ nv17_mc = { }; int -nv17_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +nv17_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&nv17_mc, device, index, pmc); + return
nvkm_mc_new_(&nv17_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c index 65fa44a64b98..95f65766e8b0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c @@ -48,7 +48,7 @@ nv44_mc = { }; int -nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +nv44_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&nv44_mc, device, index, pmc); + return nvkm_mc_new_(&nv44_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c index fe93b4fd7100..fce3613cdfa5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c @@ -55,7 +55,7 @@ nv50_mc = { }; int -nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +nv50_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { - return nvkm_mc_new_(&nv50_mc, device, index, pmc); + return nvkm_mc_new_(&nv50_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h index 0d01b2c419ff..c8bcabb98f99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h @@ -4,14 +4,15 @@ #define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev) #include <subdev/mc.h> -void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *, - int index, struct nvkm_mc *); -int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, - int index, struct nvkm_mc **); +void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_mc *); +int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_mc **); struct nvkm_mc_map { u32 stat; - u32 unit; + enum nvkm_subdev_type type; + int inst; bool noauto; }; @@ -52,7 +53,7 @@ void gf100_mc_unk260(struct nvkm_mc *, u32); void gp100_mc_intr_unarm(struct nvkm_mc *); void gp100_mc_intr_rearm(struct nvkm_mc *); void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32); -int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int, +int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mc **); extern const struct nvkm_mc_map gk104_mc_intr[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c index af0afd1ad6ee..58db83ebadc5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c @@ -112,15 +112,15 @@ tu102_mc = { .reset = gk104_mc_reset, }; -int +static int tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, - int index, struct nvkm_mc **pmc) + enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc) { struct tu102_mc *mc; if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL))) return -ENOMEM; - nvkm_mc_ctor(func, device, index, &mc->base); + nvkm_mc_ctor(func, device, type, inst, &mc->base); *pmc = &mc->base; spin_lock_init(&mc->lock); @@ -130,7 +130,7 @@ tu102_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, } int -tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +tu102_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct 
nvkm_mc **pmc) { - return tu102_mc_new_(&tu102_mc, device, index, pmc); + return tu102_mc_new_(&tu102_mc, device, type, inst, pmc); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index 6d5212ae2fd5..ad3b44a9e0e7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c @@ -402,6 +402,7 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev) nvkm_vmm_unref(&mmu->vmm); nvkm_mmu_ptc_fini(mmu); + mutex_destroy(&mmu->mutex); return mmu; } @@ -414,22 +415,23 @@ nvkm_mmu = { void nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device, - int index, struct nvkm_mmu *mmu) + enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu) { - nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev); + nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev); mmu->func = func; mmu->dma_bits = func->dma_bits; nvkm_mmu_ptc_init(mmu); + mutex_init(&mmu->mutex); mmu->user.ctor = nvkm_ummu_new; mmu->user.base = func->mmu.user; } int nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device, - int index, struct nvkm_mmu **pmmu) + enum nvkm_subdev_type type, int inst, struct nvkm_mmu **pmmu) { if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL))) return -ENOMEM; - nvkm_mmu_ctor(func, device, index, *pmmu); + nvkm_mmu_ctor(func, device, type, inst, *pmmu); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c index 8accda5a772b..ce47a3b97be9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c @@ -35,7 +35,8 @@ g84_mmu = { }; int -g84_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +g84_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&g84_mmu, device, index, pmmu); + return nvkm_mmu_new_(&g84_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c index 2cd5ec81c0d0..7a28b1d49f7c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c @@ -84,7 +84,8 @@ gf100_mmu = { }; int -gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +gf100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu); + return nvkm_mmu_new_(&gf100_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c index 3d7d1eb1cff9..34c9b2b821f6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c @@ -35,7 +35,8 @@ gk104_mmu = { }; int -gk104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +gk104_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&gk104_mmu, device, index, pmmu); + return nvkm_mmu_new_(&gk104_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c index ac74965a60d4..a7db29c429ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c @@ -35,7 +35,8 @@ gk20a_mmu = { }; int -gk20a_mmu_new(struct nvkm_device *device, 
int index, struct nvkm_mmu **pmmu) +gk20a_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&gk20a_mmu, device, index, pmmu); + return nvkm_mmu_new_(&gk20a_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c index 83990c83f9f8..e1696f637a68 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c @@ -90,9 +90,10 @@ gm200_mmu_fixed = { }; int -gm200_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +gm200_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { if (device->fb->page) - return nvkm_mmu_new_(&gm200_mmu_fixed, device, index, pmmu); - return nvkm_mmu_new_(&gm200_mmu, device, index, pmmu); + return nvkm_mmu_new_(&gm200_mmu_fixed, device, type, inst, pmmu); + return nvkm_mmu_new_(&gm200_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c index 7353a94b4091..e6e1a8ad701e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c @@ -47,9 +47,10 @@ gm20b_mmu_fixed = { }; int -gm20b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +gm20b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { if (device->fb->page) - return nvkm_mmu_new_(&gm20b_mmu_fixed, device, index, pmmu); - return nvkm_mmu_new_(&gm20b_mmu, device, index, pmmu); + return nvkm_mmu_new_(&gm20b_mmu_fixed, device, type, inst, pmmu); + return nvkm_mmu_new_(&gm20b_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c index 65cb9d28e60e..daa5ab0f8711 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c @@ -37,9 +37,10 @@ gp100_mmu = { }; int -gp100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +gp100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true)) - return gm200_mmu_new(device, index, pmmu); - return nvkm_mmu_new_(&gp100_mmu, device, index, pmmu); + return gm200_mmu_new(device, type, inst, pmmu); + return nvkm_mmu_new_(&gp100_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c index 0a50be9a785a..edd0bf9a5cd8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c @@ -37,9 +37,10 @@ gp10b_mmu = { }; int -gp10b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +gp10b_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true)) - return gm20b_mmu_new(device, index, pmmu); - return nvkm_mmu_new_(&gp10b_mmu, device, index, pmmu); + return gm20b_mmu_new(device, type, inst, pmmu); + return nvkm_mmu_new_(&gp10b_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c index e0997eedd6d9..fb8bdc88d566 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gv100.c @@ -37,7 +37,8 @@ gv100_mmu = { }; int -gv100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +gv100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&gv100_mmu, device, index, pmmu); + return nvkm_mmu_new_(&gv100_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c index 0527b50730d9..514876d6411b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c @@ -35,7 +35,8 @@ mcp77_mmu = { }; int -mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +mcp77_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu); + return nvkm_mmu_new_(&mcp77_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c index d201c887c2cd..0674aa8f68c8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c @@ -35,7 +35,8 @@ nv04_mmu = { }; int -nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +nv04_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu); + return nvkm_mmu_new_(&nv04_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c index adca81895c09..909f92b72847 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c @@ -47,11 +47,12 @@ nv41_mmu = { }; int -nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +nv41_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { if (device->type == NVKM_DEVICE_AGP || !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) - return nv04_mmu_new(device, index, pmmu); + return nv04_mmu_new(device, type, inst, pmmu); - return nvkm_mmu_new_(&nv41_mmu, device, index, pmmu); + return nvkm_mmu_new_(&nv41_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c index 598c53a27bde..dd2a8d461da3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c @@ -62,11 +62,12 @@ nv44_mmu = { }; int -nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +nv44_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { if (device->type == NVKM_DEVICE_AGP || !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) - return nv04_mmu_new(device, index, pmmu); + return nv04_mmu_new(device, type, inst, pmmu); - return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu); + return nvkm_mmu_new_(&nv44_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c index c0083ddda65a..78d46e35d0a9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c @@ -71,7 +71,8 @@ nv50_mmu = { }; int -nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +nv50_mmu_new(struct 
nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu); + return nvkm_mmu_new_(&nv50_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h index 479b02344271..5265bf4d8366 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h @@ -4,10 +4,10 @@ #define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev) #include <subdev/mmu.h> -void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, - int index, struct nvkm_mmu *); -int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, - int index, struct nvkm_mmu **); +void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_mmu *); +int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_mmu **); struct nvkm_mmu_func { void (*init)(struct nvkm_mmu *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c index 94081f35f967..8d060ce47f86 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c @@ -51,7 +51,8 @@ tu102_mmu = { }; int -tu102_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mmu **pmmu) { - return nvkm_mmu_new_(&tu102_mmu, device, index, pmmu); + return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c index 6a2d9eb8e1ea..5438384d9a67 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c @@ -187,12 +187,11 @@ gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr) void gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type) { - struct nvkm_subdev *subdev = &vmm->mmu->subdev; - struct nvkm_device *device = subdev->device; + struct nvkm_device *device = vmm->mmu->subdev.device; struct nvkm_mmu_pt *pd = vmm->pd->pt[0]; u64 addr = 0; - mutex_lock(&subdev->mutex); + mutex_lock(&vmm->mmu->mutex); /* Looks like maybe a "free flush slots" counter, the * faster you write to 0x100cbc to more it decreases. 
*/ @@ -222,7 +221,7 @@ gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type) if (nvkm_rd32(device, 0x100c80) & 0x00008000) break; ); - mutex_unlock(&subdev->mutex); + mutex_unlock(&vmm->mmu->mutex); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c index 1d3369683a21..31984671daf8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c @@ -80,17 +80,16 @@ nv41_vmm_desc_12[] = { static void nv41_vmm_flush(struct nvkm_vmm *vmm, int level) { - struct nvkm_subdev *subdev = &vmm->mmu->subdev; - struct nvkm_device *device = subdev->device; + struct nvkm_device *device = vmm->mmu->subdev.device; - mutex_lock(&subdev->mutex); + mutex_lock(&vmm->mmu->mutex); nvkm_wr32(device, 0x100810, 0x00000022); nvkm_msec(device, 2000, if (nvkm_rd32(device, 0x100810) & 0x00000020) break; ); nvkm_wr32(device, 0x100810, 0x00000000); - mutex_unlock(&subdev->mutex); + mutex_unlock(&vmm->mmu->mutex); } static const struct nvkm_vmm_func diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c index 2d89e27e8e9e..b7548dcd72c7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c @@ -184,7 +184,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level) struct nvkm_device *device = subdev->device; int i, id; - mutex_lock(&subdev->mutex); + mutex_lock(&vmm->mmu->mutex); for (i = 0; i < NVKM_SUBDEV_NR; i++) { if (!atomic_read(&vmm->engref[i])) continue; @@ -207,7 +207,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level) case NVKM_ENGINE_MSVLD : id = 0x09; break; case NVKM_ENGINE_CIPHER: case NVKM_ENGINE_SEC : id = 0x0a; break; - case NVKM_ENGINE_CE0 : id = 0x0d; break; + case NVKM_ENGINE_CE : id = 0x0d; break; default: continue; } @@ -217,10 +217,9 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level) if (!(nvkm_rd32(device, 0x100c80) & 0x00000001)) break; ) < 0) - nvkm_error(subdev, "%s mmu invalidate timeout\n", - nvkm_subdev_name[i]); + nvkm_error(subdev, "%s mmu invalidate timeout\n", nvkm_subdev_type[i]); } - mutex_unlock(&subdev->mutex); + mutex_unlock(&vmm->mmu->mutex); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c index b1294d0076c0..6cb5eefa45e9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c @@ -26,15 +26,14 @@ static void tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) { - struct nvkm_subdev *subdev = &vmm->mmu->subdev; - struct nvkm_device *device = subdev->device; + struct nvkm_device *device = vmm->mmu->subdev.device; u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24; type |= 0x00000001; /* PAGE_ALL */ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) type |= 0x00000004; /* HUB_ONLY */ - mutex_lock(&subdev->mutex); + mutex_lock(&vmm->mmu->mutex); nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8); nvkm_wr32(device, 0xb830a4, 0x00000000); @@ -46,7 +45,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) break; ); - mutex_unlock(&subdev->mutex); + mutex_unlock(&vmm->mmu->mutex); } static const struct nvkm_vmm_func diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c index f44682d62f75..c1acfe642da3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c @@ -230,7 +230,8 @@ nvkm_mxm = { }; int 
-nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm) +nvkm_mxm_new_(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_mxm **pmxm) { struct nvkm_bios *bios = device->bios; struct nvkm_mxm *mxm; @@ -240,7 +241,7 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm) if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_mxm, device, index, &mxm->subdev); + nvkm_subdev_ctor(&nvkm_mxm, device, type, inst, &mxm->subdev); data = mxm_table(bios, &ver, &len); if (!data || !(ver = nvbios_rd08(bios, data))) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c index 70e2c414bb7b..f3167904dcb0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c @@ -201,12 +201,13 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm) } int -nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm) +nv50_mxm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_subdev **pmxm) { struct nvkm_mxm *mxm; int ret; - ret = nvkm_mxm_new_(device, index, &mxm); + ret = nvkm_mxm_new_(device, type, inst, &mxm); if (mxm) *pmxm = &mxm->subdev; if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h index fc8f69e6fc64..fcacb6c6a7f7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h @@ -12,5 +12,5 @@ struct nvkm_mxm { u8 *mxms; }; -int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **); +int nvkm_mxm_new_(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_mxm **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index ee2431a7804e..a7d42ea8ba28 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -183,13 +183,13 @@ nvkm_pci_func = { int nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device, - int index, struct nvkm_pci **ppci) + enum nvkm_subdev_type type, int inst, struct nvkm_pci **ppci) { struct nvkm_pci *pci; if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev); + nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev); pci->func = func; pci->pdev = device->func->pci(device)->pdev; pci->irq = -1; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c index 62438d892f42..5b29aacedef3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c @@ -150,7 +150,8 @@ g84_pci_func = { }; int -g84_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +g84_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&g84_pci_func, device, index, ppci); + return nvkm_pci_new_(&g84_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c index 48874359d5f6..a9e0674009c6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c @@ -51,7 +51,8 @@ g92_pci_func = { }; int -g92_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) 
+g92_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&g92_pci_func, device, index, ppci); + return nvkm_pci_new_(&g92_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c index 09adb37a5664..7bacd0693283 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c @@ -43,7 +43,8 @@ g94_pci_func = { }; int -g94_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +g94_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&g94_pci_func, device, index, ppci); + return nvkm_pci_new_(&g94_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c index 00a5e7d3ee9d..099906092fe1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c @@ -96,7 +96,8 @@ gf100_pci_func = { }; int -gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +gf100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&gf100_pci_func, device, index, ppci); + return nvkm_pci_new_(&gf100_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c index 11bf419afe3f..bcde609ba866 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c @@ -43,7 +43,8 @@ gf106_pci_func = { }; int -gf106_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +gf106_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&gf106_pci_func, device, index, ppci); + return nvkm_pci_new_(&gf106_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c index e68030507d88..6be87ecffc89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c @@ -222,7 +222,8 @@ gk104_pci_func = { }; int -gk104_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +gk104_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&gk104_pci_func, device, index, ppci); + return nvkm_pci_new_(&gk104_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c index 82c5234a06ff..a5fafda0014d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c @@ -38,7 +38,8 @@ gp100_pci_func = { }; int -gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +gp100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&gp100_pci_func, device, index, ppci); + return nvkm_pci_new_(&gp100_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c index 5b1ed42cb90b..9ab64194b185 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c @@ -52,7 +52,8 @@ nv04_pci_func = { }; int -nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +nv04_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&nv04_pci_func, device, index, ppci); + return nvkm_pci_new_(&nv04_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c index 6eb417765802..6a3c31cf0200 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c @@ -59,7 +59,8 @@ nv40_pci_func = { }; int -nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +nv40_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&nv40_pci_func, device, index, ppci); + return nvkm_pci_new_(&nv40_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c index fc617e4c0ab6..9cad17f178ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c @@ -45,7 +45,8 @@ nv46_pci_func = { }; int -nv46_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +nv46_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&nv46_pci_func, device, index, ppci); + return nvkm_pci_new_(&nv46_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c index 1f1b26b5fa72..741e34bf307c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c @@ -31,7 +31,8 @@ nv4c_pci_func = { }; int -nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +nv4c_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pci **ppci) { - return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci); + return nvkm_pci_new_(&nv4c_pci_func, device, type, inst, ppci); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h index 7009aad86b6e..9b7583532962 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h @@ -4,8 +4,8 @@ #define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev) #include <subdev/pci.h> -int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, - int index, struct nvkm_pci **); +int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_pci **); struct nvkm_pci_func { void (*init)(struct nvkm_pci *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index a0fe607c9c07..24382875fb4f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -148,6 +148,7 @@ nvkm_pmu_dtor(struct nvkm_subdev *subdev) nvkm_falcon_cmdq_del(&pmu->hpq); nvkm_falcon_qmgr_del(&pmu->qmgr); nvkm_falcon_dtor(&pmu->falcon); + mutex_destroy(&pmu->send.mutex); return nvkm_pmu(subdev); } @@ -162,11 +163,13 @@ nvkm_pmu = { int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, - int index, struct nvkm_pmu *pmu) + enum nvkm_subdev_type type, int inst, 
struct nvkm_pmu *pmu) { int ret; - nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev); + nvkm_subdev_ctor(&nvkm_pmu, device, type, inst, &pmu->subdev); + + mutex_init(&pmu->send.mutex); INIT_WORK(&pmu->recv.work, nvkm_pmu_recv); init_waitqueue_head(&pmu->recv.wait); @@ -177,9 +180,8 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, pmu->func = fwif->func; - ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, - nvkm_subdev_name[pmu->subdev.index], 0x10a000, - &pmu->falcon); + ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev, pmu->subdev.name, + 0x10a000, &pmu->falcon); if (ret) return ret; @@ -195,10 +197,10 @@ nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, int nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device, - int index, struct nvkm_pmu **ppmu) + enum nvkm_subdev_type type, int inst, struct nvkm_pmu **ppmu) { struct nvkm_pmu *pmu; if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL))) return -ENOMEM; - return nvkm_pmu_ctor(fwif, device, index, *ppmu); + return nvkm_pmu_ctor(fwif, device, type, inst, *ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c index 3ecb3d9cbcf2..f725a3ec5479 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c @@ -30,14 +30,14 @@ void gf100_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; - nvkm_mc_disable(device, NVKM_SUBDEV_PMU); - nvkm_mc_enable(device, NVKM_SUBDEV_PMU); + nvkm_mc_disable(device, NVKM_SUBDEV_PMU, 0); + nvkm_mc_enable(device, NVKM_SUBDEV_PMU, 0); } bool gf100_pmu_enabled(struct nvkm_pmu *pmu) { - return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU); + return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU, 0); } static const struct nvkm_pmu_func @@ -69,7 +69,8 @@ gf100_pmu_fwif[] = { }; int -gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gf100_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gf100_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gf100_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c index 8dd0271aaaee..0f4b6697a4e4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c @@ -47,7 +47,8 @@ gf119_pmu_fwif[] = { }; int -gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gf119_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gf119_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gf119_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c index 8b70cc17a634..9e7631d7aa41 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c @@ -127,7 +127,8 @@ gk104_pmu_fwif[] = { }; int -gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gk104_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gk104_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gk104_pmu_fwif, device, type, inst, ppmu); } diff --git 
a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c index 0081f2141b10..dbaefee53e1f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c @@ -106,7 +106,8 @@ gk110_pmu_fwif[] = { }; int -gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gk110_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gk110_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gk110_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c index b227c701a5e7..a08fb049e6d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c @@ -48,7 +48,8 @@ gk208_pmu_fwif[] = { }; int -gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gk208_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gk208_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gk208_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c index 26c1adf8f44c..a67a42e73f08 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c @@ -210,7 +210,8 @@ gk20a_pmu_fwif[] = { }; int -gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gk20a_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { struct gk20a_pmu *pmu; int ret; @@ -219,7 +220,7 @@ gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) return -ENOMEM; *ppmu = &pmu->base; - ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, index, &pmu->base); + ret = nvkm_pmu_ctor(gk20a_pmu_fwif, device, type, inst, &pmu->base); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c index 5afb55e58b51..622ee637f97b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c @@ -49,7 +49,8 @@ gm107_pmu_fwif[] = { }; int -gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gm107_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gm107_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gm107_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c index 383376addb41..5968c7696596 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c @@ -45,7 +45,8 @@ gm200_pmu_fwif[] = { }; int -gm200_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gm200_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gm200_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gm200_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 8f6ed5373ea1..148706977eec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ 
-240,7 +240,8 @@ gm20b_pmu_fwif[] = { }; int -gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gm20b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gm20b_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 3d8ce14dba7b..00da1b873ce8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -51,7 +51,8 @@ gp102_pmu_fwif[] = { }; int -gp102_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gp102_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index 9c237c426599..461f722656e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -99,7 +99,8 @@ gp10b_pmu_fwif[] = { }; int -gp10b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gp10b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gp10b_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gp10b_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c index 88b909913ff9..b0407b86bc10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c @@ -34,7 +34,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], struct nvkm_device *device = subdev->device; u32 addr; - mutex_lock(&subdev->mutex); + mutex_lock(&pmu->send.mutex); /* wait for a free slot in the fifo */ addr = nvkm_rd32(device, 0x10a4a0); if (nvkm_msec(device, 2000, @@ -42,7 +42,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], if (tmp != (addr ^ 8)) break; ) < 0) { - mutex_unlock(&subdev->mutex); + mutex_unlock(&pmu->send.mutex); return -EBUSY; } @@ -79,7 +79,7 @@ gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], reply[1] = pmu->recv.data[1]; } - mutex_unlock(&subdev->mutex); + mutex_unlock(&pmu->send.mutex); return 0; } @@ -282,7 +282,8 @@ gt215_pmu_fwif[] = { }; int -gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu) +gt215_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_pmu **ppmu) { - return nvkm_pmu_new_(gt215_pmu_fwif, device, index, ppmu); + return nvkm_pmu_new_(gt215_pmu_fwif, device, type, inst, ppmu); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 276b6d778e53..e7860d177353 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -62,8 +62,8 @@ int gf100_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); int gm200_pmu_nofw(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); int gm20b_pmu_load(struct nvkm_pmu *, int, const struct nvkm_pmu_fwif *); -int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, - int index, struct nvkm_pmu *); -int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, 
- int index, struct nvkm_pmu **); +int nvkm_pmu_ctor(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_pmu *); +int nvkm_pmu_new_(const struct nvkm_pmu_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_pmu **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild new file mode 100644 index 000000000000..d47d1bdd0f2b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: MIT +nvkm-y += nvkm/subdev/privring/gf100.o +nvkm-y += nvkm/subdev/privring/gf117.o +nvkm-y += nvkm/subdev/privring/gk104.o +nvkm-y += nvkm/subdev/privring/gk20a.o +nvkm-y += nvkm/subdev/privring/gm200.o +nvkm-y += nvkm/subdev/privring/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c index 1115376bc85f..ef7caca70372 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf100.c @@ -25,39 +25,39 @@ #include <subdev/timer.h> static void -gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i) +gf100_privring_intr_hub(struct nvkm_subdev *privring, int i) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400)); u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400)); u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400)); - nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); + nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); } static void -gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i) +gf100_privring_intr_rop(struct nvkm_subdev *privring, int i) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400)); u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400)); u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400)); - nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); + nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); } static void -gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i) +gf100_privring_intr_gpc(struct nvkm_subdev *privring, int i) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400)); u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400)); u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400)); - nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); + nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); } void -gf100_ibus_intr(struct nvkm_subdev *ibus) +gf100_privring_intr(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 intr0 = nvkm_rd32(device, 0x121c58); u32 intr1 = nvkm_rd32(device, 0x121c5c); u32 hubnr = nvkm_rd32(device, 0x121c70); @@ -68,7 +68,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus) for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) { u32 stat = 0x00000100 << i; if (intr0 & stat) { - gf100_ibus_intr_hub(ibus, i); + gf100_privring_intr_hub(privring, i); intr0 &= ~stat; } } @@ -76,7 +76,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus) for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) { u32 stat = 0x00010000 << i; if (intr0 & stat) { - gf100_ibus_intr_rop(ibus, i); + gf100_privring_intr_rop(privring, i); intr0 &= 
~stat; } } @@ -84,7 +84,7 @@ gf100_ibus_intr(struct nvkm_subdev *ibus) for (i = 0; intr1 && i < gpcnr; i++) { u32 stat = 0x00000001 << i; if (intr1 & stat) { - gf100_ibus_intr_gpc(ibus, i); + gf100_privring_intr_gpc(privring, i); intr1 &= ~stat; } } @@ -97,9 +97,9 @@ gf100_ibus_intr(struct nvkm_subdev *ibus) } static int -gf100_ibus_init(struct nvkm_subdev *ibus) +gf100_privring_init(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800); nvkm_wr32(device, 0x12232c, 0x00100064); nvkm_wr32(device, 0x122330, 0x00100064); @@ -109,14 +109,14 @@ gf100_ibus_init(struct nvkm_subdev *ibus) } static const struct nvkm_subdev_func -gf100_ibus = { - .init = gf100_ibus_init, - .intr = gf100_ibus_intr, +gf100_privring = { + .init = gf100_privring_init, + .intr = gf100_privring_intr, }; int -gf100_ibus_new(struct nvkm_device *device, int index, - struct nvkm_subdev **pibus) +gf100_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_subdev **pprivring) { - return nvkm_subdev_new_(&gf100_ibus, device, index, pibus); + return nvkm_subdev_new_(&gf100_privring, device, type, inst, pprivring); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c index 1124dadac145..c78721fcd729 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gf117.c @@ -24,9 +24,9 @@ #include "priv.h" static int -gf117_ibus_init(struct nvkm_subdev *ibus) +gf117_privring_init(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800); nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100); nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff); @@ -34,14 +34,14 @@ gf117_ibus_init(struct nvkm_subdev *ibus) } static const struct nvkm_subdev_func -gf117_ibus = { - .init = gf117_ibus_init, - .intr = gf100_ibus_intr, +gf117_privring = { + .init = gf117_privring_init, + .intr = gf100_privring_intr, }; int -gf117_ibus_new(struct nvkm_device *device, int index, - struct nvkm_subdev **pibus) +gf117_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_subdev **pprivring) { - return nvkm_subdev_new_(&gf117_ibus, device, index, pibus); + return nvkm_subdev_new_(&gf117_privring, device, type, inst, pprivring); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c index 22e487b493ad..568a4c0997bd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk104.c @@ -25,39 +25,39 @@ #include <subdev/timer.h> static void -gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i) +gk104_privring_intr_hub(struct nvkm_subdev *privring, int i) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800)); u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800)); u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800)); - nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); + nvkm_debug(privring, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); } static void -gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i) +gk104_privring_intr_rop(struct nvkm_subdev *privring, int i) { - struct nvkm_device *device = 
ibus->device; + struct nvkm_device *device = privring->device; u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800)); u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800)); u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800)); - nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); + nvkm_debug(privring, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); } static void -gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i) +gk104_privring_intr_gpc(struct nvkm_subdev *privring, int i) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800)); u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800)); u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800)); - nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); + nvkm_debug(privring, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); } void -gk104_ibus_intr(struct nvkm_subdev *ibus) +gk104_privring_intr(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 intr0 = nvkm_rd32(device, 0x120058); u32 intr1 = nvkm_rd32(device, 0x12005c); u32 hubnr = nvkm_rd32(device, 0x120070); @@ -68,7 +68,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus) for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) { u32 stat = 0x00000100 << i; if (intr0 & stat) { - gk104_ibus_intr_hub(ibus, i); + gk104_privring_intr_hub(privring, i); intr0 &= ~stat; } } @@ -76,7 +76,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus) for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) { u32 stat = 0x00010000 << i; if (intr0 & stat) { - gk104_ibus_intr_rop(ibus, i); + gk104_privring_intr_rop(privring, i); intr0 &= ~stat; } } @@ -84,7 +84,7 @@ gk104_ibus_intr(struct nvkm_subdev *ibus) for (i = 0; intr1 && i < gpcnr; i++) { u32 stat = 0x00000001 << i; if (intr1 & stat) { - gk104_ibus_intr_gpc(ibus, i); + gk104_privring_intr_gpc(privring, i); intr1 &= ~stat; } } @@ -97,9 +97,9 @@ gk104_ibus_intr(struct nvkm_subdev *ibus) } static int -gk104_ibus_init(struct nvkm_subdev *ibus) +gk104_privring_init(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000); nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200); nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800); @@ -111,15 +111,15 @@ gk104_ibus_init(struct nvkm_subdev *ibus) } static const struct nvkm_subdev_func -gk104_ibus = { - .preinit = gk104_ibus_init, - .init = gk104_ibus_init, - .intr = gk104_ibus_intr, +gk104_privring = { + .preinit = gk104_privring_init, + .init = gk104_privring_init, + .intr = gk104_privring_intr, }; int -gk104_ibus_new(struct nvkm_device *device, int index, - struct nvkm_subdev **pibus) +gk104_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_subdev **pprivring) { - return nvkm_subdev_new_(&gk104_ibus, device, index, pibus); + return nvkm_subdev_new_(&gk104_privring, device, type, inst, pprivring); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c index 187d544378b0..55e4a60d8770 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gk20a.c @@ -19,13 +19,13 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ -#include <subdev/ibus.h> +#include <subdev/privring.h> #include <subdev/timer.h> static void -gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus) +gk20a_privring_init_privring_ring(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; nvkm_mask(device, 0x137250, 0x3f, 0); nvkm_mask(device, 0x000200, 0x20, 0); @@ -46,14 +46,14 @@ gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus) } static void -gk20a_ibus_intr(struct nvkm_subdev *ibus) +gk20a_privring_intr(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; u32 status0 = nvkm_rd32(device, 0x120058); if (status0 & 0x7) { - nvkm_debug(ibus, "resetting ibus ring\n"); - gk20a_ibus_init_ibus_ring(ibus); + nvkm_debug(privring, "resetting privring ring\n"); + gk20a_privring_init_privring_ring(privring); } /* Acknowledge interrupt */ @@ -65,21 +65,21 @@ gk20a_ibus_intr(struct nvkm_subdev *ibus) } static int -gk20a_ibus_init(struct nvkm_subdev *ibus) +gk20a_privring_init(struct nvkm_subdev *privring) { - gk20a_ibus_init_ibus_ring(ibus); + gk20a_privring_init_privring_ring(privring); return 0; } static const struct nvkm_subdev_func -gk20a_ibus = { - .init = gk20a_ibus_init, - .intr = gk20a_ibus_intr, +gk20a_privring = { + .init = gk20a_privring_init, + .intr = gk20a_privring_intr, }; int -gk20a_ibus_new(struct nvkm_device *device, int index, - struct nvkm_subdev **pibus) +gk20a_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_subdev **pprivring) { - return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus); + return nvkm_subdev_new_(&gk20a_privring, device, type, inst, pprivring); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c index 0f1f0ad6377e..b4eaf6db36d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c @@ -24,13 +24,13 @@ #include "priv.h" static const struct nvkm_subdev_func -gm200_ibus = { - .intr = gk104_ibus_intr, +gm200_privring = { + .intr = gk104_privring_intr, }; int -gm200_ibus_new(struct nvkm_device *device, int index, - struct nvkm_subdev **pibus) +gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_subdev **pprivring) { - return nvkm_subdev_new_(&gm200_ibus, device, index, pibus); + return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c index 0347b367cefe..4534111cf907 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/gp10b.c @@ -19,14 +19,14 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ -#include <subdev/ibus.h> +#include <subdev/privring.h> #include "priv.h" static int -gp10b_ibus_init(struct nvkm_subdev *ibus) +gp10b_privring_init(struct nvkm_subdev *privring) { - struct nvkm_device *device = ibus->device; + struct nvkm_device *device = privring->device; nvkm_wr32(device, 0x1200a8, 0x0); @@ -42,14 +42,14 @@ gp10b_ibus_init(struct nvkm_subdev *ibus) } static const struct nvkm_subdev_func -gp10b_ibus = { - .init = gp10b_ibus_init, - .intr = gk104_ibus_intr, +gp10b_privring = { + .init = gp10b_privring_init, + .intr = gk104_privring_intr, }; int -gp10b_ibus_new(struct nvkm_device *device, int index, - struct nvkm_subdev **pibus) +gp10b_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_subdev **pprivring) { - return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus); + return nvkm_subdev_new_(&gp10b_privring, device, type, inst, pprivring); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h new file mode 100644 index 000000000000..b378c14bc8dc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/privring/priv.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef __NVKM_PRIVRING_PRIV_H__ +#define __NVKM_PRIVRING_PRIV_H__ +#include <subdev/privring.h> + +void gf100_privring_intr(struct nvkm_subdev *); +void gk104_privring_intr(struct nvkm_subdev *); +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index 4a4d1e224126..fc5ee118e910 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c @@ -421,10 +421,10 @@ nvkm_therm = { }; void -nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, - int index, const struct nvkm_therm_func *func) +nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, enum nvkm_subdev_type type, + int inst, const struct nvkm_therm_func *func) { - nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev); + nvkm_subdev_ctor(&nvkm_therm, device, type, inst, &therm->subdev); therm->func = func; nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm); @@ -443,13 +443,13 @@ nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, int nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device, - int index, struct nvkm_therm **ptherm) + enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { struct nvkm_therm *therm; if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL))) return -ENOMEM; - nvkm_therm_ctor(therm, device, index, func); + nvkm_therm_ctor(therm, device, type, inst, func); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c index 96f8da40ac82..4af86f2d3e7e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c @@ -223,12 +223,13 @@ g84_therm = { }; int -g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm) +g84_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_therm **ptherm) { struct nvkm_therm *therm; int ret; - ret = nvkm_therm_new_(&g84_therm, device, index, &therm); + ret = nvkm_therm_new_(&g84_therm, device, type, inst, &therm); *ptherm = therm; if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c index 0981b02790e2..2b031d4eaeb6 100644 
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c @@ -146,8 +146,8 @@ gf119_therm = { }; int -gf119_therm_new(struct nvkm_device *device, int index, - struct nvkm_therm **ptherm) +gf119_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_therm **ptherm) { - return nvkm_therm_new_(&gf119_therm, device, index, ptherm); + return nvkm_therm_new_(&gf119_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c index 4e03971d2e3d..45e295c271fb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c @@ -35,8 +35,8 @@ gk104_clkgate_enable(struct nvkm_therm *base) int i; /* Program ENG_MANT, ENG_FILTER */ - for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) { - if (!nvkm_device_subdev(dev, order[i].engine)) + for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { + if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) continue; nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500); @@ -47,8 +47,8 @@ gk104_clkgate_enable(struct nvkm_therm *base) nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu); /* Enable clockgating (ENG_CLK = RUN->AUTO) */ - for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) { - if (!nvkm_device_subdev(dev, order[i].engine)) + for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { + if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) continue; nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045); @@ -64,8 +64,8 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend) int i; /* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */ - for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) { - if (!nvkm_device_subdev(dev, order[i].engine)) + for (i = 0; order[i].type != NVKM_SUBDEV_NR; i++) { + if (!nvkm_device_subdev(dev, order[i].type, order[i].inst)) continue; nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54); @@ -73,15 +73,15 @@ gk104_clkgate_fini(struct nvkm_therm *base, bool suspend) } const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = { - { NVKM_ENGINE_GR, 0x00 }, - { NVKM_ENGINE_MSPDEC, 0x04 }, - { NVKM_ENGINE_MSPPP, 0x08 }, - { NVKM_ENGINE_MSVLD, 0x0c }, - { NVKM_ENGINE_CE0, 0x10 }, - { NVKM_ENGINE_CE1, 0x14 }, - { NVKM_ENGINE_MSENC, 0x18 }, - { NVKM_ENGINE_CE2, 0x1c }, - { NVKM_SUBDEV_NR, 0 }, + { NVKM_ENGINE_GR, 0, 0x00 }, + { NVKM_ENGINE_MSPDEC, 0, 0x04 }, + { NVKM_ENGINE_MSPPP, 0, 0x08 }, + { NVKM_ENGINE_MSVLD, 0, 0x0c }, + { NVKM_ENGINE_CE, 0, 0x10 }, + { NVKM_ENGINE_CE, 1, 0x14 }, + { NVKM_ENGINE_MSENC, 0, 0x18 }, + { NVKM_ENGINE_CE, 2, 0x1c }, + { NVKM_SUBDEV_NR }, }; const struct gf100_idle_filter gk104_idle_filter = { @@ -106,9 +106,8 @@ gk104_therm_func = { }; static int -gk104_therm_new_(const struct nvkm_therm_func *func, - struct nvkm_device *device, - int index, +gk104_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, const struct gk104_clkgate_engine_info *clkgate_order, const struct gf100_idle_filter *idle_filter, struct nvkm_therm **ptherm) @@ -118,19 +117,17 @@ gk104_therm_new_(const struct nvkm_therm_func *func, if (!therm) return -ENOMEM; - nvkm_therm_ctor(&therm->base, device, index, func); + nvkm_therm_ctor(&therm->base, device, type, inst, func); *ptherm = &therm->base; therm->clkgate_order = clkgate_order; therm->idle_filter = idle_filter; - return 0; } int -gk104_therm_new(struct nvkm_device *device, 
- int index, struct nvkm_therm **ptherm) +gk104_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { - return gk104_therm_new_(&gk104_therm_func, device, index, + return gk104_therm_new_(&gk104_therm_func, device, type, inst, gk104_clkgate_engine_info, &gk104_idle_filter, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h index 293e7743b19b..9a8641421038 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h @@ -31,7 +31,8 @@ #include "gf100.h" struct gk104_clkgate_engine_info { - enum nvkm_devidx engine; + enum nvkm_subdev_type type; + int inst; u8 offset; }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c index 86848ece4d89..c845fd392f58 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c @@ -68,8 +68,8 @@ gm107_therm = { }; int -gm107_therm_new(struct nvkm_device *device, int index, +gm107_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { - return nvkm_therm_new_(&gm107_therm, device, index, ptherm); + return nvkm_therm_new_(&gm107_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c index 73dc78093d5d..e0cdd12463ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c @@ -32,8 +32,8 @@ gm200_therm = { }; int -gm200_therm_new(struct nvkm_device *device, int index, +gm200_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { - return nvkm_therm_new_(&gm200_therm, device, index, ptherm); + return nvkm_therm_new_(&gm200_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c index 9f0dea3f61dc..44f021392b95 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c @@ -49,8 +49,8 @@ gp100_therm = { }; int -gp100_therm_new(struct nvkm_device *device, int index, +gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { - return nvkm_therm_new_(&gp100_therm, device, index, ptherm); + return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c index c08097f2aff5..9e451bd9395c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c @@ -68,8 +68,8 @@ gt215_therm = { }; int -gt215_therm_new(struct nvkm_device *device, int index, - struct nvkm_therm **ptherm) +gt215_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_therm **ptherm) { - return nvkm_therm_new_(&gt215_therm, device, index, ptherm); + return nvkm_therm_new_(&gt215_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c index 2c92ffb5f9d0..c13fee9734df 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c @@ -197,8 +197,8 @@ nv40_therm = { }; int -nv40_therm_new(struct 
nvkm_device *device, int index, +nv40_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { - return nvkm_therm_new_(&nv40_therm, device, index, ptherm); + return nvkm_therm_new_(&nv40_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c index 9b57b433d4cf..9cf16a75a3cd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c @@ -169,8 +169,8 @@ nv50_therm = { }; int -nv50_therm_new(struct nvkm_device *device, int index, +nv50_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_therm **ptherm) { - return nvkm_therm_new_(&nv50_therm, device, index, ptherm); + return nvkm_therm_new_(&nv50_therm, device, type, inst, ptherm); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h index 21659daf1864..54e960589411 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h @@ -30,10 +30,10 @@ #include <subdev/bios/gpio.h> #include <subdev/bios/perf.h> -int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *, - int index, struct nvkm_therm **); -void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device, - int index, const struct nvkm_therm_func *func); +int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *, enum nvkm_subdev_type, + int, struct nvkm_therm **); +void nvkm_therm_ctor(struct nvkm_therm *, struct nvkm_device *, enum nvkm_subdev_type, int, + const struct nvkm_therm_func *); struct nvkm_fan { struct nvkm_therm *parent; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c index dd922033628c..8b0da0c06268 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c @@ -183,14 +183,14 @@ nvkm_timer = { int nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device, - int index, struct nvkm_timer **ptmr) + enum nvkm_subdev_type type, int inst, struct nvkm_timer **ptmr) { struct nvkm_timer *tmr; if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev); + nvkm_subdev_ctor(&nvkm_timer, device, type, inst, &tmr->subdev); tmr->func = func; INIT_LIST_HEAD(&tmr->alarms); spin_lock_init(&tmr->lock); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c index 9ed5f64912d0..73c3776b6b83 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c @@ -33,7 +33,8 @@ gk20a_timer = { }; int -gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr) +gk20a_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_timer **ptmr) { - return nvkm_timer_new_(&gk20a_timer, device, index, ptmr); + return nvkm_timer_new_(&gk20a_timer, device, type, inst, ptmr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c index 7f48249f41de..0058e856b378 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c @@ -145,7 +145,8 @@ nv04_timer = { }; int -nv04_timer_new(struct nvkm_device *device, int index, struct 
nvkm_timer **ptmr) +nv04_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_timer **ptmr) { - return nvkm_timer_new_(&nv04_timer, device, index, ptmr); + return nvkm_timer_new_(&nv04_timer, device, type, inst, ptmr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c index bb99a152f26e..7e1f8c22f2a8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c @@ -82,7 +82,8 @@ nv40_timer = { }; int -nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr) +nv40_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_timer **ptmr) { - return nvkm_timer_new_(&nv40_timer, device, index, ptmr); + return nvkm_timer_new_(&nv40_timer, device, type, inst, ptmr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c index 3cf9ec1b1b57..c2b263721f10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c @@ -79,7 +79,8 @@ nv41_timer = { }; int -nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr) +nv41_timer_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_timer **ptmr) { - return nvkm_timer_new_(&nv41_timer, device, index, ptmr); + return nvkm_timer_new_(&nv41_timer, device, type, inst, ptmr); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h index 89e97294b182..e6debe7e2fa9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h @@ -4,8 +4,8 @@ #define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev) #include <subdev/timer.h> -int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *, - int index, struct nvkm_timer **); +int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *, enum nvkm_subdev_type, + int, struct nvkm_timer **); struct nvkm_timer_func { void (*init)(struct nvkm_timer *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild index 438d9d78ab52..d5db845195dc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild @@ -1,3 +1,4 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/subdev/top/base.o nvkm-y += nvkm/subdev/top/gk104.o +nvkm-y += nvkm/subdev/top/ga100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c index cce6e4e90ebf..28d0789f50fe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c @@ -28,7 +28,8 @@ nvkm_top_device_new(struct nvkm_top *top) { struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL); if (info) { - info->index = NVKM_SUBDEV_NR; + info->type = NVKM_SUBDEV_NR; + info->inst = -1; info->addr = 0; info->fault = -1; info->engine = -1; @@ -41,14 +42,14 @@ nvkm_top_device_new(struct nvkm_top *top) } u32 -nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index) +nvkm_top_addr(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { struct nvkm_top *top = device->top; struct nvkm_top_device *info; if (top) { list_for_each_entry(info, &top->device, head) { - if (info->index == index) + if (info->type == type && info->inst == inst) return info->addr; } } @@ 
-57,14 +58,14 @@ nvkm_top_addr(struct nvkm_device *device, enum nvkm_devidx index) } u32 -nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index) +nvkm_top_reset(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { struct nvkm_top *top = device->top; struct nvkm_top_device *info; if (top) { list_for_each_entry(info, &top->device, head) { - if (info->index == index && info->reset >= 0) + if (info->type == type && info->inst == inst && info->reset >= 0) return BIT(info->reset); } } @@ -73,14 +74,14 @@ nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index) } u32 -nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx) +nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { struct nvkm_top *top = device->top; struct nvkm_top_device *info; if (top) { list_for_each_entry(info, &top->device, head) { - if (info->index == devidx && info->intr >= 0) + if (info->type == type && info->inst == inst && info->intr >= 0) return BIT(info->intr); } } @@ -88,44 +89,21 @@ nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx) return 0; } -u32 -nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs) -{ - struct nvkm_top *top = device->top; - struct nvkm_top_device *info; - u64 subdevs = 0; - u32 handled = 0; - - if (top) { - list_for_each_entry(info, &top->device, head) { - if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) { - if (intr & BIT(info->intr)) { - subdevs |= BIT_ULL(info->index); - handled |= BIT(info->intr); - } - } - } - } - - *psubdevs = subdevs; - return intr & ~handled; -} - int -nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx) +nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) { struct nvkm_top *top = device->top; struct nvkm_top_device *info; list_for_each_entry(info, &top->device, head) { - if (info->index == devidx && info->fault >= 0) + if (info->type == type && info->inst == inst && info->fault >= 0) return info->fault; } return -ENOENT; } -enum nvkm_devidx +struct nvkm_subdev * nvkm_top_fault(struct nvkm_device *device, int fault) { struct nvkm_top *top = device->top; @@ -133,28 +111,10 @@ nvkm_top_fault(struct nvkm_device *device, int fault) list_for_each_entry(info, &top->device, head) { if (info->fault == fault) - return info->index; - } - - return NVKM_SUBDEV_NR; -} - -enum nvkm_devidx -nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn) -{ - struct nvkm_top *top = device->top; - struct nvkm_top_device *info; - int n = 0; - - list_for_each_entry(info, &top->device, head) { - if (info->engine >= 0 && info->runlist >= 0 && n++ == index) { - *runl = info->runlist; - *engn = info->engine; - return info->index; - } + return nvkm_device_subdev(device, info->type, info->inst); } - return -ENODEV; + return NULL; } static int @@ -186,12 +146,12 @@ nvkm_top = { int nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device, - int index, struct nvkm_top **ptop) + enum nvkm_subdev_type type, int inst, struct nvkm_top **ptop) { struct nvkm_top *top; if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL))) return -ENOMEM; - nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev); + nvkm_subdev_ctor(&nvkm_top, device, type, inst, &top->subdev); top->func = func; INIT_LIST_HEAD(&top->device); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c new file mode 100644 index 000000000000..31933f3e5a07 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c @@ -0,0 +1,107 @@ +/* + * Copyright 2021 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static int +ga100_top_oneinit(struct nvkm_top *top) +{ + struct nvkm_subdev *subdev = &top->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_top_device *info = NULL; + u32 data, type, inst; + int i, n, size = nvkm_rd32(device, 0x0224fc) >> 20; + + for (i = 0, n = 0; i < size; i++) { + if (!info) { + if (!(info = nvkm_top_device_new(top))) + return -ENOMEM; + type = ~0; + inst = 0; + } + + data = nvkm_rd32(device, 0x022800 + (i * 0x04)); + nvkm_trace(subdev, "%02x: %08x\n", i, data); + if (!data && n == 0) + continue; + + switch (n++) { + case 0: + type = (data & 0x3f000000) >> 24; + inst = (data & 0x000f0000) >> 16; + info->fault = (data & 0x0000007f); + break; + case 1: + info->addr = (data & 0x00fff000); + info->reset = (data & 0x0000001f); + break; + case 2: + info->runlist = (data & 0x0000fc00) >> 10; + info->engine = (data & 0x00000003); + break; + default: + break; + } + + if (data & 0x80000000) + continue; + n = 0; + + /* Translate engine type to NVKM engine identifier. */ +#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0) +#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0) + switch (type) { + case 0x00000000: O_(NVKM_ENGINE_GR , 0); break; + case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break; + case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break; + case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break; + case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break; + case 0x00000013: I_(NVKM_ENGINE_CE , inst); break; + case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break; + case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break; + case 0x00000016: O_(NVKM_ENGINE_OFA , 0); break; + case 0x00000017: O_(NVKM_SUBDEV_FLA , 0); break; + break; + default: + break; + } + + nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d " + "runlist %2d engine %2d reset %2d\n", type, inst, + info->type == NVKM_SUBDEV_NR ? "????????" 
: nvkm_subdev_type[info->type], + info->addr, info->fault, info->runlist, info->engine, info->reset); + info = NULL; + } + + return 0; +} + +static const struct nvkm_top_func +ga100_top = { + .oneinit = ga100_top_oneinit, +}; + +int +ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_top **ptop) +{ + return nvkm_top_new_(&ga100_top, device, type, inst, ptop); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c index 1156634533f9..4dcad97bd505 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c @@ -70,26 +70,26 @@ gk104_top_oneinit(struct nvkm_top *top) continue; /* Translate engine type to NVKM engine identifier. */ -#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A -#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \ - info->index = NVKM_ENGINE_##A##0 + inst -#define C_(A) if (inst == 0) info->index = NVKM_SUBDEV_##A +#define I_(T,I) do { info->type = (T); info->inst = (I); } while(0) +#define O_(T,I) do { WARN_ON(inst); I_(T, I); } while (0) switch (type) { - case 0x00000000: A_(GR ); break; - case 0x00000001: A_(CE0 ); break; - case 0x00000002: A_(CE1 ); break; - case 0x00000003: A_(CE2 ); break; - case 0x00000008: A_(MSPDEC); break; - case 0x00000009: A_(MSPPP ); break; - case 0x0000000a: A_(MSVLD ); break; - case 0x0000000b: A_(MSENC ); break; - case 0x0000000c: A_(VIC ); break; - case 0x0000000d: A_(SEC2 ); break; - case 0x0000000e: B_(NVENC ); break; - case 0x0000000f: A_(NVENC1); break; - case 0x00000010: B_(NVDEC ); break; - case 0x00000013: B_(CE ); break; - case 0x00000014: C_(GSP ); break; + case 0x00000000: O_(NVKM_ENGINE_GR , 0); break; + case 0x00000001: O_(NVKM_ENGINE_CE , 0); break; + case 0x00000002: O_(NVKM_ENGINE_CE , 1); break; + case 0x00000003: O_(NVKM_ENGINE_CE , 2); break; + case 0x00000008: O_(NVKM_ENGINE_MSPDEC, 0); break; + case 0x00000009: O_(NVKM_ENGINE_MSPPP , 0); break; + case 0x0000000a: O_(NVKM_ENGINE_MSVLD , 0); break; + case 0x0000000b: O_(NVKM_ENGINE_MSENC , 0); break; + case 0x0000000c: O_(NVKM_ENGINE_VIC , 0); break; + case 0x0000000d: O_(NVKM_ENGINE_SEC2 , 0); break; + case 0x0000000e: I_(NVKM_ENGINE_NVENC , inst); break; + case 0x0000000f: O_(NVKM_ENGINE_NVENC , 1); break; + case 0x00000010: I_(NVKM_ENGINE_NVDEC , inst); break; + case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break; + case 0x00000013: I_(NVKM_ENGINE_CE , inst); break; + case 0x00000014: O_(NVKM_SUBDEV_GSP , 0); break; + case 0x00000015: O_(NVKM_ENGINE_NVJPG , 0); break; default: break; } @@ -97,8 +97,7 @@ gk104_top_oneinit(struct nvkm_top *top) nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d " "engine %2d runlist %2d intr %2d " "reset %2d\n", type, inst, - info->index == NVKM_SUBDEV_NR ? NULL : - nvkm_subdev_name[info->index], + info->type == NVKM_SUBDEV_NR ? "????????" 
: nvkm_subdev_type[info->type], info->addr, info->fault, info->engine, info->runlist, info->intr, info->reset); info = NULL; @@ -113,7 +112,8 @@ gk104_top = { }; int -gk104_top_new(struct nvkm_device *device, int index, struct nvkm_top **ptop) +gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_top **ptop) { - return nvkm_top_new_(&gk104_top, device, index, ptop); + return nvkm_top_new_(&gk104_top, device, type, inst, ptop); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h index a16baa2941cf..8e103a836705 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h @@ -8,19 +8,8 @@ struct nvkm_top_func { int (*oneinit)(struct nvkm_top *); }; -int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *, - int, struct nvkm_top **); - -struct nvkm_top_device { - enum nvkm_devidx index; - u32 addr; - int fault; - int engine; - int runlist; - int reset; - int intr; - struct list_head head; -}; +int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_top **); struct nvkm_top_device *nvkm_top_device_new(struct nvkm_top *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c index e344901cfdc7..a17a6dd8d3de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c @@ -281,12 +281,12 @@ nvkm_volt = { void nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device, - int index, struct nvkm_volt *volt) + enum nvkm_subdev_type type, int inst, struct nvkm_volt *volt) { struct nvkm_bios *bios = device->bios; int i; - nvkm_subdev_ctor(&nvkm_volt, device, index, &volt->subdev); + nvkm_subdev_ctor(&nvkm_volt, device, type, inst, &volt->subdev); volt->func = func; /* Assuming the non-bios device should build the voltage table later */ @@ -319,10 +319,10 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device, int nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device, - int index, struct nvkm_volt **pvolt) + enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt) { if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL))) return -ENOMEM; - nvkm_volt_ctor(func, device, index, *pvolt); + nvkm_volt_ctor(func, device, type, inst, *pvolt); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c index d9ed6925ca64..b47a1c0817be 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf100.c @@ -56,12 +56,13 @@ gf100_volt = { }; int -gf100_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) +gf100_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_volt **pvolt) { struct nvkm_volt *volt; int ret; - ret = nvkm_volt_new_(&gf100_volt, device, index, &volt); + ret = nvkm_volt_new_(&gf100_volt, device, type, inst, &volt); *pvolt = volt; if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c index 547a58f0aeac..03c8a2c2916c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c @@ -46,12 +46,13 @@ gf117_volt = { }; int -gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) 
+gf117_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_volt **pvolt) { struct nvkm_volt *volt; int ret; - ret = nvkm_volt_new_(&gf117_volt, device, index, &volt); + ret = nvkm_volt_new_(&gf117_volt, device, type, inst, &volt); *pvolt = volt; if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c index 1c744e029454..d1ce4309cfb8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c @@ -95,7 +95,8 @@ gk104_volt_pwm = { }; int -gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) +gk104_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_volt **pvolt) { const struct nvkm_volt_func *volt_func = &gk104_volt_gpio; struct dcb_gpio_func gpio; @@ -114,7 +115,7 @@ gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL))) return -ENOMEM; - nvkm_volt_ctor(volt_func, device, index, &volt->base); + nvkm_volt_ctor(volt_func, device, type, inst, &volt->base); *pvolt = &volt->base; volt->bios = bios; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c index ce5d83cdc7cf..8c2faa964511 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c @@ -144,14 +144,14 @@ gk20a_volt = { }; int -gk20a_volt_ctor(struct nvkm_device *device, int index, +gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, const struct cvb_coef *coefs, int nb_coefs, int vmin, struct gk20a_volt *volt) { struct nvkm_device_tegra *tdev = device->func->tegra(device); int i, uv; - nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base); + nvkm_volt_ctor(&gk20a_volt, device, type, inst, &volt->base); uv = regulator_get_voltage(tdev->vdd); nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv); @@ -172,7 +172,7 @@ gk20a_volt_ctor(struct nvkm_device *device, int index, } int -gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) +gk20a_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_volt **pvolt) { struct gk20a_volt *volt; @@ -181,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) return -ENOMEM; *pvolt = &volt->base; - return gk20a_volt_ctor(device, index, gk20a_cvb_coef, + return gk20a_volt_ctor(device, type, inst, gk20a_cvb_coef, ARRAY_SIZE(gk20a_cvb_coef), 0, volt); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h index 6a6c97f9684e..01f8a5fcf496 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h @@ -37,7 +37,7 @@ struct gk20a_volt { struct regulator *vdd; }; -int gk20a_volt_ctor(struct nvkm_device *device, int index, +int gk20a_volt_ctor(struct nvkm_device *device, enum nvkm_subdev_type, int, const struct cvb_coef *coefs, int nb_coefs, int vmin, struct gk20a_volt *volt); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c index 2925b9cae681..c2e9694d333f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c @@ -64,7 +64,8 @@ static const u32 speedo_to_vmin[] = { }; int -gm20b_volt_new(struct nvkm_device *device, 
int index, struct nvkm_volt **pvolt) +gm20b_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_volt **pvolt) { struct nvkm_device_tegra *tdev = device->func->tegra(device); struct gk20a_volt *volt; @@ -84,9 +85,9 @@ gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) vmin = speedo_to_vmin[tdev->gpu_speedo_id]; if (tdev->gpu_speedo_id >= 1) - return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef, - ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt); + return gk20a_volt_ctor(device, type, inst, gm20b_na_cvb_coef, + ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt); else - return gk20a_volt_ctor(device, index, gm20b_cvb_coef, - ARRAY_SIZE(gm20b_cvb_coef), vmin, volt); + return gk20a_volt_ctor(device, type, inst, gm20b_cvb_coef, + ARRAY_SIZE(gm20b_cvb_coef), vmin, volt); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c index 23409387abb5..d6a587d6082d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c @@ -30,12 +30,13 @@ nv40_volt = { }; int -nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) +nv40_volt_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_volt **pvolt) { struct nvkm_volt *volt; int ret; - ret = nvkm_volt_new_(&nv40_volt, device, index, &volt); + ret = nvkm_volt_new_(&nv40_volt, device, type, inst, &volt); *pvolt = volt; if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h index 75f13a34671f..24e2d16d1913 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h @@ -4,10 +4,10 @@ #define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev) #include <subdev/volt.h> -void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *, - int index, struct nvkm_volt *); -int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *, - int index, struct nvkm_volt **); +void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_volt *); +int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *, enum nvkm_subdev_type, int, + struct nvkm_volt **); struct nvkm_volt_func { int (*oneinit)(struct nvkm_volt *); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8e11612f5fe1..febcc87ddfe1 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -2149,11 +2149,12 @@ static int dsi_vc_send_short(struct dsi_data *dsi, int vc, const struct mipi_dsi_msg *msg) { struct mipi_dsi_packet pkt; + int err; u32 r; - r = mipi_dsi_create_packet(&pkt, msg); - if (r < 0) - return r; + err = mipi_dsi_create_packet(&pkt, msg); + if (err) + return err; WARN_ON(!dsi_bus_is_locked(dsi)); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 28bbad1353ee..8632139e0f01 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -68,6 +68,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; struct omap_drm_private *priv = dev->dev_private; + bool fence_cookie = dma_fence_begin_signalling(); dispc_runtime_get(priv->dispc); @@ -90,8 +91,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) omap_atomic_wait_for_completion(dev, old_state); 
drm_atomic_helper_commit_planes(dev, old_state, 0); - - drm_atomic_helper_commit_hw_done(old_state); } else { /* * OMAP3 DSS seems to have issues with the work-around above, @@ -101,10 +100,12 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_planes(dev, old_state, 0); drm_atomic_helper_commit_modeset_enables(dev, old_state); - - drm_atomic_helper_commit_hw_done(old_state); } + drm_atomic_helper_commit_hw_done(old_state); + + dma_fence_end_signalling(fence_cookie); + /* * Wait for completion of the page flips to ensure that old buffers * can't be touched by the hardware anymore before cleaning up planes. diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 51dc24acea73..801da917507d 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -40,30 +40,32 @@ static void omap_plane_cleanup_fb(struct drm_plane *plane, } static void omap_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct omap_drm_private *priv = plane->dev->dev_private; struct omap_plane *omap_plane = to_omap_plane(plane); - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct omap_overlay_info info; int ret; - DBG("%s, crtc=%p fb=%p", omap_plane->name, state->crtc, state->fb); + DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc, + new_state->fb); memset(&info, 0, sizeof(info)); info.rotation_type = OMAP_DSS_ROT_NONE; info.rotation = DRM_MODE_ROTATE_0; - info.global_alpha = state->alpha >> 8; - info.zorder = state->normalized_zpos; - if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) + info.global_alpha = new_state->alpha >> 8; + info.zorder = new_state->normalized_zpos; + if (new_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) info.pre_mult_alpha = 1; else info.pre_mult_alpha = 0; - info.color_encoding = state->color_encoding; - info.color_range = state->color_range; + info.color_encoding = new_state->color_encoding; + info.color_range = new_state->color_range; /* update scanout: */ - omap_framebuffer_update_scanout(state->fb, state, &info); + omap_framebuffer_update_scanout(new_state->fb, new_state, &info); DBG("%dx%d -> %dx%d (%d)", info.width, info.height, info.out_width, info.out_height, @@ -73,8 +75,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane, /* and finally, update omapdss: */ ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info, - omap_crtc_timings(state->crtc), false, - omap_crtc_channel(state->crtc)); + omap_crtc_timings(new_state->crtc), false, + omap_crtc_channel(new_state->crtc)); if (ret) { dev_err(plane->dev->dev, "Failed to setup plane %s\n", omap_plane->name); @@ -86,31 +88,35 @@ static void omap_plane_atomic_update(struct drm_plane *plane, } static void omap_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct omap_drm_private *priv = plane->dev->dev_private; struct omap_plane *omap_plane = to_omap_plane(plane); - plane->state->rotation = DRM_MODE_ROTATE_0; - plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY - ? 0 : omap_plane->id; + new_state->rotation = DRM_MODE_ROTATE_0; + new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 
0 : omap_plane->id; dispc_ovl_enable(priv->dispc, omap_plane->id, false); } static int omap_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; - if (!state->fb) + if (!new_plane_state->fb) return 0; - /* crtc should only be NULL when disabling (i.e., !state->fb) */ - if (WARN_ON(!state->crtc)) + /* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */ + if (WARN_ON(!new_plane_state->crtc)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_plane_state->crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; @@ -118,17 +124,17 @@ static int omap_plane_atomic_check(struct drm_plane *plane, if (!crtc_state->enable) return 0; - if (state->crtc_x < 0 || state->crtc_y < 0) + if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) return -EINVAL; - if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) + if (new_plane_state->crtc_x + new_plane_state->crtc_w > crtc_state->adjusted_mode.hdisplay) return -EINVAL; - if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay) + if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay) return -EINVAL; - if (state->rotation != DRM_MODE_ROTATE_0 && - !omap_framebuffer_supports_rotation(state->fb)) + if (new_plane_state->rotation != DRM_MODE_ROTATE_0 && + !omap_framebuffer_supports_rotation(new_plane_state->fb)) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index bc36aa3c1123..fe5ac3ef9018 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi) dsi->lanes = 1; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_CLOCK_NON_CONTINUOUS; drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 66c7d765b8f7..59a8d99e777d 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -244,7 +244,7 @@ static int panel_lvds_probe(struct platform_device *pdev) static int panel_lvds_remove(struct platform_device *pdev) { - struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev); + struct panel_lvds *lvds = platform_get_drvdata(pdev); drm_panel_remove(&lvds->panel); diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c index 0ee508576231..3939b25e6666 100644 --- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c @@ -267,7 +267,7 @@ static int seiko_panel_probe(struct device *dev, static int seiko_panel_remove(struct platform_device *pdev) { - struct seiko_panel *panel = dev_get_drvdata(&pdev->dev); + struct seiko_panel *panel = platform_get_drvdata(pdev); drm_panel_remove(&panel->base); drm_panel_disable(&panel->base); @@ -277,7 +277,7 @@ static int seiko_panel_remove(struct platform_device *pdev) static void seiko_panel_shutdown(struct 
platform_device *pdev) { - struct seiko_panel *panel = dev_get_drvdata(&pdev->dev); + struct seiko_panel *panel = platform_get_drvdata(pdev); drm_panel_disable(&panel->base); } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 4e2dad314c79..9858079f9e14 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -4800,7 +4800,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi) err = mipi_dsi_attach(dsi); if (err) { - struct panel_simple *panel = dev_get_drvdata(&dsi->dev); + struct panel_simple *panel = mipi_dsi_get_drvdata(dsi); drm_panel_remove(&panel->base); } diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 56b3f5935703..7c5ffc81dce1 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -130,8 +130,16 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) panfrost_devfreq_profile.initial_freq = cur_freq; dev_pm_opp_put(opp); + /* + * Setup default thresholds for the simple_ondemand governor. + * The values are chosen based on experiments. + */ + pfdevfreq->gov_data.upthreshold = 45; + pfdevfreq->gov_data.downdifferential = 5; + devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile, - DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL); + DEVFREQ_GOV_SIMPLE_ONDEMAND, + &pfdevfreq->gov_data); if (IS_ERR(devfreq)) { DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n"); ret = PTR_ERR(devfreq); diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h index db6ea48e21f9..1e2a4de941aa 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h @@ -4,6 +4,7 @@ #ifndef __PANFROST_DEVFREQ_H__ #define __PANFROST_DEVFREQ_H__ +#include <linux/devfreq.h> #include <linux/spinlock.h> #include <linux/ktime.h> @@ -17,6 +18,7 @@ struct panfrost_devfreq { struct devfreq *devfreq; struct opp_table *regulators_opp_table; struct thermal_cooling_device *cooling; + struct devfreq_simple_ondemand_data gov_data; bool opp_of_table_added; ktime_t busy_time; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 04e6f6f9b742..6003cfeb1322 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -432,7 +432,8 @@ static void panfrost_scheduler_start(struct panfrost_queue_state *queue) mutex_unlock(&queue->lock); } -static void panfrost_job_timedout(struct drm_sched_job *sched_job) +static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job + *sched_job) { struct panfrost_job *job = to_panfrost_job(sched_job); struct panfrost_device *pfdev = job->pfdev; @@ -443,7 +444,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) * spurious. Bail out. */ if (dma_fence_is_signaled(job->done_fence)) - return; + return DRM_GPU_SCHED_STAT_NOMINAL; dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", js, @@ -455,11 +456,13 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) /* Scheduler is already stopped, nothing to do. */ if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job)) - return; + return DRM_GPU_SCHED_STAT_NOMINAL; /* Schedule a reset if there's no reset in progress. 
*/ if (!atomic_xchg(&pfdev->reset.pending, 1)) schedule_work(&pfdev->reset.work); + + return DRM_GPU_SCHED_STAT_NOMINAL; } static const struct drm_sched_backend_ops panfrost_sched_ops = { @@ -624,7 +627,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) ret = drm_sched_init(&js->queue[j].sched, &panfrost_sched_ops, 1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS), - "pan_js"); + NULL, "pan_js"); if (ret) { dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); goto err_sched; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 7c1b3481b785..0581186ebfb3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, } bo->base.pages = pages; bo->base.pages_use_count = 1; - } else + } else { pages = bo->base.pages; + if (pages[page_offset]) { + /* Pages are already mapped, bail out. */ + mutex_unlock(&bo->base.pages_lock); + goto out; + } + } mapping = bo->base.base.filp->f_mapping; mapping_set_unevictable(mapping); @@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); +out: panfrost_gem_mapping_put(bomapping); return 0; @@ -571,32 +578,32 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) { struct panfrost_device *pfdev = data; u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT); - int i, ret; + int ret; - for (i = 0; status; i++) { - u32 mask = BIT(i) | BIT(i + 16); + while (status) { + u32 as = ffs(status | (status >> 16)) - 1; + u32 mask = BIT(as) | BIT(as + 16); u64 addr; u32 fault_status; u32 exception_type; u32 access_type; u32 source_id; - if (!(status & mask)) - continue; - - fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i)); - addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i)); - addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32; + fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as)); + addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as)); + addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32; /* decode the fault status */ exception_type = fault_status & 0xFF; access_type = (fault_status >> 8) & 0x3; source_id = (fault_status >> 16); + mmu_write(pfdev, MMU_INT_CLEAR, mask); + /* Page fault only */ ret = -1; - if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0) - ret = panfrost_mmu_map_fault_addr(pfdev, i, addr); + if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0) + ret = panfrost_mmu_map_fault_addr(pfdev, as, addr); if (ret) /* terminal fault, print info about the fault */ @@ -608,7 +615,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) "exception type 0x%X: %s\n" "access type 0x%X: %s\n" "source id 0x%X\n", - i, addr, + as, addr, "TODO", fault_status, (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"), @@ -616,9 +623,11 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) access_type, access_type_name(pfdev, fault_status), source_id); - mmu_write(pfdev, MMU_INT_CLEAR, mask); - status &= ~mask; + + /* If we received new MMU interrupts, process them before returning. 
*/ + if (!status) + status = mmu_read(pfdev, MMU_INT_RAWSTAT); } mmu_write(pfdev, MMU_INT_MASK, ~0); diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 69c02e7c82b7..6fd7f13f1aca 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -17,8 +17,8 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_vblank.h> #include "pl111_drm.h" @@ -440,7 +440,7 @@ static struct drm_simple_display_pipe_funcs pl111_display_funcs = { .enable = pl111_display_enable, .disable = pl111_display_disable, .update = pl111_display_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index e4dcaef6c143..fa0a737e9dea 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -320,7 +320,7 @@ dev_put: return ret; } -static int pl111_amba_remove(struct amba_device *amba_dev) +static void pl111_amba_remove(struct amba_device *amba_dev) { struct device *dev = &amba_dev->dev; struct drm_device *drm = amba_get_drvdata(amba_dev); @@ -331,8 +331,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev) drm_panel_bridge_remove(priv->bridge); drm_dev_put(drm); of_reserved_mem_device_release(dev); - - return 0; } /* diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 54e3c3a97440..7b00c955cd82 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -254,6 +254,7 @@ int qxl_garbage_collect(struct qxl_device *qdev) } } + wake_up_all(&qdev->release_event); DRM_DEBUG_DRIVER("%d\n", i); return i; @@ -268,7 +269,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev, int ret; ret = qxl_bo_create(qdev, size, false /* not kernel - device */, - false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); + false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo); if (ret) { DRM_ERROR("failed to allocate VRAM BO\n"); return ret; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 012bce0cdb65..a7637e79cb42 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -328,6 +328,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, head.id = i; head.flags = 0; + head.surface_id = 0; oldcount = qdev->monitors_config->count; if (crtc->state->active) { struct drm_display_mode *mode = &crtc->mode; @@ -463,25 +464,26 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { }; static int qxl_primary_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct qxl_device *qdev = to_qxl(plane->dev); struct qxl_bo *bo; - if (!state->crtc || !state->fb) + if (!new_plane_state->crtc || !new_plane_state->fb) return 0; - bo = gem_to_qxl_bo(state->fb->obj[0]); + bo = gem_to_qxl_bo(new_plane_state->fb->obj[0]); return qxl_check_framebuffer(qdev, bo); } -static int qxl_primary_apply_cursor(struct drm_plane *plane) +static int qxl_primary_apply_cursor(struct qxl_device *qdev, + struct drm_plane_state *plane_state) { - struct drm_device *dev = plane->dev; - struct qxl_device *qdev = to_qxl(dev); - 
struct drm_framebuffer *fb = plane->state->fb; - struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); + struct drm_framebuffer *fb = plane_state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); struct qxl_cursor_cmd *cmd; struct qxl_release *release; int ret = 0; @@ -505,8 +507,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane) cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; - cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x; - cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y; + cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x; + cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); @@ -523,17 +525,126 @@ out_free_release: return ret; } +static int qxl_primary_move_cursor(struct qxl_device *qdev, + struct drm_plane_state *plane_state) +{ + struct drm_framebuffer *fb = plane_state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); + struct qxl_cursor_cmd *cmd; + struct qxl_release *release; + int ret = 0; + + if (!qcrtc->cursor_bo) + return 0; + + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), + QXL_RELEASE_CURSOR_CMD, + &release, NULL); + if (ret) + return ret; + + ret = qxl_release_reserve_list(release, true); + if (ret) { + qxl_release_free(qdev, release); + return ret; + } + + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_CURSOR_MOVE; + cmd->u.position.x = plane_state->crtc_x + fb->hot_x; + cmd->u.position.y = plane_state->crtc_y + fb->hot_y; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + return ret; +} + +static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev, + struct qxl_bo *user_bo, + int hot_x, int hot_y) +{ + static const u32 size = 64 * 64 * 4; + struct qxl_bo *cursor_bo; + struct dma_buf_map cursor_map; + struct dma_buf_map user_map; + struct qxl_cursor cursor; + int ret; + + if (!user_bo) + return NULL; + + ret = qxl_bo_create(qdev, sizeof(struct qxl_cursor) + size, + false, true, QXL_GEM_DOMAIN_VRAM, 1, + NULL, &cursor_bo); + if (ret) + goto err; + + ret = qxl_bo_vmap(cursor_bo, &cursor_map); + if (ret) + goto err_unref; + + ret = qxl_bo_vmap(user_bo, &user_map); + if (ret) + goto err_unmap; + + cursor.header.unique = 0; + cursor.header.type = SPICE_CURSOR_TYPE_ALPHA; + cursor.header.width = 64; + cursor.header.height = 64; + cursor.header.hot_spot_x = hot_x; + cursor.header.hot_spot_y = hot_y; + cursor.data_size = size; + cursor.chunk.next_chunk = 0; + cursor.chunk.prev_chunk = 0; + cursor.chunk.data_size = size; + if (cursor_map.is_iomem) { + memcpy_toio(cursor_map.vaddr_iomem, + &cursor, sizeof(cursor)); + memcpy_toio(cursor_map.vaddr_iomem + sizeof(cursor), + user_map.vaddr, size); + } else { + memcpy(cursor_map.vaddr, + &cursor, sizeof(cursor)); + memcpy(cursor_map.vaddr + sizeof(cursor), + user_map.vaddr, size); + } + + qxl_bo_vunmap(user_bo); + qxl_bo_vunmap(cursor_bo); + return cursor_bo; + +err_unmap: + qxl_bo_vunmap(cursor_bo); +err_unref: + qxl_bo_unpin(cursor_bo); + qxl_bo_unref(&cursor_bo); +err: + return NULL; +} + +static void qxl_free_cursor(struct qxl_bo *cursor_bo) +{ + if (!cursor_bo) + return; + + qxl_bo_unpin(cursor_bo); + qxl_bo_unref(&cursor_bo); +} + static void qxl_primary_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct 
drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct qxl_device *qdev = to_qxl(plane->dev); - struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]); + struct qxl_bo *bo = gem_to_qxl_bo(new_state->fb->obj[0]); struct qxl_bo *primary; struct drm_clip_rect norect = { .x1 = 0, .y1 = 0, - .x2 = plane->state->fb->width, - .y2 = plane->state->fb->height + .x2 = new_state->fb->width, + .y2 = new_state->fb->height }; uint32_t dumb_shadow_offset = 0; @@ -543,25 +654,29 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, if (qdev->primary_bo) qxl_io_destroy_primary(qdev); qxl_io_create_primary(qdev, primary); - qxl_primary_apply_cursor(plane); + qxl_primary_apply_cursor(qdev, plane->state); } if (bo->is_dumb) dumb_shadow_offset = - qdev->dumb_heads[plane->state->crtc->index].x; + qdev->dumb_heads[new_state->crtc->index].x; - qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1, + qxl_draw_dirty_fb(qdev, new_state->fb, bo, 0, 0, &norect, 1, 1, dumb_shadow_offset); } static void qxl_primary_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct qxl_device *qdev = to_qxl(plane->dev); if (old_state->fb) { struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]); + if (bo->shadow) + bo = bo->shadow; if (bo->is_primary) { qxl_io_destroy_primary(qdev); bo->is_primary = false; @@ -570,126 +685,29 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, } static void qxl_cursor_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_device *dev = plane->dev; - struct qxl_device *qdev = to_qxl(dev); - struct drm_framebuffer *fb = plane->state->fb; - struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); - struct qxl_release *release; - struct qxl_cursor_cmd *cmd; - struct qxl_cursor *cursor; - struct drm_gem_object *obj; - struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; - int ret; - struct dma_buf_map user_map; - struct dma_buf_map cursor_map; - void *user_ptr; - int size = 64*64*4; - - ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), - QXL_RELEASE_CURSOR_CMD, - &release, NULL); - if (ret) - return; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct qxl_device *qdev = to_qxl(plane->dev); + struct drm_framebuffer *fb = new_state->fb; if (fb != old_state->fb) { - obj = fb->obj[0]; - user_bo = gem_to_qxl_bo(obj); - - /* pinning is done in the prepare/cleanup framevbuffer */ - ret = qxl_bo_kmap(user_bo, &user_map); - if (ret) - goto out_free_release; - user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */ - - ret = qxl_alloc_bo_reserved(qdev, release, - sizeof(struct qxl_cursor) + size, - &cursor_bo); - if (ret) - goto out_kunmap; - - ret = qxl_bo_pin(cursor_bo); - if (ret) - goto out_free_bo; - - ret = qxl_release_reserve_list(release, true); - if (ret) - goto out_unpin; - - ret = qxl_bo_kmap(cursor_bo, &cursor_map); - if (ret) - goto out_backoff; - if (cursor_map.is_iomem) /* TODO: Use mapping abstraction properly */ - cursor = (struct qxl_cursor __force *)cursor_map.vaddr_iomem; - else - cursor = (struct qxl_cursor *)cursor_map.vaddr; - - cursor->header.unique = 0; - cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; - cursor->header.width = 64; - 
cursor->header.height = 64; - cursor->header.hot_spot_x = fb->hot_x; - cursor->header.hot_spot_y = fb->hot_y; - cursor->data_size = size; - cursor->chunk.next_chunk = 0; - cursor->chunk.prev_chunk = 0; - cursor->chunk.data_size = size; - memcpy(cursor->chunk.data, user_ptr, size); - qxl_bo_kunmap(cursor_bo); - qxl_bo_kunmap(user_bo); - - cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); - cmd->u.set.visible = 1; - cmd->u.set.shape = qxl_bo_physical_address(qdev, - cursor_bo, 0); - cmd->type = QXL_CURSOR_SET; - - old_cursor_bo = qcrtc->cursor_bo; - qcrtc->cursor_bo = cursor_bo; - cursor_bo = NULL; + qxl_primary_apply_cursor(qdev, new_state); } else { - - ret = qxl_release_reserve_list(release, true); - if (ret) - goto out_free_release; - - cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release); - cmd->type = QXL_CURSOR_MOVE; + qxl_primary_move_cursor(qdev, new_state); } - - cmd->u.position.x = plane->state->crtc_x + fb->hot_x; - cmd->u.position.y = plane->state->crtc_y + fb->hot_y; - - qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_release_fence_buffer_objects(release); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); - - if (old_cursor_bo != NULL) - qxl_bo_unpin(old_cursor_bo); - qxl_bo_unref(&old_cursor_bo); - qxl_bo_unref(&cursor_bo); - - return; - -out_backoff: - qxl_release_backoff_reserve_list(release); -out_unpin: - qxl_bo_unpin(cursor_bo); -out_free_bo: - qxl_bo_unref(&cursor_bo); -out_kunmap: - qxl_bo_kunmap(user_bo); -out_free_release: - qxl_release_free(qdev, release); - return; - } static void qxl_cursor_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct qxl_device *qdev = to_qxl(plane->dev); + struct qxl_crtc *qcrtc; struct qxl_release *release; struct qxl_cursor_cmd *cmd; int ret; @@ -712,6 +730,10 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, qxl_release_fence_buffer_objects(release); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + + qcrtc = to_qxl_crtc(old_state->crtc); + qxl_free_cursor(qcrtc->cursor_bo); + qcrtc->cursor_bo = NULL; } static void qxl_update_dumb_head(struct qxl_device *qdev, @@ -769,13 +791,45 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev, DRM_DEBUG("%dx%d\n", surf->width, surf->height); } +static void qxl_prepare_shadow(struct qxl_device *qdev, struct qxl_bo *user_bo, + int crtc_index) +{ + struct qxl_surface surf; + + qxl_update_dumb_head(qdev, crtc_index, + user_bo); + qxl_calc_dumb_shadow(qdev, &surf); + if (!qdev->dumb_shadow_bo || + qdev->dumb_shadow_bo->surf.width != surf.width || + qdev->dumb_shadow_bo->surf.height != surf.height) { + if (qdev->dumb_shadow_bo) { + drm_gem_object_put + (&qdev->dumb_shadow_bo->tbo.base); + qdev->dumb_shadow_bo = NULL; + } + qxl_bo_create(qdev, surf.height * surf.stride, + true, true, QXL_GEM_DOMAIN_SURFACE, 0, + &surf, &qdev->dumb_shadow_bo); + } + if (user_bo->shadow != qdev->dumb_shadow_bo) { + if (user_bo->shadow) { + qxl_bo_unpin(user_bo->shadow); + drm_gem_object_put + (&user_bo->shadow->tbo.base); + user_bo->shadow = NULL; + } + drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base); + user_bo->shadow = qdev->dumb_shadow_bo; + qxl_bo_pin(user_bo->shadow); + } +} + static int qxl_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct qxl_device *qdev = to_qxl(plane->dev); struct drm_gem_object *obj; struct qxl_bo *user_bo; - 
struct qxl_surface surf; if (!new_state->fb) return 0; @@ -785,30 +839,18 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, if (plane->type == DRM_PLANE_TYPE_PRIMARY && user_bo->is_dumb) { - qxl_update_dumb_head(qdev, new_state->crtc->index, - user_bo); - qxl_calc_dumb_shadow(qdev, &surf); - if (!qdev->dumb_shadow_bo || - qdev->dumb_shadow_bo->surf.width != surf.width || - qdev->dumb_shadow_bo->surf.height != surf.height) { - if (qdev->dumb_shadow_bo) { - drm_gem_object_put - (&qdev->dumb_shadow_bo->tbo.base); - qdev->dumb_shadow_bo = NULL; - } - qxl_bo_create(qdev, surf.height * surf.stride, - true, true, QXL_GEM_DOMAIN_SURFACE, &surf, - &qdev->dumb_shadow_bo); - } - if (user_bo->shadow != qdev->dumb_shadow_bo) { - if (user_bo->shadow) { - drm_gem_object_put - (&user_bo->shadow->tbo.base); - user_bo->shadow = NULL; - } - drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base); - user_bo->shadow = qdev->dumb_shadow_bo; - } + qxl_prepare_shadow(qdev, user_bo, new_state->crtc->index); + } + + if (plane->type == DRM_PLANE_TYPE_CURSOR && + plane->state->fb != new_state->fb) { + struct qxl_crtc *qcrtc = to_qxl_crtc(new_state->crtc); + struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo; + + qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo, + new_state->fb->hot_x, + new_state->fb->hot_y); + qxl_free_cursor(old_cursor_bo); } return qxl_bo_pin(user_bo); @@ -833,6 +875,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, qxl_bo_unpin(user_bo); if (old_state->fb != plane->state->fb && user_bo->shadow) { + qxl_bo_unpin(user_bo->shadow); drm_gem_object_put(&user_bo->shadow->tbo.base); user_bo->shadow = NULL; } @@ -1154,12 +1197,10 @@ int qxl_create_monitors_object(struct qxl_device *qdev) } qdev->monitors_config_bo = gem_to_qxl_bo(gobj); - ret = qxl_bo_pin(qdev->monitors_config_bo); + ret = qxl_bo_vmap(qdev->monitors_config_bo, &map); if (ret) return ret; - qxl_bo_kmap(qdev->monitors_config_bo, &map); - qdev->monitors_config = qdev->monitors_config_bo->kptr; qdev->ram_header->monitors_config = qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0); @@ -1178,11 +1219,13 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev) { int ret; + if (!qdev->monitors_config_bo) + return 0; + qdev->monitors_config = NULL; qdev->ram_header->monitors_config = 0; - qxl_bo_kunmap(qdev->monitors_config_bo); - ret = qxl_bo_unpin(qdev->monitors_config_bo); + ret = qxl_bo_vunmap(qdev->monitors_config_bo); if (ret) return ret; @@ -1195,7 +1238,9 @@ int qxl_modeset_init(struct qxl_device *qdev) int i; int ret; - drm_mode_config_init(&qdev->ddev); + ret = drmm_mode_config_init(&qdev->ddev); + if (ret) + return ret; ret = qxl_create_monitors_object(qdev); if (ret) @@ -1227,6 +1272,10 @@ int qxl_modeset_init(struct qxl_device *qdev) void qxl_modeset_fini(struct qxl_device *qdev) { + if (qdev->dumb_shadow_bo) { + qxl_bo_unpin(qdev->dumb_shadow_bo); + drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base); + qdev->dumb_shadow_bo = NULL; + } qxl_destroy_monitors_object(qdev); - drm_mode_config_cleanup(&qdev->ddev); } diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 7b7acb910780..7d27891e87fa 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -48,7 +48,7 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, struct qxl_clip_rects *dev_clips; int ret; - ret = qxl_bo_kmap(clips_bo, &map); + ret = qxl_bo_vmap_locked(clips_bo, &map); if (ret) return NULL; dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */ @@ 
-202,7 +202,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, if (ret) goto out_release_backoff; - ret = qxl_bo_kmap(bo, &surface_map); + ret = qxl_bo_vmap_locked(bo, &surface_map); if (ret) goto out_release_backoff; surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */ @@ -210,7 +210,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, ret = qxl_image_init(qdev, release, dimage, surface_base, left - dumb_shadow_offset, top, width, height, depth, stride); - qxl_bo_kunmap(bo); + qxl_bo_vunmap_locked(bo); if (ret) goto out_release_backoff; @@ -247,7 +247,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, rects[i].top = clips_ptr->y1; rects[i].bottom = clips_ptr->y2; } - qxl_bo_kunmap(clips_bo); + qxl_bo_vunmap_locked(clips_bo); qxl_release_fence_buffer_objects(release); qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index fb5f6a5e81d7..1864467f1063 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -141,7 +141,7 @@ static void qxl_drm_release(struct drm_device *dev) /* * TODO: qxl_device_fini() call should be in qxl_pci_remove(), - * reodering qxl_modeset_fini() + qxl_device_fini() calls is + * reordering qxl_modeset_fini() + qxl_device_fini() calls is * non-trivial though. */ qxl_modeset_fini(qdev); diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 83b54f0dad61..6dd57cfb2e7c 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -125,7 +125,7 @@ struct qxl_output { #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) struct qxl_mman { - struct ttm_bo_device bdev; + struct ttm_device bdev; }; struct qxl_memslot { @@ -214,6 +214,8 @@ struct qxl_device { spinlock_t release_lock; struct idr release_idr; uint32_t release_seqno; + atomic_t release_count; + wait_queue_head_t release_event; spinlock_t release_idr_lock; struct mutex async_io_mutex; unsigned int last_sent_io_cmd; @@ -335,7 +337,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp, /* qxl ttm */ int qxl_ttm_init(struct qxl_device *qdev); void qxl_ttm_fini(struct qxl_device *qdev); -int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, +int qxl_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem); /* qxl image */ diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index c04cd5a2553c..48a58ba1db96 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -59,7 +59,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, surf.stride = pitch; surf.format = format; r = qxl_gem_object_create_with_handle(qdev, file_priv, - QXL_GEM_DOMAIN_SURFACE, + QXL_GEM_DOMAIN_CPU, args->size, &surf, &qobj, &handle); if (r) diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index 48e096285b4c..a08da0bd9098 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, /* At least align on page size */ if (alignment < PAGE_SIZE) alignment = PAGE_SIZE; - r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); + r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo); if (r) { if (r != -ERESTARTSYS) DRM_ERROR( diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index 60ab7151b84d..ffff54e5fb31 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ 
-186,7 +186,7 @@ qxl_image_init_helper(struct qxl_device *qdev, } } } - qxl_bo_kunmap(chunk_bo); + qxl_bo_vunmap_locked(chunk_bo); image_bo = dimage->bo; ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index ddf6588a2a38..d312322cacd1 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -87,6 +87,7 @@ int qxl_irq_init(struct qxl_device *qdev) init_waitqueue_head(&qdev->display_event); init_waitqueue_head(&qdev->cursor_event); init_waitqueue_head(&qdev->io_cmd_event); + init_waitqueue_head(&qdev->release_event); INIT_WORK(&qdev->client_monitors_config_work, qxl_client_monitors_config_work_func); atomic_set(&qdev->irq_received, 0); diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 4a60a52ab62e..4dc5ad13f12c 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -286,11 +286,35 @@ vram_mapping_free: void qxl_device_fini(struct qxl_device *qdev) { - qxl_bo_unref(&qdev->current_release_bo[0]); - qxl_bo_unref(&qdev->current_release_bo[1]); + int cur_idx; + + /* check if qxl_device_init() was successful (gc_work is initialized last) */ + if (!qdev->gc_work.func) + return; + + for (cur_idx = 0; cur_idx < 3; cur_idx++) { + if (!qdev->current_release_bo[cur_idx]) + continue; + qxl_bo_unpin(qdev->current_release_bo[cur_idx]); + qxl_bo_unref(&qdev->current_release_bo[cur_idx]); + qdev->current_release_bo_offset[cur_idx] = 0; + qdev->current_release_bo[cur_idx] = NULL; + } + + /* + * Ask host to release resources (+fill release ring), + * then wait for the release actually happening. + */ + qxl_io_notify_oom(qdev); + wait_event_timeout(qdev->release_event, + atomic_read(&qdev->release_count) == 0, + HZ); + flush_work(&qdev->gc_work); + qxl_surf_evict(qdev); + qxl_vram_evict(qdev); + qxl_gem_fini(qdev); qxl_bo_fini(qdev); - flush_work(&qdev->gc_work); qxl_ring_free(qdev->command_ring); qxl_ring_free(qdev->cursor_ring); qxl_ring_free(qdev->release_ring); diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index ceebc5881f68..6e26d70f2f07 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -29,6 +29,9 @@ #include "qxl_drv.h" #include "qxl_object.h" +static int __qxl_bo_pin(struct qxl_bo *bo); +static void __qxl_bo_unpin(struct qxl_bo *bo); + static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct qxl_bo *bo; @@ -103,8 +106,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = { .print_info = drm_gem_ttm_print_info, }; -int qxl_bo_create(struct qxl_device *qdev, - unsigned long size, bool kernel, bool pinned, u32 domain, +int qxl_bo_create(struct qxl_device *qdev, unsigned long size, + bool kernel, bool pinned, u32 domain, u32 priority, struct qxl_surface *surf, struct qxl_bo **bo_ptr) { @@ -137,9 +140,10 @@ int qxl_bo_create(struct qxl_device *qdev, qxl_ttm_placement_from_domain(bo, domain); + bo->tbo.priority = priority; r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, 0, &ctx, size, - NULL, NULL, &qxl_ttm_bo_destroy); + &bo->placement, 0, &ctx, NULL, NULL, + &qxl_ttm_bo_destroy); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(qdev->ddev.dev, @@ -154,10 +158,12 @@ int qxl_bo_create(struct qxl_device *qdev, return 0; } -int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map) +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map) { int r; + dma_resv_assert_held(bo->tbo.base.resv); + if (bo->kptr) { 
bo->map_count++; goto out; @@ -178,6 +184,25 @@ out: return 0; } +int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map) +{ + int r; + + r = qxl_bo_reserve(bo); + if (r) + return r; + + r = __qxl_bo_pin(bo); + if (r) { + qxl_bo_unreserve(bo); + return r; + } + + r = qxl_bo_vmap_locked(bo, map); + qxl_bo_unreserve(bo); + return r; +} + void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset) { @@ -202,7 +227,7 @@ fallback: return rptr; } - ret = qxl_bo_kmap(bo, &bo_map); + ret = qxl_bo_vmap_locked(bo, &bo_map); if (ret) return NULL; rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */ @@ -211,8 +236,10 @@ fallback: return rptr; } -void qxl_bo_kunmap(struct qxl_bo *bo) +void qxl_bo_vunmap_locked(struct qxl_bo *bo) { + dma_resv_assert_held(bo->tbo.base.resv); + if (bo->kptr == NULL) return; bo->map_count--; @@ -222,6 +249,20 @@ void qxl_bo_kunmap(struct qxl_bo *bo) ttm_bo_vunmap(&bo->tbo, &bo->map); } +int qxl_bo_vunmap(struct qxl_bo *bo) +{ + int r; + + r = qxl_bo_reserve(bo); + if (r) + return r; + + qxl_bo_vunmap_locked(bo); + __qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + return 0; +} + void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *pmap) { @@ -232,7 +273,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, io_mapping_unmap_atomic(pmap); return; fallback: - qxl_bo_kunmap(bo); + qxl_bo_vunmap_locked(bo); } void qxl_bo_unref(struct qxl_bo **bo) diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index e60a8f88e226..ee9c29de4d3d 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -61,10 +61,13 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) extern int qxl_bo_create(struct qxl_device *qdev, unsigned long size, bool kernel, bool pinned, u32 domain, + u32 priority, struct qxl_surface *surf, struct qxl_bo **bo_ptr); -extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map); -extern void qxl_bo_kunmap(struct qxl_bo *bo); +int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map); +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map); +int qxl_bo_vunmap(struct qxl_bo *bo); +void qxl_bo_vunmap_locked(struct qxl_bo *bo); void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 4aa949799446..0628d1cc91fe 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) struct qxl_bo *bo = gem_to_qxl_bo(obj); int ret; - ret = qxl_bo_kmap(bo, map); + ret = qxl_bo_vmap(bo, map); if (ret < 0) return ret; @@ -71,7 +71,7 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj, { struct qxl_bo *bo = gem_to_qxl_bo(obj); - qxl_bo_kunmap(bo); + qxl_bo_vunmap(bo); } int qxl_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 0fcfc952d5e9..f5845c96d414 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -58,56 +58,16 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr, signed long timeout) { struct qxl_device *qdev; - struct qxl_release *release; - int count = 0, sc = 0; - bool have_drawable_releases; unsigned long cur, end = 
jiffies + timeout; qdev = container_of(fence->lock, struct qxl_device, release_lock); - release = container_of(fence, struct qxl_release, base); - have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE; - -retry: - sc++; - - if (dma_fence_is_signaled(fence)) - goto signaled; - - qxl_io_notify_oom(qdev); - - for (count = 0; count < 11; count++) { - if (!qxl_queue_garbage_collect(qdev, true)) - break; - - if (dma_fence_is_signaled(fence)) - goto signaled; - } - - if (dma_fence_is_signaled(fence)) - goto signaled; - - if (have_drawable_releases || sc < 4) { - if (sc > 2) - /* back off */ - usleep_range(500, 1000); - if (time_after(jiffies, end)) - return 0; - - if (have_drawable_releases && sc > 300) { - DMA_FENCE_WARN(fence, "failed to wait on release %llu " - "after spincount %d\n", - fence->context & ~0xf0000000, sc); - goto signaled; - } - goto retry; - } - /* - * yeah, original sync_obj_wait gave up after 3 spins when - * have_drawable_releases is not set. - */ + if (!wait_event_timeout(qdev->release_event, + (dma_fence_is_signaled(fence) || + (qxl_io_notify_oom(qdev), 0)), + timeout)) + return 0; -signaled: cur = jiffies; if (time_after(cur, end)) return 0; @@ -196,14 +156,16 @@ qxl_release_free(struct qxl_device *qdev, qxl_release_free_list(release); kfree(release); } + atomic_dec(&qdev->release_count); } static int qxl_release_bo_alloc(struct qxl_device *qdev, - struct qxl_bo **bo) + struct qxl_bo **bo, + u32 priority) { /* pin releases bo's they are too messy to evict */ return qxl_bo_create(qdev, PAGE_SIZE, false, true, - QXL_GEM_DOMAIN_VRAM, NULL, bo); + QXL_GEM_DOMAIN_VRAM, priority, NULL, bo); } int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) @@ -321,18 +283,23 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, int type, struct qxl_release **release, struct qxl_bo **rbo) { - struct qxl_bo *bo; + struct qxl_bo *bo, *free_bo = NULL; int idr_ret; int ret = 0; union qxl_release_info *info; int cur_idx; + u32 priority; - if (type == QXL_RELEASE_DRAWABLE) + if (type == QXL_RELEASE_DRAWABLE) { cur_idx = 0; - else if (type == QXL_RELEASE_SURFACE_CMD) + priority = 0; + } else if (type == QXL_RELEASE_SURFACE_CMD) { cur_idx = 1; - else if (type == QXL_RELEASE_CURSOR_CMD) + priority = 1; + } else if (type == QXL_RELEASE_CURSOR_CMD) { cur_idx = 2; + priority = 1; + } else { DRM_ERROR("got illegal type: %d\n", type); return -EINVAL; @@ -344,17 +311,22 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, *rbo = NULL; return idr_ret; } + atomic_inc(&qdev->release_count); mutex_lock(&qdev->release_mutex); if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { - qxl_bo_unref(&qdev->current_release_bo[cur_idx]); + free_bo = qdev->current_release_bo[cur_idx]; qdev->current_release_bo_offset[cur_idx] = 0; qdev->current_release_bo[cur_idx] = NULL; } if (!qdev->current_release_bo[cur_idx]) { - ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]); + ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority); if (ret) { mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } qxl_release_free(qdev, *release); return ret; } @@ -370,6 +342,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, *rbo = bo; mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } ret = qxl_release_list_add(*release, bo); qxl_bo_unref(&bo); @@ -429,7 
+405,7 @@ void qxl_release_unmap(struct qxl_device *qdev, void qxl_release_fence_buffer_objects(struct qxl_release *release) { struct ttm_buffer_object *bo; - struct ttm_bo_device *bdev; + struct ttm_device *bdev; struct ttm_validate_buffer *entry; struct qxl_device *qdev; @@ -450,7 +426,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) release->id | 0xf0000000, release->base.seqno); trace_dma_fence_emit(&release->base); - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); list_for_each_entry(entry, &release->bos, head) { bo = entry->bo; @@ -459,7 +435,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); ww_acquire_fini(&release->ticket); } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 33c09dc94f8b..b7f77eb685cb 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -36,7 +36,7 @@ #include "qxl_drv.h" #include "qxl_object.h" -static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) +static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev) { struct qxl_mman *mman; struct qxl_device *qdev; @@ -69,7 +69,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, *placement = qbo->placement; } -int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, +int qxl_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct qxl_device *qdev = qxl_get_qdev(bdev); @@ -98,8 +98,7 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, /* * TTM backend functions. */ -static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev, - struct ttm_tt *ttm) +static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { ttm_tt_destroy_common(bdev, ttm); ttm_tt_fini(ttm); @@ -170,7 +169,7 @@ static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo) qxl_bo_move_notify(bo, false, NULL); } -static struct ttm_bo_driver qxl_bo_driver = { +static struct ttm_device_funcs qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, .ttm_tt_destroy = &qxl_ttm_backend_destroy, .eviction_valuable = ttm_bo_eviction_valuable, @@ -193,10 +192,10 @@ int qxl_ttm_init(struct qxl_device *qdev) int num_io_pages; /* != rom->num_io_pages, we include surface0 */ /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL, - qdev->ddev.anon_inode->i_mapping, - qdev->ddev.vma_offset_manager, - false, false); + r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL, + qdev->ddev.anon_inode->i_mapping, + qdev->ddev.vma_offset_manager, + false, false); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; @@ -227,7 +226,7 @@ void qxl_ttm_fini(struct qxl_device *qdev) { ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM); ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV); - ttm_bo_device_release(&qdev->mman.bdev); + ttm_device_fini(&qdev->mman.bdev); DRM_INFO("qxl: ttm finalized\n"); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 1979ed3d6547..c94e429e75f9 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1157,7 +1157,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && 
!crtc->primary->fb) { @@ -1267,8 +1266,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } @@ -1478,7 +1477,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; - struct drm_format_name_buf format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1579,8 +1577,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, #endif break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->format->format, &format_name)); + DRM_ERROR("Unsupported screen format %p4cc\n", + &target_fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 6fd178eafb63..42281fce552e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -451,7 +451,7 @@ struct radeon_surface_reg { * TTM. */ struct radeon_mman { - struct ttm_bo_device bdev; + struct ttm_device bdev; bool initialized; }; @@ -2805,7 +2805,7 @@ extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev, uint32_t flags); extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm); extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm); -bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +bool radeon_ttm_tt_is_bound(struct ttm_device *bdev, struct ttm_tt *ttm); extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); @@ -2815,7 +2815,7 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size extern void radeon_program_register_sequence(struct radeon_device *rdev, const u32 *registers, const u32 array_size); -struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev); +struct radeon_device *radeon_get_rdev(struct ttm_device *bdev); /* KMS */ diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 500796dc5d74..33121655d50b 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -205,7 +205,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) continue; status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (!ACPI_FAILURE(status)) { + if (ACPI_SUCCESS(status)) { found = true; break; } @@ -218,7 +218,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) continue; status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (!ACPI_FAILURE(status)) { + if (ACPI_SUCCESS(status)) { found = true; break; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 9b81786782de..804f7a427be7 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -159,7 +159,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct radeon_bo *bo; enum ttm_bo_type type; unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; - size_t acc_size; int r; size = ALIGN(size, PAGE_SIZE); @@ -173,9 +172,6 @@ int radeon_bo_create(struct radeon_device *rdev, } *bo_ptr = NULL; - acc_size = 
ttm_bo_dma_acc_size(&rdev->mman.bdev, size, - sizeof(struct radeon_bo)); - bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -230,8 +226,8 @@ int radeon_bo_create(struct radeon_device *rdev, /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, page_align, !kernel, acc_size, - sg, resv, &radeon_ttm_bo_destroy); + &bo->placement, page_align, !kernel, sg, resv, + &radeon_ttm_bo_destroy); up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { return r; @@ -372,7 +368,7 @@ void radeon_bo_unpin(struct radeon_bo *bo) int radeon_bo_evict_vram(struct radeon_device *rdev) { - struct ttm_bo_device *bdev = &rdev->mman.bdev; + struct ttm_device *bdev = &rdev->mman.bdev; struct ttm_resource_manager *man; /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index eb7a1c68459f..476ce9c24b9f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -53,13 +53,11 @@ static void radeon_ttm_debugfs_init(struct radeon_device *rdev); -static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, - struct ttm_tt *ttm, +static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); -static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev, - struct ttm_tt *ttm); +static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm); -struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) +struct radeon_device *radeon_get_rdev(struct ttm_device *bdev) { struct radeon_mman *mman; struct radeon_device *rdev; @@ -278,7 +276,7 @@ out: return 0; } -static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) +static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct radeon_device *rdev = radeon_get_rdev(bdev); size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; @@ -345,7 +343,7 @@ struct radeon_ttm_tt { }; /* prepare the sg table with the user pages */ -static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm) { struct radeon_device *rdev = radeon_get_rdev(bdev); struct radeon_ttm_tt *gtt = (void *)ttm; @@ -406,7 +404,7 @@ release_pages: return r; } -static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm) { struct radeon_device *rdev = radeon_get_rdev(bdev); struct radeon_ttm_tt *gtt = (void *)ttm; @@ -442,7 +440,7 @@ static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm) return (gtt->bound); } -static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev, +static int radeon_ttm_backend_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem) { @@ -478,7 +476,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev, return 0; } -static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; struct radeon_device *rdev = radeon_get_rdev(bdev); @@ -493,7 +491,7 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt gtt->bound = false; } -static void radeon_ttm_backend_destroy(struct 
ttm_bo_device *bdev, struct ttm_tt *ttm) +static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; @@ -552,7 +550,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev, return container_of(ttm, struct radeon_ttm_tt, ttm); } -static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, +static int radeon_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { @@ -578,7 +576,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx); } -static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct radeon_device *rdev = radeon_get_rdev(bdev); struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); @@ -611,7 +609,7 @@ int radeon_ttm_tt_set_userptr(struct radeon_device *rdev, return 0; } -bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, +bool radeon_ttm_tt_is_bound(struct ttm_device *bdev, struct ttm_tt *ttm) { #if IS_ENABLED(CONFIG_AGP) @@ -622,7 +620,7 @@ bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, return radeon_ttm_backend_is_bound(ttm); } -static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, +static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem) { @@ -640,7 +638,7 @@ static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, return radeon_ttm_backend_bind(bdev, ttm, bo_mem); } -static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev, +static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { #if IS_ENABLED(CONFIG_AGP) @@ -654,7 +652,7 @@ static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev, radeon_ttm_backend_unbind(bdev, ttm); } -static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev, +static void radeon_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { #if IS_ENABLED(CONFIG_AGP) @@ -698,7 +696,7 @@ radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo) radeon_bo_move_notify(bo, false, NULL); } -static struct ttm_bo_driver radeon_bo_driver = { +static struct ttm_device_funcs radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, @@ -716,7 +714,7 @@ int radeon_ttm_init(struct radeon_device *rdev) int r; /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev, + r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev, rdev->ddev->anon_inode->i_mapping, rdev->ddev->vma_offset_manager, rdev->need_swiotlb, @@ -783,7 +781,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) } ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM); ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT); - ttm_bo_device_release(&rdev->mman.bdev); + ttm_device_fini(&rdev->mman.bdev); radeon_gart_fini(rdev); rdev->mman.initialized = false; DRM_INFO("radeon: ttm finalized\n"); @@ -832,7 +830,7 @@ unlock_mclk: return ret; } -static struct vm_operations_struct radeon_ttm_vm_ops = { +static const struct vm_operations_struct radeon_ttm_vm_ops = { .fault = radeon_ttm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 02e5f11f38eb..862197be1e01 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ 
b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -607,21 +607,26 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane, } static int rcar_du_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct rcar_du_plane_state *rstate = to_rcar_plane_state(state); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct rcar_du_plane_state *rstate = to_rcar_plane_state(new_plane_state); - return __rcar_du_plane_atomic_check(plane, state, &rstate->format); + return __rcar_du_plane_atomic_check(plane, new_plane_state, + &rstate->format); } static void rcar_du_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct rcar_du_plane *rplane = to_rcar_plane(plane); struct rcar_du_plane_state *old_rstate; struct rcar_du_plane_state *new_rstate; - if (!plane->state->visible) + if (!new_state->visible) return; rcar_du_plane_setup(rplane); @@ -635,7 +640,7 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane, * bit. We thus need to restart the group if the source changes. */ old_rstate = to_rcar_plane_state(old_state); - new_rstate = to_rcar_plane_state(plane->state); + new_rstate = to_rcar_plane_state(new_state); if ((old_rstate->source == RCAR_DU_PLANE_MEMORY) != (new_rstate->source == RCAR_DU_PLANE_MEMORY)) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 53221d8473c1..23e41c83c875 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -7,12 +7,13 @@ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> @@ -236,7 +237,7 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, if (ret < 0) return ret; - return drm_gem_fb_prepare_fb(plane, state); + return drm_gem_plane_helper_prepare_fb(plane, state); } void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, @@ -265,20 +266,25 @@ static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane, } static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(new_plane_state); - return __rcar_du_plane_atomic_check(plane, state, &rstate->format); + return __rcar_du_plane_atomic_check(plane, new_plane_state, + &rstate->format); } static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane); struct 
rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc); - if (plane->state->visible) + if (new_state->visible) rcar_du_vsp_plane_setup(rplane); else if (old_state->crtc) vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 8d15cabdcb02..81c70d7a0471 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -23,6 +23,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_flip_work.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -778,11 +779,13 @@ static bool rockchip_mod_supported(struct drm_plane *plane, } static int vop_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_crtc *crtc = state->crtc; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; - struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *fb = new_plane_state->fb; struct vop_win *vop_win = to_vop_win(plane); const struct vop_win_data *win = vop_win->data; int ret; @@ -794,17 +797,18 @@ static int vop_plane_atomic_check(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + crtc); if (WARN_ON(!crtc_state)) return -EINVAL; - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, min_scale, max_scale, true, true); if (ret) return ret; - if (!state->visible) + if (!new_plane_state->visible) return 0; ret = vop_convert_format(fb->format->format); @@ -815,12 +819,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. 
*/ - if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) { + if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) { DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; } - if (fb->format->is_yuv && state->rotation & DRM_MODE_REFLECT_Y) { + if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) { DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); return -EINVAL; } @@ -837,14 +841,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane, if (ret < 0) return ret; - if (state->src.x1 || state->src.y1) { - DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]); + if (new_plane_state->src.x1 || new_plane_state->src.y1) { + DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", + new_plane_state->src.x1, + new_plane_state->src.y1, fb->offsets[0]); return -EINVAL; } - if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) { + if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) { DRM_ERROR("No rotation support in AFBC, rotation=%d\n", - state->rotation); + new_plane_state->rotation); return -EINVAL; } } @@ -853,8 +859,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane, } static void vop_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct vop_win *vop_win = to_vop_win(plane); struct vop *vop = to_vop(old_state->crtc); @@ -869,20 +877,23 @@ static void vop_plane_atomic_disable(struct drm_plane *plane, } static void vop_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; - struct drm_crtc *crtc = state->crtc; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc *crtc = new_state->crtc; struct vop_win *vop_win = to_vop_win(plane); const struct vop_win_data *win = vop_win->data; const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data; - struct vop *vop = to_vop(state->crtc); - struct drm_framebuffer *fb = state->fb; + struct vop *vop = to_vop(new_state->crtc); + struct drm_framebuffer *fb = new_state->fb; unsigned int actual_w, actual_h; unsigned int dsp_stx, dsp_sty; uint32_t act_info, dsp_info, dsp_st; - struct drm_rect *src = &state->src; - struct drm_rect *dest = &state->dst; + struct drm_rect *src = &new_state->src; + struct drm_rect *dest = &new_state->dst; struct drm_gem_object *obj, *uv_obj; struct rockchip_gem_object *rk_obj, *rk_uv_obj; unsigned long offset; @@ -903,8 +914,8 @@ static void vop_plane_atomic_update(struct drm_plane *plane, if (WARN_ON(!vop->is_enabled)) return; - if (!state->visible) { - vop_plane_atomic_disable(plane, old_state); + if (!new_state->visible) { + vop_plane_atomic_disable(plane, state); return; } @@ -930,7 +941,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, * For y-mirroring we need to move address * to the beginning of the last line. 
*/ - if (state->rotation & DRM_MODE_REFLECT_Y) + if (new_state->rotation & DRM_MODE_REFLECT_Y) dma_addr += (actual_h - 1) * fb->pitches[0]; format = vop_convert_format(fb->format->format); @@ -952,9 +963,9 @@ static void vop_plane_atomic_update(struct drm_plane *plane, VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv); VOP_WIN_SET(vop, win, y_mir_en, - (state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0); + (new_state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0); VOP_WIN_SET(vop, win, x_mir_en, - (state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0); + (new_state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0); if (is_yuv) { int hsub = fb->format->hsub; @@ -1021,8 +1032,10 @@ static void vop_plane_atomic_update(struct drm_plane *plane, } static int vop_plane_atomic_async_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct vop_win *vop_win = to_vop_win(plane); const struct vop_win_data *win = vop_win->data; int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : @@ -1031,7 +1044,7 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane, DRM_PLANE_HELPER_NO_SCALING; struct drm_crtc_state *crtc_state; - if (plane != state->crtc->cursor) + if (plane != new_plane_state->crtc->cursor) return -EINVAL; if (!plane->state) @@ -1040,9 +1053,9 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane, if (!plane->state->fb) return -EINVAL; - if (state->state) - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + if (state) + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_plane_state->crtc); else /* Special case for asynchronous cursor updates. */ crtc_state = plane->crtc->state; @@ -1052,8 +1065,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane, } static void vop_plane_atomic_async_update(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct vop *vop = to_vop(plane->state->crtc); struct drm_framebuffer *old_fb = plane->state->fb; @@ -1068,7 +1083,7 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane, swap(plane->state->fb, new_state->fb); if (vop->is_enabled) { - vop_plane_atomic_update(plane, plane->state); + vop_plane_atomic_update(plane, state); spin_lock(&vop->reg_lock); vop_cfg_done(vop); spin_unlock(&vop->reg_lock); @@ -1096,7 +1111,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = { .atomic_disable = vop_plane_atomic_disable, .atomic_async_check = vop_plane_atomic_async_check, .atomic_async_update = vop_plane_atomic_async_update, - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, }; static const struct drm_plane_funcs vop_plane_funcs = { diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 4a2099cb582e..857d97cdc67c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -17,9 +17,20 @@ #define NUM_YUV2YUV_COEFFICIENTS 12 +/* AFBC supports a number of configurable modes. Relevant to us is block size + * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like + * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode + * could be enabled via the hreg_block_split register, but is not currently + * handled. 
The colourspace transform is implicitly always assumed by the + * decoder, so consumers must use this transform as well. + * + * Failure to match modifiers will cause errors displaying AFBC buffers + * produced by conformant AFBC producers, including Mesa. + */ #define ROCKCHIP_AFBC_MOD \ DRM_FORMAT_MOD_ARM_AFBC( \ AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \ + | AFBC_FORMAT_MOD_YTR \ ) enum vop_data_format { diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 654bc52d9ff3..bd5ba10822c2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -725,7 +725,7 @@ static int rockchip_lvds_probe(struct platform_device *pdev) static int rockchip_lvds_remove(struct platform_device *pdev) { - struct rockchip_lvds *lvds = dev_get_drvdata(&pdev->dev); + struct rockchip_lvds *lvds = platform_get_drvdata(pdev); component_del(&pdev->dev, &rockchip_lvds_component_ops); clk_unprepare(lvds->pclk); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index c1ac3e4003c6..92d965b629c6 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -489,7 +489,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, bool first; trace_drm_sched_job(sched_job, entity); - atomic_inc(&entity->rq->sched->score); + atomic_inc(entity->rq->sched->score); WRITE_ONCE(entity->last_user, current->group_leader); first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 92637b70c9bf..d82a7ebf6099 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -91,7 +91,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq, if (!list_empty(&entity->list)) return; spin_lock(&rq->lock); - atomic_inc(&rq->sched->score); + atomic_inc(rq->sched->score); list_add_tail(&entity->list, &rq->entities); spin_unlock(&rq->lock); } @@ -110,7 +110,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, if (list_empty(&entity->list)) return; spin_lock(&rq->lock); - atomic_dec(&rq->sched->score); + atomic_dec(rq->sched->score); list_del_init(&entity->list); if (rq->current_entity == entity) rq->current_entity = NULL; @@ -173,7 +173,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job) struct drm_gpu_scheduler *sched = s_fence->sched; atomic_dec(&sched->hw_rq_count); - atomic_dec(&sched->score); + atomic_dec(sched->score); trace_drm_sched_process_job(s_fence); @@ -527,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) EXPORT_SYMBOL(drm_sched_start); /** - * drm_sched_resubmit_jobs - helper to relunch job from pending ring list + * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list * * @sched: scheduler instance * @@ -561,8 +561,6 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) } else { s_job->s_fence->parent = fence; } - - } } EXPORT_SYMBOL(drm_sched_resubmit_jobs); @@ -734,7 +732,7 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, continue; } - num_score = atomic_read(&sched->score); + num_score = atomic_read(sched->score); if (num_score < min_score) { min_score = num_score; picked_sched = sched; @@ -844,16 +842,15 @@ static int drm_sched_main(void *param) * @hw_submission: number of hw submissions that can be in flight * @hang_limit: number of times to allow a job to hang before dropping 
it * @timeout: timeout value in jiffies for the scheduler + * @score: optional score atomic shared with other schedulers * @name: name used for debugging * * Return 0 on success, otherwise error code. */ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, - unsigned hw_submission, - unsigned hang_limit, - long timeout, - const char *name) + unsigned hw_submission, unsigned hang_limit, long timeout, + atomic_t *score, const char *name) { int i, ret; sched->ops = ops; @@ -861,6 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->name = name; sched->timeout = timeout; sched->hang_limit = hang_limit; + sched->score = score ? score : &sched->_score; for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) drm_sched_rq_init(sched, &sched->sched_rq[i]); @@ -870,7 +868,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); - atomic_set(&sched->score, 0); + atomic_set(&sched->_score, 0); atomic64_set(&sched->job_id_count, 0); /* Each scheduler will run on a seperate kernel thread */ diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index 7476301d7142..1d6051b4f6fe 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -181,12 +181,14 @@ static void sti_cursor_init(struct sti_cursor *cursor) } static int sti_cursor_atomic_check(struct drm_plane *drm_plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_cursor *cursor = to_sti_cursor(plane); - struct drm_crtc *crtc = state->crtc; - struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = new_plane_state->crtc; + struct drm_framebuffer *fb = new_plane_state->fb; struct drm_crtc_state *crtc_state; struct drm_display_mode *mode; int dst_x, dst_y, dst_w, dst_h; @@ -196,15 +198,17 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane, if (!crtc || !fb) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_crtc_state(state, crtc); mode = &crtc_state->mode; - dst_x = state->crtc_x; - dst_y = state->crtc_y; - dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x); - dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y); + dst_x = new_plane_state->crtc_x; + dst_y = new_plane_state->crtc_y; + dst_w = clamp_val(new_plane_state->crtc_w, 0, + mode->crtc_hdisplay - dst_x); + dst_h = clamp_val(new_plane_state->crtc_h, 0, + mode->crtc_vdisplay - dst_y); /* src_x are in 16.16 format */ - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + src_w = new_plane_state->src_w >> 16; + src_h = new_plane_state->src_h >> 16; if (src_w < STI_CURS_MIN_SIZE || src_h < STI_CURS_MIN_SIZE || @@ -252,13 +256,14 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane, } static void sti_cursor_atomic_update(struct drm_plane *drm_plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { - struct drm_plane_state *state = drm_plane->state; + struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_cursor *cursor = to_sti_cursor(plane); - struct drm_crtc *crtc = state->crtc; - struct drm_framebuffer *fb = state->fb; + struct 
drm_crtc *crtc = newstate->crtc; + struct drm_framebuffer *fb = newstate->fb; struct drm_display_mode *mode; int dst_x, dst_y; struct drm_gem_cma_object *cma_obj; @@ -269,8 +274,8 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane, return; mode = &crtc->mode; - dst_x = state->crtc_x; - dst_y = state->crtc_y; + dst_x = newstate->crtc_x; + dst_y = newstate->crtc_y; cma_obj = drm_fb_cma_get_gem_obj(fb, 0); @@ -306,8 +311,10 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane, } static void sti_cursor_atomic_disable(struct drm_plane *drm_plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { + struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); if (!oldstate->crtc) { diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 2f4a34f14d33..d1a35d97bc45 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -615,12 +615,14 @@ static int sti_gdp_get_dst(struct device *dev, int dst, int src) } static int sti_gdp_atomic_check(struct drm_plane *drm_plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_gdp *gdp = to_sti_gdp(plane); - struct drm_crtc *crtc = state->crtc; - struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = new_plane_state->crtc; + struct drm_framebuffer *fb = new_plane_state->fb; struct drm_crtc_state *crtc_state; struct sti_mixer *mixer; struct drm_display_mode *mode; @@ -633,17 +635,19 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, return 0; mixer = to_sti_mixer(crtc); - crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_crtc_state(state, crtc); mode = &crtc_state->mode; - dst_x = state->crtc_x; - dst_y = state->crtc_y; - dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x); - dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y); + dst_x = new_plane_state->crtc_x; + dst_y = new_plane_state->crtc_y; + dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x); + dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y); /* src_x are in 16.16 format */ - src_x = state->src_x >> 16; - src_y = state->src_y >> 16; - src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH); - src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT); + src_x = new_plane_state->src_x >> 16; + src_y = new_plane_state->src_y >> 16; + src_w = clamp_val(new_plane_state->src_w >> 16, 0, + GAM_GDP_SIZE_MAX_WIDTH); + src_h = clamp_val(new_plane_state->src_h >> 16, 0, + GAM_GDP_SIZE_MAX_HEIGHT); format = sti_gdp_fourcc2format(fb->format->format); if (format == -1) { @@ -695,13 +699,16 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, } static void sti_gdp_atomic_update(struct drm_plane *drm_plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { - struct drm_plane_state *state = drm_plane->state; + struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, + drm_plane); + struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_gdp *gdp = to_sti_gdp(plane); - struct drm_crtc *crtc = state->crtc; - struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = newstate->crtc; + struct 
drm_framebuffer *fb = newstate->fb; struct drm_display_mode *mode; int dst_x, dst_y, dst_w, dst_h; int src_x, src_y, src_w, src_h; @@ -718,15 +725,15 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, if (!crtc || !fb) return; - if ((oldstate->fb == state->fb) && - (oldstate->crtc_x == state->crtc_x) && - (oldstate->crtc_y == state->crtc_y) && - (oldstate->crtc_w == state->crtc_w) && - (oldstate->crtc_h == state->crtc_h) && - (oldstate->src_x == state->src_x) && - (oldstate->src_y == state->src_y) && - (oldstate->src_w == state->src_w) && - (oldstate->src_h == state->src_h)) { + if ((oldstate->fb == newstate->fb) && + (oldstate->crtc_x == newstate->crtc_x) && + (oldstate->crtc_y == newstate->crtc_y) && + (oldstate->crtc_w == newstate->crtc_w) && + (oldstate->crtc_h == newstate->crtc_h) && + (oldstate->src_x == newstate->src_x) && + (oldstate->src_y == newstate->src_y) && + (oldstate->src_w == newstate->src_w) && + (oldstate->src_h == newstate->src_h)) { /* No change since last update, do not post cmd */ DRM_DEBUG_DRIVER("No change, not posting cmd\n"); plane->status = STI_PLANE_UPDATED; @@ -744,15 +751,15 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, } mode = &crtc->mode; - dst_x = state->crtc_x; - dst_y = state->crtc_y; - dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x); - dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y); + dst_x = newstate->crtc_x; + dst_y = newstate->crtc_y; + dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x); + dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y); /* src_x are in 16.16 format */ - src_x = state->src_x >> 16; - src_y = state->src_y >> 16; - src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH); - src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT); + src_x = newstate->src_x >> 16; + src_y = newstate->src_y >> 16; + src_w = clamp_val(newstate->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH); + src_h = clamp_val(newstate->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT); list = sti_gdp_get_free_nodes(gdp); top_field = list->top_field; @@ -860,8 +867,10 @@ end: } static void sti_gdp_atomic_disable(struct drm_plane *drm_plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { + struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); if (!oldstate->crtc) { diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 62f824cd5f21..edbb99f53de1 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1017,12 +1017,14 @@ out: } static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); - struct drm_crtc *crtc = state->crtc; - struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = new_plane_state->crtc; + struct drm_framebuffer *fb = new_plane_state->fb; struct drm_crtc_state *crtc_state; struct drm_display_mode *mode; int dst_x, dst_y, dst_w, dst_h; @@ -1032,17 +1034,17 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, if (!crtc || !fb) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_crtc_state(state, crtc); mode = &crtc_state->mode; - dst_x = state->crtc_x; - dst_y = 
state->crtc_y; - dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x); - dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y); + dst_x = new_plane_state->crtc_x; + dst_y = new_plane_state->crtc_y; + dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x); + dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y); /* src_x are in 16.16 format */ - src_x = state->src_x >> 16; - src_y = state->src_y >> 16; - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + src_x = new_plane_state->src_x >> 16; + src_y = new_plane_state->src_y >> 16; + src_w = new_plane_state->src_w >> 16; + src_h = new_plane_state->src_h >> 16; if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode, src_w, src_h, @@ -1107,13 +1109,16 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, } static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { - struct drm_plane_state *state = drm_plane->state; + struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, + drm_plane); + struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); - struct drm_crtc *crtc = state->crtc; - struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = newstate->crtc; + struct drm_framebuffer *fb = newstate->fb; struct drm_display_mode *mode; int dst_x, dst_y, dst_w, dst_h; int src_x, src_y, src_w, src_h; @@ -1125,15 +1130,15 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, if (!crtc || !fb) return; - if ((oldstate->fb == state->fb) && - (oldstate->crtc_x == state->crtc_x) && - (oldstate->crtc_y == state->crtc_y) && - (oldstate->crtc_w == state->crtc_w) && - (oldstate->crtc_h == state->crtc_h) && - (oldstate->src_x == state->src_x) && - (oldstate->src_y == state->src_y) && - (oldstate->src_w == state->src_w) && - (oldstate->src_h == state->src_h)) { + if ((oldstate->fb == newstate->fb) && + (oldstate->crtc_x == newstate->crtc_x) && + (oldstate->crtc_y == newstate->crtc_y) && + (oldstate->crtc_w == newstate->crtc_w) && + (oldstate->crtc_h == newstate->crtc_h) && + (oldstate->src_x == newstate->src_x) && + (oldstate->src_y == newstate->src_y) && + (oldstate->src_w == newstate->src_w) && + (oldstate->src_h == newstate->src_h)) { /* No change since last update, do not post cmd */ DRM_DEBUG_DRIVER("No change, not posting cmd\n"); plane->status = STI_PLANE_UPDATED; @@ -1141,15 +1146,15 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, } mode = &crtc->mode; - dst_x = state->crtc_x; - dst_y = state->crtc_y; - dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x); - dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y); + dst_x = newstate->crtc_x; + dst_y = newstate->crtc_y; + dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x); + dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y); /* src_x are in 16.16 format */ - src_x = state->src_x >> 16; - src_y = state->src_y >> 16; - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + src_x = newstate->src_x >> 16; + src_y = newstate->src_y >> 16; + src_w = newstate->src_w >> 16; + src_h = newstate->src_h >> 16; cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); if (cmd_offset == -1) { @@ -1238,8 +1243,10 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, } static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane, - struct 
drm_plane_state *oldstate) + struct drm_atomic_state *state) { + struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, + drm_plane); struct sti_plane *plane = to_sti_plane(drm_plane); if (!oldstate->crtc) { diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 7812094f93d6..b5117fccf355 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -26,8 +26,8 @@ #include <drm/drm_device.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); struct drm_device *ddev = crtc->dev; + struct drm_connector_list_iter iter; + struct drm_connector *connector = NULL; + struct drm_encoder *encoder = NULL; + struct drm_bridge *bridge = NULL; struct drm_display_mode *mode = &crtc->state->adjusted_mode; struct videomode vm; u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h; u32 total_width, total_height; + u32 bus_flags = 0; u32 val; int ret; + /* get encoder from crtc */ + drm_for_each_encoder(encoder, ddev) + if (encoder->crtc == crtc) + break; + + if (encoder) { + /* get bridge from encoder */ + list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) + if (bridge->encoder == encoder) + break; + + /* Get the connector from encoder */ + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) + if (connector->encoder == encoder) + break; + drm_connector_list_iter_end(&iter); + } + + if (bridge && bridge->timings) + bus_flags = bridge->timings->input_bus_flags; + else if (connector) + bus_flags = connector->display_info.bus_flags; + if (!pm_runtime_active(ddev->dev)) { ret = pm_runtime_get_sync(ddev->dev); if (ret) { @@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH) val |= GCR_VSPOL; - if (vm.flags & DISPLAY_FLAGS_DE_LOW) + if (bus_flags & DRM_BUS_FLAG_DE_LOW) val |= GCR_DEPOL; - if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) + if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) val |= GCR_PCPOL; reg_update_bits(ldev->regs, LTDC_GCR, @@ -720,9 +749,11 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = { */ static int ltdc_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct drm_framebuffer *fb = state->fb; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_plane_state->fb; u32 src_w, src_h; DRM_DEBUG_DRIVER("\n"); @@ -731,11 +762,11 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane, return 0; /* convert src_ from 16:16 format */ - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + src_w = new_plane_state->src_w >> 16; + src_h = new_plane_state->src_h >> 16; /* Reject scaling */ - if (src_w != state->crtc_w || src_h != state->crtc_h) { + if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) { DRM_ERROR("Scaling is not supported"); return -EINVAL; } @@ -744,36 +775,37 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane, } static void ltdc_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { struct ltdc_device *ldev = 
plane_to_ltdc(plane); - struct drm_plane_state *state = plane->state; - struct drm_framebuffer *fb = state->fb; + struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = newstate->fb; u32 lofs = plane->index * LAY_OFS; - u32 x0 = state->crtc_x; - u32 x1 = state->crtc_x + state->crtc_w - 1; - u32 y0 = state->crtc_y; - u32 y1 = state->crtc_y + state->crtc_h - 1; + u32 x0 = newstate->crtc_x; + u32 x1 = newstate->crtc_x + newstate->crtc_w - 1; + u32 y0 = newstate->crtc_y; + u32 y1 = newstate->crtc_y + newstate->crtc_h - 1; u32 src_x, src_y, src_w, src_h; u32 val, pitch_in_bytes, line_length, paddr, ahbp, avbp, bpcr; enum ltdc_pix_fmt pf; - if (!state->crtc || !fb) { + if (!newstate->crtc || !fb) { DRM_DEBUG_DRIVER("fb or crtc NULL"); return; } /* convert src_ from 16:16 format */ - src_x = state->src_x >> 16; - src_y = state->src_y >> 16; - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + src_x = newstate->src_x >> 16; + src_y = newstate->src_y >> 16; + src_w = newstate->src_w >> 16; + src_h = newstate->src_h >> 16; DRM_DEBUG_DRIVER("plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n", plane->base.id, fb->base.id, src_w, src_h, src_x, src_y, - state->crtc_w, state->crtc_h, - state->crtc_x, state->crtc_y); + newstate->crtc_w, newstate->crtc_h, + newstate->crtc_x, newstate->crtc_y); bpcr = reg_read(ldev->regs, LTDC_BPCR); ahbp = (bpcr & BPCR_AHBP) >> 16; @@ -832,7 +864,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane, reg_update_bits(ldev->regs, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, val); /* Sets the FB address */ - paddr = (u32)drm_fb_cma_get_gem_addr(fb, state, 0); + paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 0); DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr); reg_write(ldev->regs, LTDC_L1CFBAR + lofs, paddr); @@ -858,8 +890,10 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane, } static void ltdc_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *oldstate) + struct drm_atomic_state *state) { + struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, + plane); struct ltdc_device *ldev = plane_to_ltdc(plane); u32 lofs = plane->index * LAY_OFS; @@ -911,7 +945,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = { }; static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = ltdc_plane_atomic_check, .atomic_update = ltdc_plane_atomic_update, .atomic_disable = ltdc_plane_atomic_disable, diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 522e51a404cc..bf8cfefa0365 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -510,7 +510,6 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine, struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; - struct drm_format_name_buf format_name; if (!sun4i_backend_plane_is_supported(plane_state, &layer_state->uses_frontend)) @@ -527,9 +526,8 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine, } } - DRM_DEBUG_DRIVER("Plane FB format is %s\n", - drm_get_format_name(fb->format->format, - &format_name)); + DRM_DEBUG_DRIVER("Plane FB format is %p4cc\n", + &fb->format->format); if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE)) num_alpha_planes++; diff --git 
a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c index acfbfd4463a1..11771bdd6e7c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_layer.c +++ b/drivers/gpu/drm/sun4i/sun4i_layer.c @@ -6,8 +6,9 @@ * Maxime Ripard <maxime.ripard@free-electrons.com> */ +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> #include "sun4i_backend.h" @@ -63,8 +64,10 @@ static void sun4i_backend_layer_destroy_state(struct drm_plane *plane, } static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(old_state); struct sun4i_layer *layer = plane_to_sun4i_layer(plane); struct sun4i_backend *backend = layer->backend; @@ -81,9 +84,11 @@ static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane, } static void sun4i_backend_layer_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(plane->state); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(new_state); struct sun4i_layer *layer = plane_to_sun4i_layer(plane); struct sun4i_backend *backend = layer->backend; struct sun4i_frontend *frontend = backend->frontend; @@ -122,7 +127,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane, } static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_disable = sun4i_backend_layer_atomic_disable, .atomic_update = sun4i_backend_layer_atomic_update, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 6b9af4c08cd6..9f06dec0fc61 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -672,6 +672,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, SUN4I_TCON1_BASIC5_V_SYNC(vsync) | SUN4I_TCON1_BASIC5_H_SYNC(hsync)); + /* Setup the polarity of multiple signals */ + if (tcon->quirks->polarity_in_ch0) { + val = 0; + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; + + regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val); + } else { + /* according to vendor driver, this bit must be always set */ + val = SUN4I_TCON1_IO_POL_UNKNOWN; + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE; + + regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val); + } + /* Map output pins to channel 1 */ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, SUN4I_TCON_GCTL_IOMAP_MASK, @@ -1500,6 +1524,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = { static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = { .has_channel_1 = true, + .polarity_in_ch0 = true, .set_mux = sun8i_r40_tcon_tv_set_mux, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index c5ac1b02482c..e624f6977eb8 100644 --- 
a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -154,6 +154,11 @@ #define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff) #define SUN4I_TCON1_IO_POL_REG 0xf0 +/* there is no documentation about this bit */ +#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26) +#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25) +#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24) + #define SUN4I_TCON1_IO_TRI_REG 0xf4 #define SUN4I_TCON_ECC_FIFO_REG 0xf8 @@ -236,6 +241,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ bool supports_lvds; /* Does the TCON support an LVDS output? */ + bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */ /* callback to handle tcon muxing options */ diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 92add2cef2e7..bbdfd5e26ec8 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder, { struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder); - if (hdmi->quirks->set_rate) - clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000); + clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000); } static const struct drm_encoder_helper_funcs @@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data, { /* * Controller support maximum of 594 MHz, which correlates to - * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than - * 340 MHz scrambling has to be enabled. Because scrambling is - * not yet implemented, just limit to 340 MHz for now. + * 4K@60Hz 4:4:4 or RGB. 
*/ - if (mode->clock > 340000) + if (mode->clock > 594000) return MODE_CLOCK_HIGH; return MODE_OK; @@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev) static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = { .mode_valid = sun8i_dw_hdmi_mode_valid_a83t, - .set_rate = true, }; static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index d983746fa194..d4b55af0592f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks { enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *info, const struct drm_display_mode *mode); - unsigned int set_rate : 1; unsigned int use_drm_infoframe : 1; }; diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 35c2133724e2..9994edf67509 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = { static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = { /* pixelclk bpp8 bpp10 bpp12 */ - { 25175000, { 0x0000, 0x0000, 0x0000 }, }, { 27000000, { 0x0012, 0x0000, 0x0000 }, }, - { 59400000, { 0x0008, 0x0008, 0x0008 }, }, - { 72000000, { 0x0008, 0x0008, 0x001b }, }, - { 74250000, { 0x0013, 0x0013, 0x0013 }, }, - { 90000000, { 0x0008, 0x001a, 0x001b }, }, - { 118800000, { 0x001b, 0x001a, 0x001b }, }, - { 144000000, { 0x001b, 0x001a, 0x0034 }, }, - { 180000000, { 0x001b, 0x0033, 0x0034 }, }, - { 216000000, { 0x0036, 0x0033, 0x0034 }, }, - { 237600000, { 0x0036, 0x0033, 0x001b }, }, - { 288000000, { 0x0036, 0x001b, 0x001b }, }, - { 297000000, { 0x0019, 0x001b, 0x0019 }, }, - { 330000000, { 0x0036, 0x001b, 0x001b }, }, - { 594000000, { 0x003f, 0x001b, 0x001b }, }, + { 74250000, { 0x0013, 0x001a, 0x001b }, }, + { 148500000, { 0x0019, 0x0033, 0x0034 }, }, + { 297000000, { 0x0019, 0x001b, 0x001b }, }, + { 594000000, { 0x0010, 0x001b, 0x001b }, }, { ~0UL, { 0x0000, 0x0000, 0x0000 }, } }; static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = { /*pixelclk symbol term vlev*/ - { 74250000, 0x8009, 0x0004, 0x0232}, - { 148500000, 0x8029, 0x0004, 0x0273}, - { 594000000, 0x8039, 0x0004, 0x014a}, + { 27000000, 0x8009, 0x0007, 0x02b0 }, + { 74250000, 0x8009, 0x0006, 0x022d }, + { 148500000, 0x8029, 0x0006, 0x0270 }, + { 297000000, 0x8039, 0x0005, 0x01ab }, + { 594000000, 0x8029, 0x0000, 0x008a }, { ~0UL, 0x0000, 0x0000, 0x0000} }; diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c index 816ad4ce8996..0db164a774a1 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c @@ -14,8 +14,8 @@ #include <drm/drm_crtc.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -72,6 +72,27 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel, } } +static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel, + int overlay, struct drm_plane *plane) +{ + u32 mask, val, ch_base; + + ch_base = sun8i_channel_base(mixer, channel); + + mask = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK | + SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK; + + val = 
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(plane->state->alpha >> 8); + + val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ? + SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL : + SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED; + + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay), + mask, val); +} + static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel, int overlay, struct drm_plane *plane, unsigned int zpos) @@ -236,17 +257,20 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel, } static int sun8i_ui_layer_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane); - struct drm_crtc *crtc = state->crtc; + struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; int min_scale, max_scale; if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -258,14 +282,17 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane, max_scale = SUN8I_UI_SCALER_SCALE_MAX; } - return drm_atomic_helper_check_plane_state(state, crtc_state, + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, min_scale, max_scale, true, true); } static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane); unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; @@ -275,14 +302,18 @@ static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane, } static void sun8i_ui_layer_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane); - unsigned int zpos = plane->state->normalized_zpos; + unsigned int zpos = new_state->normalized_zpos; unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; - if (!plane->state->visible) { + if (!new_state->visible) { sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false, 0, old_zpos); return; @@ -290,6 +321,8 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane, sun8i_ui_layer_update_coord(mixer, layer->channel, layer->overlay, plane, zpos); + sun8i_ui_layer_update_alpha(mixer, layer->channel, + layer->overlay, plane); sun8i_ui_layer_update_formats(mixer, layer->channel, layer->overlay, plane); sun8i_ui_layer_update_buffer(mixer, layer->channel, @@ -299,7 +332,7 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = sun8i_ui_layer_atomic_check, .atomic_disable = sun8i_ui_layer_atomic_disable, .atomic_update = sun8i_ui_layer_atomic_update, @@ -367,6 +400,12 @@ struct sun8i_ui_layer 
*sun8i_ui_layer_init_one(struct drm_device *drm, plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num; + ret = drm_plane_create_alpha_property(&layer->plane); + if (ret) { + dev_err(drm->dev, "Couldn't add alpha property\n"); + return ERR_PTR(ret); + } + ret = drm_plane_create_zpos_property(&layer->plane, channel, 0, plane_cnt - 1); if (ret) { diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h index f4ab1cf6cded..e3e32ee1178d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h +++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h @@ -40,6 +40,11 @@ #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET 8 #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24) +#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(x) ((x) << 24) + +#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL ((0) << 1) +#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_LAYER ((1) << 1) +#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED ((2) << 1) struct sun8i_mixer; diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 8cc294a9969d..46420780db59 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -7,8 +7,8 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -66,6 +66,36 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel, } } +static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel, + int overlay, struct drm_plane *plane) +{ + u32 mask, val, ch_base; + + ch_base = sun8i_channel_base(mixer, channel); + + if (mixer->cfg->is_de3) { + mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK | + SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK; + val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA + (plane->state->alpha >> 8); + + val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ? + SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_PIXEL : + SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_COMBINED; + + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, + overlay), + mask, val); + } else if (mixer->cfg->vi_num == 1) { + regmap_update_bits(mixer->engine.regs, + SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG, + SUN8I_MIXER_FCC_GLOBAL_ALPHA_MASK, + SUN8I_MIXER_FCC_GLOBAL_ALPHA + (plane->state->alpha >> 8)); + } +} + static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, int overlay, struct drm_plane *plane, unsigned int zpos) @@ -268,14 +298,6 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel, SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay), SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val); - /* It seems that YUV formats use global alpha setting. 
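The new sun8i_ui_layer_update_alpha() above packs the plane's 16-bit alpha into bits 31:24 of the layer attribute register and selects a blending mode in bits 2:1. A stand-alone sketch of that packing follows, built from the masks added in the sun8i_ui_layer.h hunk above; the helper itself is hypothetical and not part of the patch. Userspace drives the value through the standard per-plane "alpha" property (0..0xffff) that the init code now creates.

#include <linux/types.h>
#include <drm/drm_blend.h>
#include "sun8i_ui_layer.h"	/* ATTR_ALPHA/ALPHA_MODE masks from the hunk above */

/* Hypothetical helper mirroring the register packing used in the patch. */
static u32 example_pack_ui_layer_alpha(u16 plane_alpha)
{
	/* the 8 most-significant bits of the DRM alpha land in bits 31:24 */
	u32 val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(plane_alpha >> 8);

	/* fully opaque planes keep per-pixel alpha, others combine both */
	if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL;
	else
		val |= SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED;

	return val;
}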
*/ - if (mixer->cfg->is_de3) - regmap_update_bits(mixer->engine.regs, - SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, - overlay), - SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK, - SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(0xff)); - return 0; } @@ -339,17 +361,20 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel, } static int sun8i_vi_layer_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane); - struct drm_crtc *crtc = state->crtc; + struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *crtc_state; int min_scale, max_scale; if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -361,14 +386,17 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane, max_scale = SUN8I_VI_SCALER_SCALE_MAX; } - return drm_atomic_helper_check_plane_state(state, crtc_state, + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, min_scale, max_scale, true, true); } static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane); unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; @@ -378,14 +406,18 @@ static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane, } static void sun8i_vi_layer_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane); - unsigned int zpos = plane->state->normalized_zpos; + unsigned int zpos = new_state->normalized_zpos; unsigned int old_zpos = old_state->normalized_zpos; struct sun8i_mixer *mixer = layer->mixer; - if (!plane->state->visible) { + if (!new_state->visible) { sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false, 0, old_zpos); return; @@ -393,6 +425,8 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane, sun8i_vi_layer_update_coord(mixer, layer->channel, layer->overlay, plane, zpos); + sun8i_vi_layer_update_alpha(mixer, layer->channel, + layer->overlay, plane); sun8i_vi_layer_update_formats(mixer, layer->channel, layer->overlay, plane); sun8i_vi_layer_update_buffer(mixer, layer->channel, @@ -402,7 +436,7 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = sun8i_vi_layer_atomic_check, .atomic_disable = sun8i_vi_layer_atomic_disable, .atomic_update = sun8i_vi_layer_atomic_update, @@ -534,6 +568,14 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num; + if (mixer->cfg->vi_num == 1 || mixer->cfg->is_de3) { + ret = drm_plane_create_alpha_property(&layer->plane); + if (ret) { + dev_err(drm->dev, "Couldn't add alpha 
property\n"); + return ERR_PTR(ret); + } + } + ret = drm_plane_create_zpos_property(&layer->plane, index, 0, plane_cnt - 1); if (ret) { diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h index eaa6076f5dbc..48c399e1c86d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h @@ -29,14 +29,25 @@ #define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \ ((base) + 0xfc) +#define SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG \ + (0xAA000 + 0x90) + +#define SUN8I_MIXER_FCC_GLOBAL_ALPHA(x) ((x) << 24) +#define SUN8I_MIXER_FCC_GLOBAL_ALPHA_MASK GENMASK(31, 24) + #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0) /* RGB mode should be set for RGB formats and cleared for YCbCr */ #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE BIT(15) #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET 8 #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8) +#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1) #define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24) #define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24) +#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_PIXEL ((0) << 1) +#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_LAYER ((1) << 1) +#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_COMBINED ((2) << 1) + #define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16) #define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0) diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 0ae3a025efe9..da6afe7f0c7d 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -604,23 +604,25 @@ static const u64 tegra124_modifiers[] = { }; static int tegra_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct tegra_plane_state *plane_state = to_tegra_plane_state(state); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state); unsigned int supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; - unsigned int rotation = state->rotation; + unsigned int rotation = new_plane_state->rotation; struct tegra_bo_tiling *tiling = &plane_state->tiling; struct tegra_plane *tegra = to_tegra_plane(plane); - struct tegra_dc *dc = to_tegra_dc(state->crtc); + struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc); int err; /* no need for further checks if the plane is being disabled */ - if (!state->crtc) + if (!new_plane_state->crtc) return 0; - err = tegra_plane_format(state->fb->format->format, + err = tegra_plane_format(new_plane_state->fb->format->format, &plane_state->format, &plane_state->swap); if (err < 0) @@ -638,7 +640,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane, return err; } - err = tegra_fb_get_tiling(state->fb, tiling); + err = tegra_fb_get_tiling(new_plane_state->fb, tiling); if (err < 0) return err; @@ -654,7 +656,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane, * property in order to achieve the same result. The legacy BO flag * duplicates the DRM rotation property when both are set. */ - if (tegra_fb_is_bottom_up(state->fb)) + if (tegra_fb_is_bottom_up(new_plane_state->fb)) rotation |= DRM_MODE_REFLECT_Y; rotation = drm_rotation_simplify(rotation, supported_rotation); @@ -674,14 +676,14 @@ static int tegra_plane_atomic_check(struct drm_plane *plane, * error out if the user tries to display a framebuffer with such a * configuration. 
*/ - if (state->fb->format->num_planes > 2) { - if (state->fb->pitches[2] != state->fb->pitches[1]) { + if (new_plane_state->fb->format->num_planes > 2) { + if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) { DRM_ERROR("unsupported UV-plane configuration\n"); return -EINVAL; } } - err = tegra_plane_state_add(tegra, state); + err = tegra_plane_state_add(tegra, new_plane_state); if (err < 0) return err; @@ -689,8 +691,10 @@ static int tegra_plane_atomic_check(struct drm_plane *plane, } static void tegra_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct tegra_plane *p = to_tegra_plane(plane); u32 value; @@ -704,42 +708,44 @@ static void tegra_plane_atomic_disable(struct drm_plane *plane, } static void tegra_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct tegra_plane_state *state = to_tegra_plane_state(plane->state); - struct drm_framebuffer *fb = plane->state->fb; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state); + struct drm_framebuffer *fb = new_state->fb; struct tegra_plane *p = to_tegra_plane(plane); struct tegra_dc_window window; unsigned int i; /* rien ne va plus */ - if (!plane->state->crtc || !plane->state->fb) + if (!new_state->crtc || !new_state->fb) return; - if (!plane->state->visible) - return tegra_plane_atomic_disable(plane, old_state); + if (!new_state->visible) + return tegra_plane_atomic_disable(plane, state); memset(&window, 0, sizeof(window)); - window.src.x = plane->state->src.x1 >> 16; - window.src.y = plane->state->src.y1 >> 16; - window.src.w = drm_rect_width(&plane->state->src) >> 16; - window.src.h = drm_rect_height(&plane->state->src) >> 16; - window.dst.x = plane->state->dst.x1; - window.dst.y = plane->state->dst.y1; - window.dst.w = drm_rect_width(&plane->state->dst); - window.dst.h = drm_rect_height(&plane->state->dst); + window.src.x = new_state->src.x1 >> 16; + window.src.y = new_state->src.y1 >> 16; + window.src.w = drm_rect_width(&new_state->src) >> 16; + window.src.h = drm_rect_height(&new_state->src) >> 16; + window.dst.x = new_state->dst.x1; + window.dst.y = new_state->dst.y1; + window.dst.w = drm_rect_width(&new_state->dst); + window.dst.h = drm_rect_height(&new_state->dst); window.bits_per_pixel = fb->format->cpp[0] * 8; - window.reflect_x = state->reflect_x; - window.reflect_y = state->reflect_y; + window.reflect_x = tegra_plane_state->reflect_x; + window.reflect_y = tegra_plane_state->reflect_y; /* copy from state */ - window.zpos = plane->state->normalized_zpos; - window.tiling = state->tiling; - window.format = state->format; - window.swap = state->swap; + window.zpos = new_state->normalized_zpos; + window.tiling = tegra_plane_state->tiling; + window.format = tegra_plane_state->format; + window.swap = tegra_plane_state->swap; for (i = 0; i < fb->format->num_planes; i++) { - window.base[i] = state->iova[i] + fb->offsets[i]; + window.base[i] = tegra_plane_state->iova[i] + fb->offsets[i]; /* * Tegra uses a shared stride for UV planes. 
Framebuffers are @@ -831,29 +837,31 @@ static const u32 tegra_cursor_plane_formats[] = { }; static int tegra_cursor_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct tegra_plane *tegra = to_tegra_plane(plane); int err; /* no need for further checks if the plane is being disabled */ - if (!state->crtc) + if (!new_plane_state->crtc) return 0; /* scaling not supported for cursor */ - if ((state->src_w >> 16 != state->crtc_w) || - (state->src_h >> 16 != state->crtc_h)) + if ((new_plane_state->src_w >> 16 != new_plane_state->crtc_w) || + (new_plane_state->src_h >> 16 != new_plane_state->crtc_h)) return -EINVAL; /* only square cursors supported */ - if (state->src_w != state->src_h) + if (new_plane_state->src_w != new_plane_state->src_h) return -EINVAL; - if (state->crtc_w != 32 && state->crtc_w != 64 && - state->crtc_w != 128 && state->crtc_w != 256) + if (new_plane_state->crtc_w != 32 && new_plane_state->crtc_w != 64 && + new_plane_state->crtc_w != 128 && new_plane_state->crtc_w != 256) return -EINVAL; - err = tegra_plane_state_add(tegra, state); + err = tegra_plane_state_add(tegra, new_plane_state); if (err < 0) return err; @@ -861,17 +869,19 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane, } static void tegra_cursor_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct tegra_plane_state *state = to_tegra_plane_state(plane->state); - struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state); + struct tegra_dc *dc = to_tegra_dc(new_state->crtc); u32 value = CURSOR_CLIP_DISPLAY; /* rien ne va plus */ - if (!plane->state->crtc || !plane->state->fb) + if (!new_state->crtc || !new_state->fb) return; - switch (plane->state->crtc_w) { + switch (new_state->crtc_w) { case 32: value |= CURSOR_SIZE_32x32; break; @@ -890,15 +900,15 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, default: WARN(1, "cursor size %ux%u not supported\n", - plane->state->crtc_w, plane->state->crtc_h); + new_state->crtc_w, new_state->crtc_h); return; } - value |= (state->iova[0] >> 10) & 0x3fffff; + value |= (tegra_plane_state->iova[0] >> 10) & 0x3fffff; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - value = (state->iova[0] >> 32) & 0x3; + value = (tegra_plane_state->iova[0] >> 32) & 0x3; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI); #endif @@ -917,14 +927,16 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL); /* position the cursor */ - value = (plane->state->crtc_y & 0x3fff) << 16 | - (plane->state->crtc_x & 0x3fff); + value = (new_state->crtc_y & 0x3fff) << 16 | + (new_state->crtc_x & 0x3fff); tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); } static void tegra_cursor_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct tegra_dc *dc; u32 value; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index e9ce7d6992d2..90709c38c993 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -65,11 
+65,14 @@ static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state) struct tegra_drm *tegra = drm->dev_private; if (tegra->hub) { + bool fence_cookie = dma_fence_begin_signalling(); + drm_atomic_helper_commit_modeset_disables(drm, old_state); tegra_display_hub_atomic_commit(drm, old_state); drm_atomic_helper_commit_planes(drm, old_state, 0); drm_atomic_helper_commit_modeset_enables(drm, old_state); drm_atomic_helper_commit_hw_done(old_state); + dma_fence_end_signalling(fence_cookie); drm_atomic_helper_wait_for_vblanks(drm, old_state); drm_atomic_helper_cleanup_planes(drm, old_state); } else { diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 5ce771cba133..8e6d329d062b 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -336,25 +336,27 @@ static void tegra_dc_remove_shared_plane(struct tegra_dc *dc, } static int tegra_shared_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct tegra_plane_state *plane_state = to_tegra_plane_state(state); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state); struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane); struct tegra_bo_tiling *tiling = &plane_state->tiling; - struct tegra_dc *dc = to_tegra_dc(state->crtc); + struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc); int err; /* no need for further checks if the plane is being disabled */ - if (!state->crtc || !state->fb) + if (!new_plane_state->crtc || !new_plane_state->fb) return 0; - err = tegra_plane_format(state->fb->format->format, + err = tegra_plane_format(new_plane_state->fb->format->format, &plane_state->format, &plane_state->swap); if (err < 0) return err; - err = tegra_fb_get_tiling(state->fb, tiling); + err = tegra_fb_get_tiling(new_plane_state->fb, tiling); if (err < 0) return err; @@ -369,8 +371,8 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane, * error out if the user tries to display a framebuffer with such a * configuration. 
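The tegra commit tail above (and the tidss one further down) gains dma-fence signalling annotations, which let lockdep flag operations in the commit path that could deadlock against fence signalling. A generic sketch of an annotated commit tail using the standard atomic helpers; it is not taken verbatim from either driver.

#include <linux/dma-fence.h>
#include <drm/drm_atomic_helper.h>

static void example_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	/* everything between begin/end must not block on fence signalling */
	bool fence_cookie = dma_fence_begin_signalling();

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);
	dma_fence_end_signalling(fence_cookie);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}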
*/ - if (state->fb->format->num_planes > 2) { - if (state->fb->pitches[2] != state->fb->pitches[1]) { + if (new_plane_state->fb->format->num_planes > 2) { + if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) { DRM_ERROR("unsupported UV-plane configuration\n"); return -EINVAL; } @@ -378,7 +380,7 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane, /* XXX scaling is not yet supported, add a check here */ - err = tegra_plane_state_add(&tegra->base, state); + err = tegra_plane_state_add(&tegra->base, new_plane_state); if (err < 0) return err; @@ -386,8 +388,10 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane, } static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct tegra_plane *p = to_tegra_plane(plane); struct tegra_dc *dc; u32 value; @@ -423,23 +427,25 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, } static void tegra_shared_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct tegra_plane_state *state = to_tegra_plane_state(plane->state); - struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); - unsigned int zpos = plane->state->normalized_zpos; - struct drm_framebuffer *fb = plane->state->fb; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state); + struct tegra_dc *dc = to_tegra_dc(new_state->crtc); + unsigned int zpos = new_state->normalized_zpos; + struct drm_framebuffer *fb = new_state->fb; struct tegra_plane *p = to_tegra_plane(plane); dma_addr_t base; u32 value; int err; /* rien ne va plus */ - if (!plane->state->crtc || !plane->state->fb) + if (!new_state->crtc || !new_state->fb) return; - if (!plane->state->visible) { - tegra_shared_plane_atomic_disable(plane, old_state); + if (!new_state->visible) { + tegra_shared_plane_atomic_disable(plane, state); return; } @@ -477,22 +483,22 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, /* disable compression */ tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL); - base = state->iova[0] + fb->offsets[0]; + base = tegra_plane_state->iova[0] + fb->offsets[0]; - tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH); + tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH); tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS); - value = V_POSITION(plane->state->crtc_y) | - H_POSITION(plane->state->crtc_x); + value = V_POSITION(new_state->crtc_y) | + H_POSITION(new_state->crtc_x); tegra_plane_writel(p, value, DC_WIN_POSITION); - value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w); + value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w); tegra_plane_writel(p, value, DC_WIN_SIZE); value = WIN_ENABLE | COLOR_EXPAND; tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS); - value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w); + value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w); tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE); tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI); @@ -504,15 +510,15 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL; tegra_plane_writel(p, value, DC_WIN_SET_PARAMS); - value = 
OFFSET_X(plane->state->src_y >> 16) | - OFFSET_Y(plane->state->src_x >> 16); + value = OFFSET_X(new_state->src_y >> 16) | + OFFSET_Y(new_state->src_x >> 16); tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT); if (dc->soc->supports_block_linear) { - unsigned long height = state->tiling.value; + unsigned long height = tegra_plane_state->tiling.value; /* XXX */ - switch (state->tiling.mode) { + switch (tegra_plane_state->tiling.mode) { case TEGRA_BO_TILING_MODE_PITCH: value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) | DC_WINBUF_SURFACE_KIND_PITCH; diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 539d14935728..19e8847a164b 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -8,7 +8,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> #include "dc.h" @@ -198,7 +198,7 @@ int tegra_plane_prepare_fb(struct drm_plane *plane, if (!state->fb) return 0; - drm_gem_fb_prepare_fb(plane, state); + drm_gem_plane_helper_prepare_fb(plane, state); return tegra_dc_pin(dc, to_tegra_plane_state(state)); } diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index 09485c7f0d6f..95f8e0f78e32 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -4,6 +4,8 @@ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> */ +#include <linux/dma-fence.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> @@ -26,6 +28,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *ddev = old_state->dev; struct tidss_device *tidss = to_tidss(ddev); + bool fence_cookie = dma_fence_begin_signalling(); dev_dbg(ddev->dev, "%s\n", __func__); @@ -36,6 +39,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_modeset_enables(ddev, old_state); drm_atomic_helper_commit_hw_done(old_state); + dma_fence_end_signalling(fence_cookie); drm_atomic_helper_wait_for_flip_done(ddev, old_state); drm_atomic_helper_cleanup_planes(ddev, old_state); diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index 35067ae674ea..1acd15aa4193 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -10,7 +10,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_fb_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include "tidss_crtc.h" #include "tidss_dispc.h" @@ -20,8 +20,10 @@ /* drm_plane_helper_funcs */ static int tidss_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_device *ddev = plane->dev; struct tidss_device *tidss = to_tidss(ddev); struct tidss_plane *tplane = to_tidss_plane(plane); @@ -33,20 +35,22 @@ static int tidss_plane_atomic_check(struct drm_plane *plane, dev_dbg(ddev->dev, "%s\n", __func__); - if (!state->crtc) { + if (!new_plane_state->crtc) { /* * The visible field is not reset by the DRM core but only * updated by drm_plane_helper_check_state(), set it manually. 
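Several drivers in this section (sun4i, tegra, tidss, and the tiny drivers below) also switch .prepare_fb from drm_gem_fb_prepare_fb() to drm_gem_plane_helper_prepare_fb(), which now lives in the GEM atomic helpers. A minimal sketch of the resulting vtable wiring; the remaining hooks are omitted for brevity.

#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Only .prepare_fb is shown; a real driver also wires its atomic hooks here. */
static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.prepare_fb = drm_gem_plane_helper_prepare_fb,
};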
*/ - state->visible = false; + new_plane_state->visible = false; return 0; } - crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + 0, INT_MAX, true, true); if (ret < 0) return ret; @@ -63,35 +67,37 @@ static int tidss_plane_atomic_check(struct drm_plane *plane, * check for odd height). */ - finfo = drm_format_info(state->fb->format->format); + finfo = drm_format_info(new_plane_state->fb->format->format); - if ((state->src_x >> 16) % finfo->hsub != 0) { + if ((new_plane_state->src_x >> 16) % finfo->hsub != 0) { dev_dbg(ddev->dev, "%s: x-position %u not divisible subpixel size %u\n", - __func__, (state->src_x >> 16), finfo->hsub); + __func__, (new_plane_state->src_x >> 16), finfo->hsub); return -EINVAL; } - if ((state->src_y >> 16) % finfo->vsub != 0) { + if ((new_plane_state->src_y >> 16) % finfo->vsub != 0) { dev_dbg(ddev->dev, "%s: y-position %u not divisible subpixel size %u\n", - __func__, (state->src_y >> 16), finfo->vsub); + __func__, (new_plane_state->src_y >> 16), finfo->vsub); return -EINVAL; } - if ((state->src_w >> 16) % finfo->hsub != 0) { + if ((new_plane_state->src_w >> 16) % finfo->hsub != 0) { dev_dbg(ddev->dev, "%s: src width %u not divisible by subpixel size %u\n", - __func__, (state->src_w >> 16), finfo->hsub); + __func__, (new_plane_state->src_w >> 16), + finfo->hsub); return -EINVAL; } - if (!state->visible) + if (!new_plane_state->visible) return 0; - hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport; + hw_videoport = to_tidss_crtc(new_plane_state->crtc)->hw_videoport; - ret = dispc_plane_check(tidss->dispc, hw_plane, state, hw_videoport); + ret = dispc_plane_check(tidss->dispc, hw_plane, new_plane_state, + hw_videoport); if (ret) return ret; @@ -99,26 +105,27 @@ static int tidss_plane_atomic_check(struct drm_plane *plane, } static void tidss_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct drm_device *ddev = plane->dev; struct tidss_device *tidss = to_tidss(ddev); struct tidss_plane *tplane = to_tidss_plane(plane); - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); u32 hw_videoport; int ret; dev_dbg(ddev->dev, "%s\n", __func__); - if (!state->visible) { + if (!new_state->visible) { dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false); return; } - hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport; + hw_videoport = to_tidss_crtc(new_state->crtc)->hw_videoport; ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id, - state, hw_videoport); + new_state, hw_videoport); if (ret) { dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n", @@ -131,7 +138,7 @@ static void tidss_plane_atomic_update(struct drm_plane *plane, } static void tidss_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct drm_device *ddev = plane->dev; struct tidss_device *tidss = to_tidss(ddev); @@ -151,7 +158,7 @@ static void drm_plane_destroy(struct drm_plane *plane) } static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = { - .prepare_fb = drm_gem_fb_prepare_fb, + .prepare_fb = drm_gem_plane_helper_prepare_fb, .atomic_check = tidss_plane_atomic_check, 
.atomic_update = tidss_plane_atomic_update, .atomic_disable = tidss_plane_atomic_disable, diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile index 662bf3a348c9..f5190477de72 100644 --- a/drivers/gpu/drm/tilcdc/Makefile +++ b/drivers/gpu/drm/tilcdc/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) +ifeq (, $(findstring -W,$(KCFLAGS))) ccflags-y += -Werror endif diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 30213708fc99..21e7b3d23c7d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -393,7 +393,7 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) return; } } - reg |= info->fdd < 12; + reg |= info->fdd << 12; tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg); if (info->invert_pxl_clk) @@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) drm_crtc_vblank_off(crtc); + spin_lock_irq(&crtc->dev->event_lock); + + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + + spin_unlock_irq(&crtc->dev->event_lock); + tilcdc_crtc_disable_irqs(dev); pm_runtime_put_sync(dev->dev); @@ -904,13 +913,12 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) tilcdc_clear_irqstatus(dev, stat); if (stat & LCDC_END_OF_FRAME0) { - unsigned long flags; bool skip_event = false; ktime_t now; now = ktime_get(); - spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + spin_lock(&tilcdc_crtc->irq_lock); tilcdc_crtc->last_vblank = now; @@ -920,21 +928,21 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) skip_event = true; } - spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); + spin_unlock(&tilcdc_crtc->irq_lock); drm_crtc_handle_vblank(crtc); if (!skip_event) { struct drm_pending_vblank_event *event; - spin_lock_irqsave(&dev->event_lock, flags); + spin_lock(&dev->event_lock); event = tilcdc_crtc->event; tilcdc_crtc->event = NULL; if (event) drm_crtc_send_vblank_event(crtc, event); - spin_unlock_irqrestore(&dev->event_lock, flags); + spin_unlock(&dev->event_lock); } if (tilcdc_crtc->frame_intact) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c index 2f681a713815..74a5c8832229 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c @@ -21,48 +21,51 @@ static const struct drm_plane_funcs tilcdc_plane_funcs = { }; static int tilcdc_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; - struct drm_plane_state *old_state = plane->state; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); unsigned int pitch; - if (!state->crtc) + if (!new_state->crtc) return 0; - if (WARN_ON(!state->fb)) + if (WARN_ON(!new_state->fb)) return -EINVAL; - if (state->crtc_x || state->crtc_y) { + if (new_state->crtc_x || new_state->crtc_y) { dev_err(plane->dev->dev, "%s: crtc position must be zero.", __func__); return -EINVAL; } - crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_state->crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; - if (crtc_state->mode.hdisplay != state->crtc_w || - crtc_state->mode.vdisplay != state->crtc_h) { + 
if (crtc_state->mode.hdisplay != new_state->crtc_w || + crtc_state->mode.vdisplay != new_state->crtc_h) { dev_err(plane->dev->dev, "%s: Size must match mode (%dx%d == %dx%d)", __func__, crtc_state->mode.hdisplay, crtc_state->mode.vdisplay, - state->crtc_w, state->crtc_h); + new_state->crtc_w, new_state->crtc_h); return -EINVAL; } pitch = crtc_state->mode.hdisplay * - state->fb->format->cpp[0]; - if (state->fb->pitches[0] != pitch) { + new_state->fb->format->cpp[0]; + if (new_state->fb->pitches[0] != pitch) { dev_err(plane->dev->dev, "Invalid pitch: fb and crtc widths must be the same"); return -EINVAL; } - if (old_state->fb && state->fb->format != old_state->fb->format) { + if (old_state->fb && new_state->fb->format != old_state->fb->format) { dev_dbg(plane->dev->dev, "%s(): pixel format change requires mode_change\n", __func__); @@ -73,20 +76,21 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane, } static void tilcdc_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *state = plane->state; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); - if (!state->crtc) + if (!new_state->crtc) return; - if (WARN_ON(!state->fb || !state->crtc->state)) + if (WARN_ON(!new_state->fb || !new_state->crtc->state)) return; - if (tilcdc_crtc_update_fb(state->crtc, - state->fb, - state->crtc->state->event) == 0) { - state->crtc->state->event = NULL; + if (tilcdc_crtc_update_fb(new_state->crtc, + new_state->fb, + new_state->crtc->state->event) == 0) { + new_state->crtc->state->event = NULL; } } diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 2b6414f0fa75..9bbaa1a69050 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -1,5 +1,15 @@ # SPDX-License-Identifier: GPL-2.0-only +config DRM_ARCPGU + tristate "ARC PGU" + depends on DRM && OF + select DRM_KMS_CMA_HELPER + select DRM_KMS_HELPER + help + Choose this option if you have an ARC PGU controller. + + If M is selected the module will be called arcpgu. + config DRM_CIRRUS_QEMU tristate "Cirrus driver for QEMU emulated device" depends on DRM && PCI && MMU diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 6ae4e9e5a35f..bef6780bdd6f 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o obj-$(CONFIG_DRM_GM12U320) += gm12u320.o obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c new file mode 100644 index 000000000000..f8531c50a072 --- /dev/null +++ b/drivers/gpu/drm/tiny/arcpgu.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARC PGU DRM driver. + * + * Copyright (C) 2016 Synopsys, Inc. 
(www.synopsys.com) + */ + +#include <linux/clk.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_debugfs.h> +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> + +#define ARCPGU_REG_CTRL 0x00 +#define ARCPGU_REG_STAT 0x04 +#define ARCPGU_REG_FMT 0x10 +#define ARCPGU_REG_HSYNC 0x14 +#define ARCPGU_REG_VSYNC 0x18 +#define ARCPGU_REG_ACTIVE 0x1c +#define ARCPGU_REG_BUF0_ADDR 0x40 +#define ARCPGU_REG_STRIDE 0x50 +#define ARCPGU_REG_START_SET 0x84 + +#define ARCPGU_REG_ID 0x3FC + +#define ARCPGU_CTRL_ENABLE_MASK 0x02 +#define ARCPGU_CTRL_VS_POL_MASK 0x1 +#define ARCPGU_CTRL_VS_POL_OFST 0x3 +#define ARCPGU_CTRL_HS_POL_MASK 0x1 +#define ARCPGU_CTRL_HS_POL_OFST 0x4 +#define ARCPGU_MODE_XRGB8888 BIT(2) +#define ARCPGU_STAT_BUSY_MASK 0x02 + +struct arcpgu_drm_private { + struct drm_device drm; + void __iomem *regs; + struct clk *clk; + struct drm_simple_display_pipe pipe; + struct drm_connector sim_conn; +}; + +#define dev_to_arcpgu(x) container_of(x, struct arcpgu_drm_private, drm) + +#define pipe_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, pipe) + +static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu, + unsigned int reg, u32 value) +{ + iowrite32(value, arcpgu->regs + reg); +} + +static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu, + unsigned int reg) +{ + return ioread32(arcpgu->regs + reg); +} + +#define XRES_DEF 640 +#define YRES_DEF 480 + +#define XRES_MAX 8192 +#define YRES_MAX 8192 + +static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + return count; +} + +static const struct drm_connector_helper_funcs +arcpgu_drm_connector_helper_funcs = { + .get_modes = arcpgu_drm_connector_get_modes, +}; + +static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int arcpgu_drm_sim_init(struct drm_device *drm, struct drm_connector *connector) +{ + drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); + return drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); +} + +#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1)) + +static const u32 arc_pgu_supported_formats[] = { + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +static void arc_pgu_set_pxl_fmt(struct arcpgu_drm_private *arcpgu) +{ + const struct drm_framebuffer *fb = arcpgu->pipe.plane.state->fb; + uint32_t pixel_format = fb->format->format; + u32 format = DRM_FORMAT_INVALID; + int i; + u32 reg_ctrl; + + for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) { + if (arc_pgu_supported_formats[i] == pixel_format) + format = arc_pgu_supported_formats[i]; + } + + if (WARN_ON(format == DRM_FORMAT_INVALID)) + return; + + reg_ctrl = 
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL); + if (format == DRM_FORMAT_RGB565) + reg_ctrl &= ~ARCPGU_MODE_XRGB8888; + else + reg_ctrl |= ARCPGU_MODE_XRGB8888; + arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl); +} + +static enum drm_mode_status arc_pgu_mode_valid(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode) +{ + struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe); + long rate, clk_rate = mode->clock * 1000; + long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */ + + rate = clk_round_rate(arcpgu->clk, clk_rate); + if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0)) + return MODE_OK; + + return MODE_NOCLOCK; +} + +static void arc_pgu_mode_set(struct arcpgu_drm_private *arcpgu) +{ + struct drm_display_mode *m = &arcpgu->pipe.crtc.state->adjusted_mode; + u32 val; + + arc_pgu_write(arcpgu, ARCPGU_REG_FMT, + ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal)); + + arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC, + ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay, + m->crtc_hsync_end - m->crtc_hdisplay)); + + arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC, + ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay, + m->crtc_vsync_end - m->crtc_vdisplay)); + + arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE, + ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start, + m->crtc_vblank_end - m->crtc_vblank_start)); + + val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL); + + if (m->flags & DRM_MODE_FLAG_PVSYNC) + val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST; + else + val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST); + + if (m->flags & DRM_MODE_FLAG_PHSYNC) + val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST; + else + val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST); + + arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val); + arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0); + arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1); + + arc_pgu_set_pxl_fmt(arcpgu); + + clk_set_rate(arcpgu->clk, m->crtc_clock * 1000); +} + +static void arc_pgu_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state) +{ + struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe); + + arc_pgu_mode_set(arcpgu); + + clk_prepare_enable(arcpgu->clk); + arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, + arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) | + ARCPGU_CTRL_ENABLE_MASK); +} + +static void arc_pgu_disable(struct drm_simple_display_pipe *pipe) +{ + struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe); + + clk_disable_unprepare(arcpgu->clk); + arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, + arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) & + ~ARCPGU_CTRL_ENABLE_MASK); +} + +static void arc_pgu_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *state) +{ + struct arcpgu_drm_private *arcpgu; + struct drm_gem_cma_object *gem; + + if (!pipe->plane.state->fb) + return; + + arcpgu = pipe_to_arcpgu_priv(pipe); + gem = drm_fb_cma_get_gem_obj(pipe->plane.state->fb, 0); + arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr); +} + +static const struct drm_simple_display_pipe_funcs arc_pgu_pipe_funcs = { + .update = arc_pgu_update, + .mode_valid = arc_pgu_mode_valid, + .enable = arc_pgu_enable, + .disable = arc_pgu_disable, +}; + +static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops); + +static int arcpgu_load(struct arcpgu_drm_private 
*arcpgu) +{ + struct platform_device *pdev = to_platform_device(arcpgu->drm.dev); + struct device_node *encoder_node = NULL, *endpoint_node = NULL; + struct drm_connector *connector = NULL; + struct drm_device *drm = &arcpgu->drm; + struct resource *res; + int ret; + + arcpgu->clk = devm_clk_get(drm->dev, "pxlclk"); + if (IS_ERR(arcpgu->clk)) + return PTR_ERR(arcpgu->clk); + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + drm->mode_config.max_width = 1920; + drm->mode_config.max_height = 1080; + drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + arcpgu->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(arcpgu->regs)) + return PTR_ERR(arcpgu->regs); + + dev_info(drm->dev, "arc_pgu ID: 0x%x\n", + arc_pgu_read(arcpgu, ARCPGU_REG_ID)); + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(drm->dev); + if (ret && ret != -ENODEV) + return ret; + + if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32))) + return -ENODEV; + + /* + * There is only one output port inside each device. It is linked with + * encoder endpoint. + */ + endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (endpoint_node) { + encoder_node = of_graph_get_remote_port_parent(endpoint_node); + of_node_put(endpoint_node); + } else { + connector = &arcpgu->sim_conn; + dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n"); + ret = arcpgu_drm_sim_init(drm, connector); + if (ret < 0) + return ret; + } + + ret = drm_simple_display_pipe_init(drm, &arcpgu->pipe, &arc_pgu_pipe_funcs, + arc_pgu_supported_formats, + ARRAY_SIZE(arc_pgu_supported_formats), + NULL, connector); + if (ret) + return ret; + + if (encoder_node) { + struct drm_bridge *bridge; + + /* Locate drm bridge from the hdmi encoder DT node */ + bridge = of_drm_find_bridge(encoder_node); + if (!bridge) + return -EPROBE_DEFER; + + ret = drm_simple_display_pipe_attach_bridge(&arcpgu->pipe, bridge); + if (ret) + return ret; + } + + drm_mode_config_reset(drm); + drm_kms_helper_poll_init(drm); + + platform_set_drvdata(pdev, drm); + return 0; +} + +static int arcpgu_unload(struct drm_device *drm) +{ + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static int arcpgu_show_pxlclock(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *drm = node->minor->dev; + struct arcpgu_drm_private *arcpgu = dev_to_arcpgu(drm); + unsigned long clkrate = clk_get_rate(arcpgu->clk); + unsigned long mode_clock = arcpgu->pipe.crtc.mode.crtc_clock * 1000; + + seq_printf(m, "hw : %lu\n", clkrate); + seq_printf(m, "mode: %lu\n", mode_clock); + return 0; +} + +static struct drm_info_list arcpgu_debugfs_list[] = { + { "clocks", arcpgu_show_pxlclock, 0 }, +}; + +static void arcpgu_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(arcpgu_debugfs_list, + ARRAY_SIZE(arcpgu_debugfs_list), + minor->debugfs_root, minor); +} +#endif + +static const struct drm_driver arcpgu_drm_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + .name = "arcpgu", + .desc = "ARC PGU Controller", + .date = "20160219", + .major = 1, + .minor = 0, + .patchlevel = 0, + .fops = &arcpgu_drm_ops, + DRM_GEM_CMA_DRIVER_OPS, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = arcpgu_debugfs_init, +#endif +}; + +static int 
arcpgu_probe(struct platform_device *pdev) +{ + struct arcpgu_drm_private *arcpgu; + int ret; + + arcpgu = devm_drm_dev_alloc(&pdev->dev, &arcpgu_drm_driver, + struct arcpgu_drm_private, drm); + if (IS_ERR(arcpgu)) + return PTR_ERR(arcpgu); + + ret = arcpgu_load(arcpgu); + if (ret) + return ret; + + ret = drm_dev_register(&arcpgu->drm, 0); + if (ret) + goto err_unload; + + drm_fbdev_generic_setup(&arcpgu->drm, 16); + + return 0; + +err_unload: + arcpgu_unload(&arcpgu->drm); + + return ret; +} + +static int arcpgu_remove(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + drm_dev_unregister(drm); + arcpgu_unload(drm); + + return 0; +} + +static const struct of_device_id arcpgu_of_table[] = { + {.compatible = "snps,arcpgu"}, + {} +}; + +MODULE_DEVICE_TABLE(of, arcpgu_of_table); + +static struct platform_driver arcpgu_platform_driver = { + .probe = arcpgu_probe, + .remove = arcpgu_remove, + .driver = { + .name = "arcpgu", + .of_match_table = arcpgu_of_table, + }, +}; + +module_platform_driver(arcpgu_platform_driver); + +MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>"); +MODULE_DESCRIPTION("ARC PGU DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index a043e602199e..ad922c3ec681 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -33,8 +33,9 @@ #include <drm/drm_file.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> @@ -311,22 +312,15 @@ static int cirrus_mode_set(struct cirrus_device *cirrus, return 0; } -static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, +static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_map *map, struct drm_rect *rect) { struct cirrus_device *cirrus = to_cirrus(fb->dev); - struct dma_buf_map map; - void *vmap; - int idx, ret; + void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ + int idx; - ret = -ENODEV; if (!drm_dev_enter(&cirrus->dev, &idx)) - goto out; - - ret = drm_gem_shmem_vmap(fb->obj[0], &map); - if (ret) - goto out_dev_exit; - vmap = map.vaddr; /* TODO: Use mapping abstraction properly */ + return -ENODEV; if (cirrus->cpp == fb->format->cpp[0]) drm_fb_memcpy_dstclip(cirrus->vram, @@ -345,16 +339,12 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, else WARN_ON_ONCE("cpp mismatch"); - drm_gem_shmem_vunmap(fb->obj[0], &map); - ret = 0; - -out_dev_exit: drm_dev_exit(idx); -out: - return ret; + + return 0; } -static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb) +static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map) { struct drm_rect fullscreen = { .x1 = 0, @@ -362,7 +352,7 @@ static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb) .y1 = 0, .y2 = fb->height, }; - return cirrus_fb_blit_rect(fb, &fullscreen); + return cirrus_fb_blit_rect(fb, map, &fullscreen); } static int cirrus_check_size(int width, int height, @@ -441,9 +431,10 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state) { struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); cirrus_mode_set(cirrus, 
&crtc_state->mode, plane_state->fb); - cirrus_fb_blit_fullscreen(plane_state->fb); + cirrus_fb_blit_fullscreen(plane_state->fb, &shadow_plane_state->map[0]); } static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe, @@ -451,16 +442,15 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe, { struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev); struct drm_plane_state *state = pipe->plane.state; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); struct drm_crtc *crtc = &pipe->crtc; struct drm_rect rect; - if (pipe->plane.state->fb && - cirrus->cpp != cirrus_cpp(pipe->plane.state->fb)) - cirrus_mode_set(cirrus, &crtc->mode, - pipe->plane.state->fb); + if (state->fb && cirrus->cpp != cirrus_cpp(state->fb)) + cirrus_mode_set(cirrus, &crtc->mode, state->fb); if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - cirrus_fb_blit_rect(pipe->plane.state->fb, &rect); + cirrus_fb_blit_rect(state->fb, &shadow_plane_state->map[0], &rect); } static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = { @@ -468,6 +458,7 @@ static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = { .check = cirrus_pipe_check, .enable = cirrus_pipe_enable, .update = cirrus_pipe_update, + DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, }; static const uint32_t cirrus_formats[] = { diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 33f65f4626e5..a233c86d428b 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -16,8 +16,9 @@ #include <drm/drm_file.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> @@ -83,6 +84,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); struct gm12u320_device { struct drm_device dev; + struct device *dmadev; struct drm_simple_display_pipe pipe; struct drm_connector conn; unsigned char *cmd_buf; @@ -94,6 +96,7 @@ struct gm12u320_device { struct drm_rect rect; int frame; int draw_status_timeout; + struct dma_buf_map src_map; } fb_update; }; @@ -250,7 +253,6 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) { int block, dst_offset, len, remain, ret, x1, x2, y1, y2; struct drm_framebuffer *fb; - struct dma_buf_map map; void *vaddr; u8 *src; @@ -264,20 +266,14 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) x2 = gm12u320->fb_update.rect.x2; y1 = gm12u320->fb_update.rect.y1; y2 = gm12u320->fb_update.rect.y2; - - ret = drm_gem_shmem_vmap(fb->obj[0], &map); - if (ret) { - GM12U320_ERR("failed to vmap fb: %d\n", ret); - goto put_fb; - } - vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */ + vaddr = gm12u320->fb_update.src_map.vaddr; /* TODO: Use mapping abstraction properly */ if (fb->obj[0]->import_attach) { ret = dma_buf_begin_cpu_access( fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE); if (ret) { GM12U320_ERR("dma_buf_begin_cpu_access err: %d\n", ret); - goto vunmap; + goto put_fb; } } @@ -321,8 +317,6 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) if (ret) GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret); } -vunmap: - drm_gem_shmem_vunmap(fb->obj[0], &map); put_fb: drm_framebuffer_put(fb); gm12u320->fb_update.fb = NULL; @@ -410,7 +404,7 
@@ err: GM12U320_ERR("Frame update error: %d\n", ret); } -static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, +static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, const struct dma_buf_map *map, struct drm_rect *dirty) { struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev); @@ -424,6 +418,7 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, drm_framebuffer_get(fb); gm12u320->fb_update.fb = fb; gm12u320->fb_update.rect = *dirty; + gm12u320->fb_update.src_map = *map; wakeup = true; } else { struct drm_rect *rect = &gm12u320->fb_update.rect; @@ -452,6 +447,7 @@ static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320) mutex_lock(&gm12u320->fb_update.lock); old_fb = gm12u320->fb_update.fb; gm12u320->fb_update.fb = NULL; + dma_buf_map_clear(&gm12u320->fb_update.src_map); mutex_unlock(&gm12u320->fb_update.lock); drm_framebuffer_put(old_fb); @@ -564,9 +560,10 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe, { struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT }; struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT; - gm12u320_fb_mark_dirty(plane_state->fb, &rect); + gm12u320_fb_mark_dirty(plane_state->fb, &shadow_plane_state->map[0], &rect); } static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe) @@ -580,16 +577,18 @@ static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state) { struct drm_plane_state *state = pipe->plane.state; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); struct drm_rect rect; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect); + gm12u320_fb_mark_dirty(state->fb, &shadow_plane_state->map[0], &rect); } static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = { .enable = gm12u320_pipe_enable, .disable = gm12u320_pipe_disable, .update = gm12u320_pipe_update, + DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, }; static const uint32_t gm12u320_pipe_formats[] = { @@ -601,6 +600,22 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DRM_FORMAT_MOD_INVALID }; +/* + * FIXME: Dma-buf sharing requires DMA support by the importing device. + * This function is a workaround to make USB devices work as well. + * See todo.rst for how to fix the issue in the dma-buf framework. 
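The FIXME above is the key constraint: dma-buf import needs a DMA-capable struct device, and a USB interface is not one. The gm12u320 hunks that follow implement the workaround by importing on behalf of the USB host controller instead. A minimal sketch of the same pattern for a hypothetical shmem-based USB display driver (the foo_* names are illustrative; usb_intf_get_dma_device() and drm_gem_prime_import_dev() are the helpers this patch relies on):

#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/usb.h>

struct foo_device {
        struct drm_device drm;
        struct device *dmadev;  /* DMA-capable device behind the USB interface */
};

#define to_foo(__dev) container_of(__dev, struct foo_device, drm)

static struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
                                                   struct dma_buf *dma_buf)
{
        struct foo_device *foo = to_foo(dev);

        if (!foo->dmadev)       /* probe found no DMA device: refuse to import */
                return ERR_PTR(-ENODEV);

        /* Import against the host controller, which can do DMA. */
        return drm_gem_prime_import_dev(dev, dma_buf, foo->dmadev);
}

static const struct drm_driver foo_drm_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        DRM_GEM_SHMEM_DRIVER_OPS,
        .gem_prime_import = foo_gem_prime_import,
};

The reference obtained with usb_intf_get_dma_device() at probe time has to be dropped with put_device() on every probe error path and again in disconnect, which is exactly what the error-handling rework in the following hunks adds.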
+ */ +static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct gm12u320_device *gm12u320 = to_gm12u320(dev); + + if (!gm12u320->dmadev) + return ERR_PTR(-ENODEV); + + return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev); +} + DEFINE_DRM_GEM_FOPS(gm12u320_fops); static const struct drm_driver gm12u320_drm_driver = { @@ -614,6 +629,7 @@ static const struct drm_driver gm12u320_drm_driver = { .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, + .gem_prime_import = gm12u320_gem_prime_import, }; static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = { @@ -640,15 +656,18 @@ static int gm12u320_usb_probe(struct usb_interface *interface, struct gm12u320_device, dev); if (IS_ERR(gm12u320)) return PTR_ERR(gm12u320); + dev = &gm12u320->dev; + + gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (!gm12u320->dmadev) + drm_warn(dev, "buffer sharing not supported"); /* not an error */ INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); - dev = &gm12u320->dev; - ret = drmm_mode_config_init(dev); if (ret) - return ret; + goto err_put_device; dev->mode_config.min_width = GM12U320_USER_WIDTH; dev->mode_config.max_width = GM12U320_USER_WIDTH; @@ -658,15 +677,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface, ret = gm12u320_usb_alloc(gm12u320); if (ret) - return ret; + goto err_put_device; ret = gm12u320_set_ecomode(gm12u320); if (ret) - return ret; + goto err_put_device; ret = gm12u320_conn_init(gm12u320); if (ret) - return ret; + goto err_put_device; ret = drm_simple_display_pipe_init(&gm12u320->dev, &gm12u320->pipe, @@ -676,24 +695,31 @@ static int gm12u320_usb_probe(struct usb_interface *interface, gm12u320_pipe_modifiers, &gm12u320->conn); if (ret) - return ret; + goto err_put_device; drm_mode_config_reset(dev); usb_set_intfdata(interface, dev); ret = drm_dev_register(dev, 0); if (ret) - return ret; + goto err_put_device; drm_fbdev_generic_setup(dev, 0); return 0; + +err_put_device: + put_device(gm12u320->dmadev); + return ret; } static void gm12u320_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); + struct gm12u320_device *gm12u320 = to_gm12u320(dev); + put_device(gm12u320->dmadev); + gm12u320->dmadev = NULL; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); } diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index c6525cd02bc2..3e2c2868a363 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -19,8 +19,8 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> @@ -184,7 +184,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = { .enable = yx240qv29_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode yx350hv15_mode = { diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 8e98962db5a2..6b87df19eec1 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -22,8 +22,8 @@ #include <drm/drm_fb_cma_helper.h> 
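Stepping back to the cirrus and gm12u320 conversions above: with DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS wired into the simple-pipe funcs, the helpers vmap the GEM object in prepare_fb and hand the mapping to the driver through the shadow plane state, so enable/update no longer vmap by hand. A minimal sketch of the consumer side, assuming a hypothetical foo driver whose foo_blit() pushes a damage rectangle to the device:

#include <drm/drm_damage_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>

/* Hypothetical hardware blit helper, not part of this patch. */
static void foo_blit(struct drm_framebuffer *fb, const void *vaddr,
                     struct drm_rect *clip);

static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
                            struct drm_plane_state *old_state)
{
        struct drm_plane_state *state = pipe->plane.state;
        struct drm_shadow_plane_state *shadow = to_drm_shadow_plane_state(state);
        struct drm_rect damage;

        /* map[0] was vmapped for us by the shadow-plane prepare_fb helper. */
        if (drm_atomic_helper_damage_merged(old_state, state, &damage))
                foo_blit(state->fb, shadow->map[0].vaddr, &damage);
}

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
        /* .enable and .disable omitted from this sketch */
        .update = foo_pipe_update,
        /* adds prepare_fb/cleanup_fb and the shadow plane state handling */
        DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};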
#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_rect.h> @@ -328,7 +328,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = { .enable = ili9225_pipe_enable, .disable = ili9225_pipe_disable, .update = ili9225_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode ili9225_mode = { diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index 6ce97f0698eb..a97f3f70e4a6 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -18,8 +18,8 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> @@ -140,7 +140,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = { .enable = yx240qv29_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode yx240qv29_mode = { diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index d7ce40eb166a..6422a7f67079 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -17,8 +17,8 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> @@ -153,7 +153,7 @@ static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = { .enable = waveshare_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode waveshare_mode = { diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index ff77f983f803..dc76fe53aa72 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -16,8 +16,8 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modeset_helper.h> @@ -144,7 +144,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { .enable = mi0283qt_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode mi0283qt_mode = { diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 11c602fc9897..2cee07a2e00b 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -29,6 +29,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> #include 
<drm/drm_format_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> @@ -860,7 +861,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { .enable = repaper_pipe_enable, .disable = repaper_pipe_disable, .update = repaper_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static int repaper_connector_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index ff5cf60f4bd7..7d216fe9267f 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -19,8 +19,8 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_format_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_rect.h> @@ -268,7 +268,7 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { .enable = st7586_pipe_enable, .disable = st7586_pipe_disable, .update = st7586_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct drm_display_mode st7586_mode = { diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index faaba0a033ea..df8872d62cdd 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -19,8 +19,8 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mipi_dbi.h> @@ -136,7 +136,7 @@ static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = { .enable = st7735r_pipe_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, }; static const struct st7735r_cfg jd_t18003_t01_cfg = { diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index b6f5f87b270f..40e5e9da7953 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -2,10 +2,9 @@ # # Makefile for the drm device driver. 
This driver provides support for the -ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ - ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_range_manager.o \ - ttm_resource.o ttm_pool.o +ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ + ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \ + ttm_device.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 8f9fa4188897..0226ae69d3ab 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -49,7 +49,7 @@ struct ttm_agp_backend { int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); - struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; + struct page *dummy_read_page = ttm_glob.dummy_read_page; struct drm_mm_node *node = bo_mem->mm_node; struct agp_memory *mem; int ret, cached = ttm->caching == ttm_cached; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b65f4b12f986..3c23e863a3f0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -44,21 +44,6 @@ #include "ttm_module.h" -static void ttm_bo_global_kobj_release(struct kobject *kobj); - -/* - * ttm_global_mutex - protecting the global BO state - */ -DEFINE_MUTEX(ttm_global_mutex); -unsigned ttm_bo_glob_use_count; -struct ttm_bo_global ttm_bo_glob; -EXPORT_SYMBOL(ttm_bo_glob); - -static struct attribute ttm_bo_count = { - .name = "bo_count", - .mode = S_IRUGO -}; - /* default destructor */ static void ttm_bo_default_destroy(struct ttm_buffer_object *bo) { @@ -84,41 +69,15 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, } } -static ssize_t ttm_bo_global_show(struct kobject *kobj, - struct attribute *attr, - char *buffer) -{ - struct ttm_bo_global *glob = - container_of(kobj, struct ttm_bo_global, kobj); - - return snprintf(buffer, PAGE_SIZE, "%d\n", - atomic_read(&glob->bo_count)); -} - -static struct attribute *ttm_bo_global_attrs[] = { - &ttm_bo_count, - NULL -}; - -static const struct sysfs_ops ttm_bo_global_ops = { - .show = &ttm_bo_global_show -}; - -static struct kobj_type ttm_bo_glob_kobj_type = { - .release = &ttm_bo_global_kobj_release, - .sysfs_ops = &ttm_bo_global_ops, - .default_attrs = ttm_bo_global_attrs -}; - static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; list_del_init(&bo->swap); list_del_init(&bo->lru); - if (bdev->driver->del_from_lru_notify) - bdev->driver->del_from_lru_notify(bo); + if (bdev->funcs->del_from_lru_notify) + bdev->funcs->del_from_lru_notify(bo); } static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, @@ -133,10 +92,11 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct ttm_lru_bulk_move *bulk) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *man; - dma_resv_assert_held(bo->base.resv); + if (!bo->deleted) + dma_resv_assert_held(bo->base.resv); if (bo->pin_count) { ttm_bo_del_from_lru(bo); @@ -150,12 +110,14 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, TTM_PAGE_FLAG_SWAPPED))) { struct list_head *swap; - swap = &ttm_bo_glob.swap_lru[bo->priority]; + swap = &ttm_glob.swap_lru[bo->priority]; list_move_tail(&bo->swap, swap); + } else { + list_del_init(&bo->swap); } - 
if (bdev->driver->del_from_lru_notify) - bdev->driver->del_from_lru_notify(bo); + if (bdev->funcs->del_from_lru_notify) + bdev->funcs->del_from_lru_notify(bo); if (bulk && !bo->pin_count) { switch (bo->mem.mem_type) { @@ -218,7 +180,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->last->base.resv); - lru = &ttm_bo_glob.swap_lru[i]; + lru = &ttm_glob.swap_lru[i]; list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); } } @@ -229,7 +191,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, struct ttm_place *hop) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; @@ -255,7 +217,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } } - ret = bdev->driver->move(bo, evict, ctx, mem, hop); + ret = bdev->funcs->move(bo, evict, ctx, mem, hop); if (ret) { if (ret == -EMULTIHOP) return ret; @@ -283,8 +245,8 @@ out_err: static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { - if (bo->bdev->driver->delete_mem_notify) - bo->bdev->driver->delete_mem_notify(bo); + if (bo->bdev->funcs->delete_mem_notify) + bo->bdev->funcs->delete_mem_notify(bo); ttm_bo_tt_destroy(bo); ttm_resource_free(bo, &bo->mem); @@ -309,9 +271,9 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) * reference it any more. The only tricky case is the trylock on * the resv object while holding the lru_lock. */ - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); bo->base.resv = &bo->base._resv; - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); } return r; @@ -370,7 +332,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (unlock_resv) dma_resv_unlock(bo->base.resv); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); lret = dma_resv_wait_timeout_rcu(resv, true, interruptible, 30 * HZ); @@ -380,7 +342,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, else if (lret == 0) return -EBUSY; - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { /* * We raced, and lost, someone else holds the reservation now, @@ -390,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, * delayed destruction would succeed, so just return success * here. */ - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); return 0; } ret = 0; @@ -399,13 +361,13 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (ret || unlikely(list_empty(&bo->ddestroy))) { if (unlock_resv) dma_resv_unlock(bo->base.resv); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); return ret; } ttm_bo_del_from_lru(bo); list_del_init(&bo->ddestroy); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); ttm_bo_cleanup_memtype_use(bo); if (unlock_resv) @@ -420,9 +382,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, * Traverse the delayed list, and call ttm_bo_cleanup_refs on all * encountered buffers. 
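The LRU hunks above keep the bulk-move interface but now protect it with the global ttm_glob.lru_lock instead of ttm_bo_glob. A hedged sketch of the driver-side batching pattern, modelled on how amdgpu's VM code uses it (the foo_* names and the header choice are assumptions), in which a set of still-reserved BOs is moved to the LRU tail in one pass:

#include <drm/ttm/ttm_bo_driver.h>
#include <linux/string.h>

static void foo_move_bos_to_lru_tail(struct ttm_buffer_object **bos,
                                     unsigned int num_bos)
{
        struct ttm_lru_bulk_move bulk;
        unsigned int i;

        /* All BOs are assumed to still be reserved (dma_resv held). */
        memset(&bulk, 0, sizeof(bulk));

        spin_lock(&ttm_glob.lru_lock);
        for (i = 0; i < num_bos; ++i) {
                /* Only records the first/last position per priority bucket... */
                ttm_bo_move_to_lru_tail(bos[i], &bos[i]->mem, &bulk);
        }
        /* ...so the actual list splicing happens once, here. */
        ttm_bo_bulk_move_lru_tail(&bulk);
        spin_unlock(&ttm_glob.lru_lock);
}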
*/ -static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) +bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all) { - struct ttm_bo_global *glob = &ttm_bo_glob; + struct ttm_global *glob = &ttm_glob; struct list_head removed; bool empty; @@ -461,22 +423,11 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) return empty; } -static void ttm_bo_delayed_workqueue(struct work_struct *work) -{ - struct ttm_bo_device *bdev = - container_of(work, struct ttm_bo_device, wq.work); - - if (!ttm_bo_delayed_delete(bdev, false)) - schedule_delayed_work(&bdev->wq, - ((HZ / 100) < 1) ? 1 : HZ / 100); -} - static void ttm_bo_release(struct kref *kref) { struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); - struct ttm_bo_device *bdev = bo->bdev; - size_t acc_size = bo->acc_size; + struct ttm_device *bdev = bo->bdev; int ret; if (!bo->deleted) { @@ -489,8 +440,8 @@ static void ttm_bo_release(struct kref *kref) 30 * HZ); } - if (bo->bdev->driver->release_notify) - bo->bdev->driver->release_notify(bo); + if (bo->bdev->funcs->release_notify) + bo->bdev->funcs->release_notify(bo); drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); ttm_mem_io_free(bdev, &bo->mem); @@ -502,41 +453,43 @@ static void ttm_bo_release(struct kref *kref) ttm_bo_flush_all_fences(bo); bo->deleted = true; - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); /* * Make pinned bos immediately available to * shrinkers, now that they are queued for * destruction. + * + * FIXME: QXL is triggering this. Can be removed when the + * driver is fixed. */ - if (WARN_ON(bo->pin_count)) { + if (WARN_ON_ONCE(bo->pin_count)) { bo->pin_count = 0; ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); } kref_init(&bo->kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); schedule_delayed_work(&bdev->wq, ((HZ / 100) < 1) ? 
1 : HZ / 100); return; } - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); ttm_bo_del_from_lru(bo); list_del(&bo->ddestroy); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); ttm_bo_cleanup_memtype_use(bo); dma_resv_unlock(bo->base.resv); - atomic_dec(&ttm_bo_glob.bo_count); + atomic_dec(&ttm_glob.bo_count); dma_fence_put(bo->moving); if (!ttm_bo_uses_embedded_gem_object(bo)) dma_resv_fini(&bo->base._resv); bo->destroy(bo); - ttm_mem_global_free(&ttm_mem_glob, acc_size); } void ttm_bo_put(struct ttm_buffer_object *bo) @@ -545,13 +498,13 @@ void ttm_bo_put(struct ttm_buffer_object *bo) } EXPORT_SYMBOL(ttm_bo_put); -int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) +int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev) { return cancel_delayed_work_sync(&bdev->wq); } EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); -void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) +void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched) { if (resched) schedule_delayed_work(&bdev->wq, @@ -562,7 +515,7 @@ EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); static int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource evict_mem; struct ttm_placement placement; struct ttm_place hop; @@ -574,7 +527,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, placement.num_placement = 0; placement.num_busy_placement = 0; - bdev->driver->evict_flags(bo, &placement); + bdev->funcs->evict_flags(bo, &placement); if (!placement.num_placement && !placement.num_busy_placement) { ttm_bo_wait(bo, false, false); @@ -690,7 +643,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo, return r == -EDEADLK ? 
-EBUSY : r; } -int ttm_mem_evict_first(struct ttm_bo_device *bdev, +int ttm_mem_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_operation_ctx *ctx, @@ -701,7 +654,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev, unsigned i; int ret; - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { list_for_each_entry(bo, &man->lru[i], lru) { bool busy; @@ -714,7 +667,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev, continue; } - if (place && !bdev->driver->eviction_valuable(bo, + if (place && !bdev->funcs->eviction_valuable(bo, place)) { if (locked) dma_resv_unlock(bo->base.resv); @@ -738,7 +691,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev, if (!bo) { if (busy_bo && !ttm_bo_get_unless_zero(busy_bo)) busy_bo = NULL; - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket); if (busy_bo) ttm_bo_put(busy_bo); @@ -752,7 +705,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); ret = ttm_bo_evict(bo, ctx); if (locked) @@ -807,7 +760,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); struct ww_acquire_ctx *ticket; int ret; @@ -842,7 +795,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource *mem) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *man; man = ttm_manager_type(bdev, place->mem_type); @@ -852,9 +805,9 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, mem->mem_type = place->mem_type; mem->placement = place->flags; - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); ttm_bo_move_to_lru_tail(bo, mem, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); return 0; } @@ -872,7 +825,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct ttm_operation_ctx *ctx) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; bool type_found = false; int i, ret; @@ -959,8 +912,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, return ret; /* move to the bounce domain */ ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL); - if (ret) + if (ret) { + ttm_resource_free(bo, &hop_mem); return ret; + } return 0; } @@ -991,18 +946,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * stop and the driver will be called to make * the second hop. */ -bounce: ret = ttm_bo_mem_space(bo, placement, &mem, ctx); if (ret) return ret; +bounce: ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop); if (ret == -EMULTIHOP) { ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop); if (ret) - return ret; + goto out; /* try and move to final place now. 
*/ goto bounce; } +out: if (ret) ttm_resource_free(bo, &mem); return ret; @@ -1090,32 +1046,20 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_validate); -int ttm_bo_init_reserved(struct ttm_bo_device *bdev, +int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, size_t size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, struct ttm_operation_ctx *ctx, - size_t acc_size, struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)) { - struct ttm_mem_global *mem_glob = &ttm_mem_glob; bool locked; int ret = 0; - ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx); - if (ret) { - pr_err("Out of kernel memory\n"); - if (destroy) - (*destroy)(bo); - else - kfree(bo); - return -ENOMEM; - } - bo->destroy = destroy ? destroy : ttm_bo_default_destroy; kref_init(&bo->kref); @@ -1132,7 +1076,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, bo->mem.bus.addr = NULL; bo->moving = NULL; bo->mem.placement = 0; - bo->acc_size = acc_size; bo->pin_count = 0; bo->sg = sg; if (resv) { @@ -1150,7 +1093,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, dma_resv_init(&bo->base._resv); drm_vma_node_reset(&bo->base.vma_node); } - atomic_inc(&ttm_bo_glob.bo_count); + atomic_inc(&ttm_glob.bo_count); /* * For ttm_bo_type_device buffers, allocate @@ -1186,14 +1129,13 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_init_reserved); -int ttm_bo_init(struct ttm_bo_device *bdev, +int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo, size_t size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, bool interruptible, - size_t acc_size, struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)) @@ -1202,8 +1144,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, int ret; ret = ttm_bo_init_reserved(bdev, bo, size, type, placement, - page_alignment, &ctx, acc_size, - sg, resv, destroy); + page_alignment, &ctx, sg, resv, destroy); if (ret) return ret; @@ -1214,171 +1155,13 @@ int ttm_bo_init(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_init); -size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, - unsigned long bo_size, - unsigned struct_size) -{ - unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; - size_t size = 0; - - size += ttm_round_pot(struct_size); - size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t))); - size += ttm_round_pot(sizeof(struct ttm_tt)); - return size; -} -EXPORT_SYMBOL(ttm_bo_dma_acc_size); - -static void ttm_bo_global_kobj_release(struct kobject *kobj) -{ - struct ttm_bo_global *glob = - container_of(kobj, struct ttm_bo_global, kobj); - - __free_page(glob->dummy_read_page); -} - -static void ttm_bo_global_release(void) -{ - struct ttm_bo_global *glob = &ttm_bo_glob; - - mutex_lock(&ttm_global_mutex); - if (--ttm_bo_glob_use_count > 0) - goto out; - - kobject_del(&glob->kobj); - kobject_put(&glob->kobj); - ttm_mem_global_release(&ttm_mem_glob); - memset(glob, 0, sizeof(*glob)); -out: - mutex_unlock(&ttm_global_mutex); -} - -static int ttm_bo_global_init(void) -{ - struct ttm_bo_global *glob = &ttm_bo_glob; - int ret = 0; - unsigned i; - - mutex_lock(&ttm_global_mutex); - if (++ttm_bo_glob_use_count > 1) - goto out; - - ret = ttm_mem_global_init(&ttm_mem_glob); - if (ret) - goto out; - - spin_lock_init(&glob->lru_lock); - glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); - - if (unlikely(glob->dummy_read_page == NULL)) { - ret = 
-ENOMEM; - goto out; - } - - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) - INIT_LIST_HEAD(&glob->swap_lru[i]); - INIT_LIST_HEAD(&glob->device_list); - atomic_set(&glob->bo_count, 0); - - ret = kobject_init_and_add( - &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); - if (unlikely(ret != 0)) - kobject_put(&glob->kobj); -out: - mutex_unlock(&ttm_global_mutex); - return ret; -} - -int ttm_bo_device_release(struct ttm_bo_device *bdev) -{ - struct ttm_bo_global *glob = &ttm_bo_glob; - int ret = 0; - unsigned i; - struct ttm_resource_manager *man; - - man = ttm_manager_type(bdev, TTM_PL_SYSTEM); - ttm_resource_manager_set_used(man, false); - ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); - - mutex_lock(&ttm_global_mutex); - list_del(&bdev->device_list); - mutex_unlock(&ttm_global_mutex); - - cancel_delayed_work_sync(&bdev->wq); - - if (ttm_bo_delayed_delete(bdev, true)) - pr_debug("Delayed destroy list was clean\n"); - - spin_lock(&glob->lru_lock); - for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) - if (list_empty(&man->lru[0])) - pr_debug("Swap list %d was clean\n", i); - spin_unlock(&glob->lru_lock); - - ttm_pool_fini(&bdev->pool); - - if (!ret) - ttm_bo_global_release(); - - return ret; -} -EXPORT_SYMBOL(ttm_bo_device_release); - -static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) -{ - struct ttm_resource_manager *man = &bdev->sysman; - - /* - * Initialize the system memory buffer type. - * Other types need to be driver / IOCTL initialized. - */ - man->use_tt = true; - - ttm_resource_manager_init(man, 0); - ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); - ttm_resource_manager_set_used(man, true); -} - -int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_bo_driver *driver, - struct device *dev, - struct address_space *mapping, - struct drm_vma_offset_manager *vma_manager, - bool use_dma_alloc, bool use_dma32) -{ - struct ttm_bo_global *glob = &ttm_bo_glob; - int ret; - - if (WARN_ON(vma_manager == NULL)) - return -EINVAL; - - ret = ttm_bo_global_init(); - if (ret) - return ret; - - bdev->driver = driver; - - ttm_bo_init_sysman(bdev); - ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32); - - bdev->vma_manager = vma_manager; - INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); - INIT_LIST_HEAD(&bdev->ddestroy); - bdev->dev_mapping = mapping; - mutex_lock(&ttm_global_mutex); - list_add_tail(&bdev->device_list, &glob->device_list); - mutex_unlock(&ttm_global_mutex); - - return 0; -} -EXPORT_SYMBOL(ttm_bo_device_init); - /* * buffer object vm functions. */ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); ttm_mem_io_free(bdev, &bo->mem); @@ -1414,9 +1197,9 @@ EXPORT_SYMBOL(ttm_bo_wait); * A buffer object shrink method that tries to swap out the first * buffer object on the bo_global::swap_lru list. */ -int ttm_bo_swapout(struct ttm_operation_ctx *ctx) +int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags) { - struct ttm_bo_global *glob = &ttm_bo_glob; + struct ttm_global *glob = &ttm_glob; struct ttm_buffer_object *bo; int ret = -EBUSY; bool locked; @@ -1494,10 +1277,10 @@ int ttm_bo_swapout(struct ttm_operation_ctx *ctx) * anyone tries to access a ttm page. 
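With ttm_mem_global gone from the BO init path, ttm_bo_init_reserved() and ttm_bo_init() lose their acc_size parameter and no longer fail on an accounting limit; a driver that still wants a per-BO size estimate now computes it itself. A hedged sketch of a call site after this change (foo_* names are illustrative; the argument order follows the new prototype in the hunks above, and the error-path comment assumes TTM's usual behaviour of dropping its reference on failure):

#include <drm/ttm/ttm_bo_api.h>
#include <linux/slab.h>

struct foo_bo {
        struct ttm_buffer_object bo;
};

static void foo_bo_destroy(struct ttm_buffer_object *bo)
{
        kfree(container_of(bo, struct foo_bo, bo));
}

static int foo_bo_create(struct ttm_device *bdev, size_t size,
                         struct ttm_placement *placement,
                         struct foo_bo **pbo)
{
        struct ttm_operation_ctx ctx = { .interruptible = true };
        struct foo_bo *fbo;
        int ret;

        fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        /* Note: no acc_size and no ttm_mem_global accounting any more. */
        ret = ttm_bo_init_reserved(bdev, &fbo->bo, size, ttm_bo_type_device,
                                   placement, PAGE_SIZE >> PAGE_SHIFT, &ctx,
                                   NULL, NULL, foo_bo_destroy);
        if (ret)
                return ret;     /* TTM drops its ref on failure -> foo_bo_destroy() */

        ttm_bo_unreserve(&fbo->bo);
        *pbo = fbo;
        return 0;
}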
*/ - if (bo->bdev->driver->swap_notify) - bo->bdev->driver->swap_notify(bo); + if (bo->bdev->funcs->swap_notify) + bo->bdev->funcs->swap_notify(bo); - ret = ttm_tt_swapout(bo->bdev, bo->ttm); + ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags); out: /** @@ -1520,4 +1303,3 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) ttm_tt_destroy(bo->bdev, bo->ttm); bo->ttm = NULL; } - diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 398d5013fc39..031e5819fec4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -46,33 +46,33 @@ struct ttm_transfer_obj { struct ttm_buffer_object *bo; }; -int ttm_mem_io_reserve(struct ttm_bo_device *bdev, +int ttm_mem_io_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { if (mem->bus.offset || mem->bus.addr) return 0; mem->bus.is_iomem = false; - if (!bdev->driver->io_mem_reserve) + if (!bdev->funcs->io_mem_reserve) return 0; - return bdev->driver->io_mem_reserve(bdev, mem); + return bdev->funcs->io_mem_reserve(bdev, mem); } -void ttm_mem_io_free(struct ttm_bo_device *bdev, +void ttm_mem_io_free(struct ttm_device *bdev, struct ttm_resource *mem) { if (!mem->bus.offset && !mem->bus.addr) return; - if (bdev->driver->io_mem_free) - bdev->driver->io_mem_free(bdev, mem); + if (bdev->funcs->io_mem_free) + bdev->funcs->io_mem_free(bdev, mem); mem->bus.offset = 0; mem->bus.addr = NULL; } -static int ttm_resource_ioremap(struct ttm_bo_device *bdev, +static int ttm_resource_ioremap(struct ttm_device *bdev, struct ttm_resource *mem, void **virtual) { @@ -102,7 +102,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev, return 0; } -static void ttm_resource_iounmap(struct ttm_bo_device *bdev, +static void ttm_resource_iounmap(struct ttm_device *bdev, struct ttm_resource *mem, void *virtual) { @@ -172,7 +172,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); struct ttm_tt *ttm = bo->ttm; struct ttm_resource *old_mem = &bo->mem; @@ -300,7 +300,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. 
*/ - atomic_inc(&ttm_bo_glob.bo_count); + atomic_inc(&ttm_glob.bo_count); INIT_LIST_HEAD(&fbo->base.ddestroy); INIT_LIST_HEAD(&fbo->base.lru); INIT_LIST_HEAD(&fbo->base.swap); @@ -309,7 +309,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, kref_init(&fbo->base.kref); fbo->base.destroy = &ttm_transfered_destroy; - fbo->base.acc_size = 0; fbo->base.pin_count = 0; if (bo->type != ttm_bo_type_sg) fbo->base.base.resv = &fbo->base.base._resv; @@ -602,7 +601,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, struct dma_fence *fence) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); /** @@ -628,7 +627,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bool pipeline, struct ttm_resource *new_mem) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); int ret = 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 6dc96cf66744..b31b18058965 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -95,10 +95,10 @@ out_unlock: static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; - if (bdev->driver->io_mem_pfn) - return bdev->driver->io_mem_pfn(bo, page_offset); + if (bdev->funcs->io_mem_pfn) + return bdev->funcs->io_mem_pfn(bo, page_offset); return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset; } @@ -216,7 +216,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) goto out_fallback; } - } else if (bo->bdev->driver->io_mem_pfn) { + } else if (bo->bdev->funcs->io_mem_pfn) { for (i = 1; i < fault_page_size; ++i) { if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i) goto out_fallback; @@ -278,7 +278,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, { struct vm_area_struct *vma = vmf->vma; struct ttm_buffer_object *bo = vma->vm_private_data; - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; unsigned long page_offset; unsigned long page_last; unsigned long pfn; @@ -488,8 +488,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write); break; default: - if (bo->bdev->driver->access_memory) - ret = bo->bdev->driver->access_memory( + if (bo->bdev->funcs->access_memory) + ret = bo->bdev->funcs->access_memory( bo, offset, buf, len, write); else ret = -EIO; @@ -508,7 +508,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { .access = ttm_bo_vm_access, }; -static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, +static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_device *bdev, unsigned long offset, unsigned long pages) { @@ -555,9 +555,8 @@ static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_s } int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_bo_device *bdev) + struct ttm_device *bdev) { - struct ttm_bo_driver *driver; struct ttm_buffer_object *bo; int ret; @@ -568,12 +567,11 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, if 
(unlikely(!bo)) return -EINVAL; - driver = bo->bdev->driver; - if (unlikely(!driver->verify_access)) { + if (unlikely(!bo->bdev->funcs->verify_access)) { ret = -EPERM; goto out_unref; } - ret = driver->verify_access(bo, filp); + ret = bo->bdev->funcs->verify_access(bo, filp); if (unlikely(ret != 0)) goto out_unref; diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c new file mode 100644 index 000000000000..95e1b7b1f2e6 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +/* + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#define pr_fmt(fmt) "[TTM DEVICE] " fmt + +#include <linux/mm.h> + +#include <drm/ttm/ttm_device.h> +#include <drm/ttm/ttm_tt.h> +#include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_bo_api.h> + +#include "ttm_module.h" + +/** + * ttm_global_mutex - protecting the global state + */ +DEFINE_MUTEX(ttm_global_mutex); +unsigned ttm_glob_use_count; +struct ttm_global ttm_glob; +EXPORT_SYMBOL(ttm_glob); + +static void ttm_global_release(void) +{ + struct ttm_global *glob = &ttm_glob; + + mutex_lock(&ttm_global_mutex); + if (--ttm_glob_use_count > 0) + goto out; + + ttm_pool_mgr_fini(); + ttm_tt_mgr_fini(); + + __free_page(glob->dummy_read_page); + memset(glob, 0, sizeof(*glob)); +out: + mutex_unlock(&ttm_global_mutex); +} + +static int ttm_global_init(void) +{ + struct ttm_global *glob = &ttm_glob; + unsigned long num_pages; + struct sysinfo si; + int ret = 0; + unsigned i; + + mutex_lock(&ttm_global_mutex); + if (++ttm_glob_use_count > 1) + goto out; + + si_meminfo(&si); + + /* Limit the number of pages in the pool to about 50% of the total + * system memory. 
+ */ + num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT; + ttm_pool_mgr_init(num_pages * 50 / 100); + ttm_tt_mgr_init(); + + spin_lock_init(&glob->lru_lock); + glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); + + if (unlikely(glob->dummy_read_page == NULL)) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) + INIT_LIST_HEAD(&glob->swap_lru[i]); + INIT_LIST_HEAD(&glob->device_list); + atomic_set(&glob->bo_count, 0); + + debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root, + &glob->bo_count); +out: + mutex_unlock(&ttm_global_mutex); + return ret; +} + +static void ttm_init_sysman(struct ttm_device *bdev) +{ + struct ttm_resource_manager *man = &bdev->sysman; + + /* + * Initialize the system memory buffer type. + * Other types need to be driver / IOCTL initialized. + */ + man->use_tt = true; + + ttm_resource_manager_init(man, 0); + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); + ttm_resource_manager_set_used(man, true); +} + +static void ttm_device_delayed_workqueue(struct work_struct *work) +{ + struct ttm_device *bdev = + container_of(work, struct ttm_device, wq.work); + + if (!ttm_bo_delayed_delete(bdev, false)) + schedule_delayed_work(&bdev->wq, + ((HZ / 100) < 1) ? 1 : HZ / 100); +} + +/** + * ttm_device_init + * + * @bdev: A pointer to a struct ttm_device to initialize. + * @funcs: Function table for the device. + * @dev: The core kernel device pointer for DMA mappings and allocations. + * @mapping: The address space to use for this bo. + * @vma_manager: A pointer to a vma manager. + * @use_dma_alloc: If coherent DMA allocation API should be used. + * @use_dma32: If we should use GFP_DMA32 for device memory allocations. + * + * Initializes a struct ttm_device: + * Returns: + * !0: Failure. 
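The kerneldoc above covers the parameters; in practice a driver embeds a struct ttm_device, passes its DRM device's anon inode mapping and VMA offset manager, and tears everything down again with ttm_device_fini(). A minimal sketch under those assumptions (foo_* names are hypothetical, the DMA flags depend on the hardware, and the funcs table is only stubbed here; a sketch of its ttm_tt hooks follows further down next to the ttm_tt.c changes):

#include <drm/drm_device.h>
#include <drm/ttm/ttm_device.h>

struct foo_drm {
        struct drm_device ddev;
        struct ttm_device bdev;
};

/* Filled in with ttm_tt_create, evict_flags, move, ... by the driver. */
static struct ttm_device_funcs foo_ttm_funcs;

static int foo_ttm_init(struct foo_drm *foo)
{
        int ret;

        ret = ttm_device_init(&foo->bdev, &foo_ttm_funcs, foo->ddev.dev,
                              foo->ddev.anon_inode->i_mapping,
                              foo->ddev.vma_offset_manager,
                              true /* use_dma_alloc */, false /* use_dma32 */);
        if (ret)
                return ret;

        /* VRAM/GTT managers would be registered here, e.g. ttm_range_man_init(). */
        return 0;
}

static void foo_ttm_fini(struct foo_drm *foo)
{
        /* Driver-registered managers must be taken down before this. */
        ttm_device_fini(&foo->bdev);
}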
+ */ +int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, + struct device *dev, struct address_space *mapping, + struct drm_vma_offset_manager *vma_manager, + bool use_dma_alloc, bool use_dma32) +{ + struct ttm_global *glob = &ttm_glob; + int ret; + + if (WARN_ON(vma_manager == NULL)) + return -EINVAL; + + ret = ttm_global_init(); + if (ret) + return ret; + + bdev->funcs = funcs; + + ttm_init_sysman(bdev); + ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32); + + bdev->vma_manager = vma_manager; + INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue); + INIT_LIST_HEAD(&bdev->ddestroy); + bdev->dev_mapping = mapping; + mutex_lock(&ttm_global_mutex); + list_add_tail(&bdev->device_list, &glob->device_list); + mutex_unlock(&ttm_global_mutex); + + return 0; +} +EXPORT_SYMBOL(ttm_device_init); + +void ttm_device_fini(struct ttm_device *bdev) +{ + struct ttm_global *glob = &ttm_glob; + struct ttm_resource_manager *man; + unsigned i; + + man = ttm_manager_type(bdev, TTM_PL_SYSTEM); + ttm_resource_manager_set_used(man, false); + ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL); + + mutex_lock(&ttm_global_mutex); + list_del(&bdev->device_list); + mutex_unlock(&ttm_global_mutex); + + cancel_delayed_work_sync(&bdev->wq); + + if (ttm_bo_delayed_delete(bdev, true)) + pr_debug("Delayed destroy list was clean\n"); + + spin_lock(&glob->lru_lock); + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) + if (list_empty(&man->lru[0])) + pr_debug("Swap list %d was clean\n", i); + spin_unlock(&glob->lru_lock); + + ttm_pool_fini(&bdev->pool); + ttm_global_release(); +} +EXPORT_SYMBOL(ttm_device_fini); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 9fa36ed59429..690ab97d52b7 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -51,14 +51,14 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, if (list_empty(list)) return; - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); if (ticket) ww_acquire_fini(ticket); @@ -154,7 +154,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, if (list_empty(list)) return; - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&ttm_glob.lru_lock); list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; @@ -165,7 +165,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&ttm_glob.lru_lock); if (ticket) ww_acquire_fini(ticket); } diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index c0906437cb1c..56b0efdba1a9 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -32,68 +32,22 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/sched.h> +#include <linux/debugfs.h> #include <drm/drm_sysfs.h> #include "ttm_module.h" -static DECLARE_WAIT_QUEUE_HEAD(exit_q); -static atomic_t device_released; - -static struct device_type ttm_drm_class_type = { - .name = "ttm", - /** - * Add pm ops here. 
- */ -}; - -static void ttm_drm_class_device_release(struct device *dev) -{ - atomic_set(&device_released, 1); - wake_up_all(&exit_q); -} - -static struct device ttm_drm_class_device = { - .type = &ttm_drm_class_type, - .release = &ttm_drm_class_device_release -}; - -struct kobject *ttm_get_kobj(void) -{ - struct kobject *kobj = &ttm_drm_class_device.kobj; - BUG_ON(kobj == NULL); - return kobj; -} +struct dentry *ttm_debugfs_root; static int __init ttm_init(void) { - int ret; - - ret = dev_set_name(&ttm_drm_class_device, "ttm"); - if (unlikely(ret != 0)) - return ret; - - atomic_set(&device_released, 0); - ret = drm_class_device_register(&ttm_drm_class_device); - if (unlikely(ret != 0)) - goto out_no_dev_reg; - + ttm_debugfs_root = debugfs_create_dir("ttm", NULL); return 0; -out_no_dev_reg: - atomic_set(&device_released, 1); - wake_up_all(&exit_q); - return ret; } static void __exit ttm_exit(void) { - drm_class_device_unregister(&ttm_drm_class_device); - - /** - * Refuse to unload until the TTM device is released. - * Not sure this is 100% needed. - */ - - wait_event(exit_q, atomic_read(&device_released) == 1); + debugfs_remove(ttm_debugfs_root); } module_init(ttm_init); diff --git a/drivers/gpu/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h index 45fa318c1585..d7cac5d4b835 100644 --- a/drivers/gpu/drm/ttm/ttm_module.h +++ b/drivers/gpu/drm/ttm/ttm_module.h @@ -31,10 +31,10 @@ #ifndef _TTM_MODULE_H_ #define _TTM_MODULE_H_ -#include <linux/kernel.h> -struct kobject; - #define TTM_PFX "[TTM] " -extern struct kobject *ttm_get_kobj(void); + +struct dentry; + +extern struct dentry *ttm_debugfs_root; #endif /* _TTM_MODULE_H_ */ diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 11e0313db0ea..cb38b1a17b09 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -33,6 +33,8 @@ #include <linux/module.h> #include <linux/dma-mapping.h> +#include <linux/highmem.h> +#include <linux/sched/mm.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> @@ -42,6 +44,8 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_tt.h> +#include "ttm_module.h" + /** * struct ttm_pool_dma - Helper object for coherent DMA mappings * @@ -84,7 +88,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, * put_page() on a TTM allocated page is illegal. 
*/ if (order) - gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | + gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; if (!pool->use_dma_alloc) { @@ -218,6 +222,15 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr, /* Give pages into a specific pool_type */ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p) { + unsigned int i, num_pages = 1 << pt->order; + + for (i = 0; i < num_pages; ++i) { + if (PageHighMem(p)) + clear_highpage(p + i); + else + clear_page(page_address(p + i)); + } + spin_lock(&pt->lock); list_add(&p->lru, &pt->pages); spin_unlock(&pt->lock); @@ -258,13 +271,13 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, /* Remove a pool_type from the global shrinker list and free all pages */ static void ttm_pool_type_fini(struct ttm_pool_type *pt) { - struct page *p, *tmp; + struct page *p; mutex_lock(&shrinker_lock); list_del(&pt->shrinker_list); mutex_unlock(&shrinker_lock); - list_for_each_entry_safe(p, tmp, &pt->pages, lru) + while ((p = ttm_pool_type_take(pt))) ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); } @@ -402,16 +415,10 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, caching = pages + (1 << order); } - r = ttm_mem_global_alloc_page(&ttm_mem_glob, p, - (1 << order) * PAGE_SIZE, - ctx); - if (r) - goto error_free_page; - if (dma_addr) { r = ttm_pool_map(pool, order, p, &dma_addr); if (r) - goto error_global_free; + goto error_free_page; } num_pages -= 1 << order; @@ -425,9 +432,6 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, return 0; -error_global_free: - ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE); - error_free_page: ttm_pool_free_page(pool, tt->caching, order, p); @@ -462,8 +466,6 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt) order = ttm_pool_page_order(pool, p); num_pages = 1ULL << order; - ttm_mem_global_free_page(&ttm_mem_glob, p, - num_pages * PAGE_SIZE); if (tt->dma_address) ttm_pool_unmap(pool, tt->dma_address[i], num_pages); @@ -503,10 +505,12 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev, pool->use_dma_alloc = use_dma_alloc; pool->use_dma32 = use_dma32; - for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) - for (j = 0; j < MAX_ORDER; ++j) - ttm_pool_type_init(&pool->caching[i].orders[j], - pool, i, j); + if (use_dma_alloc) { + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) + for (j = 0; j < MAX_ORDER; ++j) + ttm_pool_type_init(&pool->caching[i].orders[j], + pool, i, j); + } } /** @@ -521,9 +525,33 @@ void ttm_pool_fini(struct ttm_pool *pool) { unsigned int i, j; - for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) - for (j = 0; j < MAX_ORDER; ++j) - ttm_pool_type_fini(&pool->caching[i].orders[j]); + if (pool->use_dma_alloc) { + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) + for (j = 0; j < MAX_ORDER; ++j) + ttm_pool_type_fini(&pool->caching[i].orders[j]); + } +} + +/* As long as pages are available make sure to release at least one */ +static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + unsigned long num_freed = 0; + + do + num_freed += ttm_pool_shrink(); + while (!num_freed && atomic_long_read(&allocated_pages)); + + return num_freed; +} + +/* Return the number of pages available or SHRINK_EMPTY if we have none */ +static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + unsigned long num_pages = atomic_long_read(&allocated_pages); + + return num_pages ? 
num_pages : SHRINK_EMPTY; } #ifdef CONFIG_DEBUG_FS @@ -542,6 +570,17 @@ static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt) return count; } +/* Print a nice header for the order */ +static void ttm_pool_debugfs_header(struct seq_file *m) +{ + unsigned int i; + + seq_puts(m, "\t "); + for (i = 0; i < MAX_ORDER; ++i) + seq_printf(m, " ---%2u---", i); + seq_puts(m, "\n"); +} + /* Dump information about the different pool types */ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt, struct seq_file *m) @@ -553,6 +592,35 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt, seq_puts(m, "\n"); } +/* Dump the total amount of allocated pages */ +static void ttm_pool_debugfs_footer(struct seq_file *m) +{ + seq_printf(m, "\ntotal\t: %8lu of %8lu\n", + atomic_long_read(&allocated_pages), page_pool_size); +} + +/* Dump the information for the global pools */ +static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data) +{ + ttm_pool_debugfs_header(m); + + mutex_lock(&shrinker_lock); + seq_puts(m, "wc\t:"); + ttm_pool_debugfs_orders(global_write_combined, m); + seq_puts(m, "uc\t:"); + ttm_pool_debugfs_orders(global_uncached, m); + seq_puts(m, "wc 32\t:"); + ttm_pool_debugfs_orders(global_dma32_write_combined, m); + seq_puts(m, "uc 32\t:"); + ttm_pool_debugfs_orders(global_dma32_uncached, m); + mutex_unlock(&shrinker_lock); + + ttm_pool_debugfs_footer(m); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals); + /** * ttm_pool_debugfs - Debugfs dump function for a pool * @@ -565,23 +633,14 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) { unsigned int i; - mutex_lock(&shrinker_lock); - - seq_puts(m, "\t "); - for (i = 0; i < MAX_ORDER; ++i) - seq_printf(m, " ---%2u---", i); - seq_puts(m, "\n"); - - seq_puts(m, "wc\t:"); - ttm_pool_debugfs_orders(global_write_combined, m); - seq_puts(m, "uc\t:"); - ttm_pool_debugfs_orders(global_uncached, m); + if (!pool->use_dma_alloc) { + seq_puts(m, "unused\n"); + return 0; + } - seq_puts(m, "wc 32\t:"); - ttm_pool_debugfs_orders(global_dma32_write_combined, m); - seq_puts(m, "uc 32\t:"); - ttm_pool_debugfs_orders(global_dma32_uncached, m); + ttm_pool_debugfs_header(m); + mutex_lock(&shrinker_lock); for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { seq_puts(m, "DMA "); switch (i) { @@ -597,39 +656,28 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) } ttm_pool_debugfs_orders(pool->caching[i].orders, m); } - - seq_printf(m, "\ntotal\t: %8lu of %8lu\n", - atomic_long_read(&allocated_pages), page_pool_size); - mutex_unlock(&shrinker_lock); + ttm_pool_debugfs_footer(m); return 0; } EXPORT_SYMBOL(ttm_pool_debugfs); -#endif - -/* As long as pages are available make sure to release at least one */ -static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, - struct shrink_control *sc) +/* Test the shrinker functions and dump the result */ +static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data) { - unsigned long num_freed = 0; + struct shrink_control sc = { .gfp_mask = GFP_NOFS }; - do - num_freed += ttm_pool_shrink(); - while (!num_freed && atomic_long_read(&allocated_pages)); + fs_reclaim_acquire(GFP_KERNEL); + seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc), + ttm_pool_shrinker_scan(&mm_shrinker, &sc)); + fs_reclaim_release(GFP_KERNEL); - return num_freed; + return 0; } +DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink); -/* Return the number of pages available or SHRINK_EMPTY if we have none */ -static unsigned long 
ttm_pool_shrinker_count(struct shrinker *shrink, - struct shrink_control *sc) -{ - unsigned long num_pages = atomic_long_read(&allocated_pages); - - return num_pages ? num_pages : SHRINK_EMPTY; -} +#endif /** * ttm_pool_mgr_init - Initialize globals @@ -659,6 +707,13 @@ int ttm_pool_mgr_init(unsigned long num_pages) ttm_uncached, i); } +#ifdef CONFIG_DEBUG_FS + debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL, + &ttm_pool_debugfs_globals_fops); + debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL, + &ttm_pool_debugfs_shrink_fops); +#endif + mm_shrinker.count_objects = ttm_pool_shrinker_count; mm_shrinker.scan_objects = ttm_pool_shrinker_scan; mm_shrinker.seeks = 1; diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index a39305f742da..707e5c152896 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -111,7 +111,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man, static const struct ttm_resource_manager_func ttm_range_manager_func; -int ttm_range_man_init(struct ttm_bo_device *bdev, +int ttm_range_man_init(struct ttm_device *bdev, unsigned type, bool use_tt, unsigned long p_size) { @@ -138,7 +138,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_range_man_init); -int ttm_range_man_fini(struct ttm_bo_device *bdev, +int ttm_range_man_fini(struct ttm_device *bdev, unsigned type) { struct ttm_resource_manager *man = ttm_manager_type(bdev, type); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index b60699bf4816..ed1672a9f332 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -83,7 +83,7 @@ EXPORT_SYMBOL(ttm_resource_manager_init); * Evict all the objects out of a memory manager until it is empty. * Part of memory manager cleanup sequence. */ -int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, +int ttm_resource_manager_evict_all(struct ttm_device *bdev, struct ttm_resource_manager *man) { struct ttm_operation_ctx ctx = { @@ -91,7 +91,7 @@ int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, .no_wait_gpu = false, .force_alloc = true }; - struct ttm_bo_global *glob = &ttm_bo_glob; + struct ttm_global *glob = &ttm_glob; struct dma_fence *fence; int ret; unsigned i; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7f75a13163f0..2f0833c98d2c 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -38,12 +38,17 @@ #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> +#include "ttm_module.h" + +static struct shrinker mm_shrinker; +static atomic_long_t swapable_pages; + /* * Allocates a ttm structure for the given BO. 
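The comment above describes the allocation path: ttm_tt_create() now reaches the driver through the ttm_device_funcs table, and the driver hands back a struct ttm_tt that TTM populates later. A hedged sketch of the minimal driver hooks (foo_* names are illustrative; the four-argument ttm_tt_init() with an explicit caching mode is assumed from this era of the API):

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/slab.h>

static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;    /* the core maps a NULL return to -ENOMEM */

        /* Plain cached system pages; a real driver picks caching per placement. */
        if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
                kfree(tt);
                return NULL;
        }

        return tt;
}

static void foo_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_destroy_common(bdev, tt);
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_device_funcs foo_ttm_funcs = {
        .ttm_tt_create  = foo_ttm_tt_create,
        .ttm_tt_destroy = foo_ttm_tt_destroy,
        /* evict_flags, move, eviction_valuable, ... omitted from this sketch */
};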
*/ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; uint32_t page_flags = 0; dma_resv_assert_held(bo->base.resv); @@ -66,7 +71,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) return -EINVAL; } - bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags); + bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags); if (unlikely(bo->ttm == NULL)) return -ENOMEM; @@ -108,7 +113,7 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm) return 0; } -void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm) { ttm_tt_unpopulate(bdev, ttm); @@ -119,9 +124,9 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_tt_destroy_common); -void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { - bdev->driver->ttm_tt_destroy(bdev, ttm); + bdev->funcs->ttm_tt_destroy(bdev, ttm); } static void ttm_tt_init_fields(struct ttm_tt *ttm, @@ -223,32 +228,41 @@ out_err: return ret; } -int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +/** + * ttm_tt_swapout - swap out tt object + * + * @bdev: TTM device structure. + * @ttm: The struct ttm_tt. + * @gfp_flags: Flags to use for memory allocation. + * + * Swapout a TT object to a shmem_file, return number of pages swapped out or + * negative error code. + */ +int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm, + gfp_t gfp_flags) { + loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT; struct address_space *swap_space; struct file *swap_storage; struct page *from_page; struct page *to_page; - gfp_t gfp_mask; int i, ret; - swap_storage = shmem_file_setup("ttm swap", - ttm->num_pages << PAGE_SHIFT, - 0); + swap_storage = shmem_file_setup("ttm swap", size, 0); if (IS_ERR(swap_storage)) { pr_err("Failed allocating swap storage\n"); return PTR_ERR(swap_storage); } swap_space = swap_storage->f_mapping; - gfp_mask = mapping_gfp_mask(swap_space); + gfp_flags &= mapping_gfp_mask(swap_space); for (i = 0; i < ttm->num_pages; ++i) { from_page = ttm->pages[i]; if (unlikely(from_page == NULL)) continue; - to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask); + to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags); if (IS_ERR(to_page)) { ret = PTR_ERR(to_page); goto out_err; @@ -263,7 +277,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm) ttm->swap_storage = swap_storage; ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; - return 0; + return ttm->num_pages; out_err: fput(swap_storage); @@ -271,7 +285,7 @@ out_err: return ret; } -static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm) { pgoff_t i; @@ -280,9 +294,11 @@ static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; ++i) ttm->pages[i]->mapping = bdev->dev_mapping; + + atomic_long_add(ttm->num_pages, &swapable_pages); } -int ttm_tt_populate(struct ttm_bo_device *bdev, +int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { int ret; @@ -293,8 +309,8 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, if (ttm_tt_is_populated(ttm)) return 0; - if (bdev->driver->ttm_tt_populate) - ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx); + if 
(bdev->funcs->ttm_tt_populate) + ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx); else ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); if (ret) @@ -326,18 +342,91 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) (*page)->mapping = NULL; (*page++)->index = 0; } + + atomic_long_sub(ttm->num_pages, &swapable_pages); } -void ttm_tt_unpopulate(struct ttm_bo_device *bdev, +void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { if (!ttm_tt_is_populated(ttm)) return; ttm_tt_clear_mapping(ttm); - if (bdev->driver->ttm_tt_unpopulate) - bdev->driver->ttm_tt_unpopulate(bdev, ttm); + if (bdev->funcs->ttm_tt_unpopulate) + bdev->funcs->ttm_tt_unpopulate(bdev, ttm); else ttm_pool_free(&bdev->pool, ttm); ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; } + +/* As long as pages are available make sure to release at least one */ +static unsigned long ttm_tt_shrinker_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + struct ttm_operation_ctx ctx = { + .no_wait_gpu = false + }; + int ret; + + ret = ttm_bo_swapout(&ctx, GFP_NOFS); + return ret < 0 ? SHRINK_EMPTY : ret; +} + +/* Return the number of pages available or SHRINK_EMPTY if we have none */ +static unsigned long ttm_tt_shrinker_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + unsigned long num_pages; + + num_pages = atomic_long_read(&swapable_pages); + return num_pages ? num_pages : SHRINK_EMPTY; +} + +#ifdef CONFIG_DEBUG_FS + +/* Test the shrinker functions and dump the result */ +static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data) +{ + struct shrink_control sc = { .gfp_mask = GFP_KERNEL }; + + fs_reclaim_acquire(GFP_KERNEL); + seq_printf(m, "%lu/%lu\n", ttm_tt_shrinker_count(&mm_shrinker, &sc), + ttm_tt_shrinker_scan(&mm_shrinker, &sc)); + fs_reclaim_release(GFP_KERNEL); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink); + +#endif + + + +/** + * ttm_tt_mgr_init - register with the MM shrinker + * + * Register with the MM shrinker for swapping out BOs. + */ +int ttm_tt_mgr_init(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL, + &ttm_tt_debugfs_shrink_fops); +#endif + + mm_shrinker.count_objects = ttm_tt_shrinker_count; + mm_shrinker.scan_objects = ttm_tt_shrinker_scan; + mm_shrinker.seeks = 1; + return register_shrinker(&mm_shrinker); +} + +/** + * ttm_tt_mgr_fini - unregister our MM shrinker + * + * Unregisters the MM shrinker. 
+ */ +void ttm_tt_mgr_fini(void) +{ + unregister_shrinker(&mm_shrinker); +} diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index cb0e837d3dba..50e1fb71869f 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -17,8 +17,8 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_panel.h> #include <drm/drm_vblank.h> @@ -316,7 +316,7 @@ static const struct drm_simple_display_pipe_funcs tve200_display_funcs = { .enable = tve200_display_enable, .disable = tve200_display_disable, .update = tve200_display_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, .enable_vblank = tve200_display_enable_vblank, .disable_vblank = tve200_display_disable_vblank, }; diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 9269092697d8..5703277c6f52 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -32,6 +32,22 @@ static int udl_usb_resume(struct usb_interface *interface) return drm_mode_config_helper_resume(dev); } +/* + * FIXME: Dma-buf sharing requires DMA support by the importing device. + * This function is a workaround to make USB devices work as well. + * See todo.rst for how to fix the issue in the dma-buf framework. + */ +static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct udl_device *udl = to_udl(dev); + + if (!udl->dmadev) + return ERR_PTR(-ENODEV); + + return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev); +} + DEFINE_DRM_GEM_FOPS(udl_driver_fops); static const struct drm_driver driver = { @@ -40,6 +56,7 @@ static const struct drm_driver driver = { /* GEM hooks */ .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, + .gem_prime_import = udl_driver_gem_prime_import, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 875e73551ae9..cc16a13316e4 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -50,6 +50,7 @@ struct urb_list { struct udl_device { struct drm_device drm; struct device *dev; + struct device *dmadev; struct drm_simple_display_pipe display_pipe; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 0e2a376cb075..853f147036f6 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -315,6 +315,10 @@ int udl_init(struct udl_device *udl) DRM_DEBUG("\n"); + udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (!udl->dmadev) + drm_warn(dev, "buffer sharing not supported"); /* not an error */ + mutex_init(&udl->gem_lock); if (!udl_parse_vendor_descriptor(udl)) { @@ -343,12 +347,18 @@ int udl_init(struct udl_device *udl) err: if (udl->urbs.count) udl_free_urb_list(dev); + put_device(udl->dmadev); DRM_ERROR("%d\n", ret); return ret; } int udl_drop_usb(struct drm_device *dev) { + struct udl_device *udl = to_udl(dev); + udl_free_urb_list(dev); + put_device(udl->dmadev); + udl->dmadev = NULL; + return 0; } diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 9d34ec9d03f6..8d98bf69d075 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -15,6 +15,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> 
#include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_modeset_helper_vtables.h> @@ -266,18 +267,17 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y, return 0; } -static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, - int width, int height) +static int udl_handle_damage(struct drm_framebuffer *fb, const struct dma_buf_map *map, + int x, int y, int width, int height) { struct drm_device *dev = fb->dev; struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach; + void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */ int i, ret, tmp_ret; char *cmd; struct urb *urb; struct drm_rect clip; int log_bpp; - struct dma_buf_map map; - void *vaddr; ret = udl_log_cpp(fb->format->cpp[0]); if (ret < 0) @@ -297,17 +297,10 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, return ret; } - ret = drm_gem_shmem_vmap(fb->obj[0], &map); - if (ret) { - DRM_ERROR("failed to vmap fb\n"); - goto out_dma_buf_end_cpu_access; - } - vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */ - urb = udl_get_urb(dev); if (!urb) { ret = -ENOMEM; - goto out_drm_gem_shmem_vunmap; + goto out_dma_buf_end_cpu_access; } cmd = urb->transfer_buffer; @@ -320,7 +313,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, &cmd, byte_offset, dev_byte_offset, byte_width); if (ret) - goto out_drm_gem_shmem_vunmap; + goto out_dma_buf_end_cpu_access; } if (cmd > (char *)urb->transfer_buffer) { @@ -336,8 +329,6 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y, ret = 0; -out_drm_gem_shmem_vunmap: - drm_gem_shmem_vunmap(fb->obj[0], &map); out_dma_buf_end_cpu_access: if (import_attach) { tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf, @@ -375,6 +366,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_framebuffer *fb = plane_state->fb; struct udl_device *udl = to_udl(dev); struct drm_display_mode *mode = &crtc_state->mode; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); char *buf; char *wrptr; int color_depth = UDL_COLOR_DEPTH_16BPP; @@ -400,7 +392,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, udl->mode_buf_len = wrptr - buf; - udl_handle_damage(fb, 0, 0, fb->width, fb->height); + udl_handle_damage(fb, &shadow_plane_state->map[0], 0, 0, fb->width, fb->height); if (!crtc_state->mode_changed) return; @@ -435,6 +427,7 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_plane_state) { struct drm_plane_state *state = pipe->plane.state; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); struct drm_framebuffer *fb = state->fb; struct drm_rect rect; @@ -442,17 +435,16 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe, return; if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect)) - udl_handle_damage(fb, rect.x1, rect.y1, rect.x2 - rect.x1, - rect.y2 - rect.y1); + udl_handle_damage(fb, &shadow_plane_state->map[0], rect.x1, rect.y1, + rect.x2 - rect.x1, rect.y2 - rect.y1); } -static const -struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = { +static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = { .mode_valid = udl_simple_display_pipe_mode_valid, .enable = udl_simple_display_pipe_enable, .disable = 
udl_simple_display_pipe_disable, .update = udl_simple_display_pipe_update, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, }; /* diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 452682e2209f..8992480c88fa 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -259,7 +259,7 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job) return NULL; } -static void +static enum drm_gpu_sched_stat v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) { enum v3d_queue q; @@ -285,6 +285,8 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) } mutex_unlock(&v3d->reset_lock); + + return DRM_GPU_SCHED_STAT_NOMINAL; } /* If the current address or return address have changed, then the GPU @@ -292,7 +294,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) * could fail if the GPU got in an infinite loop in the CL, but that * is pretty unlikely outside of an i-g-t testcase. */ -static void +static enum drm_gpu_sched_stat v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 *timedout_ctca, u32 *timedout_ctra) { @@ -304,39 +306,39 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, if (*timedout_ctca != ctca || *timedout_ctra != ctra) { *timedout_ctca = ctca; *timedout_ctra = ctra; - return; + return DRM_GPU_SCHED_STAT_NOMINAL; } - v3d_gpu_reset_for_timeout(v3d, sched_job); + return v3d_gpu_reset_for_timeout(v3d, sched_job); } -static void +static enum drm_gpu_sched_stat v3d_bin_job_timedout(struct drm_sched_job *sched_job) { struct v3d_bin_job *job = to_bin_job(sched_job); - v3d_cl_job_timedout(sched_job, V3D_BIN, - &job->timedout_ctca, &job->timedout_ctra); + return v3d_cl_job_timedout(sched_job, V3D_BIN, + &job->timedout_ctca, &job->timedout_ctra); } -static void +static enum drm_gpu_sched_stat v3d_render_job_timedout(struct drm_sched_job *sched_job) { struct v3d_render_job *job = to_render_job(sched_job); - v3d_cl_job_timedout(sched_job, V3D_RENDER, - &job->timedout_ctca, &job->timedout_ctra); + return v3d_cl_job_timedout(sched_job, V3D_RENDER, + &job->timedout_ctca, &job->timedout_ctra); } -static void +static enum drm_gpu_sched_stat v3d_generic_job_timedout(struct drm_sched_job *sched_job) { struct v3d_job *job = to_v3d_job(sched_job); - v3d_gpu_reset_for_timeout(job->v3d, sched_job); + return v3d_gpu_reset_for_timeout(job->v3d, sched_job); } -static void +static enum drm_gpu_sched_stat v3d_csd_job_timedout(struct drm_sched_job *sched_job) { struct v3d_csd_job *job = to_csd_job(sched_job); @@ -348,10 +350,10 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) */ if (job->timedout_batches != batches) { job->timedout_batches = batches; - return; + return DRM_GPU_SCHED_STAT_NOMINAL; } - v3d_gpu_reset_for_timeout(v3d, sched_job); + return v3d_gpu_reset_for_timeout(v3d, sched_job); } static const struct drm_sched_backend_ops v3d_bin_sched_ops = { @@ -401,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_bin_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), - "v3d_bin"); + NULL, "v3d_bin"); if (ret) { dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret); return ret; @@ -411,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_render_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), - "v3d_render"); + NULL, "v3d_render"); if (ret) { dev_err(v3d->drm.dev, "Failed to create render 
scheduler: %d.", ret); @@ -423,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_tfu_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), - "v3d_tfu"); + NULL, "v3d_tfu"); if (ret) { dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.", ret); @@ -436,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_csd_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), - "v3d_csd"); + NULL, "v3d_csd"); if (ret) { dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.", ret); @@ -448,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_cache_clean_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), - "v3d_cache_clean"); + NULL, "v3d_cache_clean"); if (ret) { dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.", ret); diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index dbc0dd53c69e..964381d55fc1 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -17,6 +17,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -252,13 +253,15 @@ static const struct drm_crtc_funcs vbox_crtc_funcs = { }; static int vbox_primary_atomic_check(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state = NULL; if (new_state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state( - new_state->state, new_state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; } @@ -270,22 +273,24 @@ static int vbox_primary_atomic_check(struct drm_plane *plane, } static void vbox_primary_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_crtc *crtc = plane->state->crtc; - struct drm_framebuffer *fb = plane->state->fb; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc *crtc = new_state->crtc; + struct drm_framebuffer *fb = new_state->fb; struct vbox_private *vbox = to_vbox_dev(fb->dev); struct drm_mode_rect *clips; uint32_t num_clips, i; vbox_crtc_set_base_and_mode(crtc, fb, - plane->state->src_x >> 16, - plane->state->src_y >> 16); + new_state->src_x >> 16, + new_state->src_y >> 16); /* Send information about dirty rectangles to VBVA. 
*/ - clips = drm_plane_get_damage_clips(plane->state); - num_clips = drm_plane_get_damage_clips_count(plane->state); + clips = drm_plane_get_damage_clips(new_state); + num_clips = drm_plane_get_damage_clips_count(new_state); if (!num_clips) return; @@ -314,8 +319,10 @@ static void vbox_primary_atomic_update(struct drm_plane *plane, } static void vbox_primary_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct drm_crtc *crtc = old_state->crtc; /* vbox_do_modeset checks plane->state->fb and will disable if NULL */ @@ -325,16 +332,18 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane, } static int vbox_cursor_atomic_check(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state = NULL; u32 width = new_state->crtc_w; u32 height = new_state->crtc_h; int ret; if (new_state->crtc) { - crtc_state = drm_atomic_get_existing_crtc_state( - new_state->state, new_state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, + new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; } @@ -375,20 +384,24 @@ static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height, } static void vbox_cursor_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct vbox_private *vbox = container_of(plane->dev, struct vbox_private, ddev); - struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc); - struct drm_framebuffer *fb = plane->state->fb; - struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]); - u32 width = plane->state->crtc_w; - u32 height = plane->state->crtc_h; + struct vbox_crtc *vbox_crtc = to_vbox_crtc(new_state->crtc); + struct drm_framebuffer *fb = new_state->fb; + u32 width = new_state->crtc_w; + u32 height = new_state->crtc_h; + struct drm_shadow_plane_state *shadow_plane_state = + to_drm_shadow_plane_state(new_state); + struct dma_buf_map map = shadow_plane_state->map[0]; + u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */ size_t data_size, mask_size; u32 flags; - struct dma_buf_map map; - int ret; - u8 *src; /* * VirtualBox uses the host windowing system to draw the cursor so @@ -401,17 +414,6 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, vbox_crtc->cursor_enabled = true; - ret = drm_gem_vram_vmap(gbo, &map); - if (ret) { - /* - * BUG: we should have pinned the BO in prepare_fb(). 
- */ - mutex_unlock(&vbox->hw_mutex); - DRM_WARN("Could not map cursor bo, skipping update\n"); - return; - } - src = map.vaddr; /* TODO: Use mapping abstraction properly */ - /* * The mask must be calculated based on the alpha * channel, one bit per ARGB word, and must be 32-bit @@ -421,7 +423,6 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, data_size = width * height * 4 + mask_size; copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); - drm_gem_vram_vunmap(gbo, &map); flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; @@ -434,8 +435,10 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, } static void vbox_cursor_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct vbox_private *vbox = container_of(plane->dev, struct vbox_private, ddev); struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc); @@ -466,17 +469,14 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = { .atomic_check = vbox_cursor_atomic_check, .atomic_update = vbox_cursor_atomic_update, .atomic_disable = vbox_cursor_atomic_disable, - .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, - .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, }; static const struct drm_plane_funcs vbox_cursor_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_primary_helper_destroy, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + DRM_GEM_SHADOW_PLANE_FUNCS, }; static const u32 vbox_primary_plane_formats[] = { diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index b641252939d8..445d3bab89e0 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -1026,7 +1026,6 @@ int vc4_queue_seqno_cb(struct drm_device *dev, void (*func)(struct vc4_seqno_cb *cb)) { struct vc4_dev *vc4 = to_vc4_dev(dev); - int ret = 0; unsigned long irqflags; cb->func = func; @@ -1041,7 +1040,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev, } spin_unlock_irqrestore(&vc4->job_lock, irqflags); - return ret; + return 0; } /* Scheduled when any job has been completed, this walks the list of diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index bd7514612e95..1fda574579af 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -132,24 +132,57 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi) HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL); } +#ifdef CONFIG_DRM_VC4_HDMI_CEC +static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) +{ + u16 clk_cnt; + u32 value; + + value = HDMI_READ(HDMI_CEC_CNTRL_1); + value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; + + /* + * Set the clock divider: the hsm_clock rate and this divider + * setting will give a 40 kHz CEC clock. 
+ */ + clk_cnt = clk_get_rate(vc4_hdmi->cec_clock) / CEC_CLOCK_FREQ; + value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT; + HDMI_WRITE(HDMI_CEC_CNTRL_1, value); +} +#else +static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {} +#endif + static enum drm_connector_status vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) { struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); + bool connected = false; if (vc4_hdmi->hpd_gpio) { if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^ vc4_hdmi->hpd_active_low) - return connector_status_connected; - cec_phys_addr_invalidate(vc4_hdmi->cec_adap); - return connector_status_disconnected; + connected = true; + } else if (drm_probe_ddc(vc4_hdmi->ddc)) { + connected = true; + } else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) { + connected = true; } - if (drm_probe_ddc(vc4_hdmi->ddc)) - return connector_status_connected; + if (connected) { + if (connector->status != connector_status_connected) { + struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc); + + if (edid) { + cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); + vc4_hdmi->encoder.hdmi_monitor = drm_detect_hdmi_monitor(edid); + kfree(edid); + } + } - if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) return connector_status_connected; + } + cec_phys_addr_invalidate(vc4_hdmi->cec_adap); return connector_status_disconnected; } @@ -758,6 +791,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, return; } + vc4_hdmi_cec_update_clk_div(vc4_hdmi); + /* * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup * at 300MHz. @@ -779,9 +814,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, return; } - if (vc4_hdmi->variant->reset) - vc4_hdmi->variant->reset(vc4_hdmi); - if (vc4_hdmi->variant->phy_init) vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state); @@ -1423,15 +1455,22 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) } #ifdef CONFIG_DRM_VC4_HDMI_CEC -static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv) +static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv) +{ + struct vc4_hdmi *vc4_hdmi = priv; + + if (vc4_hdmi->cec_rx_msg.len) + cec_received_msg(vc4_hdmi->cec_adap, + &vc4_hdmi->cec_rx_msg); + + return IRQ_HANDLED; +} + +static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; - if (vc4_hdmi->cec_irq_was_rx) { - if (vc4_hdmi->cec_rx_msg.len) - cec_received_msg(vc4_hdmi->cec_adap, - &vc4_hdmi->cec_rx_msg); - } else if (vc4_hdmi->cec_tx_ok) { + if (vc4_hdmi->cec_tx_ok) { cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); } else { @@ -1445,15 +1484,35 @@ static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv) return IRQ_HANDLED; } +static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv) +{ + struct vc4_hdmi *vc4_hdmi = priv; + irqreturn_t ret; + + if (vc4_hdmi->cec_irq_was_rx) + ret = vc4_cec_irq_handler_rx_thread(irq, priv); + else + ret = vc4_cec_irq_handler_tx_thread(irq, priv); + + return ret; +} + static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) { + struct drm_device *dev = vc4_hdmi->connector.dev; struct cec_msg *msg = &vc4_hdmi->cec_rx_msg; unsigned int i; msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >> VC4_HDMI_CEC_REC_WRD_CNT_SHIFT); + + if (msg->len > 16) { + drm_err(dev, "Attempting to read too much data (%d)\n", msg->len); + return; + } + for (i = 0; i < msg->len; i += 4) { - u32 val = 
HDMI_READ(HDMI_CEC_RX_DATA_1 + i); + u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + (i >> 2)); msg->msg[i] = val & 0xff; msg->msg[i + 1] = (val >> 8) & 0xff; @@ -1462,31 +1521,55 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) } } +static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv) +{ + struct vc4_hdmi *vc4_hdmi = priv; + u32 cntrl1; + + cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); + vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; + cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; + HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv) +{ + struct vc4_hdmi *vc4_hdmi = priv; + u32 cntrl1; + + vc4_hdmi->cec_rx_msg.len = 0; + cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); + vc4_cec_read_msg(vc4_hdmi, cntrl1); + cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; + HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); + cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; + + HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); + + return IRQ_WAKE_THREAD; +} + static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS); - u32 cntrl1, cntrl5; + irqreturn_t ret; + u32 cntrl5; if (!(stat & VC4_HDMI_CPU_CEC)) return IRQ_NONE; - vc4_hdmi->cec_rx_msg.len = 0; - cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); + cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5); vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT; - if (vc4_hdmi->cec_irq_was_rx) { - vc4_cec_read_msg(vc4_hdmi, cntrl1); - cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; - HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); - cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; - } else { - vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; - cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; - } - HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); - HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC); + if (vc4_hdmi->cec_irq_was_rx) + ret = vc4_cec_irq_handler_rx_bare(irq, priv); + else + ret = vc4_cec_irq_handler_tx_bare(irq, priv); - return IRQ_WAKE_THREAD; + HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC); + return ret; } static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) @@ -1523,9 +1606,11 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); - HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); + if (!vc4_hdmi->variant->external_irq_controller) + HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); } else { - HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); + if (!vc4_hdmi->variant->external_irq_controller) + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); HDMI_WRITE(HDMI_CEC_CNTRL_5, val | VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); } @@ -1546,11 +1631,17 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg) { struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + struct drm_device *dev = vc4_hdmi->connector.dev; u32 val; unsigned int i; + if (msg->len > 16) { + drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len); + return -ENOMEM; + } + for (i = 0; i < msg->len; i += 4) - HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i, + HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2), (msg->msg[i]) | (msg->msg[i + 1] << 8) | (msg->msg[i + 2] << 16) | @@ -1577,11 +1668,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) { struct cec_connector_info conn_info; struct platform_device *pdev = vc4_hdmi->pdev; + struct 
device *dev = &pdev->dev; u32 value; int ret; - if (!vc4_hdmi->variant->cec_available) + if (!of_find_property(dev->of_node, "interrupts", NULL)) { + dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); return 0; + } vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, vc4_hdmi, "vc4", @@ -1594,23 +1688,39 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); - HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff); value = HDMI_READ(HDMI_CEC_CNTRL_1); - value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; - /* - * Set the logical address to Unregistered and set the clock - * divider: the hsm_clock rate and this divider setting will - * give a 40 kHz CEC clock. - */ - value |= VC4_HDMI_CEC_ADDR_MASK | - (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT); + /* Set the logical address to Unregistered */ + value |= VC4_HDMI_CEC_ADDR_MASK; HDMI_WRITE(HDMI_CEC_CNTRL_1, value); - ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0), - vc4_cec_irq_handler, - vc4_cec_irq_handler_thread, 0, - "vc4 hdmi cec", vc4_hdmi); - if (ret) - goto err_delete_cec_adap; + + vc4_hdmi_cec_update_clk_div(vc4_hdmi); + + if (vc4_hdmi->variant->external_irq_controller) { + ret = devm_request_threaded_irq(&pdev->dev, + platform_get_irq_byname(pdev, "cec-rx"), + vc4_cec_irq_handler_rx_bare, + vc4_cec_irq_handler_rx_thread, 0, + "vc4 hdmi cec rx", vc4_hdmi); + if (ret) + goto err_delete_cec_adap; + + ret = devm_request_threaded_irq(&pdev->dev, + platform_get_irq_byname(pdev, "cec-tx"), + vc4_cec_irq_handler_tx_bare, + vc4_cec_irq_handler_tx_thread, 0, + "vc4 hdmi cec tx", vc4_hdmi); + if (ret) + goto err_delete_cec_adap; + } else { + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff); + + ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0), + vc4_cec_irq_handler, + vc4_cec_irq_handler_thread, 0, + "vc4 hdmi cec", vc4_hdmi); + if (ret) + goto err_delete_cec_adap; + } ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); if (ret < 0) @@ -1710,6 +1820,7 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) return PTR_ERR(vc4_hdmi->hsm_clock); } vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock; + vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock; return 0; } @@ -1803,6 +1914,12 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) return PTR_ERR(vc4_hdmi->audio_clock); } + vc4_hdmi->cec_clock = devm_clk_get(dev, "cec"); + if (IS_ERR(vc4_hdmi->cec_clock)) { + DRM_ERROR("Failed to get CEC clock\n"); + return PTR_ERR(vc4_hdmi->cec_clock); + } + vc4_hdmi->reset = devm_reset_control_get(dev, NULL); if (IS_ERR(vc4_hdmi->reset)) { DRM_ERROR("Failed to get HDMI reset line\n"); @@ -1875,6 +1992,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi->disable_wifi_frequencies = of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence"); + if (vc4_hdmi->variant->reset) + vc4_hdmi->variant->reset(vc4_hdmi); + pm_runtime_enable(dev); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); @@ -1970,7 +2090,6 @@ static const struct vc4_hdmi_variant bcm2835_variant = { .debugfs_name = "hdmi_regs", .card_name = "vc4-hdmi", .max_pixel_clock = 162000000, - .cec_available = true, .registers = vc4_hdmi_fields, .num_registers = ARRAY_SIZE(vc4_hdmi_fields), @@ -1999,6 +2118,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { PHY_LANE_CK, }, .unsupported_odd_h_timings = true, + .external_irq_controller = true, .init_resources = 
vc5_hdmi_init_resources, .csc_setup = vc5_hdmi_csc_setup, @@ -2025,6 +2145,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { PHY_LANE_2, }, .unsupported_odd_h_timings = true, + .external_irq_controller = true, .init_resources = vc5_hdmi_init_resources, .csc_setup = vc5_hdmi_csc_setup, diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 4c8994cfd932..3cebd1fd00fc 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -42,9 +42,6 @@ struct vc4_hdmi_variant { /* Filename to expose the registers in debugfs */ const char *debugfs_name; - /* Set to true when the CEC support is available */ - bool cec_available; - /* Maximum pixel clock supported by the controller (in Hz) */ unsigned long long max_pixel_clock; @@ -64,6 +61,13 @@ struct vc4_hdmi_variant { /* The BCM2711 cannot deal with odd horizontal pixel timings */ bool unsupported_odd_h_timings; + /* + * The BCM2711 CEC/hotplug IRQ controller is shared between the + * two HDMI controllers, and we have a proper irqchip driver for + * it. + */ + bool external_irq_controller; + /* Callback to get the resources (memory region, interrupts, * clocks, etc) for that variant. */ @@ -155,6 +159,7 @@ struct vc4_hdmi { bool cec_tx_ok; bool cec_irq_was_rx; + struct clk *cec_clock; struct clk *pixel_clock; struct clk *hsm_clock; struct clk *audio_clock; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h index 401863cb8c98..e1b58eac766f 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -29,6 +29,7 @@ enum vc4_hdmi_field { HDMI_CEC_CPU_MASK_SET, HDMI_CEC_CPU_MASK_STATUS, HDMI_CEC_CPU_STATUS, + HDMI_CEC_CPU_SET, /* * Transmit data, first byte is low byte of the 32-bit reg. @@ -199,9 +200,10 @@ static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = { VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0), VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4), VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340), + VC4_HDMI_REG(HDMI_CEC_CPU_SET, 0x0344), VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348), VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c), - VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x034c), + VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350), VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354), VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400), }; diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2b3a597fa65f..c239045e05d6 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -622,11 +622,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * for now we just allocate globally. */ if (!hvs->hvs5) - /* 96kB */ - drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + /* 48k words of 2x12-bit pixels */ + drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else - /* 70k words */ - drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024); + /* 60k words of 4x12-bit pixels */ + drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024); /* Upload filter kernels. We only have the one for now, so we * keep it around for the lifetime of the driver. 
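
The vc4_hdmi hunks above drop the hard-coded CEC divider (4091) and instead derive it from the CEC clock at runtime in vc4_hdmi_cec_update_clk_div(), so the same helper works whether the CEC tick is fed from the hsm clock (BCM2835) or the dedicated "cec" clock (BCM2711). A minimal user-space sketch of that arithmetic is shown below; it is illustrative only, the clock rate is a hypothetical example, and CEC_CLOCK_FREQ = 40000 Hz is assumed from the "40 kHz CEC clock" comment in the patch.

```c
/*
 * Illustrative sketch, not kernel code: how the value written into the
 * VC4_HDMI_CEC_DIV_CLK_CNT field is derived in the new helper.
 * The clock rate below is an example; CEC_CLOCK_FREQ is assumed to be
 * the 40 kHz target named in the patch comment.
 */
#include <stdio.h>

#define CEC_CLOCK_FREQ 40000UL /* assumed 40 kHz CEC tick */

int main(void)
{
	unsigned long cec_clock_rate = 163682864; /* example hsm/cec clock rate in Hz */
	unsigned int clk_cnt = cec_clock_rate / CEC_CLOCK_FREQ;

	printf("DIV_CLK_CNT = %u (resulting tick: %lu Hz)\n",
	       clk_cnt, cec_clock_rate / clk_cnt);
	return 0;
}
```

With a clock in the ~163 MHz range this computation lands close to the constant the old code wired in, which is consistent with the divider being a runtime replacement for that value rather than a behavioural change.
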
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index f09254c2497d..bb5529a7a9c2 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -363,9 +363,8 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(old_crtc_state); - struct drm_crtc_commit *commit; unsigned int channel = vc4_crtc_state->assigned_channel; - unsigned long done; + int ret; if (channel == VC4_HVS_CHANNEL_DISABLED) continue; @@ -373,17 +372,9 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) if (!old_hvs_state->fifo_state[channel].in_use) continue; - commit = old_hvs_state->fifo_state[i].pending_commit; - if (!commit) - continue; - - done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ); - if (!done) - drm_err(dev, "Timed out waiting for hw_done\n"); - - done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ); - if (!done) - drm_err(dev, "Timed out waiting for flip_done\n"); + ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit); + if (ret) + drm_err(dev, "Timed out waiting for commit\n"); } drm_atomic_helper_commit_modeset_disables(dev, state); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 6bd8260aa9f2..c76e73a452e0 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -20,7 +20,7 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> -#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> #include "uapi/drm/vc4_drm.h" @@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_reset(plane, &vc4_state->base); } -static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) +static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state) { if (vc4_state->dlist_count == vc4_state->dlist_size) { u32 new_size = max(4u, vc4_state->dlist_count * 2); @@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) vc4_state->dlist_size = new_size; } - vc4_state->dlist[vc4_state->dlist_count++] = val; + vc4_state->dlist_count++; +} + +static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) +{ + unsigned int idx = vc4_state->dlist_count; + + vc4_dlist_counter_increment(vc4_state); + vc4_state->dlist[idx] = val; } /* Returns the scl0/scl1 field based on whether the dimensions need to @@ -437,6 +445,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) static u32 vc4_lbm_size(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); u32 pix_per_line; u32 lbm; @@ -472,7 +481,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) lbm = pix_per_line * 16; } - lbm = roundup(lbm, 32); + /* Align it to 64 or 128 (hvs5) bytes */ + lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64); + + /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */ + lbm /= vc4->hvs->hvs5 ? 
4 : 2; return lbm; } @@ -912,9 +925,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane, if (!vc4_state->is_unity) { vc4_dlist_write(vc4_state, VC4_SET_FIELD(vc4_state->crtc_w, - SCALER_POS1_SCL_WIDTH) | + SCALER5_POS1_SCL_WIDTH) | VC4_SET_FIELD(vc4_state->crtc_h, - SCALER_POS1_SCL_HEIGHT)); + SCALER5_POS1_SCL_HEIGHT)); } /* Position Word 2: Source Image Size */ @@ -973,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * be set when calling vc4_plane_allocate_lbm(). */ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || - vc4_state->y_scaling[1] != VC4_SCALING_NONE) - vc4_state->lbm_offset = vc4_state->dlist_count++; + vc4_state->y_scaling[1] != VC4_SCALING_NONE) { + vc4_state->lbm_offset = vc4_state->dlist_count; + vc4_dlist_counter_increment(vc4_state); + } if (num_planes > 1) { /* Emit Cb/Cr as channel 0 and Y as channel @@ -1040,25 +1055,27 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * in the CRTC's flush. */ static int vc4_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { - struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state); int ret; vc4_state->dlist_count = 0; - if (!plane_enabled(state)) + if (!plane_enabled(new_plane_state)) return 0; - ret = vc4_plane_mode_set(plane, state); + ret = vc4_plane_mode_set(plane, new_plane_state); if (ret) return ret; - return vc4_plane_allocate_lbm(state); + return vc4_plane_allocate_lbm(new_plane_state); } static void vc4_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { /* No contents here. 
Since we don't know where in the CRTC's * dlist we should be stored, our dlist is uploaded to the @@ -1118,32 +1135,34 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) } static void vc4_plane_atomic_async_update(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct vc4_plane_state *vc4_state, *new_vc4_state; - swap(plane->state->fb, state->fb); - plane->state->crtc_x = state->crtc_x; - plane->state->crtc_y = state->crtc_y; - plane->state->crtc_w = state->crtc_w; - plane->state->crtc_h = state->crtc_h; - plane->state->src_x = state->src_x; - plane->state->src_y = state->src_y; - plane->state->src_w = state->src_w; - plane->state->src_h = state->src_h; - plane->state->src_h = state->src_h; - plane->state->alpha = state->alpha; - plane->state->pixel_blend_mode = state->pixel_blend_mode; - plane->state->rotation = state->rotation; - plane->state->zpos = state->zpos; - plane->state->normalized_zpos = state->normalized_zpos; - plane->state->color_encoding = state->color_encoding; - plane->state->color_range = state->color_range; - plane->state->src = state->src; - plane->state->dst = state->dst; - plane->state->visible = state->visible; - - new_vc4_state = to_vc4_plane_state(state); + swap(plane->state->fb, new_plane_state->fb); + plane->state->crtc_x = new_plane_state->crtc_x; + plane->state->crtc_y = new_plane_state->crtc_y; + plane->state->crtc_w = new_plane_state->crtc_w; + plane->state->crtc_h = new_plane_state->crtc_h; + plane->state->src_x = new_plane_state->src_x; + plane->state->src_y = new_plane_state->src_y; + plane->state->src_w = new_plane_state->src_w; + plane->state->src_h = new_plane_state->src_h; + plane->state->src_h = new_plane_state->src_h; + plane->state->alpha = new_plane_state->alpha; + plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode; + plane->state->rotation = new_plane_state->rotation; + plane->state->zpos = new_plane_state->zpos; + plane->state->normalized_zpos = new_plane_state->normalized_zpos; + plane->state->color_encoding = new_plane_state->color_encoding; + plane->state->color_range = new_plane_state->color_range; + plane->state->src = new_plane_state->src; + plane->state->dst = new_plane_state->dst; + plane->state->visible = new_plane_state->visible; + + new_vc4_state = to_vc4_plane_state(new_plane_state); vc4_state = to_vc4_plane_state(plane->state); vc4_state->crtc_x = new_vc4_state->crtc_x; @@ -1187,23 +1206,25 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, } static int vc4_plane_atomic_async_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct vc4_plane_state *old_vc4_state, *new_vc4_state; int ret; u32 i; - ret = vc4_plane_mode_set(plane, state); + ret = vc4_plane_mode_set(plane, new_plane_state); if (ret) return ret; old_vc4_state = to_vc4_plane_state(plane->state); - new_vc4_state = to_vc4_plane_state(state); + new_vc4_state = to_vc4_plane_state(new_plane_state); if (old_vc4_state->dlist_count != new_vc4_state->dlist_count || old_vc4_state->pos0_offset != new_vc4_state->pos0_offset || old_vc4_state->pos2_offset != new_vc4_state->pos2_offset || old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset || - vc4_lbm_size(plane->state) != vc4_lbm_size(state)) + vc4_lbm_size(plane->state) != 
vc4_lbm_size(new_plane_state)) return -EINVAL; /* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update @@ -1235,7 +1256,7 @@ static int vc4_prepare_fb(struct drm_plane *plane, bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); - drm_gem_fb_prepare_fb(plane, state); + drm_gem_plane_helper_prepare_fb(plane, state); if (plane->state->fb == state->fb) return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index b4ec479c32cd..b375394193be 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -163,6 +163,7 @@ int virtio_gpu_init(struct drm_device *dev) vgdev->host_visible_region.len, dev_name(&vgdev->vdev->dev))) { DRM_ERROR("Could not reserve host visible region\n"); + ret = -EBUSY; goto err_vqs; } diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 42ac08ed1442..4e1b17548007 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -83,20 +83,23 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = { }; static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; struct drm_crtc_state *crtc_state; int ret; - if (!state->fb || WARN_ON(!state->crtc)) + if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, is_cursor, true); @@ -127,8 +130,10 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev, } static void virtio_gpu_primary_plane_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_output *output = NULL; @@ -239,8 +244,10 @@ static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane, } static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_output *output = NULL; diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 0443b7deeaef..6164349cdf11 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ +#include <linux/dma-fence.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> @@ -14,7 +16,9 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) struct drm_crtc *crtc = &output->crtc; struct vkms_crtc_state *state; u64 ret_overrun; - bool ret; + bool ret, fence_cookie; + + fence_cookie = dma_fence_begin_signalling(); ret_overrun = 
hrtimer_forward_now(&output->vblank_hrtimer, output->period_ns); @@ -49,6 +53,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) DRM_DEBUG_DRIVER("Composer worker already queued\n"); } + dma_fence_end_signalling(fence_cookie); + return HRTIMER_RESTART; } diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 0824327cc860..6d310d31b75d 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -5,6 +5,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_gem_shmem_helper.h> @@ -92,20 +93,22 @@ static const struct drm_plane_funcs vkms_plane_funcs = { }; static void vkms_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct vkms_plane_state *vkms_plane_state; - struct drm_framebuffer *fb = plane->state->fb; + struct drm_framebuffer *fb = new_state->fb; struct vkms_composer *composer; - if (!plane->state->crtc || !fb) + if (!new_state->crtc || !fb) return; - vkms_plane_state = to_vkms_plane_state(plane->state); + vkms_plane_state = to_vkms_plane_state(new_state); composer = vkms_plane_state->composer; - memcpy(&composer->src, &plane->state->src, sizeof(struct drm_rect)); - memcpy(&composer->dst, &plane->state->dst, sizeof(struct drm_rect)); + memcpy(&composer->src, &new_state->src, sizeof(struct drm_rect)); + memcpy(&composer->dst, &new_state->dst, sizeof(struct drm_rect)); memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer)); drm_framebuffer_get(&composer->fb); composer->offset = fb->offsets[0]; @@ -114,23 +117,26 @@ static void vkms_plane_atomic_update(struct drm_plane *plane, } static int vkms_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; bool can_position = false; int ret; - if (!state->fb || WARN_ON(!state->crtc)) + if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); if (plane->type == DRM_PLANE_TYPE_CURSOR) can_position = true; - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, can_position, true); @@ -138,7 +144,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane, return ret; /* for now primary plane must be visible and full screen */ - if (!state->visible && !can_position) + if (!new_plane_state->visible && !can_position) return -EINVAL; return 0; @@ -159,7 +165,7 @@ static int vkms_prepare_fb(struct drm_plane *plane, if (ret) DRM_ERROR("vmap failed: %d\n", ret); - return drm_gem_fb_prepare_fb(plane, state); + return drm_gem_plane_helper_prepare_fb(plane, state); } static void vkms_cleanup_fb(struct drm_plane *plane, diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index 78fdc1d59186..0935686475a0 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ 
b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -42,11 +42,8 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder, } if (fb->format->format != vkms_wb_formats[0]) { - struct drm_format_name_buf format_name; - - DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(fb->format->format, - &format_name)); + DRM_DEBUG_KMS("Invalid pixel format %p4cc\n", + &fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index cc4cdca7176e..8c02fa5852e7 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ - ttm_object.o ttm_lock.o + ttm_object.o ttm_lock.o ttm_memory.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c index a3bfbd9cea68..e972af07d029 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c @@ -28,7 +28,6 @@ #define pr_fmt(fmt) "[TTM] " fmt -#include <drm/ttm/ttm_memory.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/wait.h> @@ -36,9 +35,11 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/swap.h> -#include <drm/ttm/ttm_pool.h> -#include "ttm_module.h" +#include <drm/drm_device.h> +#include <drm/drm_file.h> + +#include "ttm_memory.h" #define TTM_MEMORY_ALLOC_RETRIES 4 @@ -276,9 +277,9 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, while (ttm_zones_above_swap_target(glob, from_wq, extra)) { spin_unlock(&glob->lock); - ret = ttm_bo_swapout(ctx); + ret = ttm_bo_swapout(ctx, GFP_KERNEL); spin_lock(&glob->lock); - if (unlikely(ret != 0)) + if (unlikely(ret < 0)) break; } @@ -413,7 +414,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, } #endif -int ttm_mem_global_init(struct ttm_mem_global *glob) +int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev) { struct sysinfo si; int ret; @@ -423,8 +424,9 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) spin_lock_init(&glob->lock); glob->swap_queue = create_singlethread_workqueue("ttm_swap"); INIT_WORK(&glob->work, ttm_shrink_work); - ret = kobject_init_and_add( - &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); + + ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type, + &dev->kobj, "memory_accounting"); if (unlikely(ret != 0)) { kobject_put(&glob->kobj); return ret; @@ -452,7 +454,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) pr_info("Zone %7s: Available graphics memory: %llu KiB\n", zone->name, (unsigned long long)zone->max_mem >> 10); } - ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE)); return 0; out_no_zone: ttm_mem_global_release(glob); @@ -464,9 +465,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) struct ttm_mem_zone *zone; unsigned int i; - /* let the page allocator first stop the shrink work. 
*/ - ttm_pool_mgr_fini(); - flush_workqueue(glob->swap_queue); destroy_workqueue(glob->swap_queue); glob->swap_queue = NULL; diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h new file mode 100644 index 000000000000..c50dba774485 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/ttm_memory.h @@ -0,0 +1,96 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef TTM_MEMORY_H +#define TTM_MEMORY_H + +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/bug.h> +#include <linux/wait.h> +#include <linux/errno.h> +#include <linux/kobject.h> +#include <linux/mm.h> + +#include <drm/ttm/ttm_bo_api.h> + +/** + * struct ttm_mem_global - Global memory accounting structure. + * + * @shrink: A single callback to shrink TTM memory usage. Extend this + * to a linked list to be able to handle multiple callbacks when needed. + * @swap_queue: A workqueue to handle shrinking in low memory situations. We + * need a separate workqueue since it will spend a lot of time waiting + * for the GPU, and this will otherwise block other workqueue tasks(?) + * At this point we use only a single-threaded workqueue. + * @work: The workqueue callback for the shrink queue. + * @lock: Lock to protect the @shrink - and the memory accounting members, + * that is, essentially the whole structure with some exceptions. + * @lower_mem_limit: include lower limit of swap space and lower limit of + * system memory. + * @zones: Array of pointers to accounting zones. + * @num_zones: Number of populated entries in the @zones array. + * @zone_kernel: Pointer to the kernel zone. + * @zone_highmem: Pointer to the highmem zone if there is one. + * @zone_dma32: Pointer to the dma32 zone if there is one. + * + * Note that this structure is not per device. It should be global for all + * graphics devices. 
+ */ + +#define TTM_MEM_MAX_ZONES 2 +struct ttm_mem_zone; +extern struct ttm_mem_global { + struct kobject kobj; + struct workqueue_struct *swap_queue; + struct work_struct work; + spinlock_t lock; + uint64_t lower_mem_limit; + struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; + unsigned int num_zones; + struct ttm_mem_zone *zone_kernel; +#ifdef CONFIG_HIGHMEM + struct ttm_mem_zone *zone_highmem; +#else + struct ttm_mem_zone *zone_dma32; +#endif +} ttm_mem_glob; + +int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev); +void ttm_mem_global_release(struct ttm_mem_global *glob); +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); +int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size); +size_t ttm_round_pot(size_t size); +bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, + struct ttm_operation_ctx *ctx); +#endif diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 0fe869d0fad1..112394dd0ab6 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -42,6 +42,14 @@ */ +#define pr_fmt(fmt) "[TTM] " fmt + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include "ttm_object.h" + /** * struct ttm_object_file * @@ -55,16 +63,9 @@ * * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, * for fast lookup of ref objects given a base object. + * + * @refcount: reference/usage count */ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/atomic.h> -#include "ttm_object.h" - struct ttm_object_file { struct ttm_object_device *tdev; spinlock_t lock; @@ -73,7 +74,7 @@ struct ttm_object_file { struct kref refcount; }; -/** +/* * struct ttm_object_device * * @object_lock: lock that protects the object_hash hash table. @@ -96,7 +97,7 @@ struct ttm_object_device { struct idr idr; }; -/** +/* * struct ttm_ref_object * * @hash: Hash entry for the per-file object reference hash. @@ -568,7 +569,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) /** * get_dma_buf_unless_doomed - get a dma_buf reference if possible. * - * @dma_buf: Non-refcounted pointer to a struct dma-buf. + * @dmabuf: Non-refcounted pointer to a struct dma-buf. * * Obtain a file reference from a lookup structure that doesn't refcount * the file, but synchronizes with its release method to make sure it has diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h index ede26df87c93..49b064f0cb19 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.h +++ b/drivers/gpu/drm/vmwgfx/ttm_object.h @@ -43,7 +43,8 @@ #include <linux/rcupdate.h> #include <drm/drm_hashtab.h> -#include <drm/ttm/ttm_memory.h> + +#include "ttm_memory.h" /** * enum ttm_ref_type diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index 180f6dbc9460..81f525a82b77 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -330,6 +330,8 @@ static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi) * * @cbs: Pointer to the context binding state tracker. * @bi: Information about the binding to track. 
+ * @shader_slot: The shader slot of the binding. + * @slot: The slot of the binding. * * Starts tracking the binding in the context binding * state structure @cbs. @@ -367,6 +369,7 @@ void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot, * vmw_binding_transfer: Transfer a context binding tracking entry. * * @cbs: Pointer to the persistent context binding state tracker. + * @from: Staged binding info built during execbuf * @bi: Information about the binding to track. * */ @@ -484,9 +487,8 @@ void vmw_binding_res_list_scrub(struct list_head *head) /** * vmw_binding_state_commit - Commit staged binding info * - * @ctx: Pointer to context to commit the staged binding info to. + * @to: Staged binding info area to copy into to. * @from: Staged binding info built during execbuf. - * @scrubbed: Transfer only scrubbed bindings. * * Transfers binding info from a temporary structure * (typically used by execbuf) to the persistent @@ -511,7 +513,7 @@ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to, /** * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context * - * @ctx: The context resource + * @cbs: Pointer to the context binding state tracker. * * Walks through the context binding list and rebinds all scrubbed * resources. @@ -789,6 +791,7 @@ static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs, * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state + * @shader_slot: The shader slot of the binding. */ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, int shader_slot) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 9f2779ddcf08..3a438ae4d3f4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -431,6 +431,7 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, * @src_stride: Source stride in bytes. * @w: Width of blit. * @h: Height of blit. + * @diff: The struct vmw_diff_cpy used to track the modified bounding box. * return: Zero on success. Negative error value on failure. Will print out * kernel warnings on caller bugs. * @@ -465,13 +466,13 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, dma_resv_assert_held(src->base.resv); if (!ttm_tt_is_populated(dst->ttm)) { - ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); + ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); if (ret) return ret; } if (!ttm_tt_is_populated(src->ttm)) { - ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx); + ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx); if (ret) return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 63dbc44eebe0..50e529a01677 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -131,7 +131,6 @@ err: * * @dev_priv: Driver private. * @buf: DMA buffer to move. - * @pin: Pin buffer if true. * @interruptible: Use interruptible wait. * Return: Zero on success, Negative error code on failure. 
In particular * -ERESTARTSYS if interrupted by a signal @@ -508,11 +507,16 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, acc_size = ttm_round_pot(sizeof(*bo)); acc_size += ttm_round_pot(npages * sizeof(void *)); acc_size += ttm_round_pot(sizeof(struct ttm_tt)); + + ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); + if (unlikely(ret)) + goto error_free; + ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, ttm_bo_type_device, placement, 0, - &ctx, acc_size, NULL, NULL, NULL); + &ctx, NULL, NULL, NULL); if (unlikely(ret)) - goto error_free; + goto error_account; ttm_bo_pin(bo); ttm_bo_unreserve(bo); @@ -520,6 +524,9 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, return 0; +error_account: + ttm_mem_global_free(&ttm_mem_glob, acc_size); + error_free: kfree(bo); return ret; @@ -546,7 +553,7 @@ int vmw_bo_init(struct vmw_private *dev_priv, void (*bo_free)(struct ttm_buffer_object *bo)) { struct ttm_operation_ctx ctx = { interruptible, false }; - struct ttm_bo_device *bdev = &dev_priv->bdev; + struct ttm_device *bdev = &dev_priv->bdev; size_t acc_size; int ret; bool user = (bo_free == &vmw_user_bo_destroy); @@ -559,11 +566,17 @@ int vmw_bo_init(struct vmw_private *dev_priv, vmw_bo->base.priority = 3; vmw_bo->res_tree = RB_ROOT; + ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); + if (unlikely(ret)) + return ret; + ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size, ttm_bo_type_device, placement, - 0, &ctx, acc_size, NULL, NULL, bo_free); - if (unlikely(ret)) + 0, &ctx, NULL, NULL, bo_free); + if (unlikely(ret)) { + ttm_mem_global_free(&ttm_mem_glob, acc_size); return ret; + } if (pin) ttm_bo_pin(&vmw_bo->base); @@ -635,6 +648,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base, * @handle: Pointer to where the handle value should be assigned. * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer * should be assigned. + * @p_base: The TTM base object pointer about to be allocated. * Return: Zero on success, negative error code on error. */ int vmw_user_bo_alloc(struct vmw_private *dev_priv, @@ -1058,7 +1072,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile, void vmw_bo_fence_single(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index 7400d617ae3c..20246a7c97c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -276,7 +276,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, return ret; } -/** +/* * Reserve @bytes number of bytes in the fifo. * * This function will return NULL (error) on two conditions: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 45fbc41440f1..2e23e537cdf5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -48,6 +48,7 @@ * @hw_submitted: List of command buffers submitted to hardware. * @preempted: List of preempted command buffers. * @num_hw_submitted: Number of buffers currently being processed by hardware + * @block_submission: Identifies a block command submission. 
*/ struct vmw_cmdbuf_context { struct list_head submitted; @@ -58,7 +59,7 @@ struct vmw_cmdbuf_context { }; /** - * struct vmw_cmdbuf_man: - Command buffer manager + * struct vmw_cmdbuf_man - Command buffer manager * * @cur_mutex: Mutex protecting the command buffer used for incremental small * kernel command submissions, @cur. @@ -88,7 +89,7 @@ struct vmw_cmdbuf_context { * @max_hw_submitted: Max number of in-flight command buffers the device can * handle. Immutable. * @lock: Spinlock protecting command submission queues. - * @header: Pool of DMA memory for device command buffer headers. + * @headers: Pool of DMA memory for device command buffer headers. * Internal protection. * @dheaders: Pool of DMA memory for device command buffer headers with trailing * space for inline data. Internal protection. @@ -143,7 +144,7 @@ struct vmw_cmdbuf_man { * @cb_context: The device command buffer context. * @list: List head for attaching to the manager lists. * @node: The range manager node. - * @handle. The DMA address of @cb_header. Handed to the device on command + * @handle: The DMA address of @cb_header. Handed to the device on command * buffer submission. * @cmd: Pointer to the command buffer space of this buffer. * @size: Size of the command buffer space of this buffer. @@ -249,7 +250,7 @@ static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header) * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its * associated structures. * - * header: Pointer to the header to free. + * @header: Pointer to the header to free. * * For internal use. Must be called with man::lock held. */ @@ -365,10 +366,11 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, } /** - * vmw_cmdbuf_ctx_submit: Process a command buffer context. + * vmw_cmdbuf_ctx_process - Process a command buffer context. * * @man: The command buffer manager. * @ctx: The command buffer context. + * @notempty: Pass back count of non-empty command submitted lists. * * Submit command buffers to hardware if possible, and process finished * buffers. Typically freeing them, but on preemption or error take @@ -1161,6 +1163,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, * context. * * @man: The command buffer manager. + * @context: Device context to pass command through. * * Synchronously sends a preempt command. */ @@ -1184,6 +1187,7 @@ static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context) * context. * * @man: The command buffer manager. + * @context: Device context to start/stop. * @enable: Whether to enable or disable the context. * * Synchronously sends a device start / stop context command. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 44d858ce4ce7..b262d61d839d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -69,7 +69,7 @@ struct vmw_cmdbuf_res_manager { * vmw_cmdbuf_res_lookup - Look up a command buffer resource * * @man: Pointer to the command buffer resource manager - * @resource_type: The resource type, that combined with the user key + * @res_type: The resource type, that combined with the user key * identifies the resource. * @user_key: The user key. 
* @@ -148,7 +148,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list) /** * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions * - * @man: Pointer to the command buffer resource manager * @list: Caller's list of command buffer resource action * * This function reverts a list of command buffer resource @@ -160,7 +159,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list) void vmw_cmdbuf_res_revert(struct list_head *list) { struct vmw_cmdbuf_res *entry, *next; - int ret; list_for_each_entry_safe(entry, next, list, head) { switch (entry->state) { @@ -168,8 +166,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list) vmw_cmdbuf_res_free(entry->man, entry); break; case VMW_CMDBUF_RES_DEL: - ret = drm_ht_insert_item(&entry->man->resources, - &entry->hash); + drm_ht_insert_item(&entry->man->resources, &entry->hash); list_del(&entry->head); list_add_tail(&entry->head, &entry->man->list); entry->state = VMW_CMDBUF_RES_COMMITTED; @@ -327,7 +324,6 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) } /** - * * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed * resource manager * diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 6f4d0da11ad8..4a5a3e246216 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -112,7 +112,7 @@ static const struct vmw_res_func vmw_dx_context_func = { .unbind = vmw_dx_context_unbind }; -/** +/* * Context management: */ @@ -672,7 +672,7 @@ static int vmw_dx_context_destroy(struct vmw_resource *res) return 0; } -/** +/* * User-space context management: */ @@ -698,7 +698,7 @@ static void vmw_user_context_free(struct vmw_resource *res) vmw_user_context_size); } -/** +/* * This function is called when user space has no more references on the * base object. It releases the base-object's reference on the resource object. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index ba658fa9cf6c..42321b9c8129 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -63,6 +63,7 @@ struct vmw_cotable { * @min_initial_entries: Min number of initial intries at cotable allocation * for this cotable type. * @size: Size of each entry. + * @unbind_func: Unbind call-back function. */ struct vmw_cotable_info { u32 min_initial_entries; @@ -297,7 +298,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback) * * @res: Pointer to the cotable resource. * @readback: Whether to read back cotable data to the backup buffer. - * val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller + * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller * for convenience / fencing. * * Unbinds the cotable from the device and fences the backup buffer. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 408847a68cf3..6910111099c8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -47,12 +47,6 @@ #define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_HEIGHT 600 -#ifndef VMWGFX_GIT_VERSION -#define VMWGFX_GIT_VERSION "Unknown" -#endif - -#define VMWGFX_REPO "In Tree" - #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) @@ -153,7 +147,7 @@ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ struct drm_vmw_msg_arg) -/** +/* * The core DRM version of this macro doesn't account for * DRM_COMMAND_BASE. 
*/ @@ -161,7 +155,7 @@ #define VMW_IOCTL_DEF(ioctl, func, flags) \ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} -/** +/* * Ioctl definitions. */ @@ -526,7 +520,7 @@ static void vmw_release_device_late(struct vmw_private *dev_priv) vmw_fifo_release(dev_priv, &dev_priv->fifo); } -/** +/* * Sets the initial_[width|height] fields on the given vmw_private. * * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then @@ -599,7 +593,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) /** * vmw_dma_masks - set required page- and dma masks * - * @dev: Pointer to struct drm-device + * @dev_priv: Pointer to struct drm-device * * With 32-bit we can only handle 32 bit PFNs. Optionally set that * restriction also for 64-bit systems. @@ -668,9 +662,10 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, fifo_size, MEMREMAP_WB); - if (unlikely(dev->fifo_mem == NULL)) { + if (IS_ERR(dev->fifo_mem)) { DRM_ERROR("Failed mapping FIFO memory.\n"); - return -ENOMEM; + pci_release_regions(pdev); + return PTR_ERR(dev->fifo_mem); } /* @@ -716,7 +711,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) return ret; ret = vmw_detect_version(dev_priv); if (ret) - return ret; + goto out_no_pci_or_version; mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); @@ -884,12 +879,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) drm_vma_offset_manager_init(&dev_priv->vma_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE); - ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver, - dev_priv->drm.dev, - dev_priv->drm.anon_inode->i_mapping, - &dev_priv->vma_manager, - dev_priv->map_mode == vmw_dma_alloc_coherent, - false); + ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, + dev_priv->drm.dev, + dev_priv->drm.anon_inode->i_mapping, + &dev_priv->vma_manager, + dev_priv->map_mode == vmw_dma_alloc_coherent, + false); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); goto out_no_bdev; @@ -966,8 +961,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) if (ret) goto out_no_fifo; - DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC) - ? "yes." 
: "no."); if (dev_priv->sm_type == VMW_SM_5) DRM_INFO("SM5 support available.\n"); if (dev_priv->sm_type == VMW_SM_4_1) @@ -975,11 +968,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) if (dev_priv->sm_type == VMW_SM_4) DRM_INFO("SM4 support available.\n"); - snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s", - VMWGFX_REPO, VMWGFX_GIT_VERSION); - vmw_host_log(host_log); - - memset(host_log, 0, sizeof(host_log)); snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d", VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, VMWGFX_DRIVER_PATCHLEVEL); @@ -1006,14 +994,13 @@ out_no_kms: vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_vram_manager_fini(dev_priv); out_no_vram: - (void)ttm_bo_device_release(&dev_priv->bdev); + ttm_device_fini(&dev_priv->bdev); out_no_bdev: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) vmw_irq_uninstall(&dev_priv->drm); out_no_irq: - pci_release_regions(pdev); ttm_object_device_release(&dev_priv->tdev); out_err0: for (i = vmw_res_context; i < vmw_res_max; ++i) @@ -1021,7 +1008,8 @@ out_err0: if (dev_priv->ctx.staged_bindings) vmw_binding_state_free(dev_priv->ctx.staged_bindings); - kfree(dev_priv); +out_no_pci_or_version: + pci_release_regions(pdev); return ret; } @@ -1053,13 +1041,12 @@ static void vmw_driver_unload(struct drm_device *dev) if (dev_priv->has_mob) vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); vmw_vram_manager_fini(dev_priv); - (void) ttm_bo_device_release(&dev_priv->bdev); + ttm_device_fini(&dev_priv->bdev); drm_vma_offset_manager_destroy(&dev_priv->vma_manager); vmw_release_device_late(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) vmw_irq_uninstall(&dev_priv->drm); - pci_release_regions(pdev); ttm_object_device_release(&dev_priv->tdev); if (dev_priv->ctx.staged_bindings) @@ -1068,7 +1055,7 @@ static void vmw_driver_unload(struct drm_device *dev) for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); - kfree(dev_priv); + pci_release_regions(pdev); } static void vmw_postclose(struct drm_device *dev, @@ -1268,6 +1255,7 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + ttm_mem_global_release(&ttm_mem_glob); drm_dev_unregister(dev); vmw_driver_unload(dev); } @@ -1383,7 +1371,7 @@ static int vmw_pm_freeze(struct device *kdev) vmw_execbuf_release_pinned_bo(dev_priv); vmw_resource_evict_all(dev_priv); vmw_release_device_early(dev_priv); - while (ttm_bo_swapout(&ctx) == 0); + while (ttm_bo_swapout(&ctx, GFP_KERNEL) > 0); if (dev_priv->enable_fb) vmw_fifo_resource_dec(dev_priv); if (atomic_read(&dev_priv->num_fifo_resources) != 0) { @@ -1516,9 +1504,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (IS_ERR(vmw)) return PTR_ERR(vmw); - vmw->drm.pdev = pdev; pci_set_drvdata(pdev, &vmw->drm); + ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); + if (ret) + return ret; + ret = vmw_driver_load(vmw, ent->device); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5fa5bcd20cc5..bb2ce6327944 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -55,10 +55,10 @@ #define VMWGFX_DRIVER_NAME "vmwgfx" -#define VMWGFX_DRIVER_DATE "20200114" +#define VMWGFX_DRIVER_DATE "20210218" #define VMWGFX_DRIVER_MAJOR 2 #define VMWGFX_DRIVER_MINOR 18 -#define VMWGFX_DRIVER_PATCHLEVEL 0 +#define VMWGFX_DRIVER_PATCHLEVEL 1 #define 
VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 #define VMWGFX_MAX_VALIDATIONS 2048 @@ -484,7 +484,7 @@ enum vmw_sm_type { struct vmw_private { struct drm_device drm; - struct ttm_bo_device bdev; + struct ttm_device bdev; struct vmw_fifo_state fifo; @@ -999,7 +999,7 @@ extern struct ttm_placement vmw_evictable_placement; extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_mob_placement; extern struct ttm_placement vmw_nonfixed_placement; -extern struct ttm_bo_driver vmw_bo_driver; +extern struct ttm_device_funcs vmw_bo_driver; extern const struct vmw_sg_table * vmw_bo_sg_table(struct ttm_buffer_object *bo); extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv, @@ -1525,6 +1525,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf) *buf = NULL; if (tmp_buf != NULL) { + if (tmp_buf->base.pin_count > 0) + ttm_bo_unpin(&tmp_buf->base); ttm_bo_put(&tmp_buf->base); } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 462f17320708..7a24196f92c3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -80,7 +80,8 @@ struct vmw_relocation { * with a NOP. * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after * validation is -1, the command is replaced with a NOP. Otherwise no action. - */ + * @vmw_res_rel_max: Last value in the enum - used for error checking +*/ enum vmw_resource_relocation_type { vmw_res_rel_normal, vmw_res_rel_nop, @@ -122,9 +123,11 @@ struct vmw_ctx_validation_info { /** * struct vmw_cmd_entry - Describe a command for the verifier * + * @func: Call-back to handle the command. * @user_allow: Whether allowed from the execbuf ioctl. * @gb_disable: Whether disabled if guest-backed objects are available. * @gb_enable: Whether enabled iff guest-backed objects are available. + * @cmd_name: Name of the command. */ struct vmw_cmd_entry { int (*func) (struct vmw_private *, struct vmw_sw_context *, @@ -203,6 +206,7 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) * * @dev_priv: Pointer to the device private: * @sw_context: The command submission context + * @res: Pointer to the resource * @node: The validation node holding the context resource metadata */ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, @@ -509,7 +513,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, /** * vmw_resource_relocation_add - Add a relocation to the relocation list * - * @list: Pointer to head of relocation list. + * @sw_context: Pointer to the software context. * @res: The resource. * @offset: Offset into the command buffer currently being parsed where the id * that needs fixup is located. Granularity is one byte. @@ -639,7 +643,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) * @converter: User-space visisble type specific information. * @id_loc: Pointer to the location in the command buffer currently being parsed * from where the user-space resource id handle is located. - * @p_val: Pointer to pointer to resource validalidation node. Populated on + * @p_res: Pointer to pointer to resource validalidation node. Populated on * exit. */ static int @@ -1700,7 +1704,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. - * @val_node: The validation node representing the resource. 
+ * @res: Pointer to the resource. * @buf_id: Pointer to the user-space backup buffer handle in the command * stream. * @backup_offset: Offset of backup into MOB. @@ -3739,7 +3743,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, return 0; } -/** +/* * vmw_execbuf_fence_commands - create and submit a command stream fence * * Creates a fence object and submits a command stream marker. @@ -3939,9 +3943,9 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, * On successful return, the function returns a pointer to the data in the * command buffer and *@header is set to non-NULL. * - * If command buffers could not be used, the function will return the value of - * @kernel_commands on function call. That value may be NULL. In that case, the - * value of *@header will be set to NULL. + * @kernel_commands: If command buffers could not be used, the function will + * return the value of @kernel_commands on function call. That value may be + * NULL. In that case, the value of *@header will be set to NULL. * * If an error is encountered, the function will return a pointer error value. * If the function is interrupted by a signal while sleeping, it will return diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 378ec7600154..23523eb3cac2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -58,13 +58,11 @@ struct vmw_user_fence { /** * struct vmw_event_fence_action - fence action that delivers a drm event. * - * @e: A struct drm_pending_event that controls the event delivery. * @action: A struct vmw_fence_action to hook up to a fence. + * @event: A pointer to the pending event. * @fence: A referenced pointer to the fence to keep it alive while @action * hangs on it. * @dev: Pointer to a struct drm_device so we can access the event stuff. - * @kref: Both @e and @action has destructors, so we need to refcount. - * @size: Size accounted for this object. * @tv_sec: If non-null, the variable pointed to will be assigned * current time tv_sec val when the fence signals. * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will @@ -87,7 +85,7 @@ fman_from_fence(struct vmw_fence_obj *fence) return container_of(fence->base.lock, struct vmw_fence_manager, lock); } -/** +/* * Note on fencing subsystem usage of irqs: * Typically the vmw_fences_update function is called * @@ -250,7 +248,7 @@ static const struct dma_fence_ops vmw_fence_ops = { }; -/** +/* * Execute signal actions on fences recently signaled. * This is done from a workqueue so we don't have to execute * signal actions from atomic context. @@ -708,7 +706,7 @@ int vmw_wait_dma_fence(struct vmw_fence_manager *fman, } -/** +/* * vmw_fence_fifo_down - signal all unsignaled fence objects. */ @@ -948,8 +946,8 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) /** * vmw_fence_obj_add_action - Add an action to a fence object. * - * @fence - The fence object. - * @action - The action to add. + * @fence: The fence object. + * @action: The action to add. * * Note that the action callbacks may be executed before this function * returns. @@ -1001,6 +999,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, * @fence: The fence object on which to post the event. * @event: Event to be posted. This event should've been alloced * using k[mz]alloc, and should've been completely initialized. 
+ * @tv_sec: If non-null, the variable pointed to will be assigned + * current time tv_sec val when the fence signals. + * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will + * be assigned the current time tv_usec val when the fence signals. * @interruptible: Interruptible waits if possible. * * As a side effect, the object pointed to by @event may have been diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 80af8772b8c2..b36032964b2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -437,7 +437,7 @@ __poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait) * @filp: See the linux fops read documentation. * @buffer: See the linux fops read documentation. * @count: See the linux fops read documentation. - * offset: See the linux fops read documentation. + * @offset: See the linux fops read documentation. * * Wrapper around the drm_read function that makes sure the device is * processing the fifo if drm_read decides to wait. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 9a89f658e501..abbca8b0b3c5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -370,12 +370,16 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); s32 hotspot_x, hotspot_y; int ret = 0; @@ -383,9 +387,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; - if (plane->state->fb) { - hotspot_x += plane->state->fb->hot_x; - hotspot_y += plane->state->fb->hot_y; + if (new_state->fb) { + hotspot_x += new_state->fb->hot_x; + hotspot_y += new_state->fb->hot_y; } du->cursor_surface = vps->surf; @@ -400,8 +404,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_y); } else if (vps->bo) { ret = vmw_cursor_update_bo(dev_priv, vps->bo, - plane->state->crtc_w, - plane->state->crtc_h, + new_state->crtc_w, + new_state->crtc_h, hotspot_x, hotspot_y); } else { vmw_cursor_update_position(dev_priv, false, 0, 0); @@ -409,8 +413,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, } if (!ret) { - du->cursor_x = plane->state->crtc_x + du->set_gui_x; - du->cursor_y = plane->state->crtc_y + du->set_gui_y; + du->cursor_x = new_state->crtc_x + du->set_gui_x; + du->cursor_y = new_state->crtc_y + du->set_gui_y; vmw_cursor_update_position(dev_priv, true, du->cursor_x + hotspot_x, @@ -437,26 +441,28 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, * Returns 0 on success */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state = NULL; - struct drm_framebuffer *new_fb = 
state->fb; + struct drm_framebuffer *new_fb = new_state->fb; int ret; - if (state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); + if (new_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + new_state->crtc); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, false, true); if (!ret && new_fb) { - struct drm_crtc *crtc = state->crtc; - struct vmw_connector_state *vcs; + struct drm_crtc *crtc = new_state->crtc; struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - vcs = vmw_connector_state_to_vcs(du->connector.state); + vmw_connector_state_to_vcs(du->connector.state); } @@ -468,7 +474,7 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, * vmw_du_cursor_plane_atomic_check - check if the new state is okay * * @plane: cursor plane - * @state: info on the new plane state + * @new_state: info on the new plane state * * This is a chance to fail if the new cursor state does not fit * our requirements. @@ -476,8 +482,10 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, * Returns 0 on success */ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); int ret = 0; struct drm_crtc_state *crtc_state = NULL; struct vmw_surface *surface = NULL; @@ -891,7 +899,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs; enum SVGA3dSurfaceFormat format; int ret; - struct drm_format_name_buf format_name; /* 3D is only supported on HWv8 and newer hosts */ if (dev_priv->active_display_unit == vmw_du_legacy) @@ -929,8 +936,8 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, format = SVGA3D_A1R5G5B5; break; default: - DRM_ERROR("Invalid pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); + DRM_ERROR("Invalid pixel format: %p4cc\n", + &mode_cmd->pixel_format); return -EINVAL; } @@ -1058,7 +1065,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { .dirty = vmw_framebuffer_bo_dirty_ext, }; -/** +/* * Pin the bofer in a location suitable for access by the * display system. 
*/ @@ -1145,7 +1152,6 @@ static int vmw_create_bo_proxy(struct drm_device *dev, uint32_t format; struct vmw_resource *res; unsigned int bytes_pp; - struct drm_format_name_buf format_name; int ret; switch (mode_cmd->pixel_format) { @@ -1167,8 +1173,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev, break; default: - DRM_ERROR("Invalid framebuffer format %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); + DRM_ERROR("Invalid framebuffer format %p4cc\n", + &mode_cmd->pixel_format); return -EINVAL; } @@ -1212,7 +1218,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_bo *vfbd; unsigned int requested_size; - struct drm_format_name_buf format_name; int ret; requested_size = mode_cmd->height * mode_cmd->pitches[0]; @@ -1232,8 +1237,8 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, case DRM_FORMAT_RGB565: break; default: - DRM_ERROR("Invalid pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); + DRM_ERROR("Invalid pixel format: %p4cc\n", + &mode_cmd->pixel_format); return -EINVAL; } } @@ -1268,6 +1273,7 @@ out_err1: /** * vmw_kms_srf_ok - check if a surface can be created * + * @dev_priv: Pointer to device private struct. * @width: requested width * @height: requested height * @@ -1779,10 +1785,6 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv) drm_property_create_range(&dev_priv->drm, DRM_MODE_PROP_IMMUTABLE, "hotplug_mode_update", 0, 1); - - if (!dev_priv->hotplug_mode_update_property) - return; - } int vmw_kms_init(struct vmw_private *dev_priv) @@ -1897,7 +1899,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, } -/** +/* * Function called by DRM code called with vbl_lock held. */ u32 vmw_get_vblank_counter(struct drm_crtc *crtc) @@ -1905,7 +1907,7 @@ u32 vmw_get_vblank_counter(struct drm_crtc *crtc) return 0; } -/** +/* * Function called by DRM code called with vbl_lock held. */ int vmw_enable_vblank(struct drm_crtc *crtc) @@ -1913,7 +1915,7 @@ int vmw_enable_vblank(struct drm_crtc *crtc) return -EINVAL; } -/** +/* * Function called by DRM code called with vbl_lock held. 
*/ void vmw_disable_vblank(struct drm_crtc *crtc) @@ -2057,6 +2059,10 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x720@60Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344, + 1472, 1664, 0, 720, 723, 728, 748, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@60Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 1472, 1664, 0, 768, 771, 778, 798, 0, @@ -2101,6 +2107,10 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1080@60Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048, + 2248, 2576, 0, 1080, 1083, 1088, 1120, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@60Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, @@ -2109,10 +2119,26 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 2560x1440@60Hz */ + { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608, + 2640, 2720, 0, 1440, 1443, 1448, 1481, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 2880x1800@60Hz */ + { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928, + 2960, 3040, 0, 1800, 1803, 1809, 1852, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 3840x2160@60Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888, + 3920, 4000, 0, 2160, 2163, 2168, 2222, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 3840x2400@60Hz */ + { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888, + 3920, 4000, 0, 2400, 2403, 2409, 2469, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* Terminate */ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, }; @@ -2121,7 +2147,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { * vmw_guess_mode_timing - Provide fake timings for a * 60Hz vrefresh mode. * - * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay + * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay * members filled in. 
*/ void vmw_guess_mode_timing(struct drm_display_mode *mode) @@ -2176,6 +2202,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, mode->hdisplay = du->pref_width; mode->vdisplay = du->pref_height; vmw_guess_mode_timing(mode); + drm_mode_set_name(mode); if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * assumed_bpp, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 03f3694015ce..bbc809f7bd8a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -245,7 +245,7 @@ struct vmw_framebuffer_bo { }; -static const uint32_t vmw_primary_plane_formats[] = { +static const uint32_t __maybe_unused vmw_primary_plane_formats[] = { DRM_FORMAT_XRGB1555, DRM_FORMAT_RGB565, DRM_FORMAT_RGB888, @@ -253,7 +253,7 @@ static const uint32_t vmw_primary_plane_formats[] = { DRM_FORMAT_ARGB8888, }; -static const uint32_t vmw_cursor_plane_formats[] = { +static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = { DRM_FORMAT_ARGB8888, }; @@ -456,11 +456,11 @@ void vmw_du_cursor_plane_destroy(struct drm_plane *plane); /* Atomic Helpers */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state); + struct drm_atomic_state *state); int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state); void vmw_du_plane_cleanup_fb(struct drm_plane *plane, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 9a9508edbc9e..87e0b303d900 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -49,7 +49,7 @@ struct vmw_legacy_display { struct vmw_framebuffer *fb; }; -/** +/* * Display unit using the legacy register interface. */ struct vmw_legacy_display_unit { @@ -206,6 +206,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) * vmw_ldu_crtc_atomic_enable - Noop * * @crtc: CRTC associated with the new screen + * @state: Unused * * This is called after a mode set has been completed. Here's * usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active @@ -221,6 +222,7 @@ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc, * vmw_ldu_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off + * @state: Unused */ static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) @@ -282,18 +284,22 @@ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = { static void vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct vmw_private *dev_priv; struct vmw_legacy_display_unit *ldu; struct vmw_framebuffer *vfb; struct drm_framebuffer *fb; - struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; + struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; ldu = vmw_crtc_to_ldu(crtc); dev_priv = vmw_priv(plane->dev); - fb = plane->state->fb; + fb = new_state->fb; vfb = (fb) ? 
vmw_framebuffer_to_vfb(fb) : NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index a372980fe6a5..a0b53141dded 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -277,6 +277,7 @@ out_no_setup: &batch->otables[i]); } + ttm_bo_unpin(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; return ret; @@ -342,6 +343,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, vmw_bo_fence_single(bo, NULL); ttm_bo_unreserve(bo); + ttm_bo_unpin(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; } @@ -528,6 +530,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, void vmw_mob_destroy(struct vmw_mob *mob) { if (mob->pt_bo) { + ttm_bo_unpin(mob->pt_bo); ttm_bo_put(mob->pt_bo); mob->pt_bo = NULL; } @@ -643,6 +646,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, out_no_cmd_space: vmw_fifo_resource_dec(dev_priv); if (pt_set_up) { + ttm_bo_unpin(mob->pt_bo); ttm_bo_put(mob->pt_bo); mob->pt_bo = NULL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 15b5bde69324..609269625468 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -253,7 +253,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, * vmw_send_msg: Sends a message to the host * * @channel: RPC channel - * @logmsg: NULL terminated string + * @msg: NULL terminated string * * Returns: 0 on success */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index d6d282c13b7f..ac4a9b722279 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -42,7 +42,7 @@ struct vmw_stream { struct drm_vmw_control_stream_arg saved; }; -/** +/* * Overlay control */ struct vmw_overlay { @@ -85,7 +85,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd, cmd->flush.streamId = stream_id; } -/** +/* * Send put command to hw. * * Returns @@ -174,7 +174,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, return 0; } -/** +/* * Send stop command to hw. * * Returns @@ -216,7 +216,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, return 0; } -/** +/* * Move a buffer to vram or gmr if @pin is set, else unpin the buffer. * * With the introduction of screen objects buffers could now be @@ -235,7 +235,7 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter); } -/** +/* * Stop or pause a stream. * * If the stream is paused the no evict flag is removed from the buffer @@ -285,7 +285,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, return 0; } -/** +/* * Update a stream and send any put or stop fifo commands needed. * * The caller must hold the overlay lock. @@ -353,7 +353,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, return 0; } -/** +/* * Try to resume all paused streams. * * Used by the kms code after moving a new scanout buffer to vram. @@ -387,7 +387,7 @@ int vmw_overlay_resume_all(struct vmw_private *dev_priv) return 0; } -/** +/* * Pauses all active streams. * * Used by the kms code when moving a new scanout buffer to vram. 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 0a900afc66ff..45c9c6a7f1d6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -500,8 +500,6 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, vm_fault_t ret; pgoff_t fault_page_size; bool write = vmf->flags & FAULT_FLAG_WRITE; - bool is_cow_mapping = - (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; switch (pe_size) { case PE_SIZE_PMD: @@ -518,7 +516,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, } /* Always do write dirty-tracking and COW on PTE level. */ - if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping)) + if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags))) return VM_FAULT_FALLBACK; ret = ttm_bo_vm_reserve(bo, vmf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index d1e7b9608145..c3a724e37104 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -202,7 +202,6 @@ int vmw_resource_alloc_id(struct vmw_resource *res) * * @dev_priv: Pointer to a device private struct. * @res: The struct vmw_resource to initialize. - * @obj_type: Resource object type. * @delay_id: Boolean whether to defer device id allocation until * the first validation. * @res_free: Resource destructor. @@ -288,8 +287,6 @@ out_bad_resource: * @tfile: Pointer to a struct ttm_object_file identifying the caller * @handle: The TTM user-space handle * @converter: Pointer to an object describing the resource type - * @p_res: On successful return the location pointed to will contain - * a pointer to a refcounted struct vmw_resource. * * If the handle can't be found or is associated with an incorrect resource * type, -EINVAL will be returned. @@ -315,7 +312,7 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, return converter->base_obj_to_res(base); } -/** +/* * Helper function that looks either a surface or bo. * * The pointer this pointed at by out_surf and out_buf needs to be null. @@ -388,6 +385,7 @@ out_no_bo: * @res: The resource to make visible to the device. * @val_buf: Information about a buffer possibly * containing backup data if a bind operation is needed. + * @dirtying: Transfer dirty regions. * * On hardware resource shortage, this function returns -EBUSY and * should be retried once resources have been freed up. @@ -586,7 +584,7 @@ out_no_reserve: return ret; } -/** +/* * vmw_resource_reserve - Reserve a resource for command submission * * @res: The resource to reserve. @@ -858,7 +856,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem) { struct vmw_buffer_object *dx_query_mob; - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct vmw_private *dev_priv; @@ -973,7 +971,7 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv) mutex_unlock(&dev_priv->cmdbuf_mutex); } -/** +/* * vmw_resource_pin - Add a pin reference on a resource * * @res: The resource to add a pin reference on diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index b0db059b8cfb..9bc9a0714664 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -84,7 +84,7 @@ struct vmw_kms_sou_define_gmrfb { SVGAFifoCmdDefineGMRFB body; }; -/** +/* * Display unit using screen objects. 
*/ struct vmw_screen_object_unit { @@ -112,7 +112,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) vmw_sou_destroy(vmw_crtc_to_sou(crtc)); } -/** +/* * Send the fifo command to create a screen. */ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, @@ -160,7 +160,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, return 0; } -/** +/* * Send the fifo command to destroy a screen. */ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, @@ -263,7 +263,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) /** * vmw_sou_crtc_helper_prepare - Noop * - * @crtc: CRTC associated with the new screen + * @crtc: CRTC associated with the new screen * * Prepares the CRTC for a mode set, but we don't need to do anything here. */ @@ -275,6 +275,7 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc) * vmw_sou_crtc_atomic_enable - Noop * * @crtc: CRTC associated with the new screen + * @state: Unused * * This is called after a mode set has been completed. */ @@ -287,6 +288,7 @@ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, * vmw_sou_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off + * @state: Unused */ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) @@ -728,18 +730,20 @@ static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv, static void vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_crtc *crtc = plane->state->crtc; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *crtc = new_state->crtc; struct drm_pending_vblank_event *event = NULL; struct vmw_fence_obj *fence = NULL; int ret; /* In case of device error, maintain consistent atomic state */ - if (crtc && plane->state->fb) { + if (crtc && new_state->fb) { struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct vmw_framebuffer *vfb = - vmw_framebuffer_to_vfb(plane->state->fb); + vmw_framebuffer_to_vfb(new_state->fb); if (vfb->bo) ret = vmw_sou_plane_update_bo(dev_priv, plane, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 905ae50aaa2a..a0db06564013 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -125,7 +125,7 @@ static const struct vmw_res_func vmw_dx_shader_func = { .commit_notify = vmw_dx_shader_commit_notify, }; -/** +/* * Shader management: */ @@ -654,7 +654,7 @@ out_resource_init: -/** +/* * User-space shader management: */ @@ -686,7 +686,7 @@ static void vmw_shader_free(struct vmw_resource *res) vmw_shader_size); } -/** +/* * This function is called when user space has no more references on the * base object. It releases the base-object's reference on the resource object. */ @@ -945,13 +945,13 @@ int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, * vmw_compat_shader_add - Create a compat shader and stage it for addition * as a command buffer managed resource. * + * @dev_priv: Pointer to device private structure. * @man: Pointer to the compat shader manager identifying the shader namespace. * @user_key: The key that is used to identify the shader. The key is * unique to the shader type. * @bytecode: Pointer to the bytecode of the shader. * @shader_type: Shader type. 
- * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is - * to be created with. + * @size: Command size. * @list: Caller's list of staged command buffer resource actions. * */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index 7369dd86d3a9..2877c7b43bd7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -42,6 +42,7 @@ /** * struct vmw_view - view metadata * + * @rcu: RCU callback head * @res: The struct vmw_resource we derive from * @ctx: Non-refcounted pointer to the context this view belongs to. * @srf: Refcounted pointer to the surface pointed to by this view. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index fbe977881364..7b11f0285786 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -61,6 +61,7 @@ enum stdu_content_type { * @bottom: Bottom side of bounding box. * @fb_left: Left side of the framebuffer/content bounding box * @fb_top: Top of the framebuffer/content bounding box + * @pitch: framebuffer pitch (stride) * @buf: buffer object when DMA-ing between buffer and screen targets. * @sid: Surface ID when copying between surface and screen targets. */ @@ -109,8 +110,11 @@ struct vmw_stdu_update_gb_image { * content_vfbs dimensions, then this is a pointer into the * corresponding field in content_vfbs. If not, then this * is a separate buffer to which content_vfbs will blit to. - * @content_type: content_fb type - * @defined: true if the current display unit has been initialized + * @content_fb_type: content_fb type + * @display_width: display width + * @display_height: display height + * @defined: true if the current display unit has been initialized + * @cpp: Bytes per pixel */ struct vmw_screen_target_display_unit { struct vmw_display_unit base; @@ -652,6 +656,7 @@ out_cleanup: * @file_priv: Pointer to a struct drm-file identifying the caller. May be * set to NULL, but then @user_fence_rep must also be set to NULL. * @vfb: Pointer to the buffer-object backed framebuffer. + * @user_fence_rep: User-space provided structure for fence information. * @clips: Array of clip rects. Either @clips or @vclips must be NULL. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. 
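The plane-helper hunks in this series (vmw_sou above, vmw_stdu below, and later zynqmp_disp and zx_plane) all apply the same conversion: the atomic hooks now take the full struct drm_atomic_state and look up their own old/new plane state instead of receiving a struct drm_plane_state. A condensed sketch of the pattern; the function name is illustrative, while the drm_atomic_get_*_plane_state() helpers are the real DRM API these hunks use:

#include <drm/drm_atomic.h>

static void example_plane_atomic_update(struct drm_plane *plane,
                                        struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(state, plane);
        struct drm_plane_state *new_state =
                drm_atomic_get_new_plane_state(state, plane);
        bool format_changed;

        /* plane->state is no longer dereferenced directly. */
        if (!new_state->crtc || !new_state->fb)
                return;

        /* old_state remains available for change detection. */
        format_changed = !old_state->fb ||
                old_state->fb->format->format != new_state->fb->format->format;

        if (format_changed) {
                /* e.g. disable the layer and reprogram the format */
        }

        /* ... program the hardware from new_state ... */
}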
@@ -1575,10 +1580,12 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv, */ static void vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state); - struct drm_crtc *crtc = plane->state->crtc; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + struct drm_crtc *crtc = new_state->crtc; struct vmw_screen_target_display_unit *stdu; struct drm_pending_vblank_event *event; struct vmw_fence_obj *fence = NULL; @@ -1586,9 +1593,9 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, int ret; /* If case of device error, maintain consistent atomic state */ - if (crtc && plane->state->fb) { + if (crtc && new_state->fb) { struct vmw_framebuffer *vfb = - vmw_framebuffer_to_vfb(plane->state->fb); + vmw_framebuffer_to_vfb(new_state->fb); stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index f6cab77075a0..c3e55c1376eb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -41,10 +41,12 @@ /** * struct vmw_user_surface - User-space visible surface resource * + * @prime: The TTM prime object. * @base: The TTM base object handling user-space visibility. * @srf: The surface metadata. * @size: TTM accounting size for the surface. - * @master: master of the creating client. Used for security check. + * @master: Master of the creating client. Used for security check. + * @backup_base: The TTM base object of the backup buffer. */ struct vmw_user_surface { struct ttm_prime_object prime; @@ -69,7 +71,7 @@ struct vmw_surface_offset { }; /** - * vmw_surface_dirty - Surface dirty-tracker + * struct vmw_surface_dirty - Surface dirty-tracker * @cache: Cached layout information of the surface. * @size: Accounting size for the struct vmw_surface_dirty. * @num_subres: Number of subresources. @@ -162,7 +164,7 @@ static const struct vmw_res_func vmw_gb_surface_func = { .clean = vmw_surface_clean, }; -/** +/* * struct vmw_surface_dma - SVGA3D DMA command */ struct vmw_surface_dma { @@ -172,7 +174,7 @@ struct vmw_surface_dma { SVGA3dCmdSurfaceDMASuffix suffix; }; -/** +/* * struct vmw_surface_define - SVGA3D Surface Define command */ struct vmw_surface_define { @@ -180,7 +182,7 @@ struct vmw_surface_define { SVGA3dCmdDefineSurface body; }; -/** +/* * struct vmw_surface_destroy - SVGA3D Surface Destroy command */ struct vmw_surface_destroy { @@ -544,6 +546,7 @@ static int vmw_legacy_srf_bind(struct vmw_resource *res, * * @res: Pointer to a struct vmw_res embedded in a struct * vmw_surface. + * @readback: Readback - only true if dirty * @val_buf: Pointer to a struct ttm_validate_buffer containing * information about the backup buffer. * @@ -1060,8 +1063,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, /** * vmw_surface_define_encode - Encode a surface_define command. * - * @srf: Pointer to a struct vmw_surface object. - * @cmd_space: Pointer to memory area in which the commands should be encoded. + * @res: Pointer to a struct vmw_resource embedded in a struct + * vmw_surface. 
*/ static int vmw_gb_surface_create(struct vmw_resource *res) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index e8e79de255cf..eb63cbe64909 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -11,6 +11,7 @@ /** * struct vmw_thp_manager - Range manager implementing huge page alignment * + * @manager: TTM resource manager. * @mm: The underlying range manager. Protected by @lock. * @lock: Manager lock. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index dbb068830d80..63f10c865061 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -265,6 +265,7 @@ static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter) * * @viter: Pointer to the iterator to initialize * @vsgt: Pointer to a struct vmw_sg_table to initialize from + * @p_offset: Pointer offset used to update current array position * * Note that we're following the convention of __sg_page_iter_start, so that * the iterator doesn't point to a valid page after initialization; it has @@ -482,7 +483,7 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) } -static int vmw_ttm_bind(struct ttm_bo_device *bdev, +static int vmw_ttm_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct vmw_ttm_tt *vmw_be = @@ -526,7 +527,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev, return ret; } -static void vmw_ttm_unbind(struct ttm_bo_device *bdev, +static void vmw_ttm_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = @@ -552,7 +553,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev, } -static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm); @@ -572,21 +573,42 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) } -static int vmw_ttm_populate(struct ttm_bo_device *bdev, +static int vmw_ttm_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { + unsigned int i; + int ret; + /* TODO: maybe completely drop this ? 
*/ if (ttm_tt_is_populated(ttm)) return 0; - return ttm_pool_alloc(&bdev->pool, ttm, ctx); + ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); + if (ret) + return ret; + + for (i = 0; i < ttm->num_pages; ++i) { + ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i], + PAGE_SIZE, ctx); + if (ret) + goto error; + } + return 0; + +error: + while (i--) + ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i], + PAGE_SIZE); + ttm_pool_free(&bdev->pool, ttm); + return ret; } -static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, +static void vmw_ttm_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, dma_ttm); + unsigned int i; if (vmw_tt->mob) { vmw_mob_destroy(vmw_tt->mob); @@ -594,6 +616,11 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, } vmw_ttm_unmap_dma(vmw_tt); + + for (i = 0; i < ttm->num_pages; ++i) + ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i], + PAGE_SIZE); + ttm_pool_free(&bdev->pool, ttm); } @@ -639,7 +666,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) return vmw_user_bo_verify_access(bo, tfile); } -static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) +static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); @@ -664,6 +691,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc * vmw_move_notify - TTM move_notify_callback * * @bo: The TTM buffer object about to move. + * @evict: Unused * @mem: The struct ttm_resource indicating to what memory * region the move is taking place. * @@ -742,7 +770,7 @@ vmw_delete_mem_notify(struct ttm_buffer_object *bo) vmw_move_notify(bo, false, NULL); } -struct ttm_bo_driver vmw_bo_driver = { +struct ttm_device_funcs vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 3c03b1746661..cb9975889e2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -49,7 +49,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = &vmw_vm_ops; /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */ - if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE) + if (!is_cow_mapping(vma->vm_flags)) vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP; return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index f2e2bf6d1421..e7570f422400 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -48,7 +48,6 @@ struct vmw_validation_bo_node { u32 as_mob : 1; u32 cpu_blit : 1; }; - /** * struct vmw_validation_res_node - Resource validation metadata. * @head: List head for the resource validation list. @@ -64,6 +63,8 @@ struct vmw_validation_bo_node { * @first_usage: True iff the resource has been seen only once in the current * validation batch. * @reserved: Whether the resource is currently reserved by this process. + * @dirty_set: Change dirty status of the resource. + * @dirty: Dirty information VMW_RES_DIRTY_XX. * @private: Optionally additional memory for caller-private data. 
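The vmw_ttm_populate()/vmw_ttm_unpopulate() hunks above pair the ttm_pool page allocation with per-page charging against the global TTM memory accounting, unwinding the charges if any page fails. A stripped-down sketch of the populate side of that pattern, using the same calls the hunk adds (the function name is illustrative, and the TTM headers are assumed to be included as in vmwgfx_ttm_buffer.c):

/* Assumes the TTM headers providing ttm_pool_alloc()/ttm_pool_free(),
 * ttm_mem_global_alloc_page()/ttm_mem_global_free_page() and ttm_mem_glob
 * are already included.
 */
static int example_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
                               struct ttm_operation_ctx *ctx)
{
        unsigned int i;
        int ret;

        ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
        if (ret)
                return ret;

        /* Charge every page against the global TTM memory limit. */
        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
                                                PAGE_SIZE, ctx);
                if (ret)
                        goto error;
        }
        return 0;

error:
        /* Roll back the pages charged so far, then free the allocation. */
        while (i--)
                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
                                         PAGE_SIZE);
        ttm_pool_free(&bdev->pool, ttm);
        return ret;
}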
* * Bit fields are used since these structures are allocated and freed in @@ -205,7 +206,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, * vmw_validation_find_res_dup - Find a duplicate resource entry in the * validation context's lists. * @ctx: The validation context to search. - * @vbo: The buffer object to search for. + * @res: Reference counted resource pointer. * * Return: Pointer to the struct vmw_validation_bo_node referencing the * duplicate, or NULL if none found. diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig index fab1373e1fb3..7eb0ce345547 100644 --- a/drivers/gpu/drm/xen/Kconfig +++ b/drivers/gpu/drm/xen/Kconfig @@ -1,15 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_XEN - bool "DRM Support for Xen guest OS" - depends on XEN - help - Choose this option if you want to enable DRM support - for Xen. + bool config DRM_XEN_FRONTEND tristate "Para-virtualized frontend driver for Xen guest OS" - depends on DRM_XEN - depends on DRM + depends on XEN && DRM + select DRM_XEN select DRM_KMS_HELPER select VIDEOMODE_HELPERS select XEN_XENBUS_FRONTEND diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index ef11b1e4de39..371202ebe900 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -13,6 +13,7 @@ #include <drm/drm_drv.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -301,7 +302,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = { .mode_valid = display_mode_valid, .enable = display_enable, .disable = display_disable, - .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, .check = display_check, .update = display_update, }; diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index c685d94409b0..109d627968ac 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1143,18 +1143,21 @@ static inline struct zynqmp_disp_layer *plane_to_layer(struct drm_plane *plane) static int zynqmp_disp_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state; - if (!state->crtc) + if (!new_plane_state->crtc) return 0; - crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - return drm_atomic_helper_check_plane_state(state, crtc_state, + return drm_atomic_helper_check_plane_state(new_plane_state, + crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, false, false); @@ -1162,8 +1165,10 @@ zynqmp_disp_plane_atomic_check(struct drm_plane *plane, static void zynqmp_disp_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct zynqmp_disp_layer *layer = plane_to_layer(plane); if (!old_state->fb) @@ -1174,13 +1179,15 @@ zynqmp_disp_plane_atomic_disable(struct drm_plane *plane, static void zynqmp_disp_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + 
struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct zynqmp_disp_layer *layer = plane_to_layer(plane); bool format_changed = false; if (!old_state->fb || - old_state->fb->format->format != plane->state->fb->format->format) + old_state->fb->format->format != new_state->fb->format->format) format_changed = true; /* @@ -1192,10 +1199,10 @@ zynqmp_disp_plane_atomic_update(struct drm_plane *plane, if (old_state->fb) zynqmp_disp_layer_disable(layer); - zynqmp_disp_layer_set_format(layer, plane->state); + zynqmp_disp_layer_set_format(layer, new_state); } - zynqmp_disp_layer_update(layer, plane->state); + zynqmp_disp_layer_update(layer, new_state); /* Enable or re-enable the plane is the format has changed. */ if (format_changed) @@ -1396,19 +1403,11 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp) */ static void zynqmp_disp_disable(struct zynqmp_disp *disp) { - struct drm_crtc *crtc = &disp->crtc; - zynqmp_disp_audio_disable(&disp->audio); zynqmp_disp_avbuf_disable_audio(&disp->avbuf); zynqmp_disp_avbuf_disable_channels(&disp->avbuf); zynqmp_disp_avbuf_disable(&disp->avbuf); - - /* Mark the flip is done as crtc is disabled anyway */ - if (crtc->state->event) { - complete_all(crtc->state->event->base.completion); - crtc->state->event = NULL; - } } static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc) @@ -1480,8 +1479,6 @@ static void zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, - crtc); struct zynqmp_disp *disp = crtc_to_disp(crtc); struct drm_plane_state *old_plane_state; @@ -1490,15 +1487,21 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc, * .shutdown() path if the plane is already disabled, skip * zynqmp_disp_plane_atomic_disable() in that case. 
*/ - old_plane_state = drm_atomic_get_old_plane_state(old_crtc_state->state, - crtc->primary); + old_plane_state = drm_atomic_get_old_plane_state(state, crtc->primary); if (old_plane_state) - zynqmp_disp_plane_atomic_disable(crtc->primary, old_plane_state); + zynqmp_disp_plane_atomic_disable(crtc->primary, state); zynqmp_disp_disable(disp); drm_crtc_vblank_off(&disp->crtc); + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); + clk_disable_unprepare(disp->pclk); pm_runtime_put_sync(disp->dev); } diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c index 78d787afe594..93bcca428e35 100644 --- a/drivers/gpu/drm/zte/zx_plane.c +++ b/drivers/gpu/drm/zte/zx_plane.c @@ -46,8 +46,10 @@ static const uint32_t vl_formats[] = { #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) static int zx_vl_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *plane_state) + struct drm_atomic_state *state) { + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_framebuffer *fb = plane_state->fb; struct drm_crtc *crtc = plane_state->crtc; struct drm_crtc_state *crtc_state; @@ -57,7 +59,7 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state, + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -179,13 +181,14 @@ static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format, } static void zx_vl_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { struct zx_plane *zplane = to_zx_plane(plane); - struct drm_plane_state *state = plane->state; - struct drm_framebuffer *fb = state->fb; - struct drm_rect *src = &state->src; - struct drm_rect *dst = &state->dst; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *fb = new_state->fb; + struct drm_rect *src = &new_state->src; + struct drm_rect *dst = &new_state->dst; struct drm_gem_cma_object *cma_obj; void __iomem *layer = zplane->layer; void __iomem *hbsc = zplane->hbsc; @@ -257,8 +260,10 @@ static void zx_vl_plane_atomic_update(struct drm_plane *plane, } static void zx_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); struct zx_plane *zplane = to_zx_plane(plane); void __iomem *hbsc = zplane->hbsc; @@ -275,8 +280,10 @@ static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = { }; static int zx_gl_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *plane_state) + struct drm_atomic_state *state) { + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, + plane); struct drm_framebuffer *fb = plane_state->fb; struct drm_crtc *crtc = plane_state->crtc; struct drm_crtc_state *crtc_state; @@ -284,7 +291,7 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state, + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); if (WARN_ON(!crtc_state)) return -EINVAL; @@ -347,10 +354,12 @@ static void zx_gl_rsz_setup(struct zx_plane 
*zplane, u32 src_w, u32 src_h, } static void zx_gl_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct zx_plane *zplane = to_zx_plane(plane); - struct drm_framebuffer *fb = plane->state->fb; + struct drm_framebuffer *fb = new_state->fb; struct drm_gem_cma_object *cma_obj; void __iomem *layer = zplane->layer; void __iomem *csc = zplane->csc; @@ -369,15 +378,15 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane, format = fb->format->format; stride = fb->pitches[0]; - src_x = plane->state->src_x >> 16; - src_y = plane->state->src_y >> 16; - src_w = plane->state->src_w >> 16; - src_h = plane->state->src_h >> 16; + src_x = new_state->src_x >> 16; + src_y = new_state->src_y >> 16; + src_w = new_state->src_w >> 16; + src_h = new_state->src_h >> 16; - dst_x = plane->state->crtc_x; - dst_y = plane->state->crtc_y; - dst_w = plane->state->crtc_w; - dst_h = plane->state->crtc_h; + dst_x = new_state->crtc_x; + dst_y = new_state->crtc_y; + dst_w = new_state->crtc_w; + dst_h = new_state->crtc_h; bpp = fb->format->cpp[0];
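The zynqmp_disp_crtc_atomic_disable() hunk earlier in this section stops completing the pending event inside zynqmp_disp_disable() and instead signals it as a vblank event, under the DRM event lock, once the CRTC is turned off. The core of that pattern in isolation (the function name is illustrative; the locking and helpers match what the hunk adds):

#include <drm/drm_atomic.h>
#include <drm/drm_vblank.h>

static void example_crtc_atomic_disable(struct drm_crtc *crtc,
                                        struct drm_atomic_state *state)
{
        /* ... disable scanout hardware ... */

        drm_crtc_vblank_off(crtc);

        /* A commit may still be waiting for its flip event; with vblanks
         * now off it must be completed here. dev->event_lock serialises
         * this against the vblank interrupt handler.
         */
        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);
}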