Diffstat (limited to 'drivers/gpu')
612 files changed, 41222 insertions, 13447 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 147d61b9674e..64376dd298ed 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -182,13 +182,6 @@ config DRM_TTM
	  GPU memory types. Will be enabled automatically if a device
	  driver uses it.

-config DRM_TTM_DMA_PAGE_POOL
-	bool
-	depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
-	default y
-	help
-	  Choose this if you need the TTM dma page pool
-
 config DRM_VRAM_HELPER
	tristate
	depends on DRM
@@ -287,6 +280,7 @@ config DRM_VKMS
	tristate "Virtual KMS (EXPERIMENTAL)"
	depends on DRM
	select DRM_KMS_HELPER
+	select DRM_GEM_SHMEM_HELPER
	select CRC32
	default n
	help
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2f31579f91d4..81569009f884 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -100,7 +100,7 @@ obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_STM) += stm/
 obj-$(CONFIG_DRM_STI) += sti/
-obj-$(CONFIG_DRM_IMX) += imx/
+obj-y += imx/
 obj-$(CONFIG_DRM_INGENIC) += ingenic/
 obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
 obj-$(CONFIG_DRM_MESON) += meson/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index bc9f0e42e0a2..ea391ca7f2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -208,11 +208,11 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
 })

 /* GPUVM API */
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
					void **vm, void **process_info,
					struct dma_fence **ef);
 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
-					struct file *filp, unsigned int pasid,
+					struct file *filp, u32 pasid,
					void **vm, void **process_info,
					struct dma_fence **ef);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 1529815838f7..4763bab7a4d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -104,7 +104,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
	unlock_srbm(kgd);
 }

-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
					unsigned int vmid)
 {
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 275f20399373..b91d27e39bad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -139,7 +139,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
	unlock_srbm(kgd);
 }

-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
					unsigned int vmid)
 {
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 4997189d8b36..5ce0ce704a21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -96,7 +96,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
	unlock_srbm(kgd);
 }

-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
					unsigned int vmid)
 {
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index b75bf0bb05ae..7d6c0013af35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -108,7 +108,7 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
	unlock_srbm(kgd);
 }

-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
					unsigned int vmid)
 {
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index de9eca55b0ef..e64deba8900f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -26,7 +26,7 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
		unsigned int vmid);
 int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
 int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
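The hunks above (and the kfd_priv.h change later in this diff) convert every PASID from unsigned int or uint16_t to the fixed-width u32. A minimal illustrative sketch of the truncation class this closes — the struct names here are hypothetical, not amdgpu code:

#include <linux/types.h>

/* kfd_process used to store the PASID as uint16_t while the allocator
 * handed back a wider integer, so IDs >= 0x10000 would silently wrap. */
struct old_proc { u16 pasid; };
struct new_proc { u32 pasid; };

static bool example_pasid_preserved(u32 allocated)
{
	struct old_proc o = { .pasid = allocated };	/* truncates high bits */
	struct new_proc n = { .pasid = allocated };	/* always preserved */

	return o.pasid == n.pasid;	/* false once allocated >= 0x10000 */
}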
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 703cd5a7b8f7..1755386470e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -995,7 +995,7 @@ create_evict_fence_fail:
	return ret;
 }

-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
 {
@@ -1031,7 +1031,7 @@ amdgpu_vm_init_fail:
 }

 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
-					   struct file *filp, unsigned int pasid,
+					   struct file *filp, u32 pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
 {
@@ -1478,7 +1478,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		}
	}

-	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
+	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
@@ -1557,7 +1557,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
-	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
+	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
+	    !mem->bo->tbo.pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 79342976fa76..8d2878e950da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -404,13 +404,12 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
-		.resv = bo->tbo.base.resv,
-		.flags = 0
+		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

-	if (bo->pin_count)
+	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 41ca13f0acd5..5c1f3725c741 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1320,6 +1320,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct ttm_resource_manager *man;
	int r;

	r = pm_runtime_get_sync(dev->dev);
@@ -1328,7 +1329,9 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
		return r;
	}

-	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+	r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+	seq_printf(m, "(%d)\n", r);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8affe50824c2..2e8a8b57639f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -133,10 +133,7 @@ static void amdgpu_display_unpin_work_func(struct work_struct *__work)
	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
-		r = amdgpu_bo_unpin(work->old_abo);
-		if (unlikely(r != 0)) {
-			DRM_ERROR("failed to unpin buffer after flip\n");
-		}
+		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");
@@ -250,8 +247,7 @@ pflip_cleanup:
	}
 unpin:
	if (!adev->enable_virtual_display)
-		if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
-			DRM_ERROR("failed to unpin new abo in error path\n");
+		amdgpu_bo_unpin(new_abo);
 unreserve:
	amdgpu_bo_unreserve(new_abo);
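ttm_bo_evict_mm(bdev, type) is replaced throughout this series by resolving the per-domain manager and calling ttm_resource_manager_evict_all(). A minimal sketch of the new idiom, assuming the TTM interfaces introduced by this series; the function name is hypothetical:

#include <drm/ttm/ttm_bo_driver.h>

static int example_evict_domain(struct ttm_bo_device *bdev,
				unsigned int mem_type)
{
	/* Look up the manager for this placement domain first ... */
	struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);

	if (!man)	/* domain was never initialized */
		return -EINVAL;

	/* ... then ask TTM to evict everything it currently holds. */
	return ttm_resource_manager_evict_all(bdev, man);
}

Callers such as the debugfs hook above pass TTM_PL_TT; amdgpu_bo_evict_vram() later in this diff passes TTM_PL_VRAM.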
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 427a7c5ba93d..5b465ab774d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -281,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
	struct sg_table *sgt;
	long r;

-	if (!bo->pin_count) {
+	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
@@ -303,7 +303,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
-		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+		sgt = drm_prime_pages_to_sg(obj->dev,
+					    bo->tbo.ttm->pages,
					    bo->tbo.num_pages);
		if (IS_ERR(sgt))
			return sgt;
@@ -389,7 +390,8 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
	if (unlikely(ret != 0))
		return ret;

-	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+	if (!bo->tbo.pin_count &&
+	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 5a49380d4b5c..26cf369003e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1176,25 +1176,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
	if (ret)
		return ret;

-	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
-	if (!adev)
-		return -ENOMEM;
+	adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev);
+	if (IS_ERR(adev))
+		return PTR_ERR(adev);

	adev->dev  = &pdev->dev;
	adev->pdev = pdev;
	ddev = adev_to_drm(adev);
-	ret = drm_dev_init(ddev, &kms_driver, &pdev->dev);
-	if (ret)
-		goto err_free;
-
-	drmm_add_final_kfree(ddev, adev);

	if (!supports_atomic)
		ddev->driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
-		goto err_free;
+		return ret;

	ddev->pdev = pdev;
	pci_set_drvdata(pdev, ddev);
@@ -1222,8 +1217,6 @@ retry_init:

 err_pci:
	pci_disable_device(pdev);
-err_free:
-	drm_dev_put(ddev);
	return ret;
 }

@@ -1240,7 +1233,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
	amdgpu_driver_unload_kms(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
-	drm_dev_put(dev);
 }

 static void
@@ -1539,19 +1531,13 @@ static struct drm_driver kms_driver = {
	.lastclose = amdgpu_driver_lastclose_kms,
	.irq_handler = amdgpu_irq_handler,
	.ioctls = amdgpu_ioctls_kms,
-	.gem_free_object_unlocked = amdgpu_gem_object_free,
-	.gem_open_object = amdgpu_gem_object_open,
-	.gem_close_object = amdgpu_gem_object_close,
	.dumb_create = amdgpu_mode_dumb_create,
	.dumb_map_offset = amdgpu_mode_dumb_mmap,
	.fops = &amdgpu_driver_kms_fops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = amdgpu_gem_prime_export,
	.gem_prime_import = amdgpu_gem_prime_import,
-	.gem_prime_vmap = amdgpu_gem_prime_vmap,
-	.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
	.gem_prime_mmap = amdgpu_gem_prime_mmap,

	.name = DRIVER_NAME,
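The amdgpu_drv.c hunk replaces the manual kzalloc + drm_dev_init + drmm_add_final_kfree sequence with devm_drm_dev_alloc(), which also removes the drm_dev_put() calls from the error and remove paths. A minimal sketch of the managed pattern under those assumptions — my_device, my_driver and the ddev member are placeholders, not amdgpu names:

#include <drm/drm_drv.h>
#include <linux/pci.h>

struct my_device { struct drm_device ddev; };
static struct drm_driver my_driver;

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct my_device *mdev;
	int ret;

	/* drm_device lifetime is now tied to the parent device; no explicit
	 * drm_dev_put() is needed on any error path below. */
	mdev = devm_drm_dev_alloc(&pdev->dev, &my_driver,
				  struct my_device, ddev);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;	/* managed cleanup frees mdev for us */

	pci_set_drvdata(pdev, &mdev->ddev);
	return drm_dev_register(&mdev->ddev, ent->driver_data);
}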
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5b50860eece4..df94db199532 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,9 +36,12 @@

 #include "amdgpu.h"
 #include "amdgpu_display.h"
+#include "amdgpu_dma_buf.h"
 #include "amdgpu_xgmi.h"

-void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
+static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

@@ -87,6 +90,7 @@ retry:
		return r;
	}
	*obj = &bo->tbo.base;
+	(*obj)->funcs = &amdgpu_gem_object_funcs;

	return 0;
 }
@@ -119,8 +123,8 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 * Call from drm_gem_handle_create which appear in both new and open ioctl
 * case.
 */
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
-			   struct drm_file *file_priv)
+static int amdgpu_gem_object_open(struct drm_gem_object *obj,
+				  struct drm_file *file_priv)
 {
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -152,8 +156,8 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
	return 0;
 }

-void amdgpu_gem_object_close(struct drm_gem_object *obj,
-			     struct drm_file *file_priv)
+static void amdgpu_gem_object_close(struct drm_gem_object *obj,
+				    struct drm_file *file_priv)
 {
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -211,6 +215,15 @@ out_unlock:
	ttm_eu_backoff_reservation(&ticket, &list);
 }

+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+	.free = amdgpu_gem_object_free,
+	.open = amdgpu_gem_object_open,
+	.close = amdgpu_gem_object_close,
+	.export = amdgpu_gem_prime_export,
+	.vmap = amdgpu_gem_prime_vmap,
+	.vunmap = amdgpu_gem_prime_vunmap,
+};
+
 /*
  * GEM ioctls.
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index e0f025dd1b14..637bf51dbf06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -33,11 +33,6 @@
 #define AMDGPU_GEM_DOMAIN_MAX		0x3
 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)

-void amdgpu_gem_object_free(struct drm_gem_object *obj);
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
-				struct drm_file *file_priv);
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
-				struct drm_file *file_priv);
 unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);

 /*
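The amdgpu_gem.c hunk moves the GEM callbacks from struct drm_driver to a per-object drm_gem_object_funcs table assigned at creation time, which lets the former entry points become static. A minimal sketch of the pattern, assuming the drm_gem_object_funcs interface shown above; all my_* names are placeholders:

#include <drm/drm_gem.h>

static void my_gem_free(struct drm_gem_object *obj) { kfree(obj); }
static int my_gem_open(struct drm_gem_object *obj, struct drm_file *f) { return 0; }
static void my_gem_close(struct drm_gem_object *obj, struct drm_file *f) { }

static const struct drm_gem_object_funcs my_gem_object_funcs = {
	.free  = my_gem_free,
	.open  = my_gem_open,
	.close = my_gem_close,
};

static void my_gem_init(struct drm_gem_object *obj)
{
	/* Each object now carries its own ops; the drm_driver-level
	 * gem_free_object_unlocked/gem_open_object hooks go away. */
	obj->funcs = &my_gem_object_funcs;
}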
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 1449489cbe78..056cb87d09ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -45,12 +45,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_dma_tt *ttm;

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
-		ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-		*addr = ttm->dma_address[0];
+		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
@@ -122,16 +120,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	struct ttm_dma_tt *ttm;

-	if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

-	ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-	if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

-	return adev->gmc.agp_start + ttm->dma_address[0];
+	return adev->gmc.agp_start + bo->ttm->dma_address[0];
 }

 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index a30a2ec89a5e..8980329cded0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -95,8 +95,6 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)

	man->use_tt = true;
	man->func = &amdgpu_gtt_mgr_func;
-	man->available_caching = TTM_PL_MASK_CACHING;
-	man->default_caching = TTM_PL_FLAG_CACHED;

	ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
@@ -138,7 +136,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)

	ttm_resource_manager_set_used(man, false);

-	ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 7521f4ab55de..6e9a9e5dbea0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -43,7 +43,7 @@ static DEFINE_IDA(amdgpu_pasid_ida);
 /* Helper to free pasid from a fence callback */
 struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
-	unsigned int pasid;
+	u32 pasid;
 };

 /**
@@ -79,7 +79,7 @@ int amdgpu_pasid_alloc(unsigned int bits)
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
-void amdgpu_pasid_free(unsigned int pasid)
+void amdgpu_pasid_free(u32 pasid)
 {
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
@@ -105,7 +105,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
 * Free the pasid only after all the fences in resv are signaled.
 */
 void amdgpu_pasid_free_delayed(struct dma_resv *resv,
-			       unsigned int pasid)
+			       u32 pasid)
 {
	struct dma_fence *fence, **fences;
	struct amdgpu_pasid_cb *cb;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 8e58325bbca2..0c3b4fa1f936 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -71,9 +71,9 @@ struct amdgpu_vmid_mgr {
 };

 int amdgpu_pasid_alloc(unsigned int bits);
-void amdgpu_pasid_free(unsigned int pasid);
+void amdgpu_pasid_free(u32 pasid);
 void amdgpu_pasid_free_delayed(struct dma_resv *resv,
-			       unsigned int pasid);
+			       u32 pasid);

 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 011c36e317ec..918d4e9c6461 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1095,7 +1095,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
-	unsigned int pasid;
+	u32 pasid;
	int handle;

	if (!fpriv)
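amdgpu_pasid_free_delayed() above defers freeing a PASID until the fences on a reservation object signal, carrying the u32 id in a container around dma_fence_cb. A hedged sketch of that callback shape, assuming only the dma_fence API; error handling and the multi-fence array case from the real code are omitted:

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

static void pasid_free_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct pasid_cb *cb = container_of(_cb, struct pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);	/* safe now: fence has signaled */
	dma_fence_put(fence);
	kfree(cb);
}

static void pasid_free_when_signaled(struct dma_fence *fence, u32 pasid)
{
	struct pasid_cb *cb = kmalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return;	/* the real code falls back to a synchronous wait */

	cb->pasid = pasid;
	if (dma_fence_add_callback(fence, &cb->cb, pasid_free_cb)) {
		/* fence already signaled; callback will never run */
		amdgpu_pasid_free(pasid);
		dma_fence_put(fence);
		kfree(cb);
	}
}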
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b191701098f0..c6c9723d3d8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -78,7 +78,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

-	if (bo->pin_count > 0)
+	if (bo->tbo.pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);
@@ -136,8 +136,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)

		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-			TTM_PL_FLAG_VRAM;
+		places[c].mem_type = TTM_PL_VRAM;
+		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
@@ -152,52 +152,48 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_TT;
-		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
-			places[c].flags |= TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED;
-		else
-			places[c].flags |= TTM_PL_FLAG_CACHED;
+		places[c].mem_type = TTM_PL_TT;
+		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_SYSTEM;
-		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
-			places[c].flags |= TTM_PL_FLAG_WC |
-				TTM_PL_FLAG_UNCACHED;
-		else
-			places[c].flags |= TTM_PL_FLAG_CACHED;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+		places[c].mem_type = AMDGPU_PL_GDS;
+		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+		places[c].mem_type = AMDGPU_PL_GWS;
+		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+		places[c].mem_type = AMDGPU_PL_OA;
+		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = 0;
		c++;
	}
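The placement rework above selects the domain through the new ttm_place.mem_type field and drops the TTM_PL_FLAG_* caching bits, whose role moves to the ttm_tt (see the tt_create hunk later in this diff). A minimal sketch of the new layout, assuming the TTM structures used by this series; the values are illustrative:

#include <drm/ttm/ttm_placement.h>

static const struct ttm_place example_gtt_place = {
	.fpfn = 0,		/* no lower bound on the page range */
	.lpfn = 0,		/* no upper bound */
	.mem_type = TTM_PL_TT,	/* was: .flags = TTM_PL_FLAG_TT | caching bits */
	.flags = 0,		/* flags now carry only placement modifiers */
};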
@@ -520,9 +516,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
-		.resv = bp->resv,
-		.flags = bp->type != ttm_bo_type_kernel ?
-			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
+		/* We opt to avoid OOM on system pages allocations */
+		.gfp_retry_mayfail = true,
+		.allow_res_evict = bp->type != ttm_bo_type_kernel,
+		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
@@ -594,7 +591,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
	amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -715,7 +712,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
	uint32_t domain;
	int r;

-	if (bo->pin_count)
+	if (bo->tbo.pin_count)
		return 0;

	domain = bo->preferred_domains;
@@ -912,13 +909,13 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

-	if (bo->pin_count) {
+	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

-		bo->pin_count++;
+		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
@@ -949,7 +946,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
-		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -958,7 +954,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
		goto error;
	}

-	bo->pin_count = 1;
+	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -1000,34 +996,16 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 * Returns:
 * 0 for success or a negative error code on failure.
 */
-int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+void amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_operation_ctx ctx = { false, false };
-	int r, i;
-
-	if (WARN_ON_ONCE(!bo->pin_count)) {
-		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
-		return 0;
-	}
-	bo->pin_count--;
-	if (bo->pin_count)
-		return 0;
+	ttm_bo_unpin(&bo->tbo);
+	if (bo->tbo.pin_count)
+		return;

	amdgpu_bo_subtract_pin_size(bo);

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);
-
-	for (i = 0; i < bo->placement.num_placement; i++) {
-		bo->placements[i].lpfn = 0;
-		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (unlikely(r))
-		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-
-	return r;
 }
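With pin_count moved into ttm_buffer_object, pinning becomes ttm_bo_pin()/ttm_bo_unpin() under the reservation lock instead of a driver counter plus TTM_PL_FLAG_NO_EVICT and a revalidation on unpin. A minimal sketch of the idiom, assuming the core helpers this series switches to and that the BO is already reserved:

#include <drm/ttm/ttm_bo_api.h>

static int example_pin(struct ttm_buffer_object *tbo,
		       struct ttm_placement *placement,
		       struct ttm_operation_ctx *ctx)
{
	int r;

	if (tbo->pin_count) {		/* already resident: just take a ref */
		ttm_bo_pin(tbo);
		return 0;
	}

	r = ttm_bo_validate(tbo, placement, ctx);	/* move into place */
	if (r)
		return r;

	ttm_bo_pin(tbo);	/* bumps tbo->pin_count; core skips eviction */
	return 0;
}

static void example_unpin(struct ttm_buffer_object *tbo)
{
	ttm_bo_unpin(tbo);	/* no re-validate needed on the way out */
}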
 /**
@@ -1042,6 +1020,8 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 */
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
+	struct ttm_resource_manager *man;
+
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
 #ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
@@ -1049,7 +1029,9 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
		return 0;
	}
 #endif
-	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+
+	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
 }

 static const char *amdgpu_vram_names[] = {
@@ -1355,19 +1337,14 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 * Returns:
 * 0 for success or a negative error code on failure.
 */
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
-	struct amdgpu_bo *abo;
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	unsigned long offset, size;
	int r;

-	if (!amdgpu_bo_is_amdgpu_bo(bo))
-		return 0;
-
-	abo = ttm_to_amdgpu_bo(bo);
-
	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

@@ -1380,8 +1357,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
-	if (abo->pin_count > 0)
-		return -EINVAL;
+	if (abo->tbo.pin_count > 0)
+		return VM_FAULT_SIGBUS;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
@@ -1393,15 +1370,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
-	if (unlikely(r != 0))
-		return r;
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
-		return -EINVAL;
+		return VM_FAULT_SIGBUS;

+	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
 }
@@ -1484,7 +1464,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
-		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
+		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
@@ -1576,7 +1556,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
		   id, size, placement);

-	pin_count = READ_ONCE(bo->pin_count);
+	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 621c0bfee6e3..a5a7992b5773 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -89,7 +89,6 @@ struct amdgpu_bo {
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
-	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
@@ -267,7 +266,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo);
 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
-int amdgpu_bo_unpin(struct amdgpu_bo *bo);
+void amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 int amdgpu_bo_late_init(struct amdgpu_device *adev);
@@ -285,7 +284,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
 int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
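amdgpu_bo_fault_reserve_notify() now returns vm_fault_t and translates errno-style failures into VM_FAULT_* codes itself, since it is called from the driver's own fault handler rather than through a TTM callback. A small illustrative sketch of the mapping it applies above:

#include <linux/mm_types.h>

static vm_fault_t example_translate(int r)
{
	if (likely(r == 0))
		return 0;			/* proceed with the fault */
	if (r == -EBUSY || r == -ERESTARTSYS)
		return VM_FAULT_NOPAGE;		/* transient: retry access */
	return VM_FAULT_SIGBUS;			/* unrecoverable */
}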
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 69ebd07f3eee..12aa35ac0eb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -47,7 +47,6 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <drm/drm_debugfs.h>
 #include <drm/amdgpu_drm.h>
@@ -63,12 +62,17 @@

 #define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128

+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm,
+				   struct ttm_resource *bo_mem);
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+				      struct ttm_tt *ttm);
+
 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				    unsigned int type,
				    uint64_t size_in_page)
 {
	return ttm_range_man_init(&adev->mman.bdev, type,
-				  TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
				  false, size_in_page);
 }
@@ -88,7 +92,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
-		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = 0
	};

	/* Don't handle scatter gather BOs */
@@ -175,24 +180,6 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 }

 /**
- * amdgpu_move_null - Register memory for a buffer object
- *
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static void amdgpu_move_null(struct ttm_buffer_object *bo,
-			     struct ttm_resource *new_mem)
-{
-	struct ttm_resource *old_mem = &bo->mem;
-
-	BUG_ON(old_mem->mm_node != NULL);
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-}
-
-/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
@@ -306,11 +293,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
-		struct ttm_dma_tt *dma;
		dma_addr_t *dma_address;

-		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+		dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
				    cpu_addr);
		if (r)
@@ -514,9 +499,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
-		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
+		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
-		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);

	return r;
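The move_blit hunk above folds ttm_bo_pipeline_move() into ttm_bo_move_accel_cleanup(), which gains an explicit pipeline flag. A sketch of the resulting call pattern, assuming the five-argument signature shown in this diff:

#include <drm/ttm/ttm_bo_driver.h>

static int example_finish_move(struct ttm_buffer_object *bo,
			       struct dma_fence *fence, bool evict,
			       struct ttm_resource *new_mem)
{
	int r;

	if (bo->type == ttm_bo_type_kernel)
		/* pipeline=false: block until the copy lands (page tables) */
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		/* pipeline=true: let the old backing ride the fence */
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);

	dma_fence_put(fence);
	return r;
}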
@@ -551,21 +536,20 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = 0;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit from VRAM\n");
		return r;
	}

-	/* set caching flags */
-	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
-	if (unlikely(r)) {
+	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+	if (unlikely(r))
		goto out_cleanup;
-	}

	/* Bind the memory to the GTT space */
-	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+	r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
@@ -576,8 +560,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
		goto out_cleanup;
	}

-	/* move BO (in tmp_mem) to new_mem */
-	r = ttm_bo_move_ttm(bo, ctx, new_mem);
+	r = ttm_bo_wait_ctx(bo, ctx);
+	if (unlikely(r))
+		goto out_cleanup;
+
+	amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+	ttm_resource_free(bo, &bo->mem);
+	ttm_bo_assign_mem(bo, new_mem);
 out_cleanup:
	ttm_resource_free(bo, &tmp_mem);
	return r;
@@ -607,7 +596,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = 0;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit to VRAM\n");
@@ -615,11 +605,16 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
	}

	/* move/bind old memory to GTT space */
-	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
+	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+	if (unlikely(r))
+		return r;
+
+	r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

+	ttm_bo_assign_mem(bo, &tmp_mem);
	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	if (unlikely(r)) {
@@ -668,25 +663,43 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
	struct ttm_resource *old_mem = &bo->mem;
	int r;

+	if (new_mem->mem_type == TTM_PL_TT) {
+		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
+		if (r)
+			return r;
+	}
+
+	amdgpu_bo_move_notify(bo, evict, new_mem);
+
	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
-	if (WARN_ON_ONCE(abo->pin_count > 0))
+	if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
-	if ((old_mem->mem_type == TTM_PL_TT &&
-	     new_mem->mem_type == TTM_PL_SYSTEM) ||
-	    (old_mem->mem_type == TTM_PL_SYSTEM &&
-	     new_mem->mem_type == TTM_PL_TT)) {
-		/* bind is enough */
-		amdgpu_move_null(bo, new_mem);
+	if (old_mem->mem_type == TTM_PL_SYSTEM &&
+	    new_mem->mem_type == TTM_PL_TT) {
+		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
+
+	if (old_mem->mem_type == TTM_PL_TT &&
+	    new_mem->mem_type == TTM_PL_SYSTEM) {
+		r = ttm_bo_wait_ctx(bo, ctx);
+		if (r)
+			goto fail;
+
+		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+		ttm_resource_free(bo, &bo->mem);
+		ttm_bo_assign_mem(bo, new_mem);
+		return 0;
+	}
+
	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
@@ -694,7 +707,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

@@ -720,12 +733,12 @@ memcpy:
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
-			return r;
+			goto fail;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
-			return r;
+			goto fail;
	}

	if (bo->type == ttm_bo_type_device &&
@@ -740,6 +753,11 @@ memcpy:
	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
+fail:
+	swap(*new_mem, bo->mem);
+	amdgpu_bo_move_notify(bo, false, new_mem);
+	swap(*new_mem, bo->mem);
+	return r;
 }
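With ttm_bo_move_ttm() and ttm_tt_bind() removed, the driver's move callback now populates pages, binds the GART and adopts the new resource itself. A hedged sketch of the SYSTEM-to-GTT leg under those assumptions, using the helpers visible in this series; my_backend_bind stands in for the driver's own bind hook:

#include <drm/ttm/ttm_bo_driver.h>

/* driver GART-bind hook, placeholder for e.g. amdgpu_ttm_backend_bind() */
static int my_backend_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			   struct ttm_resource *mem);

static int example_move_to_gtt(struct ttm_buffer_object *bo,
			       struct ttm_operation_ctx *ctx,
			       struct ttm_resource *new_mem)
{
	int r;

	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);	/* allocate pages */
	if (r)
		return r;

	r = my_backend_bind(bo->bdev, bo->ttm, new_mem);	/* GART bind */
	if (r)
		return r;

	ttm_bo_assign_mem(bo, new_mem);		/* adopt the new resource */
	return 0;
}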
@@ -773,8 +791,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso

		mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
				mem->bus.offset;
-		mem->bus.base = adev->gmc.aper_base;
+		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
+		mem->bus.caching = ttm_write_combined;
		break;
	default:
		return -EINVAL;
@@ -785,12 +804,13 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	uint64_t offset = (page_offset << PAGE_SHIFT);
	struct drm_mm_node *mm;

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
-	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
-		(offset >> PAGE_SHIFT);
+	offset += adev->gmc.aper_base;
+	return mm->start + (offset >> PAGE_SHIFT);
 }

 /**
@@ -818,12 +838,13 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
 * TTM backend functions.
 */
 struct amdgpu_ttm_tt {
-	struct ttm_dma_tt	ttm;
+	struct ttm_tt		ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
+	bool			bound;
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
 #endif
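The struct hunk above replaces the embedded ttm_dma_tt with a plain ttm_tt (which now owns the dma_address array itself) plus the driver's new bound flag. A minimal sketch of the embed-and-downcast pattern that results, assuming ttm_tt carries dma_address as in this series; example_tt is a hypothetical driver type:

#include <drm/ttm/ttm_tt.h>

struct example_tt {
	struct ttm_tt ttm;	/* must stay first for the downcast below */
	bool bound;		/* driver-tracked bind state */
};

static dma_addr_t example_first_dma_addr(struct ttm_tt *ttm)
{
	struct example_tt *ett = container_of(ttm, struct example_tt, ttm);

	(void)ett->bound;		/* driver-private state lives here */
	return ttm->dma_address[0];	/* was: ttm_dma_tt->dma_address[0] */
}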
@@ -949,7 +970,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
	if (!gtt || !gtt->userptr)
		return false;

-	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
+	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
			 gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
@@ -991,9 +1012,10 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+				     struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

@@ -1028,9 +1050,10 @@ release_sg:
 /**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+					struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
@@ -1099,7 +1122,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,

 gart_bind_fail:
	if (r)
-		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+		DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
@@ -1111,23 +1134,30 @@ gart_bind_fail:
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
-static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

+	if (!bo_mem)
+		return -EINVAL;
+
+	if (gtt->bound)
+		return 0;
+
	if (gtt->userptr) {
-		r = amdgpu_ttm_tt_pin_userptr(ttm);
+		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
@@ -1150,8 +1180,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
-		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+		DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
+	gtt->bound = true;
	return r;
 }
@@ -1191,8 +1222,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
-		TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = bo->mem.placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
@@ -1243,15 +1274,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
-static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+				      struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

+	if (!gtt->bound)
+		return;
+
	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
-		amdgpu_ttm_tt_unpin_userptr(ttm);
+		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;
@@ -1259,27 +1294,25 @@ static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
-		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
-			  gtt->ttm.ttm.num_pages, gtt->offset);
+		DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
+			  gtt->ttm.num_pages, gtt->offset);
+	gtt->bound = false;
 }

-static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+				       struct ttm_tt *ttm)
 {
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

+	amdgpu_ttm_backend_unbind(bdev, ttm);
+	ttm_tt_destroy_common(bdev, ttm);
	if (gtt->usertask)
		put_task_struct(gtt->usertask);

-	ttm_dma_tt_fini(&gtt->ttm);
+	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
 }

-static struct ttm_backend_func amdgpu_backend_func = {
-	.bind = &amdgpu_ttm_backend_bind,
-	.unbind = &amdgpu_ttm_backend_unbind,
-	.destroy = &amdgpu_ttm_backend_destroy,
-};
-
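With the per-tt ttm_backend_func table gone, bind/unbind/destroy become device-level operations that receive the ttm_bo_device, and since core TTM no longer tracks tt bind state, the driver guards with its own flag (the bound member added above). A sketch of the idempotent pair, reusing the hypothetical example_tt type from the previous sketch:

static int example_backend_bind(struct ttm_bo_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_resource *bo_mem)
{
	struct example_tt *ett = container_of(ttm, struct example_tt, ttm);

	if (ett->bound)		/* TTM may now call us more than once */
		return 0;

	/* ... program the GART from ttm->dma_address here ... */
	ett->bound = true;
	return 0;
}

static void example_backend_unbind(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm)
{
	struct example_tt *ett = container_of(ttm, struct example_tt, ttm);

	if (!ett->bound)
		return;

	/* ... clear the GART mapping ... */
	ett->bound = false;
}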
 /**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
@@ -1290,21 +1323,27 @@ static struct ttm_backend_func amdgpu_backend_func = {
 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
 {
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
+	enum ttm_caching caching;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
-	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->gobj = &bo->base;

+	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+		caching = ttm_write_combined;
+	else
+		caching = ttm_cached;
+
	/* allocate space for the uninitialized page entries */
-	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
+	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
-	return &gtt->ttm.ttm;
+	return &gtt->ttm;
 }
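Caching is now fixed when the tt is created, via the extra enum ttm_caching argument to ttm_sg_tt_init(), instead of being re-applied from placement flags on every move. A minimal sketch under those assumptions, again using the hypothetical example_tt container; the USWC test mirrors the amdgpu logic above:

static struct ttm_tt *example_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags, bool want_wc)
{
	struct example_tt *ett = kzalloc(sizeof(*ett), GFP_KERNEL);
	enum ttm_caching caching = want_wc ? ttm_write_combined : ttm_cached;

	if (!ett)
		return NULL;

	/* caching is decided once here; no per-move recaching anymore */
	if (ttm_sg_tt_init(&ett->ttm, bo, page_flags, caching)) {
		kfree(ett);
		return NULL;
	}
	return &ett->ttm;
}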
 /**
@@ -1313,10 +1352,11 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
-			struct ttm_operation_ctx *ctx)
+static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+				  struct ttm_tt *ttm,
+				  struct ttm_operation_ctx *ctx)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
@@ -1326,7 +1366,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
-		ttm->state = tt_unbound;
		return 0;
	}
@@ -1346,19 +1385,10 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
-		ttm->state = tt_unbound;
		return 0;
	}

-#ifdef CONFIG_SWIOTLB
-	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
-	}
-#endif
-
-	/* fall back to generic helper to populate the page array
-	 * and map them to the device */
-	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
+	return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
 }

 /**
@@ -1367,7 +1397,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
-static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+				     struct ttm_tt *ttm)
 {
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;
@@ -1391,17 +1422,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

-	adev = amdgpu_ttm_adev(ttm->bdev);
-
-#ifdef CONFIG_SWIOTLB
-	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
-		return;
-	}
-#endif
-
-	/* fall back to generic helper to unmap and unpopulate array */
-	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
+	adev = amdgpu_ttm_adev(bdev);
+	return ttm_pool_free(&adev->mman.bdev.pool, ttm);
 }
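The populate/unpopulate hunks replace the global page-allocator and DMA pools (and their swiotlb special-casing) with the new per-device pool embedded in the ttm_bo_device. Stripped of the userptr and SG special cases, the pair reduces to a sketch like this, assuming the ttm_pool_alloc()/ttm_pool_free() API this series switches to:

#include <drm/ttm/ttm_bo_driver.h>

static int example_tt_populate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	/* bdev->pool is the per-device pool set up at device init */
	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void example_tt_unpopulate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm)
{
	ttm_pool_free(&bdev->pool, ttm);
}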
@@ -1472,7 +1494,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
-	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

@@ -1523,7 +1545,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

-		if (ttm->caching_state == tt_cached)
+		if (ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

@@ -1693,17 +1715,23 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
	return ret;
 }

+static void
+amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+	amdgpu_bo_move_notify(bo, false, NULL);
+}
+
 static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
+	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
-	.move_notify = &amdgpu_bo_move_notify,
+	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
-	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
@@ -1875,10 +1903,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
	mutex_init(&adev->mman.gtt_window_lock);

	/* No others user of address space so set it to 0 */
-	r = ttm_bo_device_init(&adev->mman.bdev,
-			       &amdgpu_bo_driver,
+	r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			       adev_to_drm(adev)->anon_inode->i_mapping,
			       adev_to_drm(adev)->vma_offset_manager,
+			       adev->need_swiotlb,
			       dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1886,9 +1914,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
	}
	adev->mman.initialized = true;

-	/* We opt to avoid OOM on system pages allocations */
-	adev->mman.bdev.no_retry = true;
-
	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = amdgpu_vram_mgr_init(adev);
	if (r) {
@@ -2083,15 +2108,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
	adev->mman.buffer_funcs_enabled = enable;
 }

+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+	vm_fault_t ret;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_bo_fault_reserve_notify(bo);
+	if (ret)
+		goto unlock;
+
+	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+				       TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+unlock:
+	dma_resv_unlock(bo->base.resv);
+	return ret;
+}
+
+static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+	.fault = amdgpu_ttm_fault,
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close,
+	.access = ttm_bo_vm_access
+};
+
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+	int r;

-	if (adev == NULL)
-		return -EINVAL;
+	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+	if (unlikely(r != 0))
+		return r;

-	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+	vma->vm_ops = &amdgpu_ttm_vm_ops;
+	return 0;
 }

 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
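With fault_reserve_notify gone from ttm_bo_driver, drivers now compose their own vm_operations from the exported ttm_bo_vm_* helpers, inserting driver work between reserve and the common fault path. A sketch mirroring amdgpu_ttm_fault() above, assuming those helpers as shown in this diff:

#include <drm/ttm/ttm_bo_api.h>

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);	/* may drop mmap_lock + retry */
	if (ret)
		return ret;

	/* driver-specific work goes here, e.g. migrating the BO into
	 * CPU-visible VRAM before the PTEs are set up */

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* reservation was already dropped for us */

	dma_resv_unlock(bo->base.resv);
	return ret;
}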
@@ -2275,16 +2333,22 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
	return 0;
 }

+static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+
+	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
+}
+
 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
-	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
-	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+	{"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
 };

 /**
@@ -2577,12 +2641,6 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-
-#ifdef CONFIG_SWIOTLB
-	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
-		--count;
-#endif
-
	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
 #else
	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 684a9ee9da75..bdca2970173e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -32,10 +32,6 @@
 #define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
 #define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)

-#define AMDGPU_PL_FLAG_GDS		(TTM_PL_FLAG_PRIV << 0)
-#define AMDGPU_PL_FLAG_GWS		(TTM_PL_FLAG_PRIV << 1)
-#define AMDGPU_PL_FLAG_OA		(TTM_PL_FLAG_PRIV << 2)
-
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index fdbe7d4e8b8b..0768c8686983 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -609,7 +609,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

-	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+	if (bo->pin_count)
		return;

	abo = ttm_to_amdgpu_bo(bo);
@@ -1747,7 +1747,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
		resv = vm->root.base.bo->tbo.base.resv;
	} else {
		struct drm_gem_object *obj = &bo->tbo.base;
-		struct ttm_dma_tt *ttm;

		resv = bo->tbo.base.resv;
		if (obj->import_attach && bo_va->is_xgmi) {
@@ -1760,10 +1759,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
		}
		mem = &bo->tbo.mem;
		nodes = mem->mm_node;
-		if (mem->mem_type == TTM_PL_TT) {
-			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-			pages_addr = ttm->dma_address;
-		}
+		if (mem->mem_type == TTM_PL_TT)
+			pages_addr = bo->tbo.ttm->dma_address;
	}

	if (bo) {
@@ -2782,7 +2779,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 * 0 for success, error for failure.
 */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-		   int vm_context, unsigned int pasid)
+		   int vm_context, u32 pasid)
 {
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
@@ -2953,7 +2950,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
 * 0 for success, -errno for errors.
 */
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			   unsigned int pasid)
+			   u32 pasid)
 {
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;
@@ -3251,7 +3248,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			 struct amdgpu_task_info *task_info)
 {
	struct amdgpu_vm *vm;
@@ -3295,7 +3292,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
 * Try to gracefully handle a VM fault. Return true if the fault was handled and
 * shouldn't be reported any more.
 */
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr)
 {
	struct amdgpu_bo *root;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 496e1ed1577d..ca2344beed81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -378,8 +378,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-		   int vm_context, unsigned int pasid);
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
+		   int vm_context, u32 pasid);
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
@@ -436,9 +436,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info);
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr);

 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3cb68fdec94d..c99c2180785f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -178,9 +178,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
	struct ttm_resource_manager *man = &mgr->manager;
	int ret;

-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-
	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);

	man->func = &amdgpu_vram_mgr_func;
@@ -217,7 +214,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)

	ttm_resource_manager_set_used(man, false);

-	ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+	ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	if (ret)
		return;
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 8e64c01565ac..fe14e473f026 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -91,7 +91,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
		(const struct cik_ih_ring_entry *)ih_ring_entry;
	uint32_t context_id = ihre->data & 0xfffffff;
	unsigned int vmid  = (ihre->ring_id & 0x0000ff00) >> 8;
-	unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
+	u32 pasid = (ihre->ring_id & 0xffff0000) >> 16;

	if (pasid == 0)
		return;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index 27bcc5b472f6..b258a3dae767 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -45,7 +45,7 @@ static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
 }

 static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
-				unsigned int pasid, uint64_t vmid0_address,
+				u32 pasid, uint64_t vmid0_address,
				uint32_t *packet_buff, size_t size_in_bytes)
 {
	struct pm4__release_mem *rm_packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
index a04a1fe1d0d9..f9c6df1fdc5c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
@@ -275,7 +275,7 @@ struct kfd_dbgdev {
 };

 struct kfd_dbgmgr {
-	unsigned int pasid;
+	u32 pasid;
	struct kfd_dev *dev;
	struct kfd_dbgdev *dbgdev;
 };
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c579615451ba..f0a6f6665c81 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -40,7 +40,7 @@
 #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
-				  unsigned int pasid, unsigned int vmid);
+				  u32 pasid, unsigned int vmid);

 static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
@@ -931,7 +931,7 @@ out:
 }

 static int
-set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
+set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
			unsigned int vmid)
 {
	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
@@ -1966,8 +1966,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
	kfree(dqm);
 }

-int kfd_process_vm_fault(struct device_queue_manager *dqm,
-			 unsigned int pasid)
+int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid)
 {
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
int pasid, uint32_t partial_id, +void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint32_t valid_id_bits) { struct kfd_event *ev = NULL; @@ -872,7 +872,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, } #ifdef KFD_SUPPORT_IOMMU_V2 -void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, +void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid, unsigned long address, bool is_write_requested, bool is_execute_requested) { @@ -950,7 +950,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, } #endif /* KFD_SUPPORT_IOMMU_V2 */ -void kfd_signal_hw_exception_event(unsigned int pasid) +void kfd_signal_hw_exception_event(u32 pasid) { /* * Because we are called from arbitrary context (workqueue) as opposed @@ -971,7 +971,7 @@ void kfd_signal_hw_exception_event(unsigned int pasid) kfd_unref_process(p); } -void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid, +void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, struct kfd_vm_fault_info *info) { struct kfd_event *ev; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h index c7ac6c73af86..c8fe5dbdad55 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h @@ -79,7 +79,7 @@ struct kfd_event { #define KFD_EVENT_TYPE_DEBUG 5 #define KFD_EVENT_TYPE_MEMORY 8 -extern void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id, - uint32_t valid_id_bits); +extern void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, + uint32_t valid_id_bits); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 5a64915abaf7..66bbca61e3ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -139,7 +139,7 @@ void kfd_iommu_unbind_process(struct kfd_process *p) } /* Callback for process shutdown invoked by the IOMMU driver */ -static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) +static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) { struct kfd_dev *dev = kfd_device_by_pci_dev(pdev); struct kfd_process *p; @@ -185,8 +185,8 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) } /* This function called by IOMMU driver on PPR failure */ -static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, - unsigned long address, u16 flags) +static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, + unsigned long address, u16 flags) { struct kfd_dev *dev; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 2a07c4f2cd0d..af5816f51e55 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -51,7 +51,7 @@ unsigned int kfd_get_pasid_limit(void) return 1U << pasid_bits; } -unsigned int kfd_pasid_alloc(void) +u32 kfd_pasid_alloc(void) { int r = amdgpu_pasid_alloc(pasid_bits); @@ -63,7 +63,7 @@ unsigned int kfd_pasid_alloc(void) return 0; } -void kfd_pasid_free(unsigned int pasid) +void kfd_pasid_free(u32 pasid) { amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index e2ebd5a1d4de..09599efa41fc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -763,7 +763,7 @@ struct kfd_process { /* We want to receive a notification when the mm_struct is destroyed */ struct mmu_notifier mmu_notifier; - uint16_t pasid; + 
u32 pasid; /* * List of kfd_process_device structures, @@ -839,7 +839,7 @@ int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); struct kfd_process *kfd_create_process(struct file *filep); struct kfd_process *kfd_get_process(const struct task_struct *); -struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid); +struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); void kfd_unref_process(struct kfd_process *p); int kfd_process_evict_queues(struct kfd_process *p); @@ -880,8 +880,8 @@ int kfd_pasid_init(void); void kfd_pasid_exit(void); bool kfd_set_pasid_limit(unsigned int new_limit); unsigned int kfd_get_pasid_limit(void); -unsigned int kfd_pasid_alloc(void); -void kfd_pasid_free(unsigned int pasid); +u32 kfd_pasid_alloc(void); +void kfd_pasid_free(u32 pasid); /* Doorbells */ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd); @@ -967,7 +967,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm); struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, enum kfd_queue_type type); void kernel_queue_uninit(struct kernel_queue *kq, bool hanging); -int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid); +int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid); /* Process Queue Manager */ struct process_queue_node { @@ -1089,12 +1089,12 @@ int kfd_wait_on_events(struct kfd_process *p, uint32_t num_events, void __user *data, bool all, uint32_t user_timeout_ms, uint32_t *wait_result); -void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id, +void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint32_t valid_id_bits); void kfd_signal_iommu_event(struct kfd_dev *dev, - unsigned int pasid, unsigned long address, - bool is_write_requested, bool is_execute_requested); -void kfd_signal_hw_exception_event(unsigned int pasid); + u32 pasid, unsigned long address, + bool is_write_requested, bool is_execute_requested); +void kfd_signal_hw_exception_event(u32 pasid); int kfd_set_event(struct kfd_process *p, uint32_t event_id); int kfd_reset_event(struct kfd_process *p, uint32_t event_id); int kfd_event_page_set(struct kfd_process *p, void *kernel_address, @@ -1105,7 +1105,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, uint64_t *event_page_offset, uint32_t *event_slot_index); int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); -void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid, +void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, struct kfd_vm_fault_info *info); void kfd_signal_reset_event(struct kfd_dev *dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 2807e1c4d59b..65803e153a22 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1472,7 +1472,7 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd, } /* This increments the process->ref counter. 
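 * The caller must drop that reference with kfd_unref_process() once it
 * is done with the process. Note the lookup key is a u32 now: PASIDs
 * are 20-bit values in the PCIe specification, so the old 16-bit
 * storage could silently truncate them.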
*/ -struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid) +struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid) { struct kfd_process *p, *ret_p = NULL; unsigned int temp; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b7442d673ea6..2855bb918535 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6003,19 +6003,21 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc, } static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); struct dc *dc = adev->dm.dc; - struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); + struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); int ret = -EINVAL; - trace_amdgpu_dm_crtc_atomic_check(state); + trace_amdgpu_dm_crtc_atomic_check(crtc_state); - dm_update_crtc_active_planes(crtc, state); + dm_update_crtc_active_planes(crtc, crtc_state); if (unlikely(!dm_crtc_state->stream && - modeset_required(state, NULL, dm_crtc_state->stream))) { + modeset_required(crtc_state, NULL, dm_crtc_state->stream))) { WARN_ON(1); return ret; } @@ -6026,8 +6028,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, * planes are disabled, which is not supported by the hardware. And there is legacy * userspace which stops using the HW cursor altogether in response to the resulting EINVAL. */ - if (state->enable && - !(state->plane_mask & drm_plane_mask(crtc->primary))) + if (crtc_state->enable && + !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) return -EINVAL; /* In some use cases, like reset, no stream is attached */ @@ -8001,6 +8003,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_atomic_helper_calc_timestamping_constants(state); dm_state = dm_atomic_get_new_state(state); if (dm_state && dm_state->context) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 2a41c385e284..6f975c16779d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -649,7 +649,7 @@ static void try_disable_dsc(struct drm_atomic_state *state, for (i = 0; i < count; i++) { if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16 - && !params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { + && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; tried[i] = false; remaining_to_try += 1; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index e37b4b9f626d..95c656d205ed 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -231,7 +231,7 @@ struct kfd2kgd_calls { uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); - int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid, + int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, u32 pasid, unsigned int vmid); int 
(*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id); diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index be7c29cec318..042d7b54a6de 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -116,7 +116,7 @@ static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); @@ -127,7 +127,7 @@ static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc, } static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index f33418d6e1a0..df0b9eeb8933 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -74,16 +74,18 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st) */ static int komeda_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct komeda_crtc *kcrtc = to_kcrtc(crtc); - struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state); + struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_state); int err; - if (drm_atomic_crtc_needs_modeset(state)) + if (drm_atomic_crtc_needs_modeset(crtc_state)) komeda_crtc_update_clock_ratio(kcrtc_st); - if (state->active) { + if (crtc_state->active) { err = komeda_build_display_data_flow(kcrtc, kcrtc_st); if (err) return err; @@ -273,8 +275,10 @@ komeda_crtc_do_flush(struct drm_crtc *crtc, static void komeda_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old) + struct drm_atomic_state *state) { + struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state, + crtc); pm_runtime_get_sync(crtc->dev->dev); komeda_crtc_prepare(to_kcrtc(crtc)); drm_crtc_vblank_on(crtc); @@ -319,8 +323,10 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, static void komeda_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old) + struct drm_atomic_state *state) { + struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state, + crtc); struct komeda_crtc *kcrtc = to_kcrtc(crtc); struct komeda_crtc_state *old_st = to_kcrtc_st(old); struct komeda_pipeline *master = kcrtc->master; @@ -379,8 +385,10 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc, static void komeda_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old) + struct drm_atomic_state *state) { + struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state, + crtc); /* commit with modeset will be handled in enable/disable */ if (drm_atomic_crtc_needs_modeset(crtc->state)) return; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index 1d767473ba8a..1f8195bad536 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -41,18 +41,7 @@ static int komeda_register_show(struct seq_file *sf, void *x) return 0; } -static int komeda_register_open(struct inode *inode, struct file *filp) -{ - return single_open(filp, komeda_register_show, inode->i_private); -} - -static const 
struct file_operations komeda_register_fops = { - .owner = THIS_MODULE, - .open = komeda_register_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(komeda_register); #ifdef CONFIG_DEBUG_FS static void komeda_debugfs_init(struct komeda_dev *mdev) @@ -261,8 +250,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev) goto disable_clk; } - dev->dma_parms = &mdev->dma_parms; - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(dev, U32_MAX); mdev->iommu = iommu_get_domain_for_dev(mdev->dev); if (!mdev->iommu) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h index ce27f2f27c24..5b536f0cb548 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h @@ -163,8 +163,6 @@ struct komeda_dev { struct device *dev; /** @reg_base: the base address of komeda io space */ u32 __iomem *reg_base; - /** @dma_parms: the dma parameters of komeda */ - struct device_dma_parameters dma_parms; /** @chip: the basic chip information */ struct komeda_chip_info chip; diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index af67fefed38d..a3234bfb0917 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -168,7 +168,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); @@ -179,7 +179,7 @@ static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc, } static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); @@ -205,7 +205,7 @@ static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc, } static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event = crtc->state->event; diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 587d94798f5c..108e7a31bd26 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -46,7 +46,7 @@ static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc, } static void malidp_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; @@ -70,8 +70,10 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc, } static void malidp_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; int err; @@ -335,8 +337,10 @@ mclk_calc: } static int malidp_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; struct drm_plane *plane; @@ -371,7 +375,7 @@ static int 
malidp_crtc_atomic_check(struct drm_crtc *crtc, */ /* first count the number of rotated planes */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { struct drm_framebuffer *fb = pstate->fb; if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) @@ -387,7 +391,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, rot_mem_free += hwdev->rotation_memory[1]; /* now validate the rotation memory requirements */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { struct malidp_plane *mp = to_malidp_plane(plane); struct malidp_plane_state *ms = to_malidp_plane_state(pstate); struct drm_framebuffer *fb = pstate->fb; @@ -415,18 +419,18 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, } /* If only the writeback routing has changed, we don't need a modeset */ - if (state->connectors_changed) { + if (crtc_state->connectors_changed) { u32 old_mask = crtc->state->connector_mask; - u32 new_mask = state->connector_mask; + u32 new_mask = crtc_state->connector_mask; if ((old_mask ^ new_mask) == (1 << drm_connector_index(&malidp->mw_connector.base))) - state->connectors_changed = false; + crtc_state->connectors_changed = false; } - ret = malidp_crtc_atomic_check_gamma(crtc, state); - ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state); - ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state); + ret = malidp_crtc_atomic_check_gamma(crtc, crtc_state); + ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, crtc_state); + ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, crtc_state); return ret; } diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 38dfaa46d306..ca643f4e2064 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -413,21 +413,23 @@ static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) } static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); - if (state->gamma_lut && drm_color_lut_size(state->gamma_lut) != 256) + if (crtc_state->gamma_lut && drm_color_lut_size(crtc_state->gamma_lut) != 256) return -EINVAL; - if (state->color_mgmt_changed) - state->planes_changed = true; + if (crtc_state->color_mgmt_changed) + crtc_state->planes_changed = true; return 0; } static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); @@ -441,7 +443,7 @@ static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, } static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); @@ -467,8 +469,10 @@ static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc, } static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); struct drm_pending_vblank_event *event; @@ -503,8 +507,10 @@ static void 
armada_drm_crtc_atomic_disable(struct drm_crtc *crtc, } static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); @@ -757,7 +763,7 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) static void armada_drm_crtc_destroy(struct drm_crtc *crtc) { struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); - struct armada_private *priv = crtc->dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(crtc->dev); if (dcrtc->cursor_obj) drm_gem_object_put(&dcrtc->cursor_obj->obj); @@ -901,7 +907,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, struct resource *res, int irq, const struct armada_variant *variant, struct device_node *port) { - struct armada_private *priv = drm->dev_private; + struct armada_private *priv = drm_to_armada_dev(drm); struct armada_crtc *dcrtc; struct drm_plane *primary; void __iomem *base; diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c index c6fc2f1d58e9..29f4b52e3c8d 100644 --- a/drivers/gpu/drm/armada/armada_debugfs.c +++ b/drivers/gpu/drm/armada/armada_debugfs.c @@ -19,7 +19,7 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); struct drm_printer p = drm_seq_file_printer(m); mutex_lock(&priv->linear_lock); diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index a11bdaccbb33..6a5a87932576 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h @@ -73,6 +73,8 @@ struct armada_private { #endif }; +#define drm_to_armada_dev(dev) container_of(dev, struct armada_private, drm) + int armada_fbdev_init(struct drm_device *); void armada_fbdev_fini(struct drm_device *); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 5fc25c3f445c..22247cfce80b 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -37,13 +37,10 @@ DEFINE_DRM_GEM_FOPS(armada_drm_fops); static struct drm_driver armada_drm_driver = { .lastclose = drm_fb_helper_lastclose, - .gem_free_object_unlocked = armada_gem_free_object, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = armada_gem_prime_export, .gem_prime_import = armada_gem_prime_import, .dumb_create = armada_gem_dumb_create, - .gem_vm_ops = &armada_gem_vm_ops, .major = 1, .minor = 0, .name = "armada-drm", @@ -87,24 +84,13 @@ static int armada_drm_bind(struct device *dev) "armada-drm")) return -EBUSY; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - /* - * The drm_device structure must be at the start of - * armada_private for drm_dev_put() to work correctly. 
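The BUILD_BUG_ON below guarded exactly that layout assumption; it can go away because devm_drm_dev_alloc() records the offset of the embedded drm_device itself and manages its lifetime. A minimal sketch of the resulting pattern, with a hypothetical foo driver and foo_drm_driver standing in for the real ones:

#include <linux/err.h>
#include <drm/drm_drv.h>

struct foo_device {
	struct drm_device drm;		/* may sit at any offset */
	void __iomem *regs;
};

/* Recover the container from the embedded drm_device, as
 * drm_to_armada_dev() does for armada. */
#define drm_to_foo(d) container_of(d, struct foo_device, drm)

static int foo_bind(struct device *dev)
{
	struct foo_device *foo;

	/* Allocates foo, initializes foo->drm, and ties both to the
	 * lifetime of dev -- no drm_dev_put() on the error paths. */
	foo = devm_drm_dev_alloc(dev, &foo_drm_driver,
				 struct foo_device, drm);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	return drm_dev_register(&foo->drm, 0);
}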
- */ - BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0); - - ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev); - if (ret) { - dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n", - __func__, ret); - kfree(priv); - return ret; + priv = devm_drm_dev_alloc(dev, &armada_drm_driver, + struct armada_private, drm); + if (IS_ERR(priv)) { + dev_err(dev, "[" DRM_NAME ":%s] devm_drm_dev_alloc failed: %li\n", + __func__, PTR_ERR(priv)); + return PTR_ERR(priv); } - drmm_add_final_kfree(&priv->drm, priv); /* Remove early framebuffers */ ret = drm_fb_helper_remove_conflicting_framebuffers(NULL, @@ -117,8 +103,6 @@ static int armada_drm_bind(struct device *dev) return ret; } - priv->drm.dev_private = priv; - dev_set_drvdata(dev, &priv->drm); /* Mode setting support */ @@ -174,14 +158,13 @@ static int armada_drm_bind(struct device *dev) err_kms: drm_mode_config_cleanup(&priv->drm); drm_mm_takedown(&priv->linear); - drm_dev_put(&priv->drm); return ret; } static void armada_drm_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct armada_private *priv = drm->dev_private; + struct armada_private *priv = drm_to_armada_dev(drm); drm_kms_helper_poll_fini(&priv->drm); armada_fbdev_fini(&priv->drm); @@ -194,8 +177,6 @@ static void armada_drm_unbind(struct device *dev) drm_mode_config_cleanup(&priv->drm); drm_mm_takedown(&priv->linear); - - drm_dev_put(&priv->drm); } static int compare_of(struct device *dev, void *data) diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 0c4601275507..38f5170c0fea 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -117,7 +117,7 @@ static const struct drm_fb_helper_funcs armada_fb_helper_funcs = { int armada_fbdev_init(struct drm_device *dev) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); struct drm_fb_helper *fbh; int ret; @@ -151,7 +151,7 @@ int armada_fbdev_init(struct drm_device *dev) void armada_fbdev_fini(struct drm_device *dev) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); struct drm_fb_helper *fbh = priv->fbdev; if (fbh) { diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 8005614d2e6b..21909642ee4c 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -25,7 +25,7 @@ static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf) return vmf_insert_pfn(vmf->vma, vmf->address, pfn); } -const struct vm_operations_struct armada_gem_vm_ops = { +static const struct vm_operations_struct armada_gem_vm_ops = { .fault = armada_gem_vm_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, @@ -39,7 +39,7 @@ static size_t roundup_gem_size(size_t size) void armada_gem_free_object(struct drm_gem_object *obj) { struct armada_gem_object *dobj = drm_to_armada_gem(obj); - struct armada_private *priv = obj->dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(obj->dev); DRM_DEBUG_DRIVER("release obj %p\n", dobj); @@ -77,7 +77,7 @@ void armada_gem_free_object(struct drm_gem_object *obj) int armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); size_t size = obj->obj.size; if (obj->page || obj->linear) @@ -184,6 +184,12 @@ armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj) return 
dobj->addr; } +static const struct drm_gem_object_funcs armada_gem_object_funcs = { + .free = armada_gem_free_object, + .export = armada_gem_prime_export, + .vm_ops = &armada_gem_vm_ops, +}; + struct armada_gem_object * armada_gem_alloc_private_object(struct drm_device *dev, size_t size) { @@ -195,6 +201,8 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size) if (!obj) return NULL; + obj->obj.funcs = &armada_gem_object_funcs; + drm_gem_private_object_init(dev, &obj->obj, size); DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size); @@ -214,6 +222,8 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, if (!obj) return NULL; + obj->obj.funcs = &armada_gem_object_funcs; + if (drm_gem_object_init(dev, &obj->obj, size)) { kfree(obj); return NULL; @@ -379,7 +389,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, struct armada_gem_object *dobj = drm_to_armada_gem(obj); struct scatterlist *sg; struct sg_table *sgt; - int i, num; + int i; sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) @@ -395,22 +405,18 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, mapping = dobj->obj.filp->f_mapping; - for_each_sg(sgt->sgl, sg, count, i) { + for_each_sgtable_sg(sgt, sg, i) { struct page *page; page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) { - num = i; + if (IS_ERR(page)) goto release; - } sg_set_page(sg, page, PAGE_SIZE, 0); } - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) { - num = sgt->nents; + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto release; - } } else if (dobj->page) { /* Single contiguous page */ if (sg_alloc_table(sgt, 1, GFP_KERNEL)) @@ -418,7 +424,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0); - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto free_table; } else if (dobj->linear) { /* Single contiguous physical region - no struct page */ @@ -432,8 +438,9 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, return sgt; release: - for_each_sg(sgt->sgl, sg, num, i) - put_page(sg_page(sg)); + for_each_sgtable_sg(sgt, sg, i) + if (sg_page(sg)) + put_page(sg_page(sg)); free_table: sg_free_table(sgt); free_sgt: @@ -449,11 +456,12 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, int i; if (!dobj->linear) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + dma_unmap_sgtable(attach->dev, sgt, dir, 0); if (dobj->obj.filp) { struct scatterlist *sg; - for_each_sg(sgt->sgl, sg, sgt->nents, i) + + for_each_sgtable_sg(sgt, sg, i) put_page(sg_page(sg)); } diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h index de04cc2c8f0e..ffcc7e8dd351 100644 --- a/drivers/gpu/drm/armada/armada_gem.h +++ b/drivers/gpu/drm/armada/armada_gem.h @@ -21,8 +21,6 @@ struct armada_gem_object { void *update_data; }; -extern const struct vm_operations_struct armada_gem_vm_ops; - #define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj) void armada_gem_free_object(struct drm_gem_object *); diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 07f0da4d9ba1..30e01101f59e 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -344,7 +344,7 @@ static int armada_overlay_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, 
uint64_t val) { - struct armada_private *priv = plane->dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(plane->dev); #define K2R(val) (((val) >> 0) & 0xff) #define K2G(val) (((val) >> 8) & 0xff) @@ -412,7 +412,7 @@ static int armada_overlay_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, uint64_t *val) { - struct armada_private *priv = plane->dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(plane->dev); #define C2K(c,s) (((c) >> (s)) & 0xff) #define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16) @@ -505,7 +505,7 @@ static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = { static int armada_overlay_create_properties(struct drm_device *dev) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); if (priv->colorkey_prop) return 0; @@ -539,7 +539,7 @@ static int armada_overlay_create_properties(struct drm_device *dev) int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm_to_armada_dev(dev); struct drm_mode_object *mobj; struct drm_plane *overlay; int ret; diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig index 018383cfcfa7..5e95bcea43e9 100644 --- a/drivers/gpu/drm/aspeed/Kconfig +++ b/drivers/gpu/drm/aspeed/Kconfig @@ -3,6 +3,7 @@ config DRM_ASPEED_GFX tristate "ASPEED BMC Display Controller" depends on DRM && OF depends on (COMPILE_TEST || ARCH_ASPEED) + depends on MMU select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER select DMA_CMA if HAVE_DMA_CONTIGUOUS diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h index e7ca95827ae8..f1e7e56abc02 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx.h +++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h @@ -75,7 +75,7 @@ int aspeed_gfx_create_output(struct drm_device *drm); /* CTRL2 */ #define CRT_CTRL_DAC_EN BIT(0) #define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK) -#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(20, 31) +#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(31, 20) /* CRT_HORIZ0 */ #define CRT_H_TOTAL(x) (x) diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 903f4f304647..771ad71cd340 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -63,15 +63,21 @@ static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static void aspeed_gfx_setup_mode_config(struct drm_device *drm) +static int aspeed_gfx_setup_mode_config(struct drm_device *drm) { - drm_mode_config_init(drm); + int ret; + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; drm->mode_config.min_width = 0; drm->mode_config.min_height = 0; drm->mode_config.max_width = 800; drm->mode_config.max_height = 600; drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs; + + return ret; } static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) @@ -144,7 +150,9 @@ static int aspeed_gfx_load(struct drm_device *drm) writel(0, priv->base + CRT_CTRL1); writel(0, priv->base + CRT_CTRL2); - aspeed_gfx_setup_mode_config(drm); + ret = aspeed_gfx_setup_mode_config(drm); + if (ret < 0) + return ret; ret = drm_vblank_init(drm, 1); if (ret < 0) { @@ -179,19 +187,13 @@ static int aspeed_gfx_load(struct drm_device *drm) static void aspeed_gfx_unload(struct drm_device *drm) { 
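	/*
	 * Mode-config teardown is now handled by the managed-resource
	 * machinery behind drmm_mode_config_init() above, so only the
	 * poll helper still needs an explicit _fini here.
	 */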
drm_kms_helper_poll_fini(drm); - drm_mode_config_cleanup(drm); } DEFINE_DRM_GEM_CMA_FOPS(fops); static struct drm_driver aspeed_gfx_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .gem_create_object = drm_gem_cma_create_object_default_funcs, - .dumb_create = drm_gem_cma_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, - .gem_prime_mmap = drm_gem_prime_mmap, + DRM_GEM_CMA_DRIVER_OPS, .fops = &fops, .name = "aspeed-gfx-drm", .desc = "ASPEED GFX DRM", @@ -205,6 +207,69 @@ static const struct of_device_id aspeed_gfx_match[] = { { } }; +#define ASPEED_SCU_VGA0 0x50 +#define ASPEED_SCU_MISC_CTRL 0x2c + +static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct aspeed_gfx *priv = dev_get_drvdata(dev); + u32 val; + int rc; + + rc = kstrtou32(buf, 0, &val); + if (rc) + return rc; + + if (val > 3) + return -EINVAL; + + rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16); + if (rc < 0) + return 0; + + return count; +} + +static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct aspeed_gfx *priv = dev_get_drvdata(dev); + u32 reg; + int rc; + + rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, ®); + if (rc) + return rc; + + return sprintf(buf, "%u\n", (reg >> 16) & 0x3); +} +static DEVICE_ATTR_RW(dac_mux); + +static ssize_t +vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct aspeed_gfx *priv = dev_get_drvdata(dev); + u32 reg; + int rc; + + rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, ®); + if (rc) + return rc; + + return sprintf(buf, "%u\n", reg & 1); +} +static DEVICE_ATTR_RO(vga_pw); + +static struct attribute *aspeed_sysfs_entries[] = { + &dev_attr_vga_pw.attr, + &dev_attr_dac_mux.attr, + NULL, +}; + +static struct attribute_group aspeed_sysfs_attr_group = { + .attrs = aspeed_sysfs_entries, +}; + static int aspeed_gfx_probe(struct platform_device *pdev) { struct aspeed_gfx *priv; @@ -219,6 +284,12 @@ static int aspeed_gfx_probe(struct platform_device *pdev) if (ret) return ret; + dev_set_drvdata(&pdev->dev, priv); + + ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); + if (ret) + return ret; + ret = drm_dev_register(&priv->drm, 0); if (ret) goto err_unload; @@ -227,6 +298,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev) return 0; err_unload: + sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); aspeed_gfx_unload(&priv->drm); return ret; @@ -236,6 +308,7 @@ static int aspeed_gfx_remove(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); + sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group); drm_dev_unregister(drm); aspeed_gfx_unload(drm); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index c1af6b725933..467049ca8430 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -177,6 +177,8 @@ struct ast_private *ast_device_create(struct drm_driver *drv, #define AST_IO_MM_OFFSET (0x380) +#define AST_IO_VGAIR1_VREFRESH BIT(3) + #define __ast_read(x) \ static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \ u##x val = 0;\ diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 62fe682a7de6..22f0e65fbe9a 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -514,6 +514,16 
@@ static void ast_set_start_address_crt1(struct ast_private *ast, } +static void ast_wait_for_vretrace(struct ast_private *ast) +{ + unsigned long timeout = jiffies + HZ; + u8 vgair1; + + do { + vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout)); +} + /* * Primary plane */ @@ -562,13 +572,24 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_plane_state *state = plane->state; struct drm_gem_vram_object *gbo; s64 gpu_addr; + struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *old_fb = old_state->fb; - gbo = drm_gem_vram_of_gem(state->fb->obj[0]); + if (!old_fb || (fb->format != old_fb->format)) { + struct drm_crtc_state *crtc_state = state->crtc->state; + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info; + + ast_set_color_reg(ast, fb->format); + ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info); + } + + gbo = drm_gem_vram_of_gem(fb->obj[0]); gpu_addr = drm_gem_vram_offset(gbo); if (drm_WARN_ON_ONCE(dev, gpu_addr < 0)) return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ - ast_set_offset_reg(ast, state->fb); + ast_set_offset_reg(ast, fb); ast_set_start_address_crt1(ast, (u32)gpu_addr); ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00); @@ -721,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_SUSPEND: if (ast->tx_chip_type == AST_TX_DP501) ast_set_dp501_video_output(crtc->dev, 1); - ast_crtc_load_lut(ast, crtc); break; case DRM_MODE_DPMS_OFF: if (ast->tx_chip_type == AST_TX_DP501) @@ -731,23 +751,26 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) } static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct drm_device *dev = crtc->dev; struct ast_crtc_state *ast_state; const struct drm_format_info *format; bool succ; - if (!state->enable) + if (!crtc_state->enable) return 0; /* no mode checks if CRTC is being disabled */ - ast_state = to_ast_crtc_state(state); + ast_state = to_ast_crtc_state(crtc_state); format = ast_state->format; - if (!format) - return 0; + if (drm_WARN_ON_ONCE(dev, !format)) + return -EINVAL; /* BUG: We didn't set format in primary check(). */ - succ = ast_get_vbios_mode_info(format, &state->mode, - &state->adjusted_mode, + succ = ast_get_vbios_mode_info(format, &crtc_state->mode, + &crtc_state->adjusted_mode, &ast_state->vbios_mode_info); if (!succ) return -EINVAL; @@ -755,39 +778,35 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, return 0; } -static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) +static void +ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct ast_private *ast = to_ast_private(crtc->dev); + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state); + struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state); - ast_open_key(ast); + /* + * The gamma LUT has to be reloaded after changing the primary + * plane's color format. 
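The same mechanical conversion recurs throughout this series: CRTC atomic helpers take the full struct drm_atomic_state and look up their own old/new state from it, instead of receiving a lone crtc_state. Roughly, for a hypothetical foo driver:

#include <drm/drm_atomic.h>

static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	/* Fetch this CRTC's slices of the global atomic state. */
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* ...program the hardware from new_state, as before... */
}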
+ */ + if (old_ast_crtc_state->format != ast_crtc_state->format) + ast_crtc_load_lut(ast, crtc); } -static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) +static void +ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct ast_private *ast = to_ast_private(dev); - struct ast_crtc_state *ast_state; - const struct drm_format_info *format; - struct ast_vbios_mode_info *vbios_mode_info; - struct drm_display_mode *adjusted_mode; - - ast_state = to_ast_crtc_state(crtc->state); - - format = ast_state->format; - if (!format) - return; - - vbios_mode_info = &ast_state->vbios_mode_info; - - ast_set_color_reg(ast, format); - ast_set_vbios_color_reg(ast, format, vbios_mode_info); - - if (!crtc->state->mode_changed) - return; - - adjusted_mode = &crtc->state->adjusted_mode; + struct drm_crtc_state *crtc_state = crtc->state; + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); @@ -796,25 +815,42 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info); ast_set_crtthd_reg(ast); ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info); -} -static void -ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON); } static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + /* + * HW cursors require the underlying primary plane and CRTC to + * display a valid mode and image. This is not the case during + * full modeset operations. So we temporarily disable any active + * plane, including the HW cursor. Each plane's atomic_update() + * helper will re-enable it if necessary. + * + * We only do this during *full* modesets. It does not affect + * simple pageflips on the planes. + */ + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + + /* + * Ensure that no scanout takes place before reprogramming mode + * and format registers. 
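(ast_wait_for_vretrace(), added earlier in this file, provides that guarantee: it polls the VGA input status register for the vertical-refresh bit, bounded by a one-second jiffies timeout, so a wedged display cannot hang the disable path.)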
+ */ + ast_wait_for_vretrace(ast); } static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { .atomic_check = ast_crtc_helper_atomic_check, - .atomic_begin = ast_crtc_helper_atomic_begin, .atomic_flush = ast_crtc_helper_atomic_flush, .atomic_enable = ast_crtc_helper_atomic_enable, .atomic_disable = ast_crtc_helper_atomic_disable, @@ -1054,6 +1090,11 @@ static int ast_connector_init(struct drm_device *dev) * Mode config */ +static const struct drm_mode_config_helper_funcs +ast_mode_config_helper_funcs = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + static const struct drm_mode_config_funcs ast_mode_config_funcs = { .fb_create = drm_gem_fb_create, .mode_valid = drm_vram_helper_mode_valid, @@ -1093,6 +1134,8 @@ int ast_mode_config_init(struct ast_private *ast) dev->mode_config.max_height = 1200; } + dev->mode_config.helper_private = &ast_mode_config_helper_funcs; + memset(&ast->primary_plane, 0, sizeof(ast->primary_plane)); ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01, &ast_primary_plane_funcs, diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index d665dd5af5dd..3d013946714e 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -282,6 +282,8 @@ static const struct ast_vbios_enhtable res_1360x768[] = { }; static const struct ast_vbios_enhtable res_1600x900[] = { + {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A }, {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | AST2500PreCatchCRT), 60, 1, 0x3A }, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index ce246b96330b..c17571a3cc2b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -165,7 +165,7 @@ atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c, } static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_device *dev = c->dev; struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); @@ -200,7 +200,7 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, } static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_device *dev = c->dev; struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); @@ -325,8 +325,9 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state) } static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c, - struct drm_crtc_state *s) + struct drm_atomic_state *state) { + struct drm_crtc_state *s = drm_atomic_get_new_crtc_state(state, c); int ret; ret = atmel_hlcdc_crtc_select_output_mode(s); @@ -341,7 +342,7 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c, } static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c, - struct drm_crtc_state *old_s) + struct drm_atomic_state *state) { struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); @@ -356,7 +357,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c, } static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_s) + struct drm_atomic_state *state) { /* TODO: write common plane control register if available */ } diff --git a/drivers/gpu/drm/bridge/Kconfig 
b/drivers/gpu/drm/bridge/Kconfig index 3e11af4e9f63..ef91646441b1 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -241,6 +241,8 @@ source "drivers/gpu/drm/bridge/analogix/Kconfig" source "drivers/gpu/drm/bridge/adv7511/Kconfig" +source "drivers/gpu/drm/bridge/cadence/Kconfig" + source "drivers/gpu/drm/bridge/synopsys/Kconfig" endmenu diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index c589a6a7cbe1..2b3aff104e46 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -25,4 +25,5 @@ obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o obj-y += analogix/ +obj-y += cadence/ obj-y += synopsys/ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index f101dd2819b5..45838bd08d37 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -55,9 +55,9 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511) return 0; } -int adv7511_hdmi_hw_params(struct device *dev, void *data, - struct hdmi_codec_daifmt *fmt, - struct hdmi_codec_params *hparms) +static int adv7511_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) { struct adv7511 *adv7511 = dev_get_drvdata(dev); unsigned int audio_source, i2s_format = 0; diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig index e1fa7d820373..024ea2a570e7 100644 --- a/drivers/gpu/drm/bridge/analogix/Kconfig +++ b/drivers/gpu/drm/bridge/analogix/Kconfig @@ -25,3 +25,12 @@ config DRM_ANALOGIX_ANX78XX config DRM_ANALOGIX_DP tristate depends on DRM + +config DRM_ANALOGIX_ANX7625 + tristate "Analogix Anx7625 MIPI to DP interface support" + depends on DRM + depends on OF + help + ANX7625 is an ultra-low power 4K mobile HD transmitter + designed for portable devices. It converts MIPI/DPI to + DisplayPort1.3 4K. diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile index 97669b374098..44da392bb9f9 100644 --- a/drivers/gpu/drm/bridge/analogix/Makefile +++ b/drivers/gpu/drm/bridge/analogix/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o +obj-$(CONFIG_DRM_ANALOGIX_ANX7625) += anx7625.o obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 914c569ab8c1..fafb4b492ea0 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -524,7 +524,7 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp) writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); } -int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp) +static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp) { int reg; int retval = 0; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c new file mode 100644 index 000000000000..65cc05982f82 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -0,0 +1,1850 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2020, Analogix Semiconductor. All rights reserved. 
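(A note on the PLL arithmetic further down in this new driver: anx7625_calculate_m_n() picks a post-divider and sets M = pixel_clock, N = XTAL/post_divider, so the ODFC PLL output XTAL * M/N comes out at pixel_clock * post_divider; anx7625_reduction_of_a_fraction() then divides both by gcd(M, N) and rescales the pair by powers of two to sit just under the 24-bit register limit, maximizing output-frequency accuracy. XTAL here is the 27 MHz reference the driver selects via REF_CLK_27000KHZ.)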
+ *
+ */
+#include <linux/gcd.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+
+#include "anx7625.h"
+
+/*
+ * There is a sync issue when the AP (CPU) and the internal firmware (OCM)
+ * access the I2C registers concurrently. To avoid the race condition, the
+ * AP should access the reserved slave address before the slave address
+ * changes.
+ */
+static int i2c_access_workaround(struct anx7625_data *ctx,
+				 struct i2c_client *client)
+{
+	u8 offset;
+	struct device *dev = &client->dev;
+	int ret;
+
+	if (client == ctx->last_client)
+		return 0;
+
+	ctx->last_client = client;
+
+	if (client == ctx->i2c.tcpc_client)
+		offset = RSVD_00_ADDR;
+	else if (client == ctx->i2c.tx_p0_client)
+		offset = RSVD_D1_ADDR;
+	else if (client == ctx->i2c.tx_p1_client)
+		offset = RSVD_60_ADDR;
+	else if (client == ctx->i2c.rx_p0_client)
+		offset = RSVD_39_ADDR;
+	else if (client == ctx->i2c.rx_p1_client)
+		offset = RSVD_7F_ADDR;
+	else
+		offset = RSVD_00_ADDR;
+
+	ret = i2c_smbus_write_byte_data(client, offset, 0x00);
+	if (ret < 0)
+		DRM_DEV_ERROR(dev,
+			      "fail to access i2c id=%x:%x\n",
+			      client->addr, offset);
+
+	return ret;
+}
+
+static int anx7625_reg_read(struct anx7625_data *ctx,
+			    struct i2c_client *client, u8 reg_addr)
+{
+	int ret;
+	struct device *dev = &client->dev;
+
+	i2c_access_workaround(ctx, client);
+
+	ret = i2c_smbus_read_byte_data(client, reg_addr);
+	if (ret < 0)
+		DRM_DEV_ERROR(dev, "read i2c fail id=%x:%x\n",
+			      client->addr, reg_addr);
+
+	return ret;
+}
+
+static int anx7625_reg_block_read(struct anx7625_data *ctx,
+				  struct i2c_client *client,
+				  u8 reg_addr, u8 len, u8 *buf)
+{
+	int ret;
+	struct device *dev = &client->dev;
+
+	i2c_access_workaround(ctx, client);
+
+	ret = i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf);
+	if (ret < 0)
+		DRM_DEV_ERROR(dev, "read i2c block fail id=%x:%x\n",
+			      client->addr, reg_addr);
+
+	return ret;
+}
+
+static int anx7625_reg_write(struct anx7625_data *ctx,
+			     struct i2c_client *client,
+			     u8 reg_addr, u8 reg_val)
+{
+	int ret;
+	struct device *dev = &client->dev;
+
+	i2c_access_workaround(ctx, client);
+
+	ret = i2c_smbus_write_byte_data(client, reg_addr, reg_val);
+
+	if (ret < 0)
+		DRM_DEV_ERROR(dev, "fail to write i2c id=%x:%x\n",
+			      client->addr, reg_addr);
+
+	return ret;
+}
+
+static int anx7625_write_or(struct anx7625_data *ctx,
+			    struct i2c_client *client,
+			    u8 offset, u8 mask)
+{
+	int val;
+
+	val = anx7625_reg_read(ctx, client, offset);
+	if (val < 0)
+		return val;
+
+	return anx7625_reg_write(ctx, client, offset, (val | (mask)));
+}
+
+static int anx7625_write_and(struct anx7625_data *ctx,
+			     struct i2c_client *client,
+			     u8 offset, u8 mask)
+{
+	int val;
+
+	val = anx7625_reg_read(ctx, client, offset);
+	if (val < 0)
+		return val;
+
+	return anx7625_reg_write(ctx, client, offset, (val & (mask)));
+}
+
+static int anx7625_write_and_or(struct anx7625_data *ctx,
+				struct i2c_client *client,
+				u8 offset, u8 and_mask, u8 or_mask)
+{
+	int val;
+
+	val = anx7625_reg_read(ctx, client, offset);
+	if (val < 0)
+		return val;
+
+	return anx7625_reg_write(ctx, client,
+				 offset, (val & and_mask) | (or_mask));
+}
+
+static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx)
+{
+	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS);
+}
+
+static int wait_aux_op_finish(struct anx7625_data *ctx)
+{
+	struct device *dev = &ctx->client->dev;
+	int val;
+	int ret;
+
+	ret = readx_poll_timeout(anx7625_read_ctrl_status_p0,
+				 ctx, val,
+				 (!(val & AP_AUX_CTRL_OP_EN) || (val < 0)),
+				 2000,
+				 2000 * 150);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "aux operation fail!\n");
+		return -EIO;
+	}
+
+	val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+			       AP_AUX_CTRL_STATUS);
+	if (val < 0 || (val & 0x0F)) {
+		DRM_DEV_ERROR(dev, "aux status %02x\n", val);
+		val = -EIO;
+	}
+
+	return val;
+}
+
+static int anx7625_video_mute_control(struct anx7625_data *ctx,
+				      u8 status)
+{
+	int ret;
+
+	if (status) {
+		/* Set mute on flag */
+		ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+				       AP_AV_STATUS, AP_MIPI_MUTE);
+		/* Clear mipi RX en */
+		ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+					 AP_AV_STATUS, (u8)~AP_MIPI_RX_EN);
+	} else {
+		/* Mute off flag */
+		ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+					AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+		/* Set MIPI RX EN */
+		ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+					AP_AV_STATUS, AP_MIPI_RX_EN);
+	}
+
+	return ret;
+}
+
+static int anx7625_config_audio_input(struct anx7625_data *ctx)
+{
+	struct device *dev = &ctx->client->dev;
+	int ret;
+
+	/* Channel num */
+	ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+				AUDIO_CHANNEL_STATUS_6, I2S_CH_2 << 5);
+
+	/* FS */
+	ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+				    AUDIO_CHANNEL_STATUS_4,
+				    0xf0, AUDIO_FS_48K);
+	/* Word length */
+	ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+				    AUDIO_CHANNEL_STATUS_5,
+				    0xf0, AUDIO_W_LEN_24_24MAX);
+	/* I2S */
+	ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
+				AUDIO_CHANNEL_STATUS_6, I2S_SLAVE_MODE);
+	ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client,
+				 AUDIO_CONTROL_REGISTER, ~TDM_TIMING_MODE);
+	/* Audio change flag */
+	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+				AP_AV_STATUS, AP_AUDIO_CHG);
+
+	if (ret < 0)
+		DRM_DEV_ERROR(dev, "fail to config audio.\n");
+
+	return ret;
+}
+
+/* Reduction of fraction a/b */
+static void anx7625_reduction_of_a_fraction(unsigned long *a, unsigned long *b)
+{
+	unsigned long gcd_num;
+	unsigned long tmp_a, tmp_b;
+	u32 i = 1;
+
+	gcd_num = gcd(*a, *b);
+	*a /= gcd_num;
+	*b /= gcd_num;
+
+	tmp_a = *a;
+	tmp_b = *b;
+
+	while ((*a > MAX_UNSIGNED_24BIT) || (*b > MAX_UNSIGNED_24BIT)) {
+		i++;
+		*a = tmp_a / i;
+		*b = tmp_b / i;
+	}
+
+	/*
+	 * In the end, make a, b larger to have higher ODFC PLL
+	 * output frequency accuracy
+	 */
+	while ((*a < MAX_UNSIGNED_24BIT) && (*b < MAX_UNSIGNED_24BIT)) {
+		*a <<= 1;
+		*b <<= 1;
+	}
+
+	*a >>= 1;
+	*b >>= 1;
+}
+
+static int anx7625_calculate_m_n(u32 pixelclock,
+				 unsigned long *m,
+				 unsigned long *n,
+				 u8 *post_divider)
+{
+	if (pixelclock > PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN) {
+		/* Pixel clock frequency is too high */
+		DRM_ERROR("pixelclock too high, act(%d), maximum(%lu)\n",
+			  pixelclock,
+			  PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN);
+		return -EINVAL;
+	}
+
+	if (pixelclock < PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX) {
+		/* Pixel clock frequency is too low */
+		DRM_ERROR("pixelclock too low, act(%d), minimum(%lu)\n",
+			  pixelclock,
+			  PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX);
+		return -EINVAL;
+	}
+
+	for (*post_divider = 1;
+	     pixelclock < (PLL_OUT_FREQ_MIN / (*post_divider));)
+		*post_divider += 1;
+
+	if (*post_divider > POST_DIVIDER_MAX) {
+		for (*post_divider = 1;
+		     (pixelclock <
+		      (PLL_OUT_FREQ_ABS_MIN / (*post_divider)));)
+			*post_divider += 1;
+
+		if (*post_divider > POST_DIVIDER_MAX) {
+			DRM_ERROR("cannot find a proper post_divider(%d)\n",
+				  *post_divider);
+			return -EDOM;
+		}
+	}
+
+	/* Patch to improve the accuracy */
+	if (*post_divider == 7) {
+		/* 27,000,000 is not divisible by 7 */
+		*post_divider = 8;
+	} else if (*post_divider == 11) {
+		/* 27,000,000 is not divisible by 11 */
+		*post_divider = 12;
+	} else if ((*post_divider == 13) || (*post_divider == 14)) {
+		/* 27,000,000 is not divisible by 13 or 14 */
+		*post_divider = 15;
+	}
+
+	if (pixelclock * (*post_divider) > PLL_OUT_FREQ_ABS_MAX) {
+		DRM_ERROR("act clock(%u) larger than maximum(%lu)\n",
+			  pixelclock * (*post_divider),
+			  PLL_OUT_FREQ_ABS_MAX);
+		return -EDOM;
+	}
+
+	*m = pixelclock;
+	*n = XTAL_FRQ / (*post_divider);
+
+	anx7625_reduction_of_a_fraction(m, n);
+
+	return 0;
+}
+
+static int anx7625_odfc_config(struct anx7625_data *ctx,
+			       u8 post_divider)
+{
+	int ret;
+	struct device *dev = &ctx->client->dev;
+
+	/* Config input reference clock frequency 27MHz/19.2MHz */
+	ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+				~(REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+				(REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+	/* Post divider */
+	ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+				 MIPI_DIGITAL_PLL_8, 0x0f);
+	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_8,
+				post_divider << 4);
+
+	/* Add patch for MIS2-125 (5pcs ANX7625 fail ATE MBIST test) */
+	ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+				 ~MIPI_PLL_VCO_TUNE_REG_VAL);
+
+	/* Reset ODFC PLL */
+	ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+				 ~MIPI_PLL_RESET_N);
+	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+				MIPI_PLL_RESET_N);
+
+	if (ret < 0)
+		DRM_DEV_ERROR(dev, "IO error.\n");
+
+	return ret;
+}
+
+static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
+{
+	struct device *dev = &ctx->client->dev;
+	unsigned long m, n;
+	u16 htotal;
+	int ret;
+	u8 post_divider = 0;
+
+	ret = anx7625_calculate_m_n(ctx->dt.pixelclock.min * 1000,
+				    &m, &n, &post_divider);
+
+	if (ret) {
+		DRM_DEV_ERROR(dev, "cannot get proper M/N values.\n");
+		return ret;
+	}
+
+	DRM_DEV_DEBUG_DRIVER(dev, "compute M(%lu), N(%lu), divider(%d).\n",
+			     m, n, post_divider);
+
+	/* Configure pixel clock */
+	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_L,
+				(ctx->dt.pixelclock.min / 1000) & 0xFF);
+	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_H,
+				 (ctx->dt.pixelclock.min / 1000) >> 8);
+	/* Lane count */
+	ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+				 MIPI_LANE_CTRL_0, 0xfc);
+	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client,
+				MIPI_LANE_CTRL_0, 3);
+
+	/* Htotal */
+	htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min +
+		 ctx->dt.hback_porch.min + ctx->dt.hsync_len.min;
+	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+				 HORIZONTAL_TOTAL_PIXELS_L, htotal & 0xFF);
+	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+				 HORIZONTAL_TOTAL_PIXELS_H, htotal >> 8);
+	/* Hactive */
+	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
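
The M/N computation above is easier to follow with concrete numbers. Below is a minimal stand-alone sketch (plain user-space C; gcd_ul and the 1080p pixel clock are ours, the constants are copied from anx7625.h) that walks a 148.5 MHz pixel clock through the post-divider search and the GCD-based reduction:

    #include <stdio.h>

    #define XTAL_FRQ            (27UL * 1000000)
    #define PLL_OUT_FREQ_MIN    520000000UL
    #define MAX_UNSIGNED_24BIT  16777215UL

    static unsigned long gcd_ul(unsigned long a, unsigned long b)
    {
        while (b) {
            unsigned long t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    int main(void)
    {
        unsigned long pixelclock = 148500000;  /* 1080p60, in Hz */
        unsigned long m, n, g;
        unsigned int post_div = 1;

        /* Smallest divider that keeps the VCO at or above its minimum */
        while (pixelclock < PLL_OUT_FREQ_MIN / post_div)
            post_div++;                        /* ends at 4: VCO = 594 MHz */

        /* M/N = pixelclock / (XTAL / post_div), reduced by the GCD */
        m = pixelclock;
        n = XTAL_FRQ / post_div;
        g = gcd_ul(m, n);
        m /= g;                                /* 22 */
        n /= g;                                /* 1  */

        /* Scale up for accuracy while both still fit in 24 bits */
        while (m < MAX_UNSIGNED_24BIT && n < MAX_UNSIGNED_24BIT) {
            m <<= 1;
            n <<= 1;
        }
        m >>= 1;
        n >>= 1;

        printf("post_div=%u M=%lu N=%lu\n", post_div, m, n);
        return 0;
    }
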
HORIZONTAL_ACTIVE_PIXELS_L, ctx->dt.hactive.min & 0xFF); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + HORIZONTAL_ACTIVE_PIXELS_H, ctx->dt.hactive.min >> 8); + /* HFP */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + HORIZONTAL_FRONT_PORCH_L, ctx->dt.hfront_porch.min); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + HORIZONTAL_FRONT_PORCH_H, + ctx->dt.hfront_porch.min >> 8); + /* HWS */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + HORIZONTAL_SYNC_WIDTH_L, ctx->dt.hsync_len.min); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + HORIZONTAL_SYNC_WIDTH_H, ctx->dt.hsync_len.min >> 8); + /* HBP */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + HORIZONTAL_BACK_PORCH_L, ctx->dt.hback_porch.min); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + HORIZONTAL_BACK_PORCH_H, ctx->dt.hback_porch.min >> 8); + /* Vactive */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_L, + ctx->dt.vactive.min); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_H, + ctx->dt.vactive.min >> 8); + /* VFP */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + VERTICAL_FRONT_PORCH, ctx->dt.vfront_porch.min); + /* VWS */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + VERTICAL_SYNC_WIDTH, ctx->dt.vsync_len.min); + /* VBP */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, + VERTICAL_BACK_PORCH, ctx->dt.vback_porch.min); + /* M value */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_PLL_M_NUM_23_16, (m >> 16) & 0xff); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_PLL_M_NUM_15_8, (m >> 8) & 0xff); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_PLL_M_NUM_7_0, (m & 0xff)); + /* N value */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_PLL_N_NUM_23_16, (n >> 16) & 0xff); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_PLL_N_NUM_15_8, (n >> 8) & 0xff); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_7_0, + (n & 0xff)); + /* Diff */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_DIGITAL_ADJ_1, 0x3D); + + ret |= anx7625_odfc_config(ctx, post_divider - 1); + + if (ret < 0) + DRM_DEV_ERROR(dev, "mipi dsi setup IO error.\n"); + + return ret; +} + +static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx) +{ + int val; + struct device *dev = &ctx->client->dev; + + /* Swap MIPI-DSI data lane 3 P and N */ + val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP); + if (val < 0) { + DRM_DEV_ERROR(dev, "IO error : access MIPI_SWAP.\n"); + return -EIO; + } + + val |= (1 << MIPI_SWAP_CH3); + return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP, val); +} + +static int anx7625_api_dsi_config(struct anx7625_data *ctx) + +{ + int val, ret; + struct device *dev = &ctx->client->dev; + + /* Swap MIPI-DSI data lane 3 P and N */ + ret = anx7625_swap_dsi_lane3(ctx); + if (ret < 0) { + DRM_DEV_ERROR(dev, "IO error : swap dsi lane 3 fail.\n"); + return ret; + } + + /* DSI clock settings */ + val = (0 << MIPI_HS_PWD_CLK) | + (0 << MIPI_HS_RT_CLK) | + (0 << MIPI_PD_CLK) | + (1 << MIPI_CLK_RT_MANUAL_PD_EN) | + (1 << MIPI_CLK_HS_MANUAL_PD_EN) | + (0 << MIPI_CLK_DET_DET_BYPASS) | + (0 << MIPI_CLK_MISS_CTRL) | + (0 << MIPI_PD_LPTX_CH_MANUAL_PD_EN); + ret = anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_PHY_CONTROL_3, val); + + /* + * Decreased HS prepare timing delay from 160ns to 80ns work with + * a) Dragon board 810 series (Qualcomm AP) + * b) Moving Pixel DSI source (PG3A pattern generator + + * P332 
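
Every timing parameter in this block is programmed as a low/high register pair. A hedged sketch of the split (reg_write and write_u16_pair are invented stand-ins, not driver helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the byte-register writes above */
    static void reg_write(uint8_t addr, uint8_t val)
    {
        printf("reg[0x%02x] = 0x%02x\n", addr, val);
    }

    static void write_u16_pair(uint8_t lo_addr, uint8_t hi_addr, uint16_t val)
    {
        reg_write(lo_addr, val & 0xFF);  /* low byte first, as the driver does */
        reg_write(hi_addr, val >> 8);
    }

    int main(void)
    {
        uint16_t htotal = 2200;  /* e.g. total width of a 1080p60 mode */

        write_u16_pair(0x19, 0x1A, htotal);  /* HORIZONTAL_TOTAL_PIXELS_L/H */
        return 0;
    }
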
D-PHY Probe) default D-PHY timing + * 5ns/step + */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_TIME_HS_PRPR, 0x10); + + /* Enable DSI mode*/ + ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_18, + SELECT_DSI << MIPI_DPI_SELECT); + + ret |= anx7625_dsi_video_timing_config(ctx); + if (ret < 0) { + DRM_DEV_ERROR(dev, "dsi video timing config fail\n"); + return ret; + } + + /* Toggle m, n ready */ + ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6, + ~(MIPI_M_NUM_READY | MIPI_N_NUM_READY)); + usleep_range(1000, 1100); + ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6, + MIPI_M_NUM_READY | MIPI_N_NUM_READY); + + /* Configure integer stable register */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_VIDEO_STABLE_CNT, 0x02); + /* Power on MIPI RX */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_LANE_CTRL_10, 0x00); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_LANE_CTRL_10, 0x80); + + if (ret < 0) + DRM_DEV_ERROR(dev, "IO error : mipi dsi enable init fail.\n"); + + return ret; +} + +static int anx7625_dsi_config(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n"); + + /* DSC disable */ + ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client, + R_DSC_CTRL_0, ~DSC_EN); + + ret |= anx7625_api_dsi_config(ctx); + + if (ret < 0) { + DRM_DEV_ERROR(dev, "IO error : api dsi config error.\n"); + return ret; + } + + /* Set MIPI RX EN */ + ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, AP_MIPI_RX_EN); + /* Clear mute flag */ + ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, (u8)~AP_MIPI_MUTE); + if (ret < 0) + DRM_DEV_ERROR(dev, "IO error : enable mipi rx fail.\n"); + else + DRM_DEV_DEBUG_DRIVER(dev, "success to config DSI\n"); + + return ret; +} + +static void anx7625_dp_start(struct anx7625_data *ctx) +{ + int ret; + struct device *dev = &ctx->client->dev; + + if (!ctx->display_timing_valid) { + DRM_DEV_ERROR(dev, "mipi not set display timing yet.\n"); + return; + } + + anx7625_config_audio_input(ctx); + + ret = anx7625_dsi_config(ctx); + + if (ret < 0) + DRM_DEV_ERROR(dev, "MIPI phy setup error.\n"); +} + +static void anx7625_dp_stop(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n"); + + /* + * Video disable: 0x72:08 bit 7 = 0; + * Audio disable: 0x70:87 bit 0 = 0; + */ + ret = anx7625_write_and(ctx, ctx->i2c.tx_p0_client, 0x87, 0xfe); + ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f); + + ret |= anx7625_video_mute_control(ctx, 1); + if (ret < 0) + DRM_DEV_ERROR(dev, "IO error : mute video fail\n"); +} + +static int sp_tx_rst_aux(struct anx7625_data *ctx) +{ + int ret; + + ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, RST_CTRL2, + AUX_RST); + ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, RST_CTRL2, + ~AUX_RST); + return ret; +} + +static int sp_tx_aux_wr(struct anx7625_data *ctx, u8 offset) +{ + int ret; + + ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_BUFF_START, offset); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_COMMAND, 0x04); + ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, + AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); + return (ret | wait_aux_op_finish(ctx)); +} + +static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd) +{ + int ret; + + ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_COMMAND, 
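
sp_tx_aux_wr()/sp_tx_aux_rd() program AP_AUX_COMMAND with a combined command byte. Judging from the uses in this patch (0x01 reads one byte, 0xf1 reads sixteen), the byte appears to pack the transfer length minus one in the high nibble and the operation code in the low nibble; that interpretation is an inference from the code, not something the patch states.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Assumed encoding of AP_AUX_COMMAND, inferred from the uses above
     * (0x01 = read 1 byte, 0xf1 = read 16 bytes): high nibble is the
     * transfer length minus one, low nibble is the operation code.
     */
    static uint8_t aux_command(uint8_t op, uint8_t len)
    {
        return (uint8_t)((((len - 1) & 0xf) << 4) | (op & 0xf));
    }

    int main(void)
    {
        printf("read 1  byte : 0x%02x\n", aux_command(0x1, 1));   /* 0x01 */
        printf("read 16 bytes: 0x%02x\n", aux_command(0x1, 16));  /* 0xf1 */
        return 0;
    }
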
len_cmd); + ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, + AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); + return (ret | wait_aux_op_finish(ctx)); +} + +static int sp_tx_get_edid_block(struct anx7625_data *ctx) +{ + int c = 0; + struct device *dev = &ctx->client->dev; + + sp_tx_aux_wr(ctx, 0x7e); + sp_tx_aux_rd(ctx, 0x01); + c = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START); + if (c < 0) { + DRM_DEV_ERROR(dev, "IO error : access AUX BUFF.\n"); + return -EIO; + } + + DRM_DEV_DEBUG_DRIVER(dev, " EDID Block = %d\n", c + 1); + + if (c > MAX_EDID_BLOCK) + c = 1; + + return c; +} + +static int edid_read(struct anx7625_data *ctx, + u8 offset, u8 *pblock_buf) +{ + int ret, cnt; + struct device *dev = &ctx->client->dev; + + for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) { + sp_tx_aux_wr(ctx, offset); + /* Set I2C read com 0x01 mot = 0 and read 16 bytes */ + ret = sp_tx_aux_rd(ctx, 0xf1); + + if (ret) { + sp_tx_rst_aux(ctx); + DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n"); + } else { + ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client, + AP_AUX_BUFF_START, + MAX_DPCD_BUFFER_SIZE, + pblock_buf); + if (ret > 0) + break; + } + } + + if (cnt > EDID_TRY_CNT) + return -EIO; + + return 0; +} + +static int segments_edid_read(struct anx7625_data *ctx, + u8 segment, u8 *buf, u8 offset) +{ + u8 cnt; + int ret; + struct device *dev = &ctx->client->dev; + + /* Write address only */ + ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_ADDR_7_0, 0x30); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_COMMAND, 0x04); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_CTRL_STATUS, + AP_AUX_CTRL_ADDRONLY | AP_AUX_CTRL_OP_EN); + + ret |= wait_aux_op_finish(ctx); + /* Write segment address */ + ret |= sp_tx_aux_wr(ctx, segment); + /* Data read */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_ADDR_7_0, 0x50); + if (ret) { + DRM_DEV_ERROR(dev, "IO error : aux initial fail.\n"); + return ret; + } + + for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) { + sp_tx_aux_wr(ctx, offset); + /* Set I2C read com 0x01 mot = 0 and read 16 bytes */ + ret = sp_tx_aux_rd(ctx, 0xf1); + + if (ret) { + ret = sp_tx_rst_aux(ctx); + DRM_DEV_ERROR(dev, "segment read fail, reset!\n"); + } else { + ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client, + AP_AUX_BUFF_START, + MAX_DPCD_BUFFER_SIZE, buf); + if (ret > 0) + break; + } + } + + if (cnt > EDID_TRY_CNT) + return -EIO; + + return 0; +} + +static int sp_tx_edid_read(struct anx7625_data *ctx, + u8 *pedid_blocks_buf) +{ + u8 offset, edid_pos; + int count, blocks_num; + u8 pblock_buf[MAX_DPCD_BUFFER_SIZE]; + u8 i, j; + u8 g_edid_break = 0; + int ret; + struct device *dev = &ctx->client->dev; + + /* Address initial */ + ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_ADDR_7_0, 0x50); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_ADDR_15_8, 0); + ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client, + AP_AUX_ADDR_19_16, 0xf0); + if (ret < 0) { + DRM_DEV_ERROR(dev, "access aux channel IO error.\n"); + return -EIO; + } + + blocks_num = sp_tx_get_edid_block(ctx); + if (blocks_num < 0) + return blocks_num; + + count = 0; + + do { + switch (count) { + case 0: + case 1: + for (i = 0; i < 8; i++) { + offset = (i + count * 8) * MAX_DPCD_BUFFER_SIZE; + g_edid_break = edid_read(ctx, offset, + pblock_buf); + + if (g_edid_break) + break; + + memcpy(&pedid_blocks_buf[offset], + pblock_buf, + MAX_DPCD_BUFFER_SIZE); + } + + break; + case 2: + offset = 0x00; + + for (j = 0; j < 8; j++) { + edid_pos = (j + 
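
edid_read() pulls EDID data through the AUX buffer sixteen bytes at a time and resets the AUX channel between failed attempts. The retry shape, reduced to a runnable sketch with a mocked transfer (all names and the simulated failure are invented):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define CHUNK  16
    #define TRIES  3

    /* Mock transfer: pretend the first attempt fails, then succeed */
    static int mock_aux_read(uint8_t offset, uint8_t *buf)
    {
        static int calls;

        (void)offset;
        if (calls++ == 0)
            return -1;  /* simulate a failed AUX transaction */
        memset(buf, 0xa5, CHUNK);
        return CHUNK;
    }

    static int read_chunk_with_retry(uint8_t offset, uint8_t *buf)
    {
        for (int attempt = 0; attempt <= TRIES; attempt++) {
            if (mock_aux_read(offset, buf) > 0)
                return 0;
            /* real driver: sp_tx_rst_aux() here to recover the channel */
        }
        return -1;
    }

    int main(void)
    {
        uint8_t block[128];

        for (int off = 0; off < 128; off += CHUNK)
            if (read_chunk_with_retry((uint8_t)off, block + off))
                return 1;
        printf("block read OK\n");
        return 0;
    }
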
count * 8) * + MAX_DPCD_BUFFER_SIZE; + + if (g_edid_break == 1) + break; + + segments_edid_read(ctx, count / 2, + pblock_buf, offset); + memcpy(&pedid_blocks_buf[edid_pos], + pblock_buf, + MAX_DPCD_BUFFER_SIZE); + offset = offset + 0x10; + } + + break; + case 3: + offset = 0x80; + + for (j = 0; j < 8; j++) { + edid_pos = (j + count * 8) * + MAX_DPCD_BUFFER_SIZE; + if (g_edid_break == 1) + break; + + segments_edid_read(ctx, count / 2, + pblock_buf, offset); + memcpy(&pedid_blocks_buf[edid_pos], + pblock_buf, + MAX_DPCD_BUFFER_SIZE); + offset = offset + 0x10; + } + + break; + default: + break; + } + + count++; + + } while (blocks_num >= count); + + /* Check edid data */ + if (!drm_edid_is_valid((struct edid *)pedid_blocks_buf)) { + DRM_DEV_ERROR(dev, "WARNING! edid check fail!\n"); + return -EINVAL; + } + + /* Reset aux channel */ + sp_tx_rst_aux(ctx); + + return (blocks_num + 1); +} + +static void anx7625_power_on(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + + if (!ctx->pdata.low_power_mode) { + DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n"); + return; + } + + /* Power on pin enable */ + gpiod_set_value(ctx->pdata.gpio_p_on, 1); + usleep_range(10000, 11000); + /* Power reset pin enable */ + gpiod_set_value(ctx->pdata.gpio_reset, 1); + usleep_range(10000, 11000); + + DRM_DEV_DEBUG_DRIVER(dev, "power on !\n"); +} + +static void anx7625_power_standby(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + + if (!ctx->pdata.low_power_mode) { + DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n"); + return; + } + + gpiod_set_value(ctx->pdata.gpio_reset, 0); + usleep_range(1000, 1100); + gpiod_set_value(ctx->pdata.gpio_p_on, 0); + usleep_range(1000, 1100); + DRM_DEV_DEBUG_DRIVER(dev, "power down\n"); +} + +/* Basic configurations of ANX7625 */ +static void anx7625_config(struct anx7625_data *ctx) +{ + anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + XTAL_FRQ_SEL, XTAL_FRQ_27M); +} + +static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + int ret; + + /* Reset main ocm */ + ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40); + /* Disable PD */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, AP_DISABLE_PD); + /* Release main ocm */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x00); + + if (ret < 0) + DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n"); + else + DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n"); +} + +static int anx7625_ocm_loading_check(struct anx7625_data *ctx) +{ + int ret; + struct device *dev = &ctx->client->dev; + + /* Check interface workable */ + ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, + FLASH_LOAD_STA); + if (ret < 0) { + DRM_DEV_ERROR(dev, "IO error : access flash load.\n"); + return ret; + } + if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK) + return -ENODEV; + + anx7625_disable_pd_protocol(ctx); + + DRM_DEV_DEBUG_DRIVER(dev, "Firmware ver %02x%02x,", + anx7625_reg_read(ctx, + ctx->i2c.rx_p0_client, + OCM_FW_VERSION), + anx7625_reg_read(ctx, + ctx->i2c.rx_p0_client, + OCM_FW_REVERSION)); + DRM_DEV_DEBUG_DRIVER(dev, "Driver version %s\n", + ANX7625_DRV_VERSION); + + return 0; +} + +static void anx7625_power_on_init(struct anx7625_data *ctx) +{ + int retry_count, i; + + for (retry_count = 0; retry_count < 3; retry_count++) { + anx7625_power_on(ctx); + anx7625_config(ctx); + + for (i = 0; i < OCM_LOADING_TIME; i++) { + if (!anx7625_ocm_loading_check(ctx)) + return; + usleep_range(1000, 
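
sp_tx_edid_read() addresses EDID extension blocks through the DDC segment pointer: each segment holds two 128-byte blocks, so blocks 2 and 3 are fetched from segment 1 at in-segment offsets 0x00 and 0x80, exactly as the case 2/case 3 branches above do. The mapping in isolation:

    #include <stdio.h>

    /*
     * DDC addressing used above: block N lives in segment N/2 at
     * offset (N % 2) * 128 within that segment.
     */
    int main(void)
    {
        for (int block = 0; block < 4; block++)
            printf("block %d -> segment %d, offset 0x%02x\n",
                   block, block / 2, (block % 2) * 128);
        return 0;
    }
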
1100);
+		}
+		anx7625_power_standby(ctx);
+	}
+}
+
+static void anx7625_chip_control(struct anx7625_data *ctx, int state)
+{
+	struct device *dev = &ctx->client->dev;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "before set, power_state(%d).\n",
+			     atomic_read(&ctx->power_status));
+
+	if (!ctx->pdata.low_power_mode)
+		return;
+
+	if (state) {
+		atomic_inc(&ctx->power_status);
+		if (atomic_read(&ctx->power_status) == 1)
+			anx7625_power_on_init(ctx);
+	} else {
+		if (atomic_read(&ctx->power_status)) {
+			atomic_dec(&ctx->power_status);
+
+			if (atomic_read(&ctx->power_status) == 0)
+				anx7625_power_standby(ctx);
+		}
+	}
+
+	DRM_DEV_DEBUG_DRIVER(dev, "after set, power_state(%d).\n",
+			     atomic_read(&ctx->power_status));
+}
+
+static void anx7625_init_gpio(struct anx7625_data *platform)
+{
+	struct device *dev = &platform->client->dev;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n");
+
+	/* GPIO for chip power enable */
+	platform->pdata.gpio_p_on =
+		devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+	/* GPIO for chip reset */
+	platform->pdata.gpio_reset =
+		devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+
+	if (platform->pdata.gpio_p_on && platform->pdata.gpio_reset) {
+		platform->pdata.low_power_mode = 1;
+		DRM_DEV_DEBUG_DRIVER(dev, "low power mode, pon %d, reset %d.\n",
+				     desc_to_gpio(platform->pdata.gpio_p_on),
+				     desc_to_gpio(platform->pdata.gpio_reset));
+	} else {
+		platform->pdata.low_power_mode = 0;
+		DRM_DEV_DEBUG_DRIVER(dev, "not low power mode.\n");
+	}
+}
+
+static void anx7625_stop_dp_work(struct anx7625_data *ctx)
+{
+	ctx->hpd_status = 0;
+	ctx->hpd_high_cnt = 0;
+	ctx->display_timing_valid = 0;
+
+	if (ctx->pdata.low_power_mode == 0)
+		anx7625_disable_pd_protocol(ctx);
+}
+
+static void anx7625_start_dp_work(struct anx7625_data *ctx)
+{
+	int ret;
+	struct device *dev = &ctx->client->dev;
+
+	if (ctx->hpd_high_cnt >= 2) {
+		DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n");
+		return;
+	}
+
+	ctx->hpd_high_cnt++;
+
+	/* HDCP is not supported */
+	ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
+
+	/* Try auth flag */
+	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
+	/* Interrupt for DRM */
+	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
+	if (ret < 0)
+		return;
+
+	ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86);
+	if (ret < 0)
+		return;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret);
+}
+
+static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
+{
+	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
+}
+
+static void anx7625_hpd_polling(struct anx7625_data *ctx)
+{
+	int ret, val;
+	struct device *dev = &ctx->client->dev;
+
+	if (atomic_read(&ctx->power_status) != 1) {
+		DRM_DEV_DEBUG_DRIVER(dev, "No need to poll HPD status.\n");
+		return;
+	}
+
+	ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
+				 ctx, val,
+				 ((val & HPD_STATUS) || (val < 0)),
+				 5000,
+				 5000 * 100);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "HPD polling timeout!\n");
+	} else {
+		DRM_DEV_DEBUG_DRIVER(dev, "HPD raised up.\n");
+		anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+				  INTR_ALERT_1, 0xFF);
+		anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+				  INTERFACE_CHANGE_INT, 0);
+	}
+
+	anx7625_start_dp_work(ctx);
+}
+
+static void anx7625_disconnect_check(struct anx7625_data *ctx)
+{
+	if (atomic_read(&ctx->power_status) == 0)
+		anx7625_stop_dp_work(ctx);
+}
+
+static void anx7625_low_power_mode_check(struct anx7625_data *ctx,
+					 int state)
+{
+	struct device *dev = &ctx->client->dev;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "low power mode 
check, state(%d).\n", state); + + if (ctx->pdata.low_power_mode) { + anx7625_chip_control(ctx, state); + if (state) + anx7625_hpd_polling(ctx); + else + anx7625_disconnect_check(ctx); + } +} + +static void anx7625_remove_edid(struct anx7625_data *ctx) +{ + ctx->slimport_edid_p.edid_block_num = -1; +} + +static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) +{ + struct device *dev = &ctx->client->dev; + + /* HPD changed */ + DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_default_func: %d\n", + (u32)on); + + if (on == 0) { + DRM_DEV_DEBUG_DRIVER(dev, " HPD low\n"); + anx7625_remove_edid(ctx); + anx7625_stop_dp_work(ctx); + } else { + DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n"); + anx7625_start_dp_work(ctx); + } + + ctx->hpd_status = 1; +} + +static int anx7625_hpd_change_detect(struct anx7625_data *ctx) +{ + int intr_vector, status; + struct device *dev = &ctx->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "power_status=%d\n", + (u32)atomic_read(&ctx->power_status)); + + status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client, + INTR_ALERT_1, 0xFF); + if (status < 0) { + DRM_DEV_ERROR(dev, "cannot clear alert reg.\n"); + return status; + } + + intr_vector = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, + INTERFACE_CHANGE_INT); + if (intr_vector < 0) { + DRM_DEV_ERROR(dev, "cannot access interrupt change reg.\n"); + return intr_vector; + } + DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x44=%x\n", intr_vector); + status = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + INTERFACE_CHANGE_INT, + intr_vector & (~intr_vector)); + if (status < 0) { + DRM_DEV_ERROR(dev, "cannot clear interrupt change reg.\n"); + return status; + } + + if (!(intr_vector & HPD_STATUS_CHANGE)) + return -ENOENT; + + status = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, + SYSTEM_STSTUS); + if (status < 0) { + DRM_DEV_ERROR(dev, "cannot clear interrupt status.\n"); + return status; + } + + DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x45=%x\n", status); + dp_hpd_change_handler(ctx, status & HPD_STATUS); + + return 0; +} + +static void anx7625_work_func(struct work_struct *work) +{ + int event; + struct anx7625_data *ctx = container_of(work, + struct anx7625_data, work); + + mutex_lock(&ctx->lock); + event = anx7625_hpd_change_detect(ctx); + mutex_unlock(&ctx->lock); + if (event < 0) + return; + + if (ctx->bridge_attached) + drm_helper_hpd_irq_event(ctx->bridge.dev); +} + +static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data) +{ + struct anx7625_data *ctx = (struct anx7625_data *)data; + + if (atomic_read(&ctx->power_status) != 1) + return IRQ_NONE; + + queue_work(ctx->workqueue, &ctx->work); + + return IRQ_HANDLED; +} + +static int anx7625_parse_dt(struct device *dev, + struct anx7625_platform_data *pdata) +{ + struct device_node *np = dev->of_node; + struct drm_panel *panel; + int ret; + + pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0); + if (!pdata->mipi_host_node) { + DRM_DEV_ERROR(dev, "fail to get internal panel.\n"); + return -ENODEV; + } + + DRM_DEV_DEBUG_DRIVER(dev, "found dsi host node.\n"); + + ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL); + if (ret < 0) { + if (ret == -ENODEV) + return 0; + return ret; + } + if (!panel) + return -ENODEV; + + pdata->panel_bridge = devm_drm_panel_bridge_add(dev, panel); + if (IS_ERR(pdata->panel_bridge)) + return PTR_ERR(pdata->panel_bridge); + DRM_DEV_DEBUG_DRIVER(dev, "get panel node.\n"); + + return 0; +} + +static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge) +{ + return container_of(bridge, struct anx7625_data, bridge); +} + +static struct 
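
anx7625_chip_control() above implements a small power reference count over the enable/reset GPIOs: the first user powers the chip up, the last one powers it down. The same idea as a stand-alone sketch (mocked power hooks, plain C):

    #include <stdio.h>

    static int power_refcnt;

    static void power_on(void)  { printf("chip on\n"); }
    static void power_off(void) { printf("chip off\n"); }

    static void chip_control(int want_on)
    {
        if (want_on) {
            if (++power_refcnt == 1)
                power_on();   /* first user powers up */
        } else if (power_refcnt && --power_refcnt == 0) {
            power_off();      /* last user powers down */
        }
    }

    int main(void)
    {
        chip_control(1);  /* on */
        chip_control(1);  /* already on, just counted */
        chip_control(0);
        chip_control(0);  /* off */
        return 0;
    }
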
edid *anx7625_get_edid(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + struct s_edid_data *p_edid = &ctx->slimport_edid_p; + int edid_num; + u8 *edid; + + edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL); + if (!edid) { + DRM_DEV_ERROR(dev, "Fail to allocate buffer\n"); + return NULL; + } + + if (ctx->slimport_edid_p.edid_block_num > 0) { + memcpy(edid, ctx->slimport_edid_p.edid_raw_data, + FOUR_BLOCK_SIZE); + return (struct edid *)edid; + } + + anx7625_low_power_mode_check(ctx, 1); + edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data); + anx7625_low_power_mode_check(ctx, 0); + + if (edid_num < 1) { + DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num); + kfree(edid); + return NULL; + } + + p_edid->edid_block_num = edid_num; + + memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE); + return (struct edid *)edid; +} + +static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "sink detect, return connected\n"); + + return connector_status_connected; +} + +static int anx7625_attach_dsi(struct anx7625_data *ctx) +{ + struct mipi_dsi_device *dsi; + struct device *dev = &ctx->client->dev; + struct mipi_dsi_host *host; + const struct mipi_dsi_device_info info = { + .type = "anx7625", + .channel = 0, + .node = NULL, + }; + + DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n"); + + host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node); + if (!host) { + DRM_DEV_ERROR(dev, "fail to find dsi host.\n"); + return -EINVAL; + } + + dsi = mipi_dsi_device_register_full(host, &info); + if (IS_ERR(dsi)) { + DRM_DEV_ERROR(dev, "fail to create dsi device.\n"); + return -EINVAL; + } + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | + MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_MODE_VIDEO_HSE; + + if (mipi_dsi_attach(dsi) < 0) { + DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n"); + mipi_dsi_device_unregister(dsi); + return -EINVAL; + } + + ctx->dsi = dsi; + + DRM_DEV_DEBUG_DRIVER(dev, "attach dsi succeeded.\n"); + + return 0; +} + +static void anx7625_bridge_detach(struct drm_bridge *bridge) +{ + struct anx7625_data *ctx = bridge_to_anx7625(bridge); + + if (ctx->dsi) { + mipi_dsi_detach(ctx->dsi); + mipi_dsi_device_unregister(ctx->dsi); + } +} + +static int anx7625_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct anx7625_data *ctx = bridge_to_anx7625(bridge); + int err; + struct device *dev = &ctx->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n"); + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + if (!bridge->encoder) { + DRM_DEV_ERROR(dev, "Parent encoder object not found"); + return -ENODEV; + } + + err = anx7625_attach_dsi(ctx); + if (err) { + DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", err); + return err; + } + + if (ctx->pdata.panel_bridge) { + err = drm_bridge_attach(bridge->encoder, + ctx->pdata.panel_bridge, + &ctx->bridge, flags); + if (err) { + DRM_DEV_ERROR(dev, + "Fail to attach panel bridge: %d\n", err); + return err; + } + } + + ctx->bridge_attached = 1; + + return 0; +} + +static enum drm_mode_status +anx7625_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct anx7625_data *ctx = bridge_to_anx7625(bridge); + struct device *dev = &ctx->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n"); + + /* Max 1200p at 5.4 
GHz, one lane, pixel clock 300M */
+	if (mode->clock > SUPPORT_PIXEL_CLOCK) {
+		DRM_DEV_DEBUG_DRIVER(dev,
+				     "drm mode invalid, pixelclock too high.\n");
+		return MODE_CLOCK_HIGH;
+	}
+
+	DRM_DEV_DEBUG_DRIVER(dev, "drm mode valid.\n");
+
+	return MODE_OK;
+}
+
+static void anx7625_bridge_mode_set(struct drm_bridge *bridge,
+				    const struct drm_display_mode *old_mode,
+				    const struct drm_display_mode *mode)
+{
+	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+	struct device *dev = &ctx->client->dev;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n");
+
+	ctx->dt.pixelclock.min = mode->clock;
+	ctx->dt.hactive.min = mode->hdisplay;
+	ctx->dt.hsync_len.min = mode->hsync_end - mode->hsync_start;
+	ctx->dt.hfront_porch.min = mode->hsync_start - mode->hdisplay;
+	ctx->dt.hback_porch.min = mode->htotal - mode->hsync_end;
+	ctx->dt.vactive.min = mode->vdisplay;
+	ctx->dt.vsync_len.min = mode->vsync_end - mode->vsync_start;
+	ctx->dt.vfront_porch.min = mode->vsync_start - mode->vdisplay;
+	ctx->dt.vback_porch.min = mode->vtotal - mode->vsync_end;
+
+	ctx->display_timing_valid = 1;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "pixelclock(%d).\n", ctx->dt.pixelclock.min);
+	DRM_DEV_DEBUG_DRIVER(dev, "hactive(%d), hsync(%d), hfp(%d), hbp(%d)\n",
+			     ctx->dt.hactive.min,
+			     ctx->dt.hsync_len.min,
+			     ctx->dt.hfront_porch.min,
+			     ctx->dt.hback_porch.min);
+	DRM_DEV_DEBUG_DRIVER(dev, "vactive(%d), vsync(%d), vfp(%d), vbp(%d)\n",
+			     ctx->dt.vactive.min,
+			     ctx->dt.vsync_len.min,
+			     ctx->dt.vfront_porch.min,
+			     ctx->dt.vback_porch.min);
+	DRM_DEV_DEBUG_DRIVER(dev, "hdisplay(%d),hsync_start(%d).\n",
+			     mode->hdisplay,
+			     mode->hsync_start);
+	DRM_DEV_DEBUG_DRIVER(dev, "hsync_end(%d),htotal(%d).\n",
+			     mode->hsync_end,
+			     mode->htotal);
+	DRM_DEV_DEBUG_DRIVER(dev, "vdisplay(%d),vsync_start(%d).\n",
+			     mode->vdisplay,
+			     mode->vsync_start);
+	DRM_DEV_DEBUG_DRIVER(dev, "vsync_end(%d),vtotal(%d).\n",
+			     mode->vsync_end,
+			     mode->vtotal);
+}
+
+static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adj)
+{
+	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+	struct device *dev = &ctx->client->dev;
+	u32 hsync, hfp, hbp, hblanking;
+	u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj;
+	u32 vref, adj_clock;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n");
+
+	hsync = mode->hsync_end - mode->hsync_start;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hbp = mode->htotal - mode->hsync_end;
+	hblanking = mode->htotal - mode->hdisplay;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "before mode fixup\n");
+	DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+			     hsync, hfp, hbp, adj->clock);
+	DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+			     adj->hsync_start, adj->hsync_end, adj->htotal);
+
+	adj_hfp = hfp;
+	adj_hsync = hsync;
+	adj_hbp = hbp;
+	adj_hblanking = hblanking;
+
+	/* HFP needs to be even */
+	if (hfp & 0x1) {
+		adj_hfp += 1;
+		adj_hblanking += 1;
+	}
+
+	/* HBP needs to be even */
+	if (hbp & 0x1) {
+		adj_hbp -= 1;
+		adj_hblanking -= 1;
+	}
+
+	/* HSYNC needs to be even */
+	if (hsync & 0x1) {
+		if (adj_hblanking < hblanking)
+			adj_hsync += 1;
+		else
+			adj_hsync -= 1;
+	}
+
+	/*
+	 * Once an illegal timing is detected, fall back to the default
+	 * HFP, HSYNC and HBP. This adjustment is made for the built-in
+	 * eDP panel; for an external DP monitor, it may be necessary to
+	 * return false instead.
+	 */
+	if (hblanking < HBLANKING_MIN || (hfp < HP_MIN && hbp < HP_MIN)) {
+		adj_hsync = SYNC_LEN_DEF;
+		adj_hfp = HFP_HBP_DEF;
+		adj_hbp = HFP_HBP_DEF;
+		vref = adj->clock * 1000 / (adj->htotal * adj->vtotal);
+		if (hblanking < HBLANKING_MIN) {
+			delta_adj = HBLANKING_MIN - hblanking;
+			adj_clock = vref * delta_adj * adj->vtotal;
+			adj->clock += DIV_ROUND_UP(adj_clock, 1000);
+		} else {
+			delta_adj = hblanking - HBLANKING_MIN;
+			adj_clock = vref * delta_adj * adj->vtotal;
+			adj->clock -= DIV_ROUND_UP(adj_clock, 1000);
+		}
+
+		DRM_WARN("illegal hblanking timing, use default.\n");
+		DRM_WARN("hfp(%d), hbp(%d), hsync(%d).\n", hfp, hbp, hsync);
+	} else if (adj_hfp < HP_MIN) {
+		/* Adjust hfp if hfp less than HP_MIN */
+		delta_adj = HP_MIN - adj_hfp;
+		adj_hfp = HP_MIN;
+
+		/*
+		 * Balance total HBlanking pixel, if HBP does not have enough
+		 * space, adjust HSYNC length, otherwise adjust HBP
+		 */
+		if ((adj_hbp - delta_adj) < HP_MIN)
+			/* HBP not enough space */
+			adj_hsync -= delta_adj;
+		else
+			adj_hbp -= delta_adj;
+	} else if (adj_hbp < HP_MIN) {
+		delta_adj = HP_MIN - adj_hbp;
+		adj_hbp = HP_MIN;
+
+		/*
+		 * Balance total HBlanking pixel, if HFP does not have enough
+		 * space, adjust HSYNC length, otherwise adjust HFP
+		 */
+		if ((adj_hfp - delta_adj) < HP_MIN)
+			/* HFP not enough space */
+			adj_hsync -= delta_adj;
+		else
+			adj_hfp -= delta_adj;
+	}
+
+	DRM_DEV_DEBUG_DRIVER(dev, "after mode fixup\n");
+	DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+			     adj_hsync, adj_hfp, adj_hbp, adj->clock);
+
+	/* Reconstruct timing */
+	adj->hsync_start = adj->hdisplay + adj_hfp;
+	adj->hsync_end = adj->hsync_start + adj_hsync;
+	adj->htotal = adj->hsync_end + adj_hbp;
+	DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+			     adj->hsync_start, adj->hsync_end, adj->htotal);
+
+	return true;
+}
+
+static void anx7625_bridge_enable(struct drm_bridge *bridge)
+{
+	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+	struct device *dev = &ctx->client->dev;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "drm enable\n");
+
+	anx7625_low_power_mode_check(ctx, 1);
+
+	if (WARN_ON(!atomic_read(&ctx->power_status)))
+		return;
+
+	anx7625_dp_start(ctx);
+}
+
+static void anx7625_bridge_disable(struct drm_bridge *bridge)
+{
+	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+	struct device *dev = &ctx->client->dev;
+
+	if (WARN_ON(!atomic_read(&ctx->power_status)))
+		return;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "drm disable\n");
+
+	anx7625_dp_stop(ctx);
+
+	anx7625_low_power_mode_check(ctx, 0);
+}
+
+static enum drm_connector_status
+anx7625_bridge_detect(struct drm_bridge *bridge)
+{
+	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+	struct device *dev = &ctx->client->dev;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
+
+	return anx7625_sink_detect(ctx);
+}
+
+static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
+					    struct drm_connector *connector)
+{
+	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+	struct device *dev = &ctx->client->dev;
+
+	DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
+
+	return anx7625_get_edid(ctx);
+}
+
+static const struct drm_bridge_funcs anx7625_bridge_funcs = {
+	.attach = anx7625_bridge_attach,
+	.detach = anx7625_bridge_detach,
+	.disable = anx7625_bridge_disable,
+	.mode_valid = anx7625_bridge_mode_valid,
+	.mode_set = anx7625_bridge_mode_set,
+	.mode_fixup = anx7625_bridge_mode_fixup,
+	.enable = anx7625_bridge_enable,
+	.detect = anx7625_bridge_detect,
+	.get_edid = anx7625_bridge_get_edid,
+};
+
+static int 
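
The fallback branch in mode_fixup widens the blanking to HBLANKING_MIN and then compensates the pixel clock so the vertical refresh rate is preserved: widening by delta pixels per line costs delta * vtotal extra pixels per frame, i.e. vref * delta * vtotal extra pixels per second. A worked example with hypothetical mode numbers (not taken from the patch):

    #include <stdio.h>

    #define HBLANKING_MIN  80
    #define SYNC_LEN_DEF   32
    #define HFP_HBP_DEF    ((HBLANKING_MIN - SYNC_LEN_DEF) / 2)

    int main(void)
    {
        /* Hypothetical mode with an illegally small horizontal blanking */
        int clock = 70000;  /* kHz */
        int hdisplay = 1366, htotal = 1414, vtotal = 800;
        int hblanking = htotal - hdisplay;  /* 48 < HBLANKING_MIN */

        /* Vertical refresh in Hz, as computed in mode_fixup above */
        int vref = clock * 1000 / (htotal * vtotal);

        /* Extra pixel rate needed to keep vref while widening blanking */
        int delta = HBLANKING_MIN - hblanking;
        int adj_khz = (vref * delta * vtotal + 999) / 1000;  /* DIV_ROUND_UP */

        printf("vref=%d Hz, widen blanking by %d px, clock +%d kHz\n",
               vref, delta, adj_khz);
        printf("new blanking: hfp=%d hsync=%d hbp=%d\n",
               HFP_HBP_DEF, SYNC_LEN_DEF, HFP_HBP_DEF);
        return 0;
    }
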
anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx, + struct i2c_client *client) +{ + ctx->i2c.tx_p0_client = i2c_new_dummy_device(client->adapter, + TX_P0_ADDR >> 1); + if (!ctx->i2c.tx_p0_client) + return -ENOMEM; + + ctx->i2c.tx_p1_client = i2c_new_dummy_device(client->adapter, + TX_P1_ADDR >> 1); + if (!ctx->i2c.tx_p1_client) + goto free_tx_p0; + + ctx->i2c.tx_p2_client = i2c_new_dummy_device(client->adapter, + TX_P2_ADDR >> 1); + if (!ctx->i2c.tx_p2_client) + goto free_tx_p1; + + ctx->i2c.rx_p0_client = i2c_new_dummy_device(client->adapter, + RX_P0_ADDR >> 1); + if (!ctx->i2c.rx_p0_client) + goto free_tx_p2; + + ctx->i2c.rx_p1_client = i2c_new_dummy_device(client->adapter, + RX_P1_ADDR >> 1); + if (!ctx->i2c.rx_p1_client) + goto free_rx_p0; + + ctx->i2c.rx_p2_client = i2c_new_dummy_device(client->adapter, + RX_P2_ADDR >> 1); + if (!ctx->i2c.rx_p2_client) + goto free_rx_p1; + + ctx->i2c.tcpc_client = i2c_new_dummy_device(client->adapter, + TCPC_INTERFACE_ADDR >> 1); + if (!ctx->i2c.tcpc_client) + goto free_rx_p2; + + return 0; + +free_rx_p2: + i2c_unregister_device(ctx->i2c.rx_p2_client); +free_rx_p1: + i2c_unregister_device(ctx->i2c.rx_p1_client); +free_rx_p0: + i2c_unregister_device(ctx->i2c.rx_p0_client); +free_tx_p2: + i2c_unregister_device(ctx->i2c.tx_p2_client); +free_tx_p1: + i2c_unregister_device(ctx->i2c.tx_p1_client); +free_tx_p0: + i2c_unregister_device(ctx->i2c.tx_p0_client); + + return -ENOMEM; +} + +static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx) +{ + i2c_unregister_device(ctx->i2c.tx_p0_client); + i2c_unregister_device(ctx->i2c.tx_p1_client); + i2c_unregister_device(ctx->i2c.tx_p2_client); + i2c_unregister_device(ctx->i2c.rx_p0_client); + i2c_unregister_device(ctx->i2c.rx_p1_client); + i2c_unregister_device(ctx->i2c.rx_p2_client); + i2c_unregister_device(ctx->i2c.tcpc_client); +} + +static int anx7625_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct anx7625_data *platform; + struct anx7625_platform_data *pdata; + int ret = 0; + struct device *dev = &client->dev; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_I2C_BLOCK)) { + DRM_DEV_ERROR(dev, "anx7625's i2c bus doesn't support\n"); + return -ENODEV; + } + + platform = kzalloc(sizeof(*platform), GFP_KERNEL); + if (!platform) { + DRM_DEV_ERROR(dev, "fail to allocate driver data\n"); + return -ENOMEM; + } + + pdata = &platform->pdata; + + ret = anx7625_parse_dt(dev, pdata); + if (ret) { + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret); + goto free_platform; + } + + platform->client = client; + i2c_set_clientdata(client, platform); + + anx7625_init_gpio(platform); + + atomic_set(&platform->power_status, 0); + + mutex_init(&platform->lock); + + platform->pdata.intp_irq = client->irq; + if (platform->pdata.intp_irq) { + INIT_WORK(&platform->work, anx7625_work_func); + platform->workqueue = create_workqueue("anx7625_work"); + if (!platform->workqueue) { + DRM_DEV_ERROR(dev, "fail to create work queue\n"); + ret = -ENOMEM; + goto free_platform; + } + + ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq, + NULL, anx7625_intr_hpd_isr, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "anx7625-intp", platform); + if (ret) { + DRM_DEV_ERROR(dev, "fail to request irq\n"); + goto free_wq; + } + } + + if (anx7625_register_i2c_dummy_clients(platform, client) != 0) { + ret = -ENOMEM; + DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n"); + goto free_wq; + } + + if (platform->pdata.low_power_mode == 0) { + 
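
anx7625_register_i2c_dummy_clients() above uses the kernel's cascading-label unwind: each allocation failure jumps to a label that releases everything acquired so far, in reverse order, so there is a single error path with no duplicated cleanup. The pattern distilled into a sketch (malloc stands in for i2c_new_dummy_device):

    #include <stdio.h>
    #include <stdlib.h>

    /* Each failure jumps to a label that frees everything acquired so far */
    static int acquire_all(void **a, void **b, void **c)
    {
        *a = malloc(16);
        if (!*a)
            return -1;

        *b = malloc(16);
        if (!*b)
            goto free_a;

        *c = malloc(16);
        if (!*c)
            goto free_b;

        return 0;

    free_b:
        free(*b);
    free_a:
        free(*a);
        return -1;
    }

    int main(void)
    {
        void *a, *b, *c;

        if (!acquire_all(&a, &b, &c))
            printf("all acquired\n");
        return 0;
    }
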
anx7625_disable_pd_protocol(platform); + atomic_set(&platform->power_status, 1); + } + + /* Add work function */ + if (platform->pdata.intp_irq) + queue_work(platform->workqueue, &platform->work); + + platform->bridge.funcs = &anx7625_bridge_funcs; + platform->bridge.of_node = client->dev.of_node; + platform->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; + platform->bridge.type = DRM_MODE_CONNECTOR_eDP; + drm_bridge_add(&platform->bridge); + + DRM_DEV_DEBUG_DRIVER(dev, "probe done\n"); + + return 0; + +free_wq: + if (platform->workqueue) + destroy_workqueue(platform->workqueue); + +free_platform: + kfree(platform); + + return ret; +} + +static int anx7625_i2c_remove(struct i2c_client *client) +{ + struct anx7625_data *platform = i2c_get_clientdata(client); + + drm_bridge_remove(&platform->bridge); + + if (platform->pdata.intp_irq) + destroy_workqueue(platform->workqueue); + + anx7625_unregister_i2c_dummy_clients(platform); + + kfree(platform); + return 0; +} + +static const struct i2c_device_id anx7625_id[] = { + {"anx7625", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, anx7625_id); + +static const struct of_device_id anx_match_table[] = { + {.compatible = "analogix,anx7625",}, + {}, +}; + +static struct i2c_driver anx7625_driver = { + .driver = { + .name = "anx7625", + .of_match_table = anx_match_table, + }, + .probe = anx7625_i2c_probe, + .remove = anx7625_i2c_remove, + + .id_table = anx7625_id, +}; + +module_i2c_driver(anx7625_driver); + +MODULE_DESCRIPTION("MIPI2DP anx7625 driver"); +MODULE_AUTHOR("Xin Ji <xji@analogixsemi.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(ANX7625_DRV_VERSION); diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h new file mode 100644 index 000000000000..193ad86c5450 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -0,0 +1,390 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright(c) 2020, Analogix Semiconductor. All rights reserved. 
+ * + */ + +#ifndef __ANX7625_H__ +#define __ANX7625_H__ + +#define ANX7625_DRV_VERSION "0.1.04" + +/* Loading OCM re-trying times */ +#define OCM_LOADING_TIME 10 + +/********* ANX7625 Register **********/ +#define TX_P0_ADDR 0x70 +#define TX_P1_ADDR 0x7A +#define TX_P2_ADDR 0x72 + +#define RX_P0_ADDR 0x7e +#define RX_P1_ADDR 0x84 +#define RX_P2_ADDR 0x54 + +#define RSVD_00_ADDR 0x00 +#define RSVD_D1_ADDR 0xD1 +#define RSVD_60_ADDR 0x60 +#define RSVD_39_ADDR 0x39 +#define RSVD_7F_ADDR 0x7F + +#define TCPC_INTERFACE_ADDR 0x58 + +/* Clock frequency in Hz */ +#define XTAL_FRQ (27 * 1000000) + +#define POST_DIVIDER_MIN 1 +#define POST_DIVIDER_MAX 16 +#define PLL_OUT_FREQ_MIN 520000000UL +#define PLL_OUT_FREQ_MAX 730000000UL +#define PLL_OUT_FREQ_ABS_MIN 300000000UL +#define PLL_OUT_FREQ_ABS_MAX 800000000UL +#define MAX_UNSIGNED_24BIT 16777215UL + +/***************************************************************/ +/* Register definition of device address 0x58 */ + +#define PRODUCT_ID_L 0x02 +#define PRODUCT_ID_H 0x03 + +#define INTR_ALERT_1 0xCC +#define INTR_SOFTWARE_INT BIT(3) +#define INTR_RECEIVED_MSG BIT(5) + +#define SYSTEM_STSTUS 0x45 +#define INTERFACE_CHANGE_INT 0x44 +#define HPD_STATUS_CHANGE 0x80 +#define HPD_STATUS 0x80 + +/******** END of I2C Address 0x58 ********/ + +/***************************************************************/ +/* Register definition of device address 0x70 */ +#define I2C_ADDR_70_DPTX 0x70 + +#define SP_TX_LINK_BW_SET_REG 0xA0 +#define SP_TX_LANE_COUNT_SET_REG 0xA1 + +#define M_VID_0 0xC0 +#define M_VID_1 0xC1 +#define M_VID_2 0xC2 +#define N_VID_0 0xC3 +#define N_VID_1 0xC4 +#define N_VID_2 0xC5 + +/***************************************************************/ +/* Register definition of device address 0x72 */ +#define AUX_RST 0x04 +#define RST_CTRL2 0x07 + +#define SP_TX_TOTAL_LINE_STA_L 0x24 +#define SP_TX_TOTAL_LINE_STA_H 0x25 +#define SP_TX_ACT_LINE_STA_L 0x26 +#define SP_TX_ACT_LINE_STA_H 0x27 +#define SP_TX_V_F_PORCH_STA 0x28 +#define SP_TX_V_SYNC_STA 0x29 +#define SP_TX_V_B_PORCH_STA 0x2A +#define SP_TX_TOTAL_PIXEL_STA_L 0x2B +#define SP_TX_TOTAL_PIXEL_STA_H 0x2C +#define SP_TX_ACT_PIXEL_STA_L 0x2D +#define SP_TX_ACT_PIXEL_STA_H 0x2E +#define SP_TX_H_F_PORCH_STA_L 0x2F +#define SP_TX_H_F_PORCH_STA_H 0x30 +#define SP_TX_H_SYNC_STA_L 0x31 +#define SP_TX_H_SYNC_STA_H 0x32 +#define SP_TX_H_B_PORCH_STA_L 0x33 +#define SP_TX_H_B_PORCH_STA_H 0x34 + +#define SP_TX_VID_CTRL 0x84 +#define SP_TX_BPC_MASK 0xE0 +#define SP_TX_BPC_6 0x00 +#define SP_TX_BPC_8 0x20 +#define SP_TX_BPC_10 0x40 +#define SP_TX_BPC_12 0x60 + +#define VIDEO_BIT_MATRIX_12 0x4c + +#define AUDIO_CHANNEL_STATUS_1 0xd0 +#define AUDIO_CHANNEL_STATUS_2 0xd1 +#define AUDIO_CHANNEL_STATUS_3 0xd2 +#define AUDIO_CHANNEL_STATUS_4 0xd3 +#define AUDIO_CHANNEL_STATUS_5 0xd4 +#define AUDIO_CHANNEL_STATUS_6 0xd5 +#define TDM_SLAVE_MODE 0x10 +#define I2S_SLAVE_MODE 0x08 + +#define AUDIO_CONTROL_REGISTER 0xe6 +#define TDM_TIMING_MODE 0x08 + +#define I2C_ADDR_72_DPTX 0x72 + +#define HP_MIN 8 +#define HBLANKING_MIN 80 +#define SYNC_LEN_DEF 32 +#define HFP_HBP_DEF ((HBLANKING_MIN - SYNC_LEN_DEF) / 2) +#define VIDEO_CONTROL_0 0x08 + +#define ACTIVE_LINES_L 0x14 +#define ACTIVE_LINES_H 0x15 /* Bit[7:6] are reserved */ +#define VERTICAL_FRONT_PORCH 0x16 +#define VERTICAL_SYNC_WIDTH 0x17 +#define VERTICAL_BACK_PORCH 0x18 + +#define HORIZONTAL_TOTAL_PIXELS_L 0x19 +#define HORIZONTAL_TOTAL_PIXELS_H 0x1A /* Bit[7:6] are reserved */ +#define HORIZONTAL_ACTIVE_PIXELS_L 0x1B +#define HORIZONTAL_ACTIVE_PIXELS_H 0x1C /* 
Bit[7:6] are reserved */
+#define HORIZONTAL_FRONT_PORCH_L	0x1D
+#define HORIZONTAL_FRONT_PORCH_H	0x1E  /* Bit[7:4] are reserved */
+#define HORIZONTAL_SYNC_WIDTH_L		0x1F
+#define HORIZONTAL_SYNC_WIDTH_H		0x20  /* Bit[7:4] are reserved */
+#define HORIZONTAL_BACK_PORCH_L		0x21
+#define HORIZONTAL_BACK_PORCH_H		0x22  /* Bit[7:4] are reserved */
+
+/******** END of I2C Address 0x72 *********/
+/***************************************************************/
+/* Register definition of device address 0x7e */
+
+#define I2C_ADDR_7E_FLASH_CONTROLLER	0x7E
+
+#define FLASH_LOAD_STA			0x05
+#define FLASH_LOAD_STA_CHK		BIT(7)
+
+#define XTAL_FRQ_SEL			0x3F
+/* bit field positions */
+#define XTAL_FRQ_SEL_POS		5
+/* bit field values */
+#define XTAL_FRQ_19M2			(0 << XTAL_FRQ_SEL_POS)
+#define XTAL_FRQ_27M			(4 << XTAL_FRQ_SEL_POS)
+
+#define R_DSC_CTRL_0			0x40
+#define READ_STATUS_EN			7
+#define CLK_1MEG_RB			6  /* 1MHz clock reset; 0=reset, 1=reset release */
+#define DSC_BIST_DONE			1  /* Bit[5:1]: 1=DSC MBIST pass */
+#define DSC_EN				0x01  /* 1=DSC enabled, 0=DSC disabled */
+
+#define OCM_FW_VERSION			0x31
+#define OCM_FW_REVERSION		0x32
+
+#define AP_AUX_ADDR_7_0			0x11
+#define AP_AUX_ADDR_15_8		0x12
+#define AP_AUX_ADDR_19_16		0x13
+
+/* Bit[0:3] AUX status, bit 4 op_en, bit 5 address only */
+#define AP_AUX_CTRL_STATUS		0x14
+#define AP_AUX_CTRL_OP_EN		0x10
+#define AP_AUX_CTRL_ADDRONLY		0x20
+
+#define AP_AUX_BUFF_START		0x15
+#define PIXEL_CLOCK_L			0x25
+#define PIXEL_CLOCK_H			0x26
+
+#define AP_AUX_COMMAND			0x27  /* com+len */
+/* Bit 0&1: 3D video structure */
+/* 0x01: frame packing, 0x02: Line alternative, 0x03: Side-by-side(full) */
+#define AP_AV_STATUS			0x28
+#define AP_VIDEO_CHG			BIT(2)
+#define AP_AUDIO_CHG			BIT(3)
+#define AP_MIPI_MUTE			BIT(4)  /* 1: MIPI input mute, 0: unmute */
+#define AP_MIPI_RX_EN			BIT(5)  /* 1: MIPI RX input in, 0: no RX in */
+#define AP_DISABLE_PD			BIT(6)
+#define AP_DISABLE_DISPLAY		BIT(7)
+/***************************************************************/
+/* Register definition of device address 0x84 */
+#define MIPI_PHY_CONTROL_3		0x03
+#define MIPI_HS_PWD_CLK			7
+#define MIPI_HS_RT_CLK			6
+#define MIPI_PD_CLK			5
+#define MIPI_CLK_RT_MANUAL_PD_EN	4
+#define MIPI_CLK_HS_MANUAL_PD_EN	3
+#define MIPI_CLK_DET_DET_BYPASS		2
+#define MIPI_CLK_MISS_CTRL		1
+#define MIPI_PD_LPTX_CH_MANUAL_PD_EN	0
+
+#define MIPI_LANE_CTRL_0		0x05
+#define MIPI_TIME_HS_PRPR		0x08
+
+/*
+ * After the MIPI RX protocol layer has received video frames, the
+ * protocol layer starts to reconstruct the video stream from the PHY.
+ */
+#define MIPI_VIDEO_STABLE_CNT		0x0A
+
+#define MIPI_LANE_CTRL_10		0x0F
+#define MIPI_DIGITAL_ADJ_1		0x1B
+
+#define MIPI_PLL_M_NUM_23_16		0x1E
+#define MIPI_PLL_M_NUM_15_8		0x1F
+#define MIPI_PLL_M_NUM_7_0		0x20
+#define MIPI_PLL_N_NUM_23_16		0x21
+#define MIPI_PLL_N_NUM_15_8		0x22
+#define MIPI_PLL_N_NUM_7_0		0x23
+
+#define MIPI_DIGITAL_PLL_6		0x2A
+/* Bit[7:6]: VCO band control, only effective */
+#define MIPI_M_NUM_READY		0x10
+#define MIPI_N_NUM_READY		0x08
+#define STABLE_INTEGER_CNT_EN		0x04
+#define MIPI_PLL_TEST_BIT		0
+/* Bit[1:0]: test point output select - */
+/* 00: VCO power, 01: dvdd_pdt, 10: dvdd, 11: vcox */
+
+#define MIPI_DIGITAL_PLL_7		0x2B
+#define MIPI_PLL_FORCE_N_EN		7
+#define MIPI_PLL_FORCE_BAND_EN		6
+
+#define MIPI_PLL_VCO_TUNE_REG		4
+/* Bit[5:4]: VCO metal capacitance - */
+/* 00: +20% fast, 01: +10% fast (default), 10: typical, 11: -10% slow */
+#define MIPI_PLL_VCO_TUNE_REG_VAL	0x30
+
+#define MIPI_PLL_PLL_LDO_BIT		2
+/* Bit[3:2]: vco_v2i power - */
+/* 00: 1.40V, 01: 1.45V (default), 10: 1.50V, 11: 1.55V */
+#define 
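
The XTAL_FRQ_SEL definitions above encode a multi-bit field as a bit position plus pre-shifted values. A sketch of the read-modify-write such macros imply (the 3-bit field width is an assumption on our part; the patch only shows the values 0 and 4):

    #include <stdint.h>
    #include <stdio.h>

    #define XTAL_FRQ_SEL_POS   5
    #define XTAL_FRQ_SEL_MASK  (0x7 << XTAL_FRQ_SEL_POS)  /* assumed width */
    #define XTAL_FRQ_27M       (4 << XTAL_FRQ_SEL_POS)

    /* Read-modify-write a multi-bit field without touching other bits */
    static uint8_t field_set(uint8_t reg, uint8_t mask, uint8_t val)
    {
        return (uint8_t)((reg & ~mask) | (val & mask));
    }

    int main(void)
    {
        uint8_t reg = 0x1f;  /* arbitrary prior register contents */

        reg = field_set(reg, XTAL_FRQ_SEL_MASK, XTAL_FRQ_27M);
        printf("XTAL_FRQ_SEL reg = 0x%02x\n", reg);  /* 0x9f */
        return 0;
    }
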
MIPI_PLL_RESET_N 0x02 +#define MIPI_FRQ_FORCE_NDET 0 + +#define MIPI_ALERT_CLR_0 0x2D +#define HS_link_error_clear 7 +/* This bit itself is S/C, and it clears 0x84:0x31[7] */ + +#define MIPI_ALERT_OUT_0 0x31 +#define check_sum_err_hs_sync 7 +/* This bit is cleared by 0x84:0x2D[7] */ + +#define MIPI_DIGITAL_PLL_8 0x33 +#define MIPI_POST_DIV_VAL 4 +/* N means divided by (n+1), n = 0~15 */ +#define MIPI_EN_LOCK_FRZ 3 +#define MIPI_FRQ_COUNTER_RST 2 +#define MIPI_FRQ_SET_REG_8 1 +/* Bit 0 is reserved */ + +#define MIPI_DIGITAL_PLL_9 0x34 + +#define MIPI_DIGITAL_PLL_16 0x3B +#define MIPI_FRQ_FREEZE_NDET 7 +#define MIPI_FRQ_REG_SET_ENABLE 6 +#define MIPI_REG_FORCE_SEL_EN 5 +#define MIPI_REG_SEL_DIV_REG 4 +#define MIPI_REG_FORCE_PRE_DIV_EN 3 +/* Bit 2 is reserved */ +#define MIPI_FREF_D_IND 1 +#define REF_CLK_27000KHZ 1 +#define REF_CLK_19200KHZ 0 +#define MIPI_REG_PLL_PLL_TEST_ENABLE 0 + +#define MIPI_DIGITAL_PLL_18 0x3D +#define FRQ_COUNT_RB_SEL 7 +#define REG_FORCE_POST_DIV_EN 6 +#define MIPI_DPI_SELECT 5 +#define SELECT_DSI 1 +#define SELECT_DPI 0 +#define REG_BAUD_DIV_RATIO 0 + +#define H_BLANK_L 0x3E +/* For DSC only */ +#define H_BLANK_H 0x3F +/* For DSC only; note: bit[7:6] are reserved */ +#define MIPI_SWAP 0x4A +#define MIPI_SWAP_CH0 7 +#define MIPI_SWAP_CH1 6 +#define MIPI_SWAP_CH2 5 +#define MIPI_SWAP_CH3 4 +#define MIPI_SWAP_CLK 3 +/* Bit[2:0] are reserved */ + +/******** END of I2C Address 0x84 *********/ + +/* DPCD regs */ +#define DPCD_DPCD_REV 0x00 +#define DPCD_MAX_LINK_RATE 0x01 +#define DPCD_MAX_LANE_COUNT 0x02 + +/********* ANX7625 Register End **********/ + +/***************** Display *****************/ +enum audio_fs { + AUDIO_FS_441K = 0x00, + AUDIO_FS_48K = 0x02, + AUDIO_FS_32K = 0x03, + AUDIO_FS_882K = 0x08, + AUDIO_FS_96K = 0x0a, + AUDIO_FS_1764K = 0x0c, + AUDIO_FS_192K = 0x0e +}; + +enum audio_wd_len { + AUDIO_W_LEN_16_20MAX = 0x02, + AUDIO_W_LEN_18_20MAX = 0x04, + AUDIO_W_LEN_17_20MAX = 0x0c, + AUDIO_W_LEN_19_20MAX = 0x08, + AUDIO_W_LEN_20_20MAX = 0x0a, + AUDIO_W_LEN_20_24MAX = 0x03, + AUDIO_W_LEN_22_24MAX = 0x05, + AUDIO_W_LEN_21_24MAX = 0x0d, + AUDIO_W_LEN_23_24MAX = 0x09, + AUDIO_W_LEN_24_24MAX = 0x0b +}; + +#define I2S_CH_2 0x01 +#define TDM_CH_4 0x03 +#define TDM_CH_6 0x05 +#define TDM_CH_8 0x07 + +#define MAX_DPCD_BUFFER_SIZE 16 + +#define ONE_BLOCK_SIZE 128 +#define FOUR_BLOCK_SIZE (128 * 4) + +#define MAX_EDID_BLOCK 3 +#define EDID_TRY_CNT 3 +#define SUPPORT_PIXEL_CLOCK 300000 + +struct s_edid_data { + int edid_block_num; + u8 edid_raw_data[FOUR_BLOCK_SIZE]; +}; + +/***************** Display End *****************/ + +struct anx7625_platform_data { + struct gpio_desc *gpio_p_on; + struct gpio_desc *gpio_reset; + struct drm_bridge *panel_bridge; + int intp_irq; + u32 low_power_mode; + struct device_node *mipi_host_node; +}; + +struct anx7625_i2c_client { + struct i2c_client *tx_p0_client; + struct i2c_client *tx_p1_client; + struct i2c_client *tx_p2_client; + struct i2c_client *rx_p0_client; + struct i2c_client *rx_p1_client; + struct i2c_client *rx_p2_client; + struct i2c_client *tcpc_client; +}; + +struct anx7625_data { + struct anx7625_platform_data pdata; + atomic_t power_status; + int hpd_status; + int hpd_high_cnt; + /* Lock for work queue */ + struct mutex lock; + struct i2c_client *client; + struct anx7625_i2c_client i2c; + struct i2c_client *last_client; + struct s_edid_data slimport_edid_p; + struct work_struct work; + struct workqueue_struct *workqueue; + char edid_block; + struct display_timing dt; + u8 display_timing_valid; + struct drm_bridge 
bridge; + u8 bridge_attached; + struct mipi_dsi_device *dsi; +}; + +#endif /* __ANX7625_H__ */ diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig new file mode 100644 index 000000000000..511d67b16d14 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/Kconfig @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_CDNS_MHDP8546 + tristate "Cadence DPI/DP bridge" + select DRM_KMS_HELPER + select DRM_PANEL_BRIDGE + depends on OF + help + Support Cadence DPI to DP bridge. This is an internal + bridge and is meant to be directly embedded in a SoC. + It takes a DPI stream as input and outputs it encoded + in DP format. + +if DRM_CDNS_MHDP8546 + +config DRM_CDNS_MHDP8546_J721E + depends on ARCH_K3_J721E_SOC || COMPILE_TEST + bool "J721E Cadence DPI/DP wrapper support" + default y + help + Support J721E Cadence DPI/DP wrapper. This is a wrapper + which adds support for J721E related platform ops. It + initializes the J721E Display Port and sets up the + clock and data muxes. +endif diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile new file mode 100644 index 000000000000..8f647991b374 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o +cdns-mhdp8546-y := cdns-mhdp8546-core.o +cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c new file mode 100644 index 000000000000..d0c65610ebb5 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -0,0 +1,2532 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence MHDP8546 DP bridge driver. + * + * Copyright (C) 2020 Cadence Design Systems, Inc. 
+ * + * Authors: Quentin Schulz <quentin.schulz@free-electrons.com> + * Swapnil Jakhade <sjakhade@cadence.com> + * Yuti Amonkar <yamonkar@cadence.com> + * Tomi Valkeinen <tomi.valkeinen@ti.com> + * Jyri Sarha <jsarha@ti.com> + * + * TODO: + * - Implement optimized mailbox communication using mailbox interrupts + * - Add support for power management + * - Add support for features like audio, MST and fast link training + * - Implement request_fw_cancel to handle HW_STATE + * - Fix asynchronous loading of firmware implementation + * - Add DRM helper function for cdns_mhdp_lower_link_rate + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/firmware.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/wait.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_state_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include <asm/unaligned.h> + +#include "cdns-mhdp8546-core.h" + +#include "cdns-mhdp8546-j721e.h" + +static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp) +{ + int ret, empty; + + WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex)); + + ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY, + empty, !empty, MAILBOX_RETRY_US, + MAILBOX_TIMEOUT_US); + if (ret < 0) + return ret; + + return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff; +} + +static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val) +{ + int ret, full; + + WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex)); + + ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL, + full, !full, MAILBOX_RETRY_US, + MAILBOX_TIMEOUT_US); + if (ret < 0) + return ret; + + writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA); + + return 0; +} + +static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp, + u8 module_id, u8 opcode, + u16 req_size) +{ + u32 mbox_size, i; + u8 header[4]; + int ret; + + /* read the header of the message */ + for (i = 0; i < sizeof(header); i++) { + ret = cdns_mhdp_mailbox_read(mhdp); + if (ret < 0) + return ret; + + header[i] = ret; + } + + mbox_size = get_unaligned_be16(header + 2); + + if (opcode != header[0] || module_id != header[1] || + req_size != mbox_size) { + /* + * If the message in mailbox is not what we want, we need to + * clear the mailbox by reading its contents. 
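
The mailbox messages above carry a fixed 4-byte header: opcode, module id, and a big-endian 16-bit payload size, which cdns_mhdp_mailbox_recv_header() validates on the receive side. Packing and parsing it in isolation (the opcode/module values are arbitrary examples, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* 4-byte mailbox header: opcode, module id, big-endian 16-bit size */
    static void pack_header(uint8_t hdr[4], uint8_t opcode,
                            uint8_t module_id, uint16_t size)
    {
        hdr[0] = opcode;
        hdr[1] = module_id;
        hdr[2] = size >> 8;    /* put_unaligned_be16 equivalent */
        hdr[3] = size & 0xff;
    }

    int main(void)
    {
        uint8_t hdr[4];
        uint16_t size;

        pack_header(hdr, 0x07, 0x01, 8);  /* example opcode/module */
        size = (uint16_t)((hdr[2] << 8) | hdr[3]);
        printf("opcode=0x%02x module=0x%02x size=%u\n",
               hdr[0], hdr[1], size);
        return 0;
    }
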
+ */ + for (i = 0; i < mbox_size; i++) + if (cdns_mhdp_mailbox_read(mhdp) < 0) + break; + + return -EINVAL; + } + + return 0; +} + +static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp, + u8 *buff, u16 buff_size) +{ + u32 i; + int ret; + + for (i = 0; i < buff_size; i++) { + ret = cdns_mhdp_mailbox_read(mhdp); + if (ret < 0) + return ret; + + buff[i] = ret; + } + + return 0; +} + +static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id, + u8 opcode, u16 size, u8 *message) +{ + u8 header[4]; + int ret, i; + + header[0] = opcode; + header[1] = module_id; + put_unaligned_be16(size, header + 2); + + for (i = 0; i < sizeof(header); i++) { + ret = cdns_mhdp_mailbox_write(mhdp, header[i]); + if (ret) + return ret; + } + + for (i = 0; i < size; i++) { + ret = cdns_mhdp_mailbox_write(mhdp, message[i]); + if (ret) + return ret; + } + + return 0; +} + +static +int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value) +{ + u8 msg[4], resp[8]; + int ret; + + put_unaligned_be32(addr, msg); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL, + GENERAL_REGISTER_READ, + sizeof(msg), msg); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL, + GENERAL_REGISTER_READ, + sizeof(resp)); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp)); + if (ret) + goto out; + + /* Returned address value should be the same as requested */ + if (memcmp(msg, resp, sizeof(msg))) { + ret = -EINVAL; + goto out; + } + + *value = get_unaligned_be32(resp + 4); + +out: + mutex_unlock(&mhdp->mbox_mutex); + if (ret) { + dev_err(mhdp->dev, "Failed to read register\n"); + *value = 0; + } + + return ret; +} + +static +int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val) +{ + u8 msg[6]; + int ret; + + put_unaligned_be16(addr, msg); + put_unaligned_be32(val, msg + 2); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_REGISTER, sizeof(msg), msg); + + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr, + u8 start_bit, u8 bits_no, u32 val) +{ + u8 field[8]; + int ret; + + put_unaligned_be16(addr, field); + field[2] = start_bit; + field[3] = bits_no; + put_unaligned_be32(val, field + 4); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_FIELD, sizeof(field), field); + + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp, + u32 addr, u8 *data, u16 len) +{ + u8 msg[5], reg[5]; + int ret; + + put_unaligned_be16(len, msg); + put_unaligned_be24(addr, msg + 2); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_DPCD, sizeof(msg), msg); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_DPCD, + sizeof(reg) + len); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len); + +out: + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value) +{ + u8 msg[6], reg[5]; + int ret; + + put_unaligned_be16(1, msg); + put_unaligned_be24(addr, msg + 2); + msg[5] = value; + + mutex_lock(&mhdp->mbox_mutex); + + ret = 
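
DPCD addresses are 20 bits wide and travel in three big-endian bytes, which is what the put_unaligned_be24()/get_unaligned_be24() calls in the DPCD helpers do. Open-coded equivalents for reference:

    #include <stdint.h>
    #include <stdio.h>

    /* Store/load a 24-bit big-endian value, as used for DPCD addresses */
    static void put_be24(uint8_t *p, uint32_t v)
    {
        p[0] = (v >> 16) & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = v & 0xff;
    }

    static uint32_t get_be24(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
    }

    int main(void)
    {
        uint8_t msg[5];

        put_be24(msg + 2, 0x000600);  /* e.g. DP_SET_POWER */
        printf("addr back = 0x%06x\n", get_be24(msg + 2));
        return 0;
    }
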
cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_DPCD, sizeof(msg), msg); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_DPCD, sizeof(reg)); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); + if (ret) + goto out; + + if (addr != get_unaligned_be24(reg + 2)) + ret = -EINVAL; + +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret) + dev_err(mhdp->dev, "dpcd write failed: %d\n", ret); + return ret; +} + +static +int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable) +{ + u8 msg[5]; + int ret, i; + + msg[0] = GENERAL_MAIN_CONTROL; + msg[1] = MB_MODULE_ID_GENERAL; + msg[2] = 0; + msg[3] = 1; + msg[4] = enable ? FW_ACTIVE : FW_STANDBY; + + mutex_lock(&mhdp->mbox_mutex); + + for (i = 0; i < sizeof(msg); i++) { + ret = cdns_mhdp_mailbox_write(mhdp, msg[i]); + if (ret) + goto out; + } + + /* read the firmware state */ + ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg)); + if (ret) + goto out; + + ret = 0; + +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret < 0) + dev_err(mhdp->dev, "set firmware active failed\n"); + return ret; +} + +static +int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp) +{ + u8 status; + int ret; + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_HPD_STATE, 0, NULL); + if (ret) + goto err_get_hpd; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_HPD_STATE, + sizeof(status)); + if (ret) + goto err_get_hpd; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status)); + if (ret) + goto err_get_hpd; + + mutex_unlock(&mhdp->mbox_mutex); + + dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__, + status ? "" : "un"); + + return status; + +err_get_hpd: + mutex_unlock(&mhdp->mbox_mutex); + + return ret; +} + +static +int cdns_mhdp_get_edid_block(void *data, u8 *edid, + unsigned int block, size_t length) +{ + struct cdns_mhdp_device *mhdp = data; + u8 msg[2], reg[2], i; + int ret; + + mutex_lock(&mhdp->mbox_mutex); + + for (i = 0; i < 4; i++) { + msg[0] = block / 2; + msg[1] = block % 2; + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_GET_EDID, sizeof(msg), msg); + if (ret) + continue; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_GET_EDID, + sizeof(reg) + length); + if (ret) + continue; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); + if (ret) + continue; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length); + if (ret) + continue; + + if (reg[0] == length && reg[1] == block / 2) + break; + } + + mutex_unlock(&mhdp->mbox_mutex); + + if (ret) + dev_err(mhdp->dev, "get block[%d] edid failed: %d\n", + block, ret); + + return ret; +} + +static +int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp) +{ + u8 event = 0; + int ret; + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_EVENT, 0, NULL); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_EVENT, sizeof(event)); + if (ret < 0) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event)); +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret < 0) + return ret; + + dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__, + (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "", + (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "", + (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "", + (event & DPTX_READ_EVENT_HPD_STATE) ? 
"HPD_STATE " : ""); + + return event; +} + +static +int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes, + unsigned int udelay, const u8 *lanes_data, + u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 payload[7]; + u8 hdr[5]; /* For DPCD read response header */ + u32 addr; + int ret; + + if (nlanes != 4 && nlanes != 2 && nlanes != 1) { + dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes); + ret = -EINVAL; + goto out; + } + + payload[0] = nlanes; + put_unaligned_be16(udelay, payload + 1); + memcpy(payload + 3, lanes_data, nlanes); + + mutex_lock(&mhdp->mbox_mutex); + + ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, + DPTX_ADJUST_LT, + sizeof(payload), payload); + if (ret) + goto out; + + /* Yes, read the DPCD read command response */ + ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, + DPTX_READ_DPCD, + sizeof(hdr) + DP_LINK_STATUS_SIZE); + if (ret) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr)); + if (ret) + goto out; + + addr = get_unaligned_be24(hdr + 2); + if (addr != DP_LANE0_1_STATUS) + goto out; + + ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status, + DP_LINK_STATUS_SIZE); + +out: + mutex_unlock(&mhdp->mbox_mutex); + + if (ret) + dev_err(mhdp->dev, "Failed to adjust Link Training.\n"); + + return ret; +} + +/** + * cdns_mhdp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +static +int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} + +/** + * cdns_mhdp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +static +int cdns_mhdp_link_power_down(struct drm_dp_aux *aux, + struct cdns_mhdp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} + +/** + * cdns_mhdp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
+ */ +static +int cdns_mhdp_link_configure(struct drm_dp_aux *aux, + struct cdns_mhdp_link *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} + +static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp) +{ + return min(mhdp->host.link_rate, mhdp->sink.link_rate); +} + +static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp) +{ + return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt); +} + +static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp) +{ + return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp); +} + +static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp) +{ + /* Check if SSC is supported by both sides */ + return mhdp->host.ssc && mhdp->sink.ssc; +} + +static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp) +{ + dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged); + + if (mhdp->plugged) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp) +{ + u32 major_num, minor_num, revision; + u32 fw_ver, lib_ver; + + fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8) + | readl(mhdp->regs + CDNS_VER_L); + + lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8) + | readl(mhdp->regs + CDNS_LIB_L_ADDR); + + if (lib_ver < 33984) { + /* + * Older FW versions, with major number 1, stored the FW + * version by writing the repository revision number to + * these registers. The checks below identify those versions. + */ + major_num = 1; + minor_num = 2; + if (fw_ver == 26098) { + revision = 15; + } else if (lib_ver == 0 && fw_ver == 0) { + revision = 17; + } else { + dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n", + fw_ver, lib_ver); + return -ENODEV; + } + } else { + /* Newer FW versions, major number 2 onwards, encode the version in fw_ver directly. */ + major_num = fw_ver / 10000; + minor_num = (fw_ver / 100) % 100; + revision = (fw_ver % 10000) % 100; + } + + dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num, + revision); + return 0; +} + +static int cdns_mhdp_fw_activate(const struct firmware *fw, + struct cdns_mhdp_device *mhdp) +{ + unsigned int reg; + int ret; + + /* Release uCPU reset and stall it. */ + writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL); + + memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size); + + /* Leave debug mode, release stall */ + writel(0, mhdp->regs + CDNS_APB_CTRL); + + /* + * Wait for the KEEP_ALIVE "message" on the first 8 bits.
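+ * The poll below does not check for a specific value, it only waits + * for the low byte of CDNS_KEEP_ALIVE to become non-zero, i.e. for + * the firmware scheduler to have run at least once.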
+ * The firmware updates it on each scheduler "tick" (~2 ms). + */ + ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg, + reg & CDNS_KEEP_ALIVE_MASK, 500, + CDNS_KEEP_ALIVE_TIMEOUT); + if (ret) { + dev_err(mhdp->dev, + "device didn't give any life sign: reg %d\n", reg); + return ret; + } + + ret = cdns_mhdp_check_fw_version(mhdp); + if (ret) + return ret; + + /* Clear stale events: the FW does not clear them at boot, only on read */ + readl(mhdp->regs + CDNS_SW_EVENT0); + readl(mhdp->regs + CDNS_SW_EVENT1); + readl(mhdp->regs + CDNS_SW_EVENT2); + readl(mhdp->regs + CDNS_SW_EVENT3); + + /* Activate uCPU */ + ret = cdns_mhdp_set_firmware_active(mhdp, true); + if (ret) + return ret; + + spin_lock(&mhdp->start_lock); + + mhdp->hw_state = MHDP_HW_READY; + + /* + * Here we must keep the lock while enabling the interrupts, since + * otherwise the interrupt enable code could be executed after the + * bridge has been detached. A similar situation is not possible in + * the attach()/detach() callbacks, since the hw_state change from + * MHDP_HW_READY to MHDP_HW_STOPPED happens only on driver removal, + * when the bridge is already detached. + */ + if (mhdp->bridge_attached) + writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); + + spin_unlock(&mhdp->start_lock); + + wake_up(&mhdp->fw_load_wq); + dev_dbg(mhdp->dev, "DP FW activated\n"); + + return 0; +} + +static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context) +{ + struct cdns_mhdp_device *mhdp = context; + bool bridge_attached; + int ret; + + dev_dbg(mhdp->dev, "firmware callback\n"); + + if (!fw || !fw->data) { + dev_err(mhdp->dev, "%s: No firmware.\n", __func__); + return; + } + + ret = cdns_mhdp_fw_activate(fw, mhdp); + + release_firmware(fw); + + if (ret) + return; + + /* + * XXX how to make sure the bridge is still attached when + * calling drm_kms_helper_hotplug_event() after releasing + * the lock? We should not hold the spin lock when + * calling drm_kms_helper_hotplug_event() since it may + * cause a deadlock. The fbdev console calls detect from the + * same thread, further down the call stack that starts here.
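+ * As a compromise, a snapshot of bridge_attached is taken under the + * lock below and is only acted upon after the lock has been dropped.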
+ */ + spin_lock(&mhdp->start_lock); + bridge_attached = mhdp->bridge_attached; + spin_unlock(&mhdp->start_lock); + if (bridge_attached) { + if (mhdp->connector.dev) + drm_kms_helper_hotplug_event(mhdp->bridge.dev); + else + drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp)); + } +} + +static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp) +{ + int ret; + + ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev, + GFP_KERNEL, mhdp, cdns_mhdp_fw_cb); + if (ret) { + dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n", + FW_NAME, ret); + return ret; + } + + return 0; +} + +static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev); + int ret; + + if (msg->request != DP_AUX_NATIVE_WRITE && + msg->request != DP_AUX_NATIVE_READ) + return -EOPNOTSUPP; + + if (msg->request == DP_AUX_NATIVE_WRITE) { + const u8 *buf = msg->buffer; + unsigned int i; + + for (i = 0; i < msg->size; ++i) { + ret = cdns_mhdp_dpcd_write(mhdp, + msg->address + i, buf[i]); + if (!ret) + continue; + + dev_err(mhdp->dev, + "Failed to write DPCD addr %u\n", + msg->address + i); + + return ret; + } + } else { + ret = cdns_mhdp_dpcd_read(mhdp, msg->address, + msg->buffer, msg->size); + if (ret) { + dev_err(mhdp->dev, + "Failed to read DPCD addr %u\n", + msg->address); + + return ret; + } + } + + return msg->size; +} + +static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp) +{ + union phy_configure_opts phy_cfg; + u32 reg32; + int ret; + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + + /* Reset PHY configuration */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); + if (!mhdp->host.scrambler) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD, + mhdp->sink.enhanced & mhdp->host.enhanced); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN, + CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes)); + + cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link); + phy_cfg.dp.link_rate = mhdp->link.rate / 100; + phy_cfg.dp.lanes = mhdp->link.num_lanes; + + memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage)); + memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre)); + + phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); + phy_cfg.dp.set_lanes = true; + phy_cfg.dp.set_rate = true; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(mhdp->phy, &phy_cfg); + if (ret) { + dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + return ret; + } + + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, + CDNS_PHY_COMMON_CONFIG | + CDNS_PHY_TRAINING_EN | + CDNS_PHY_TRAINING_TYPE(1) | + CDNS_PHY_SCRAMBLER_BYPASS); + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE); + + return 0; +} + +static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp, + u8 link_status[DP_LINK_STATUS_SIZE], + u8 lanes_data[CDNS_DP_MAX_NUM_LANES], + union phy_configure_opts *phy_cfg) +{ + u8 adjust, max_pre_emph, max_volt_swing; + u8 set_volt, set_pre; + unsigned int i; + + max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis) + << DP_TRAIN_PRE_EMPHASIS_SHIFT; + max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing); + + for (i = 0; i < mhdp->link.num_lanes; i++) { + /* Check if Voltage swing and pre-emphasis are within limits */ + adjust = drm_dp_get_adjust_request_voltage(link_status, i); + set_volt = 
min(adjust, max_volt_swing); + + adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + set_pre = min(adjust, max_pre_emph) + >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + + /* + * If the combination of voltage swing and pre-emphasis levels + * is not allowed (their sum exceeds level 3), keep the + * pre-emphasis as-is and lower the voltage swing to fit. + */ + if (set_volt + set_pre > 3) + set_volt = 3 - set_pre; + + phy_cfg->dp.voltage[i] = set_volt; + lanes_data[i] = set_volt; + + if (set_volt == max_volt_swing) + lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED; + + phy_cfg->dp.pre[i] = set_pre; + lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT); + + if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) + lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } +} + +static +void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], + unsigned int lane, u8 volt) +{ + unsigned int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1); + + link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s); + link_status[idx] |= volt << s; +} + +static +void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], + unsigned int lane, u8 pre_emphasis) +{ + unsigned int s = ((lane & 1) ? + DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1); + + link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s); + link_status[idx] |= pre_emphasis << s; +} + +static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp, + u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); + u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); + unsigned int i; + u8 volt, pre; + + for (i = 0; i < mhdp->link.num_lanes; i++) { + volt = drm_dp_get_adjust_request_voltage(link_status, i); + pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + if (volt + pre > 3) + cdns_mhdp_set_adjust_request_voltage(link_status, i, + 3 - pre); + if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING) + cdns_mhdp_set_adjust_request_voltage(link_status, i, + max_volt); + if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS) + cdns_mhdp_set_adjust_request_pre_emphasis(link_status, + i, max_pre); + } +} + +static void cdns_mhdp_print_lt_status(const char *prefix, + struct cdns_mhdp_device *mhdp, + union phy_configure_opts *phy_cfg) +{ + char vs[8] = "0/0/0/0"; + char pe[8] = "0/0/0/0"; + unsigned int i; + + for (i = 0; i < mhdp->link.num_lanes; i++) { + vs[i * 2] = '0' + phy_cfg->dp.voltage[i]; + pe[i * 2] = '0' + phy_cfg->dp.pre[i]; + } + + vs[i * 2 - 1] = '\0'; + pe[i * 2 - 1] = '\0'; + + dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n", + prefix, + mhdp->link.num_lanes, mhdp->link.rate / 100, + vs, pe); +} + +static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp, + u8 eq_tps, + unsigned int training_interval) +{ + u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0; + u8 link_status[DP_LINK_STATUS_SIZE]; + union phy_configure_opts phy_cfg; + u32 reg32; + int ret; + bool r; + + dev_dbg(mhdp->dev, "Starting EQ phase\n"); + + /* Enable link training TPS[eq_tps] in PHY */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN | + CDNS_PHY_TRAINING_TYPE(eq_tps); + if (eq_tps != 4) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + +
drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE : + CDNS_DP_TRAINING_PATTERN_4); + + drm_dp_dpcd_read_link_status(&mhdp->aux, link_status); + + do { + cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data, + &phy_cfg); + phy_cfg.dp.lanes = mhdp->link.num_lanes; + phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(mhdp->phy, &phy_cfg); + if (ret) { + dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + goto err; + } + + cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, + training_interval, lanes_data, link_status); + + r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes); + if (!r) + goto err; + + if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) { + cdns_mhdp_print_lt_status("EQ phase ok", mhdp, + &phy_cfg); + return true; + } + + fail_counter_short++; + + cdns_mhdp_adjust_requested_eq(mhdp, link_status); + } while (fail_counter_short < 5); + +err: + cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg); + + return false; +} + +static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp, + u8 link_status[DP_LINK_STATUS_SIZE], + u8 *req_volt, u8 *req_pre) +{ + const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); + const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); + unsigned int i; + + for (i = 0; i < mhdp->link.num_lanes; i++) { + u8 val; + + val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ? + max_volt : req_volt[i]; + cdns_mhdp_set_adjust_request_voltage(link_status, i, val); + + val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ? + max_pre : req_pre[i]; + cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val); + } +} + +static +void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done, + bool *same_before_adjust, bool *max_swing_reached, + u8 before_cr[CDNS_DP_MAX_NUM_LANES], + u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt, + u8 *req_pre) +{ + const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); + const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); + bool same_pre, same_volt; + unsigned int i; + u8 adjust; + + *same_before_adjust = false; + *max_swing_reached = false; + *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes); + + for (i = 0; i < mhdp->link.num_lanes; i++) { + adjust = drm_dp_get_adjust_request_voltage(after_cr, i); + req_volt[i] = min(adjust, max_volt); + + adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + req_pre[i] = min(adjust, max_pre); + + same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) == + req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT; + same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) == + req_volt[i]; + if (same_pre && same_volt) + *same_before_adjust = true; + + /* 3.1.5.2 in DP Standard v1.4. 
Table 3-1 */ + if (!*cr_done && req_volt[i] + req_pre[i] >= 3) { + *max_swing_reached = true; + return; + } + } +} + +static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp) +{ + u8 lanes_data[CDNS_DP_MAX_NUM_LANES], + fail_counter_short = 0, fail_counter_cr_long = 0; + u8 link_status[DP_LINK_STATUS_SIZE]; + bool cr_done; + union phy_configure_opts phy_cfg; + int ret; + + dev_dbg(mhdp->dev, "Starting CR phase\n"); + + ret = cdns_mhdp_link_training_init(mhdp); + if (ret) + goto err; + + drm_dp_dpcd_read_link_status(&mhdp->aux, link_status); + + do { + u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {}; + u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {}; + bool same_before_adjust, max_swing_reached; + + cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data, + &phy_cfg); + phy_cfg.dp.lanes = mhdp->link.num_lanes; + phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = true; + ret = phy_configure(mhdp->phy, &phy_cfg); + if (ret) { + dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", + __func__, ret); + goto err; + } + + cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100, + lanes_data, link_status); + + cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust, + &max_swing_reached, lanes_data, + link_status, + requested_adjust_volt_swing, + requested_adjust_pre_emphasis); + + if (max_swing_reached) { + dev_err(mhdp->dev, "CR: max swing reached\n"); + goto err; + } + + if (cr_done) { + cdns_mhdp_print_lt_status("CR phase ok", mhdp, + &phy_cfg); + return true; + } + + /* Not all CR_DONE bits set */ + fail_counter_cr_long++; + + if (same_before_adjust) { + fail_counter_short++; + continue; + } + + fail_counter_short = 0; + /* + * Voltage swing/pre-emphasis adjust requested + * during CR phase + */ + cdns_mhdp_adjust_requested_cr(mhdp, link_status, + requested_adjust_volt_swing, + requested_adjust_pre_emphasis); + } while (fail_counter_short < 5 && fail_counter_cr_long < 10); + +err: + cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg); + + return false; +} + +static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link) +{ + switch (drm_dp_link_rate_to_bw_code(link->rate)) { + case DP_LINK_BW_2_7: + link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62); + break; + case DP_LINK_BW_5_4: + link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7); + break; + case DP_LINK_BW_8_1: + link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4); + break; + } +} + +static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp, + unsigned int training_interval) +{ + u32 reg32; + const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp); + int ret; + + while (1) { + if (!cdns_mhdp_link_training_cr(mhdp)) { + if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) != + DP_LINK_BW_1_62) { + dev_dbg(mhdp->dev, + "Reducing link rate during CR phase\n"); + cdns_mhdp_lower_link_rate(&mhdp->link); + + continue; + } else if (mhdp->link.num_lanes > 1) { + dev_dbg(mhdp->dev, + "Reducing lanes number during CR phase\n"); + mhdp->link.num_lanes >>= 1; + mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp); + + continue; + } + + dev_err(mhdp->dev, + "Link training failed during CR phase\n"); + goto err; + } + + if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps, + training_interval)) + break; + + if (mhdp->link.num_lanes > 1) { + dev_dbg(mhdp->dev, + "Reducing lanes number during EQ phase\n"); + mhdp->link.num_lanes >>= 1; + + continue; + } else if 
(drm_dp_link_rate_to_bw_code(mhdp->link.rate) != + DP_LINK_BW_1_62) { + dev_dbg(mhdp->dev, + "Reducing link rate during EQ phase\n"); + cdns_mhdp_lower_link_rate(&mhdp->link); + mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp); + + continue; + } + + dev_err(mhdp->dev, "Link training failed during EQ phase\n"); + goto err; + } + + dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n", + mhdp->link.num_lanes, mhdp->link.rate / 100); + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + mhdp->host.scrambler ? 0 : + DP_LINK_SCRAMBLING_DISABLE); + + ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32); + if (ret < 0) { + dev_err(mhdp->dev, + "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", + ret); + return ret; + } + reg32 &= ~GENMASK(1, 0); + reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes); + reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC; + reg32 |= CDNS_DP_FRAMER_EN; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32); + + /* Reset PHY config */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); + if (!mhdp->host.scrambler) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + + return 0; +err: + /* Reset PHY config */ + reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); + if (!mhdp->host.scrambler) + reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); + + drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + + return -EIO; +} + +static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp, + u32 interval) +{ + if (interval == 0) + return 400; + if (interval < 5) + return 4000 << (interval - 1); + dev_err(mhdp->dev, + "wrong training interval returned by DPCD: %d\n", interval); + return 0; +} + +static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp) +{ + unsigned int link_rate; + + /* Get source capabilities based on PHY attributes */ + + mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width; + if (!mhdp->host.lanes_cnt) + mhdp->host.lanes_cnt = 4; + + link_rate = mhdp->phy->attrs.max_link_rate; + if (!link_rate) + link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1); + else + /* PHY uses Mb/s, DRM uses tens of kb/s.
*/ + link_rate *= 100; + + mhdp->host.link_rate = link_rate; + mhdp->host.volt_swing = CDNS_VOLT_SWING(3); + mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3); + mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) | + CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) | + CDNS_SUPPORT_TPS(4); + mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL; + mhdp->host.fast_link = false; + mhdp->host.enhanced = true; + mhdp->host.scrambler = true; + mhdp->host.ssc = false; +} + +static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp, + u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + mhdp->sink.link_rate = mhdp->link.rate; + mhdp->sink.lanes_cnt = mhdp->link.num_lanes; + mhdp->sink.enhanced = !!(mhdp->link.capabilities & + DP_LINK_CAP_ENHANCED_FRAMING); + + /* Set SSC support */ + mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + + /* Set TPS support */ + mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2); + if (drm_dp_tps3_supported(dpcd)) + mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3); + if (drm_dp_tps4_supported(dpcd)) + mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4); + + /* Set fast link support */ + mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING); +} + +static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp) +{ + u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2]; + u32 resp, interval, interval_us; + u8 ext_cap_chk = 0; + unsigned int addr; + int err; + + WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); + + drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL, + &ext_cap_chk); + + if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT) + addr = DP_DP13_DPCD_REV; + else + addr = DP_DPCD_REV; + + err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE); + if (err < 0) { + dev_err(mhdp->dev, "Failed to read receiver capabilities\n"); + return err; + } + + mhdp->link.revision = dpcd[0]; + mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]); + mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK; + + if (dpcd[2] & DP_ENHANCED_FRAME_CAP) + mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n"); + cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link); + + cdns_mhdp_fill_sink_caps(mhdp, dpcd); + + mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp); + mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp); + + /* Disable framer for link training */ + err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); + if (err < 0) { + dev_err(mhdp->dev, + "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", + err); + return err; + } + + resp &= ~CDNS_DP_FRAMER_EN; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); + + /* Spread AMP if required, enable 8b/10b coding */ + amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0; + amp[1] = DP_SET_ANSI_8B10B; + drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2); + + if (mhdp->host.fast_link & mhdp->sink.fast_link) { + dev_err(mhdp->dev, "fastlink not supported\n"); + return -EOPNOTSUPP; + } + + interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK; + interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval); + if (!interval_us || + cdns_mhdp_link_training(mhdp, interval_us)) { + dev_err(mhdp->dev, "Link training failed. 
Exiting.\n"); + return -EIO; + } + + mhdp->link_up = true; + + return 0; +} + +static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp) +{ + WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); + + if (mhdp->plugged) + cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link); + + mhdp->link_up = false; +} + +static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp, + struct drm_connector *connector) +{ + if (!mhdp->plugged) + return NULL; + + return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp); +} + +static int cdns_mhdp_get_modes(struct drm_connector *connector) +{ + struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector); + struct edid *edid; + int num_modes; + + if (!mhdp->plugged) + return 0; + + edid = cdns_mhdp_get_edid(mhdp, connector); + if (!edid) { + dev_err(mhdp->dev, "Failed to read EDID\n"); + return 0; + } + + drm_connector_update_edid_property(connector, edid); + num_modes = drm_add_edid_modes(connector, edid); + kfree(edid); + + /* + * HACK: Warn about unsupported display formats until we deal + * with them correctly. + */ + if (connector->display_info.color_formats && + !(connector->display_info.color_formats & + mhdp->display_fmt.color_format)) + dev_warn(mhdp->dev, + "%s: No supported color_format found (0x%08x)\n", + __func__, connector->display_info.color_formats); + + if (connector->display_info.bpc && + connector->display_info.bpc < mhdp->display_fmt.bpc) + dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n", + __func__, connector->display_info.bpc, + mhdp->display_fmt.bpc); + + return num_modes; +} + +static int cdns_mhdp_connector_detect(struct drm_connector *conn, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn); + + return cdns_mhdp_detect(mhdp); +} + +static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt) +{ + u32 bpp; + + if (fmt->y_only) + return fmt->bpc; + + switch (fmt->color_format) { + case DRM_COLOR_FORMAT_RGB444: + case DRM_COLOR_FORMAT_YCRCB444: + bpp = fmt->bpc * 3; + break; + case DRM_COLOR_FORMAT_YCRCB422: + bpp = fmt->bpc * 2; + break; + case DRM_COLOR_FORMAT_YCRCB420: + bpp = fmt->bpc * 3 / 2; + break; + default: + bpp = fmt->bpc * 3; + WARN_ON(1); + } + return bpp; +} + +static +bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp, + const struct drm_display_mode *mode, + unsigned int lanes, unsigned int rate) +{ + u32 max_bw, req_bw, bpp; + + /* + * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8 + * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the + * value thus equals the bandwidth in 10kb/s units, which matches the + * units of the rate parameter. 
+ */ + + bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); + req_bw = mode->clock * bpp / 8; + max_bw = lanes * rate; + if (req_bw > max_bw) { + dev_dbg(mhdp->dev, + "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n", + mode->name, req_bw, max_bw); + + return false; + } + + return true; +} + +static +enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn, + struct drm_display_mode *mode) +{ + struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn); + + mutex_lock(&mhdp->link_mutex); + + if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + mutex_unlock(&mhdp->link_mutex); + return MODE_CLOCK_HIGH; + } + + mutex_unlock(&mhdp->link_mutex); + return MODE_OK; +} + +static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = { + .detect_ctx = cdns_mhdp_connector_detect, + .get_modes = cdns_mhdp_get_modes, + .mode_valid = cdns_mhdp_mode_valid, +}; + +static const struct drm_connector_funcs cdns_mhdp_conn_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .reset = drm_atomic_helper_connector_reset, + .destroy = drm_connector_cleanup, +}; + +static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp) +{ + u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36; + struct drm_connector *conn = &mhdp->connector; + struct drm_bridge *bridge = &mhdp->bridge; + int ret; + + if (!bridge->encoder) { + dev_err(mhdp->dev, "Parent encoder object not found"); + return -ENODEV; + } + + conn->polled = DRM_CONNECTOR_POLL_HPD; + + ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) { + dev_err(mhdp->dev, "Failed to initialize connector with drm\n"); + return ret; + } + + drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs); + + ret = drm_display_info_set_bus_formats(&conn->display_info, + &bus_format, 1); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(conn, bridge->encoder); + if (ret) { + dev_err(mhdp->dev, "Failed to attach connector to encoder\n"); + return ret; + } + + return 0; +} + +static int cdns_mhdp_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + bool hw_ready; + int ret; + + dev_dbg(mhdp->dev, "%s\n", __func__); + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + ret = cdns_mhdp_connector_init(mhdp); + if (ret) + return ret; + } + + spin_lock(&mhdp->start_lock); + + mhdp->bridge_attached = true; + hw_ready = mhdp->hw_state == MHDP_HW_READY; + + spin_unlock(&mhdp->start_lock); + + /* Enable SW event interrupts */ + if (hw_ready) + writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); + + return 0; +} + +static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp, + const struct drm_display_mode *mode) +{ + unsigned int dp_framer_sp = 0, msa_horizontal_1, + msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl, + misc0 = 0, misc1 = 0, pxl_repr, + front_porch, back_porch, msa_h0, msa_v0, hsync, vsync, + dp_vertical_1; + u8 stream_id = mhdp->stream_id; + u32 bpp, bpc, pxlfmt, framer; + int ret; + + pxlfmt = mhdp->display_fmt.color_format; + bpc = mhdp->display_fmt.bpc; + + /* + * If YCBCR supported and stream not SD, use ITU709 + * Need to handle ITU version with YCBCR420 when supported + */ + if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 || + pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && 
mode->crtc_vdisplay >= 720) + misc0 = DP_YCBCR_COEFFICIENTS_ITU709; + + bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); + + switch (pxlfmt) { + case DRM_COLOR_FORMAT_RGB444: + pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT; + misc0 |= DP_COLOR_FORMAT_RGB; + break; + case DRM_COLOR_FORMAT_YCRCB444: + pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT; + misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA; + break; + case DRM_COLOR_FORMAT_YCRCB422: + pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT; + misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA; + break; + case DRM_COLOR_FORMAT_YCRCB420: + pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT; + break; + default: + pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT; + } + + switch (bpc) { + case 6: + misc0 |= DP_TEST_BIT_DEPTH_6; + pxl_repr |= CDNS_DP_FRAMER_6_BPC; + break; + case 8: + misc0 |= DP_TEST_BIT_DEPTH_8; + pxl_repr |= CDNS_DP_FRAMER_8_BPC; + break; + case 10: + misc0 |= DP_TEST_BIT_DEPTH_10; + pxl_repr |= CDNS_DP_FRAMER_10_BPC; + break; + case 12: + misc0 |= DP_TEST_BIT_DEPTH_12; + pxl_repr |= CDNS_DP_FRAMER_12_BPC; + break; + case 16: + misc0 |= DP_TEST_BIT_DEPTH_16; + pxl_repr |= CDNS_DP_FRAMER_16_BPC; + break; + } + + bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT; + + cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id), + bnd_hsync2vsync); + + hsync2vsync_pol_ctrl = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id), + hsync2vsync_pol_ctrl); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp); + + front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay; + back_porch = mode->crtc_htotal - mode->crtc_hsync_end; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id), + CDNS_DP_FRONT_PORCH(front_porch) | + CDNS_DP_BACK_PORCH(back_porch)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id), + mode->crtc_hdisplay * bpp / 8); + + msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id), + CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) | + CDNS_DP_MSAH0_HSYNC_START(msa_h0)); + + hsync = mode->crtc_hsync_end - mode->crtc_hsync_start; + msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) | + CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay); + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id), + msa_horizontal_1); + + msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id), + CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) | + CDNS_DP_MSAV0_VSYNC_START(msa_v0)); + + vsync = mode->crtc_vsync_end - mode->crtc_vsync_start; + msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) | + CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay); + if (mode->flags & 
DRM_MODE_FLAG_NVSYNC) + msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW; + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id), + msa_vertical_1); + + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && + mode->crtc_vtotal % 2 == 0) + misc1 = DP_TEST_INTERLACED; + if (mhdp->display_fmt.y_only) + misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY; + /* Use VSC SDP for Y420 */ + if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420) + misc1 = CDNS_DP_TEST_VSC_SDP; + + cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id), + misc0 | (misc1 << 8)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id), + CDNS_DP_H_HSYNC_WIDTH(hsync) | + CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id), + CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) | + CDNS_DP_V0_VSTART(msa_v0)); + + dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal); + if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && + mode->crtc_vtotal % 2 == 0) + dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN; + + cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1); + + cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1, + (mode->flags & DRM_MODE_FLAG_INTERLACE) ? + CDNS_DP_VB_ID_INTERLACED : 0); + + ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer); + if (ret < 0) { + dev_err(mhdp->dev, + "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", + ret); + return; + } + framer |= CDNS_DP_FRAMER_EN; + framer &= ~CDNS_DP_NO_VIDEO_MODE; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer); +} + +static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp, + const struct drm_display_mode *mode) +{ + u32 rate, vs, required_bandwidth, available_bandwidth; + s32 line_thresh1, line_thresh2, line_thresh = 0; + int pxlclock = mode->crtc_clock; + u32 tu_size = 64; + u32 bpp; + + /* Get rate in MSymbols per second per lane */ + rate = mhdp->link.rate / 1000; + + bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); + + required_bandwidth = pxlclock * bpp / 8; + available_bandwidth = mhdp->link.num_lanes * rate; + + vs = tu_size * required_bandwidth / available_bandwidth; + vs /= 1000; + + if (vs == tu_size) + vs = tu_size - 1; + + line_thresh1 = ((vs + 1) << 5) * 8 / bpp; + line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5); + line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes; + line_thresh = (line_thresh >> 5) + 2; + + mhdp->stream_id = 0; + + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU, + CDNS_DP_FRAMER_TU_VS(vs) | + CDNS_DP_FRAMER_TU_SIZE(tu_size) | + CDNS_DP_FRAMER_TU_CNT_RST_EN); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0), + line_thresh & GENMASK(5, 0)); + + cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0), + CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ? 
+ 0 : tu_size - vs)); + + cdns_mhdp_configure_video(mhdp, mode); +} + +static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + struct cdns_mhdp_bridge_state *mhdp_state; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_bridge_state *new_state; + const struct drm_display_mode *mode; + u32 resp; + int ret = 0; /* the WARN_ON() paths below jump to "out" without setting ret */ + + dev_dbg(mhdp->dev, "bridge enable\n"); + + mutex_lock(&mhdp->link_mutex); + + if (mhdp->plugged && !mhdp->link_up) { + ret = cdns_mhdp_link_up(mhdp); + if (ret < 0) + goto out; + } + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable) + mhdp->info->ops->enable(mhdp); + + /* Enable VIF clock for stream 0 */ + ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); + if (ret < 0) { + dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret); + goto out; + } + + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, + resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN); + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + if (WARN_ON(!connector)) + goto out; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + goto out; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + goto out; + + mode = &crtc_state->adjusted_mode; + + new_state = drm_atomic_get_new_bridge_state(state, bridge); + if (WARN_ON(!new_state)) + goto out; + + if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + ret = -EINVAL; + goto out; + } + + cdns_mhdp_sst_enable(mhdp, mode); + + mhdp_state = to_cdns_mhdp_bridge_state(new_state); + + mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode); + drm_mode_set_name(mhdp_state->current_mode); + + dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name); + + mhdp->bridge_enabled = true; + +out: + mutex_unlock(&mhdp->link_mutex); + if (ret < 0) + schedule_work(&mhdp->modeset_retry_work); +} + +static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + u32 resp; + + dev_dbg(mhdp->dev, "%s\n", __func__); + + mutex_lock(&mhdp->link_mutex); + + mhdp->bridge_enabled = false; + cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); + resp &= ~CDNS_DP_FRAMER_EN; + resp |= CDNS_DP_NO_VIDEO_MODE; + cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); + + cdns_mhdp_link_down(mhdp); + + /* Disable VIF clock for stream 0 */ + cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); + cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, + resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN)); + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable) + mhdp->info->ops->disable(mhdp); + + mutex_unlock(&mhdp->link_mutex); +} + +static void cdns_mhdp_detach(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + dev_dbg(mhdp->dev, "%s\n", __func__); + + spin_lock(&mhdp->start_lock); + + mhdp->bridge_attached = false; + + spin_unlock(&mhdp->start_lock); + + writel(~0, mhdp->regs + CDNS_APB_INT_MASK); +} + +static struct drm_bridge_state * +cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge) +{ + struct cdns_mhdp_bridge_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + +
__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base); + + return &state->base; +} + +static void +cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge, + struct drm_bridge_state *state) +{ + struct cdns_mhdp_bridge_state *cdns_mhdp_state; + + cdns_mhdp_state = to_cdns_mhdp_bridge_state(state); + + if (cdns_mhdp_state->current_mode) { + drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode); + cdns_mhdp_state->current_mode = NULL; + } + + kfree(cdns_mhdp_state); +} + +static struct drm_bridge_state * +cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge) +{ + struct cdns_mhdp_bridge_state *cdns_mhdp_state; + + cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL); + if (!cdns_mhdp_state) + return NULL; + + __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base); + + return &cdns_mhdp_state->base; +} + +static int cdns_mhdp_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; + + mutex_lock(&mhdp->link_mutex); + + if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n", + __func__, mode->name, mhdp->link.num_lanes, + mhdp->link.rate / 100); + mutex_unlock(&mhdp->link_mutex); + return -EINVAL; + } + + mutex_unlock(&mhdp->link_mutex); + return 0; +} + +static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + return cdns_mhdp_detect(mhdp); +} + +static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + return cdns_mhdp_get_edid(mhdp, connector); +} + +static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + /* Enable SW event interrupts */ + if (mhdp->bridge_attached) + writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT, + mhdp->regs + CDNS_APB_INT_MASK); +} + +static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge) +{ + struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); + + writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK); +} + +static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = { + .atomic_enable = cdns_mhdp_atomic_enable, + .atomic_disable = cdns_mhdp_atomic_disable, + .atomic_check = cdns_mhdp_atomic_check, + .attach = cdns_mhdp_attach, + .detach = cdns_mhdp_detach, + .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state, + .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state, + .atomic_reset = cdns_mhdp_bridge_atomic_reset, + .detect = cdns_mhdp_bridge_detect, + .get_edid = cdns_mhdp_bridge_get_edid, + .hpd_enable = cdns_mhdp_bridge_hpd_enable, + .hpd_disable = cdns_mhdp_bridge_hpd_disable, +}; + +static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse) +{ + int hpd_event, hpd_status; + + *hpd_pulse = false; + + hpd_event = cdns_mhdp_read_hpd_event(mhdp); + + /* Getting event bits failed, bail out */ + if (hpd_event < 0) { + dev_warn(mhdp->dev, "%s: read event failed: %d\n", + __func__, hpd_event); + return false; + } + + hpd_status = cdns_mhdp_get_hpd_status(mhdp); + if (hpd_status < 0) { + dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n", + 
__func__, hpd_status); + return false; + } + + if (hpd_event & DPTX_READ_EVENT_HPD_PULSE) + *hpd_pulse = true; + + return !!hpd_status; +} + +static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp) +{ + struct cdns_mhdp_bridge_state *cdns_bridge_state; + struct drm_display_mode *current_mode; + bool old_plugged = mhdp->plugged; + struct drm_bridge_state *state; + u8 status[DP_LINK_STATUS_SIZE]; + bool hpd_pulse; + int ret = 0; + + mutex_lock(&mhdp->link_mutex); + + mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse); + + if (!mhdp->plugged) { + cdns_mhdp_link_down(mhdp); + mhdp->link.rate = mhdp->host.link_rate; + mhdp->link.num_lanes = mhdp->host.lanes_cnt; + goto out; + } + + /* + * If we get an HPD pulse event and we were and still are connected, + * check the link status. If link status is ok, there's nothing to do + * as we don't handle DP interrupts. If link status is bad, continue + * with full link setup. + */ + if (hpd_pulse && old_plugged == mhdp->plugged) { + ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status); + + /* + * If everything looks fine, just return, as we don't handle + * DP IRQs. + */ + if (ret > 0 && + drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) && + drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes)) + goto out; + + /* If link is bad, mark link as down so that we do a new LT */ + mhdp->link_up = false; + } + + if (!mhdp->link_up) { + ret = cdns_mhdp_link_up(mhdp); + if (ret < 0) + goto out; + } + + if (mhdp->bridge_enabled) { + state = drm_priv_to_bridge_state(mhdp->bridge.base.state); + if (!state) { + ret = -EINVAL; + goto out; + } + + cdns_bridge_state = to_cdns_mhdp_bridge_state(state); + if (!cdns_bridge_state) { + ret = -EINVAL; + goto out; + } + + current_mode = cdns_bridge_state->current_mode; + if (!current_mode) { + ret = -EINVAL; + goto out; + } + + if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes, + mhdp->link.rate)) { + ret = -EINVAL; + goto out; + } + + dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, + current_mode->name); + + cdns_mhdp_sst_enable(mhdp, current_mode); + } +out: + mutex_unlock(&mhdp->link_mutex); + return ret; +} + +static void cdns_mhdp_modeset_retry_fn(struct work_struct *work) +{ + struct cdns_mhdp_device *mhdp; + struct drm_connector *conn; + + mhdp = container_of(work, typeof(*mhdp), modeset_retry_work); + + conn = &mhdp->connector; + + /* Grab the lock before changing the connector property */ + mutex_lock(&conn->dev->mode_config.mutex); + + /* + * Set connector link status to BAD and send a Uevent to notify + * userspace to do a modeset. + */ + drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&conn->dev->mode_config.mutex); + + /* Send Hotplug uevent so userspace can reprobe */ + drm_kms_helper_hotplug_event(mhdp->bridge.dev); +} + +static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data) +{ + struct cdns_mhdp_device *mhdp = data; + u32 apb_stat, sw_ev0; + bool bridge_attached; + int ret; + + apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS); + if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT)) + return IRQ_NONE; + + sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0); + + /* + * Calling drm_kms_helper_hotplug_event() when not attached + * to a drm device causes an oops because the drm_bridge->dev + * is NULL. See cdns_mhdp_fw_cb() comments for details about the + * problems related to the drm_kms_helper_hotplug_event() call.
+ */ + spin_lock(&mhdp->start_lock); + bridge_attached = mhdp->bridge_attached; + spin_unlock(&mhdp->start_lock); + + if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) { + ret = cdns_mhdp_update_link_status(mhdp); + if (mhdp->connector.dev) { + if (ret < 0) + schedule_work(&mhdp->modeset_retry_work); + else + drm_kms_helper_hotplug_event(mhdp->bridge.dev); + } else { + drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp)); + } + } + + return IRQ_HANDLED; +} + +static int cdns_mhdp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cdns_mhdp_device *mhdp; + unsigned long rate; + struct clk *clk; + int ret; + int irq; + + mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL); + if (!mhdp) + return -ENOMEM; + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + mhdp->clk = clk; + mhdp->dev = dev; + mutex_init(&mhdp->mbox_mutex); + mutex_init(&mhdp->link_mutex); + spin_lock_init(&mhdp->start_lock); + + drm_dp_aux_init(&mhdp->aux); + mhdp->aux.dev = dev; + mhdp->aux.transfer = cdns_mhdp_transfer; + + mhdp->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mhdp->regs)) { + dev_err(dev, "Failed to get memory resource\n"); + return PTR_ERR(mhdp->regs); + } + + mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0); + if (IS_ERR(mhdp->phy)) { + dev_err(dev, "no PHY configured\n"); + return PTR_ERR(mhdp->phy); + } + + platform_set_drvdata(pdev, mhdp); + + mhdp->info = of_device_get_match_data(dev); + + clk_prepare_enable(clk); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + pm_runtime_disable(dev); + goto clk_disable; + } + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) { + ret = mhdp->info->ops->init(mhdp); + if (ret != 0) { + dev_err(dev, "MHDP platform initialization failed: %d\n", + ret); + goto runtime_put; + } + } + + rate = clk_get_rate(clk); + writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L); + writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H); + + dev_dbg(dev, "func clk rate %lu Hz\n", rate); + + writel(~0, mhdp->regs + CDNS_APB_INT_MASK); + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, + cdns_mhdp_irq_handler, IRQF_ONESHOT, + "mhdp8546", mhdp); + if (ret) { + dev_err(dev, "cannot install IRQ %d\n", irq); + ret = -EIO; + goto plat_fini; + } + + cdns_mhdp_fill_host_caps(mhdp); + + /* Initialize link rate and num of lanes to host values */ + mhdp->link.rate = mhdp->host.link_rate; + mhdp->link.num_lanes = mhdp->host.lanes_cnt; + + /* The only currently supported format */ + mhdp->display_fmt.y_only = false; + mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444; + mhdp->display_fmt.bpc = 8; + + mhdp->bridge.of_node = pdev->dev.of_node; + mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs; + mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HPD; + mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; + if (mhdp->info) + mhdp->bridge.timings = mhdp->info->timings; + + ret = phy_init(mhdp->phy); + if (ret) { + dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret); + goto plat_fini; + } + + /* Initialize the work for modeset in case of link train failure */ + INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn); + + init_waitqueue_head(&mhdp->fw_load_wq); + + ret = cdns_mhdp_load_firmware(mhdp); + if (ret) + goto phy_exit; + + drm_bridge_add(&mhdp->bridge); + + return 0; + 
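+ /* Error unwind: undo the probe steps above in reverse order */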
+phy_exit: + phy_exit(mhdp->phy); +plat_fini: + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) + mhdp->info->ops->exit(mhdp); +runtime_put: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); +clk_disable: + clk_disable_unprepare(mhdp->clk); + + return ret; +} + +static int cdns_mhdp_remove(struct platform_device *pdev) +{ + struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev); + unsigned long timeout = msecs_to_jiffies(100); + bool stop_fw = false; + int ret; + + drm_bridge_remove(&mhdp->bridge); + + ret = wait_event_timeout(mhdp->fw_load_wq, + mhdp->hw_state == MHDP_HW_READY, + timeout); + if (ret == 0) + dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n", + __func__); + else + stop_fw = true; + + spin_lock(&mhdp->start_lock); + mhdp->hw_state = MHDP_HW_STOPPED; + spin_unlock(&mhdp->start_lock); + + if (stop_fw) + ret = cdns_mhdp_set_firmware_active(mhdp, false); + + phy_exit(mhdp->phy); + + if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) + mhdp->info->ops->exit(mhdp); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + cancel_work_sync(&mhdp->modeset_retry_work); + flush_scheduled_work(); + + clk_disable_unprepare(mhdp->clk); + + return ret; +} + +static const struct of_device_id mhdp_ids[] = { + { .compatible = "cdns,mhdp8546", }, +#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E + { .compatible = "ti,j721e-mhdp8546", + .data = &(const struct cdns_mhdp_platform_info) { + .timings = &mhdp_ti_j721e_bridge_timings, + .ops = &mhdp_ti_j721e_ops, + }, + }, +#endif + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mhdp_ids); + +static struct platform_driver mhdp_driver = { + .driver = { + .name = "cdns-mhdp8546", + .of_match_table = of_match_ptr(mhdp_ids), + }, + .probe = cdns_mhdp_probe, + .remove = cdns_mhdp_remove, +}; +module_platform_driver(mhdp_driver); + +MODULE_FIRMWARE(FW_NAME); + +MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>"); +MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>"); +MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>"); +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>"); +MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:cdns-mhdp8546"); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h new file mode 100644 index 000000000000..5897a85e3159 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h @@ -0,0 +1,400 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cadence MHDP8546 DP bridge driver. + * + * Copyright (C) 2020 Cadence Design Systems, Inc. 
+ * + * Author: Quentin Schulz <quentin.schulz@free-electrons.com> + * Swapnil Jakhade <sjakhade@cadence.com> + */ + +#ifndef CDNS_MHDP8546_CORE_H +#define CDNS_MHDP8546_CORE_H + +#include <linux/bits.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_dp_helper.h> + +struct clk; +struct device; +struct phy; + +/* Register offsets */ +#define CDNS_APB_CTRL 0x00000 +#define CDNS_CPU_STALL BIT(3) + +#define CDNS_MAILBOX_FULL 0x00008 +#define CDNS_MAILBOX_EMPTY 0x0000c +#define CDNS_MAILBOX_TX_DATA 0x00010 +#define CDNS_MAILBOX_RX_DATA 0x00014 +#define CDNS_KEEP_ALIVE 0x00018 +#define CDNS_KEEP_ALIVE_MASK GENMASK(7, 0) + +#define CDNS_VER_L 0x0001C +#define CDNS_VER_H 0x00020 +#define CDNS_LIB_L_ADDR 0x00024 +#define CDNS_LIB_H_ADDR 0x00028 + +#define CDNS_MB_INT_MASK 0x00034 +#define CDNS_MB_INT_STATUS 0x00038 + +#define CDNS_SW_CLK_L 0x0003c +#define CDNS_SW_CLK_H 0x00040 + +#define CDNS_SW_EVENT0 0x00044 +#define CDNS_DPTX_HPD BIT(0) + +#define CDNS_SW_EVENT1 0x00048 +#define CDNS_SW_EVENT2 0x0004c +#define CDNS_SW_EVENT3 0x00050 + +#define CDNS_APB_INT_MASK 0x0006C +#define CDNS_APB_INT_MASK_MAILBOX_INT BIT(0) +#define CDNS_APB_INT_MASK_SW_EVENT_INT BIT(1) + +#define CDNS_APB_INT_STATUS 0x00070 + +#define CDNS_DPTX_CAR 0x00904 +#define CDNS_VIF_CLK_EN BIT(0) +#define CDNS_VIF_CLK_RSTN BIT(1) + +#define CDNS_SOURCE_VIDEO_IF(s) (0x00b00 + ((s) * 0x20)) +#define CDNS_BND_HSYNC2VSYNC(s) (CDNS_SOURCE_VIDEO_IF(s) + \ + 0x00) +#define CDNS_IP_DTCT_WIN GENMASK(11, 0) +#define CDNS_IP_DET_INTERLACE_FORMAT BIT(12) +#define CDNS_IP_BYPASS_V_INTERFACE BIT(13) + +#define CDNS_HSYNC2VSYNC_POL_CTRL(s) (CDNS_SOURCE_VIDEO_IF(s) + \ + 0x10) +#define CDNS_H2V_HSYNC_POL_ACTIVE_LOW BIT(1) +#define CDNS_H2V_VSYNC_POL_ACTIVE_LOW BIT(2) + +#define CDNS_DPTX_PHY_CONFIG 0x02000 +#define CDNS_PHY_TRAINING_EN BIT(0) +#define CDNS_PHY_TRAINING_TYPE(x) (((x) & GENMASK(3, 0)) << 1) +#define CDNS_PHY_SCRAMBLER_BYPASS BIT(5) +#define CDNS_PHY_ENCODER_BYPASS BIT(6) +#define CDNS_PHY_SKEW_BYPASS BIT(7) +#define CDNS_PHY_TRAINING_AUTO BIT(8) +#define CDNS_PHY_LANE0_SKEW(x) (((x) & GENMASK(2, 0)) << 9) +#define CDNS_PHY_LANE1_SKEW(x) (((x) & GENMASK(2, 0)) << 12) +#define CDNS_PHY_LANE2_SKEW(x) (((x) & GENMASK(2, 0)) << 15) +#define CDNS_PHY_LANE3_SKEW(x) (((x) & GENMASK(2, 0)) << 18) +#define CDNS_PHY_COMMON_CONFIG (CDNS_PHY_LANE1_SKEW(1) | \ + CDNS_PHY_LANE2_SKEW(2) | \ + CDNS_PHY_LANE3_SKEW(3)) +#define CDNS_PHY_10BIT_EN BIT(21) + +#define CDNS_DP_FRAMER_GLOBAL_CONFIG 0x02200 +#define CDNS_DP_NUM_LANES(x) ((x) - 1) +#define CDNS_DP_MST_EN BIT(2) +#define CDNS_DP_FRAMER_EN BIT(3) +#define CDNS_DP_RATE_GOVERNOR_EN BIT(4) +#define CDNS_DP_NO_VIDEO_MODE BIT(5) +#define CDNS_DP_DISABLE_PHY_RST BIT(6) +#define CDNS_DP_WR_FAILING_EDGE_VSYNC BIT(7) + +#define CDNS_DP_FRAMER_TU 0x02208 +#define CDNS_DP_FRAMER_TU_SIZE(x) (((x) & GENMASK(6, 0)) << 8) +#define CDNS_DP_FRAMER_TU_VS(x) ((x) & GENMASK(5, 0)) +#define CDNS_DP_FRAMER_TU_CNT_RST_EN BIT(15) + +#define CDNS_DP_MTPH_CONTROL 0x02264 +#define CDNS_DP_MTPH_ECF_EN BIT(0) +#define CDNS_DP_MTPH_ACT_EN BIT(1) +#define CDNS_DP_MTPH_LVP_EN BIT(2) + +#define CDNS_DP_MTPH_STATUS 0x0226C +#define CDNS_DP_MTPH_ACT_STATUS BIT(0) + +#define CDNS_DP_LANE_EN 0x02300 +#define CDNS_DP_LANE_EN_LANES(x) GENMASK((x) - 1, 0) + +#define CDNS_DP_ENHNCD 0x02304 + +#define CDNS_DPTX_STREAM(s) (0x03000 + (s) * 0x80) +#define CDNS_DP_MSA_HORIZONTAL_0(s) (CDNS_DPTX_STREAM(s) + 0x00) +#define CDNS_DP_MSAH0_H_TOTAL(x) 
(x) +#define CDNS_DP_MSAH0_HSYNC_START(x) ((x) << 16) + +#define CDNS_DP_MSA_HORIZONTAL_1(s) (CDNS_DPTX_STREAM(s) + 0x04) +#define CDNS_DP_MSAH1_HSYNC_WIDTH(x) (x) +#define CDNS_DP_MSAH1_HSYNC_POL_LOW BIT(15) +#define CDNS_DP_MSAH1_HDISP_WIDTH(x) ((x) << 16) + +#define CDNS_DP_MSA_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x08) +#define CDNS_DP_MSAV0_V_TOTAL(x) (x) +#define CDNS_DP_MSAV0_VSYNC_START(x) ((x) << 16) + +#define CDNS_DP_MSA_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x0c) +#define CDNS_DP_MSAV1_VSYNC_WIDTH(x) (x) +#define CDNS_DP_MSAV1_VSYNC_POL_LOW BIT(15) +#define CDNS_DP_MSAV1_VDISP_WIDTH(x) ((x) << 16) + +#define CDNS_DP_MSA_MISC(s) (CDNS_DPTX_STREAM(s) + 0x10) +#define CDNS_DP_STREAM_CONFIG(s) (CDNS_DPTX_STREAM(s) + 0x14) +#define CDNS_DP_STREAM_CONFIG_2(s) (CDNS_DPTX_STREAM(s) + 0x2c) +#define CDNS_DP_SC2_TU_VS_DIFF(x) ((x) << 8) + +#define CDNS_DP_HORIZONTAL(s) (CDNS_DPTX_STREAM(s) + 0x30) +#define CDNS_DP_H_HSYNC_WIDTH(x) (x) +#define CDNS_DP_H_H_TOTAL(x) ((x) << 16) + +#define CDNS_DP_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x34) +#define CDNS_DP_V0_VHEIGHT(x) (x) +#define CDNS_DP_V0_VSTART(x) ((x) << 16) + +#define CDNS_DP_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x38) +#define CDNS_DP_V1_VTOTAL(x) (x) +#define CDNS_DP_V1_VTOTAL_EVEN BIT(16) + +#define CDNS_DP_MST_SLOT_ALLOCATE(s) (CDNS_DPTX_STREAM(s) + 0x44) +#define CDNS_DP_S_ALLOC_START_SLOT(x) (x) +#define CDNS_DP_S_ALLOC_END_SLOT(x) ((x) << 8) + +#define CDNS_DP_RATE_GOVERNING(s) (CDNS_DPTX_STREAM(s) + 0x48) +#define CDNS_DP_RG_TARG_AV_SLOTS_Y(x) (x) +#define CDNS_DP_RG_TARG_AV_SLOTS_X(x) ((x) << 4) +#define CDNS_DP_RG_ENABLE BIT(10) + +#define CDNS_DP_FRAMER_PXL_REPR(s) (CDNS_DPTX_STREAM(s) + 0x4c) +#define CDNS_DP_FRAMER_6_BPC BIT(0) +#define CDNS_DP_FRAMER_8_BPC BIT(1) +#define CDNS_DP_FRAMER_10_BPC BIT(2) +#define CDNS_DP_FRAMER_12_BPC BIT(3) +#define CDNS_DP_FRAMER_16_BPC BIT(4) +#define CDNS_DP_FRAMER_PXL_FORMAT 0x8 +#define CDNS_DP_FRAMER_RGB BIT(0) +#define CDNS_DP_FRAMER_YCBCR444 BIT(1) +#define CDNS_DP_FRAMER_YCBCR422 BIT(2) +#define CDNS_DP_FRAMER_YCBCR420 BIT(3) +#define CDNS_DP_FRAMER_Y_ONLY BIT(4) + +#define CDNS_DP_FRAMER_SP(s) (CDNS_DPTX_STREAM(s) + 0x50) +#define CDNS_DP_FRAMER_VSYNC_POL_LOW BIT(0) +#define CDNS_DP_FRAMER_HSYNC_POL_LOW BIT(1) +#define CDNS_DP_FRAMER_INTERLACE BIT(2) + +#define CDNS_DP_LINE_THRESH(s) (CDNS_DPTX_STREAM(s) + 0x64) +#define CDNS_DP_ACTIVE_LINE_THRESH(x) (x) + +#define CDNS_DP_VB_ID(s) (CDNS_DPTX_STREAM(s) + 0x68) +#define CDNS_DP_VB_ID_INTERLACED BIT(2) +#define CDNS_DP_VB_ID_COMPRESSED BIT(6) + +#define CDNS_DP_FRONT_BACK_PORCH(s) (CDNS_DPTX_STREAM(s) + 0x78) +#define CDNS_DP_BACK_PORCH(x) (x) +#define CDNS_DP_FRONT_PORCH(x) ((x) << 16) + +#define CDNS_DP_BYTE_COUNT(s) (CDNS_DPTX_STREAM(s) + 0x7c) +#define CDNS_DP_BYTE_COUNT_BYTES_IN_CHUNK_SHIFT 16 + +/* mailbox */ +#define MAILBOX_RETRY_US 1000 +#define MAILBOX_TIMEOUT_US 2000000 + +#define MB_OPCODE_ID 0 +#define MB_MODULE_ID 1 +#define MB_SIZE_MSB_ID 2 +#define MB_SIZE_LSB_ID 3 +#define MB_DATA_ID 4 + +#define MB_MODULE_ID_DP_TX 0x01 +#define MB_MODULE_ID_HDCP_TX 0x07 +#define MB_MODULE_ID_HDCP_RX 0x08 +#define MB_MODULE_ID_HDCP_GENERAL 0x09 +#define MB_MODULE_ID_GENERAL 0x0a + +/* firmware and opcodes */ +#define FW_NAME "cadence/mhdp8546.bin" +#define CDNS_MHDP_IMEM 0x10000 + +#define GENERAL_MAIN_CONTROL 0x01 +#define GENERAL_TEST_ECHO 0x02 +#define GENERAL_BUS_SETTINGS 0x03 +#define GENERAL_TEST_ACCESS 0x04 +#define GENERAL_REGISTER_READ 0x07 + +#define DPTX_SET_POWER_MNG 0x00 +#define DPTX_GET_EDID 0x02 +#define 
DPTX_READ_DPCD 0x03 +#define DPTX_WRITE_DPCD 0x04 +#define DPTX_ENABLE_EVENT 0x05 +#define DPTX_WRITE_REGISTER 0x06 +#define DPTX_READ_REGISTER 0x07 +#define DPTX_WRITE_FIELD 0x08 +#define DPTX_READ_EVENT 0x0a +#define DPTX_GET_LAST_AUX_STAUS 0x0e +#define DPTX_HPD_STATE 0x11 +#define DPTX_ADJUST_LT 0x12 + +#define FW_STANDBY 0 +#define FW_ACTIVE 1 + +/* HPD */ +#define DPTX_READ_EVENT_HPD_TO_HIGH BIT(0) +#define DPTX_READ_EVENT_HPD_TO_LOW BIT(1) +#define DPTX_READ_EVENT_HPD_PULSE BIT(2) +#define DPTX_READ_EVENT_HPD_STATE BIT(3) + +/* general */ +#define CDNS_DP_TRAINING_PATTERN_4 0x7 + +#define CDNS_KEEP_ALIVE_TIMEOUT 2000 + +#define CDNS_VOLT_SWING(x) ((x) & GENMASK(1, 0)) +#define CDNS_FORCE_VOLT_SWING BIT(2) + +#define CDNS_PRE_EMPHASIS(x) ((x) & GENMASK(1, 0)) +#define CDNS_FORCE_PRE_EMPHASIS BIT(2) + +#define CDNS_SUPPORT_TPS(x) BIT((x) - 1) + +#define CDNS_FAST_LINK_TRAINING BIT(0) + +#define CDNS_LANE_MAPPING_TYPE_C_LANE_0(x) ((x) & GENMASK(1, 0)) +#define CDNS_LANE_MAPPING_TYPE_C_LANE_1(x) ((x) & GENMASK(3, 2)) +#define CDNS_LANE_MAPPING_TYPE_C_LANE_2(x) ((x) & GENMASK(5, 4)) +#define CDNS_LANE_MAPPING_TYPE_C_LANE_3(x) ((x) & GENMASK(7, 6)) +#define CDNS_LANE_MAPPING_NORMAL 0xe4 +#define CDNS_LANE_MAPPING_FLIPPED 0x1b + +#define CDNS_DP_MAX_NUM_LANES 4 +#define CDNS_DP_TEST_VSC_SDP BIT(6) /* 1.3+ */ +#define CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY BIT(7) + +#define CDNS_MHDP_MAX_STREAMS 4 + +#define DP_LINK_CAP_ENHANCED_FRAMING BIT(0) + +struct cdns_mhdp_link { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; + +struct cdns_mhdp_host { + unsigned int link_rate; + u8 lanes_cnt; + u8 volt_swing; + u8 pre_emphasis; + u8 pattern_supp; + u8 lane_mapping; + bool fast_link; + bool enhanced; + bool scrambler; + bool ssc; +}; + +struct cdns_mhdp_sink { + unsigned int link_rate; + u8 lanes_cnt; + u8 pattern_supp; + bool fast_link; + bool enhanced; + bool ssc; +}; + +struct cdns_mhdp_display_fmt { + u32 color_format; + u32 bpc; + bool y_only; +}; + +/* + * These enums present MHDP hw initialization state + * Legal state transitions are: + * MHDP_HW_READY <-> MHDP_HW_STOPPED + */ +enum mhdp_hw_state { + MHDP_HW_READY = 1, /* HW ready, FW active */ + MHDP_HW_STOPPED /* Driver removal FW to be stopped */ +}; + +struct cdns_mhdp_device; + +struct mhdp_platform_ops { + int (*init)(struct cdns_mhdp_device *mhdp); + void (*exit)(struct cdns_mhdp_device *mhdp); + void (*enable)(struct cdns_mhdp_device *mhdp); + void (*disable)(struct cdns_mhdp_device *mhdp); +}; + +struct cdns_mhdp_bridge_state { + struct drm_bridge_state base; + struct drm_display_mode *current_mode; +}; + +struct cdns_mhdp_platform_info { + const struct drm_bridge_timings *timings; + const struct mhdp_platform_ops *ops; +}; + +#define to_cdns_mhdp_bridge_state(s) \ + container_of(s, struct cdns_mhdp_bridge_state, base) + +struct cdns_mhdp_device { + void __iomem *regs; + void __iomem *j721e_regs; + + struct device *dev; + struct clk *clk; + struct phy *phy; + + const struct cdns_mhdp_platform_info *info; + + /* This is to protect mailbox communications with the firmware */ + struct mutex mbox_mutex; + + /* + * "link_mutex" protects the access to all the link parameters + * including the link training process. Link training will be + * invoked both from threaded interrupt handler and from atomic + * callbacks when link_up is not set. So this mutex protects + * flags such as link_up, bridge_enabled, link.num_lanes, + * link.rate etc. 
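+ *
+ * A typical update of those fields is therefore expected to look
+ * like (illustrative sketch, not a quote from the driver):
+ *
+ *	mutex_lock(&mhdp->link_mutex);
+ *	mhdp->link.rate = rate;
+ *	mhdp->link.num_lanes = lanes;
+ *	mhdp->link_up = true;
+ *	mutex_unlock(&mhdp->link_mutex);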
+ */ + struct mutex link_mutex; + + struct drm_connector connector; + struct drm_bridge bridge; + + struct cdns_mhdp_link link; + struct drm_dp_aux aux; + + struct cdns_mhdp_host host; + struct cdns_mhdp_sink sink; + struct cdns_mhdp_display_fmt display_fmt; + u8 stream_id; + + bool link_up; + bool plugged; + + /* + * "start_lock" protects the access to bridge_attached and + * hw_state data members that control the delayed firmware + * loading and attaching the bridge. They are accessed from + * both the DRM core and cdns_mhdp_fw_cb(). In most cases just + * protecting the data members is enough, but the irq mask + * setting needs to be protected when enabling the FW. + */ + spinlock_t start_lock; + bool bridge_attached; + bool bridge_enabled; + enum mhdp_hw_state hw_state; + wait_queue_head_t fw_load_wq; + + /* Work struct to schedule a uevent on link train failure */ + struct work_struct modeset_retry_work; +}; + +#define connector_to_mhdp(x) container_of(x, struct cdns_mhdp_device, connector) +#define bridge_to_mhdp(x) container_of(x, struct cdns_mhdp_device, bridge) + +#endif diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c new file mode 100644 index 000000000000..dfe1b59514f7 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI j721e Cadence MHDP8546 DP wrapper + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Jyri Sarha <jsarha@ti.com> + */ + +#include <linux/io.h> +#include <linux/platform_device.h> + +#include "cdns-mhdp8546-j721e.h" + +#define REVISION 0x00 +#define DPTX_IPCFG 0x04 +#define ECC_MEM_CFG 0x08 +#define DPTX_DSC_CFG 0x0c +#define DPTX_SRC_CFG 0x10 +#define DPTX_VIF_SECURE_MODE_CFG 0x14 +#define DPTX_VIF_CONN_STATUS 0x18 +#define PHY_CLK_STATUS 0x1c + +#define DPTX_SRC_AIF_EN BIT(16) +#define DPTX_SRC_VIF_3_IN30B BIT(11) +#define DPTX_SRC_VIF_2_IN30B BIT(10) +#define DPTX_SRC_VIF_1_IN30B BIT(9) +#define DPTX_SRC_VIF_0_IN30B BIT(8) +#define DPTX_SRC_VIF_3_SEL_DPI5 BIT(7) +#define DPTX_SRC_VIF_3_SEL_DPI3 0 +#define DPTX_SRC_VIF_2_SEL_DPI4 BIT(6) +#define DPTX_SRC_VIF_2_SEL_DPI2 0 +#define DPTX_SRC_VIF_1_SEL_DPI3 BIT(5) +#define DPTX_SRC_VIF_1_SEL_DPI1 0 +#define DPTX_SRC_VIF_0_SEL_DPI2 BIT(4) +#define DPTX_SRC_VIF_0_SEL_DPI0 0 +#define DPTX_SRC_VIF_3_EN BIT(3) +#define DPTX_SRC_VIF_2_EN BIT(2) +#define DPTX_SRC_VIF_1_EN BIT(1) +#define DPTX_SRC_VIF_0_EN BIT(0) + +/* TODO turn DPTX_IPCFG fw_mem_clk_en at pm_runtime_suspend. */ + +static int cdns_mhdp_j721e_init(struct cdns_mhdp_device *mhdp) +{ + struct platform_device *pdev = to_platform_device(mhdp->dev); + + mhdp->j721e_regs = devm_platform_ioremap_resource(pdev, 1); + return PTR_ERR_OR_ZERO(mhdp->j721e_regs); +} + +static void cdns_mhdp_j721e_enable(struct cdns_mhdp_device *mhdp) +{ + /* + * Enable VIF_0 and select DPI2 as its input. DSS0 DPI0 is connected + * to eDP DPI2. This is the only supported SST configuration on + * J721E. 
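+	 * With the bit definitions above, the value written below is
+	 * DPTX_SRC_VIF_0_EN (BIT(0)) | DPTX_SRC_VIF_0_SEL_DPI2 (BIT(4)),
+	 * i.e. 0x11.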
+ */ + writel(DPTX_SRC_VIF_0_EN | DPTX_SRC_VIF_0_SEL_DPI2, + mhdp->j721e_regs + DPTX_SRC_CFG); +} + +static void cdns_mhdp_j721e_disable(struct cdns_mhdp_device *mhdp) +{ + /* Put everything to defaults */ + writel(0, mhdp->j721e_regs + DPTX_DSC_CFG); +} + +const struct mhdp_platform_ops mhdp_ti_j721e_ops = { + .init = cdns_mhdp_j721e_init, + .enable = cdns_mhdp_j721e_enable, + .disable = cdns_mhdp_j721e_disable, +}; + +const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings = { + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | + DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE | + DRM_BUS_FLAG_DE_HIGH, +}; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h new file mode 100644 index 000000000000..97d20d115a24 --- /dev/null +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * TI j721e Cadence MHDP8546 DP wrapper + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Jyri Sarha <jsarha@ti.com> + */ + +#ifndef CDNS_MHDP8546_J721E_H +#define CDNS_MHDP8546_J721E_H + +#include "cdns-mhdp8546-core.h" + +struct mhdp_platform_ops; + +extern const struct mhdp_platform_ops mhdp_ti_j721e_ops; +extern const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings; + +#endif /* !CDNS_MHDP8546_J721E_H */ diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index f19d9f7a5db2..f52ccffc1bd1 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -10,13 +10,16 @@ #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #include <drm/drm_bridge.h> #include <drm/drm_panel.h> struct lvds_codec { + struct device *dev; struct drm_bridge bridge; struct drm_bridge *panel_bridge; + struct regulator *vcc; struct gpio_desc *powerdown_gpio; u32 connector_type; }; @@ -38,6 +41,14 @@ static int lvds_codec_attach(struct drm_bridge *bridge, static void lvds_codec_enable(struct drm_bridge *bridge) { struct lvds_codec *lvds_codec = to_lvds_codec(bridge); + int ret; + + ret = regulator_enable(lvds_codec->vcc); + if (ret) { + dev_err(lvds_codec->dev, + "Failed to enable regulator \"vcc\": %d\n", ret); + return; + } if (lvds_codec->powerdown_gpio) gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0); @@ -46,9 +57,15 @@ static void lvds_codec_enable(struct drm_bridge *bridge) static void lvds_codec_disable(struct drm_bridge *bridge) { struct lvds_codec *lvds_codec = to_lvds_codec(bridge); + int ret; if (lvds_codec->powerdown_gpio) gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1); + + ret = regulator_disable(lvds_codec->vcc); + if (ret) + dev_err(lvds_codec->dev, + "Failed to disable regulator \"vcc\": %d\n", ret); } static const struct drm_bridge_funcs funcs = { @@ -63,12 +80,24 @@ static int lvds_codec_probe(struct platform_device *pdev) struct device_node *panel_node; struct drm_panel *panel; struct lvds_codec *lvds_codec; + int ret; lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL); if (!lvds_codec) return -ENOMEM; + lvds_codec->dev = &pdev->dev; lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev); + + lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power"); + if (IS_ERR(lvds_codec->vcc)) { + ret = PTR_ERR(lvds_codec->vcc); + if (ret != -EPROBE_DEFER) + dev_err(lvds_codec->dev, + "Unable to get \"vcc\" supply: %d\n", ret); + return ret; + } + 
lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH); if (IS_ERR(lvds_codec->powerdown_gpio)) diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 9f7b7a9c53c5..7bd0affa057a 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -65,6 +65,7 @@ struct ps8640 { struct regulator_bulk_data supplies[2]; struct gpio_desc *gpio_reset; struct gpio_desc *gpio_powerdown; + bool powered; }; static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e) @@ -91,13 +92,15 @@ static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge, return 0; } -static void ps8640_pre_enable(struct drm_bridge *bridge) +static void ps8640_bridge_poweron(struct ps8640 *ps_bridge) { - struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL]; unsigned long timeout; int ret, status; + if (ps_bridge->powered) + return; + ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) { @@ -152,10 +155,6 @@ static void ps8640_pre_enable(struct drm_bridge *bridge) goto err_regulators_disable; } - ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE); - if (ret) - goto err_regulators_disable; - /* Switch access edp panel's edid through i2c */ ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS, I2C_BYPASS_EN); @@ -164,6 +163,8 @@ static void ps8640_pre_enable(struct drm_bridge *bridge) goto err_regulators_disable; } + ps_bridge->powered = true; + return; err_regulators_disable: @@ -171,12 +172,12 @@ err_regulators_disable: ps_bridge->supplies); } -static void ps8640_post_disable(struct drm_bridge *bridge) +static void ps8640_bridge_poweroff(struct ps8640 *ps_bridge) { - struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); int ret; - ps8640_bridge_vdo_control(ps_bridge, DISABLE); + if (!ps_bridge->powered) + return; gpiod_set_value(ps_bridge->gpio_reset, 1); gpiod_set_value(ps_bridge->gpio_powerdown, 1); @@ -184,6 +185,28 @@ static void ps8640_post_disable(struct drm_bridge *bridge) ps_bridge->supplies); if (ret < 0) DRM_ERROR("cannot disable regulators %d\n", ret); + + ps_bridge->powered = false; +} + +static void ps8640_pre_enable(struct drm_bridge *bridge) +{ + struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + int ret; + + ps8640_bridge_poweron(ps_bridge); + + ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE); + if (ret < 0) + ps8640_bridge_poweroff(ps_bridge); +} + +static void ps8640_post_disable(struct drm_bridge *bridge) +{ + struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + + ps8640_bridge_vdo_control(ps_bridge, DISABLE); + ps8640_bridge_poweroff(ps_bridge); } static int ps8640_bridge_attach(struct drm_bridge *bridge, @@ -249,9 +272,34 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + bool poweroff = !ps_bridge->powered; + struct edid *edid; + + /* + * When we end calling get_edid() triggered by an ioctl, i.e + * + * drm_mode_getconnector (ioctl) + * -> drm_helper_probe_single_connector_modes + * -> drm_bridge_connector_get_modes + * -> ps8640_bridge_get_edid + * + * We need to make sure that what we need is enabled before reading + * EDID, for this chip, we need to do a full poweron, otherwise it will + * fail. 
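+	 *
+	 * In short, the sequence implemented below is:
+	 *
+	 *	poweroff = !ps_bridge->powered;
+	 *	drm_bridge_chain_pre_enable(bridge);
+	 *	edid = drm_get_edid(connector, ...);
+	 *	if (poweroff)
+	 *		drm_bridge_chain_post_disable(bridge);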
+ */ + drm_bridge_chain_pre_enable(bridge); - return drm_get_edid(connector, + edid = drm_get_edid(connector, ps_bridge->page[PAGE0_DP_CNTL]->adapter); + + /* + * If we call the get_edid() function without having enabled the chip + * before, return the chip to its original power state. + */ + if (poweroff) + drm_bridge_chain_post_disable(bridge); + + return edid; } static const struct drm_bridge_funcs ps8640_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index 9fef6413741d..feb04f127b55 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -170,7 +170,7 @@ static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data, return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev); } -static struct hdmi_codec_ops dw_hdmi_i2s_ops = { +static const struct hdmi_codec_ops dw_hdmi_i2s_ops = { .hw_params = dw_hdmi_i2s_hw_params, .audio_startup = dw_hdmi_i2s_audio_startup, .audio_shutdown = dw_hdmi_i2s_audio_shutdown, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index d580b2aa4ce9..6b268f9445b3 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -89,7 +89,9 @@ #define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1 #define VID_MODE_TYPE_BURST 0x2 #define VID_MODE_TYPE_MASK 0x3 +#define ENABLE_LOW_POWER_CMD BIT(15) #define VID_MODE_VPG_ENABLE BIT(16) +#define VID_MODE_VPG_MODE BIT(20) #define VID_MODE_VPG_HORIZONTAL BIT(24) #define DSI_VID_PKT_SIZE 0x3c @@ -220,6 +222,21 @@ #define PHY_STATUS_TIMEOUT_US 10000 #define CMD_PKT_STATUS_TIMEOUT_US 20000 +#ifdef CONFIG_DEBUG_FS +#define VPG_DEFS(name, dsi) \ + ((void __force *)&((*dsi).vpg_defs.name)) + +#define REGISTER(name, mask, dsi) \ + { #name, VPG_DEFS(name, dsi), mask, dsi } + +struct debugfs_entries { + const char *name; + bool *reg; + u32 mask; + struct dw_mipi_dsi *dsi; +}; +#endif /* CONFIG_DEBUG_FS */ + struct dw_mipi_dsi { struct drm_bridge bridge; struct mipi_dsi_host dsi_host; @@ -237,9 +254,12 @@ struct dw_mipi_dsi { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; - - bool vpg; - bool vpg_horizontal; + struct debugfs_entries *debugfs_vpg; + struct { + bool vpg; + bool vpg_horizontal; + bool vpg_ber_pattern; + } vpg_defs; #endif /* CONFIG_DEBUG_FS */ struct dw_mipi_dsi *master; /* dual-dsi master ptr */ @@ -360,13 +380,28 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi, bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM; u32 val = 0; + /* + * TODO dw drv improvements + * largest packet sizes during hfp or during vsa/vpb/vfp + * should be computed according to byte lane, lane number and only + * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS) + */ + dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(16) + | INVACT_LPCMD_TIME(4)); + if (msg->flags & MIPI_DSI_MSG_REQ_ACK) val |= ACK_RQST_EN; if (lpm) val |= CMD_MODE_ALL_LP; - dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 
0 : PHY_TXREQUESTCLKHS); dsi_write(dsi, DSI_CMD_MODE_CFG, val); + + val = dsi_read(dsi, DSI_VID_MODE_CFG); + if (lpm) + val |= ENABLE_LOW_POWER_CMD; + else + val &= ~ENABLE_LOW_POWER_CMD; + dsi_write(dsi, DSI_VID_MODE_CFG, val); } static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val) @@ -529,9 +564,11 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi) val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS; #ifdef CONFIG_DEBUG_FS - if (dsi->vpg) { + if (dsi->vpg_defs.vpg) { val |= VID_MODE_VPG_ENABLE; - val |= dsi->vpg_horizontal ? VID_MODE_VPG_HORIZONTAL : 0; + val |= dsi->vpg_defs.vpg_horizontal ? + VID_MODE_VPG_HORIZONTAL : 0; + val |= dsi->vpg_defs.vpg_ber_pattern ? VID_MODE_VPG_MODE : 0; } #endif /* CONFIG_DEBUG_FS */ @@ -541,16 +578,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi) static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi, unsigned long mode_flags) { + u32 val; + dsi_write(dsi, DSI_PWR_UP, RESET); if (mode_flags & MIPI_DSI_MODE_VIDEO) { dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE); dw_mipi_dsi_video_mode_config(dsi); - dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS); } else { dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE); } + val = PHY_TXREQUESTCLKHS; + if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + val |= AUTO_CLKLANE_CTRL; + dsi_write(dsi, DSI_LPCLK_CTRL, val); + dsi_write(dsi, DSI_PWR_UP, POWERUP); } @@ -562,15 +605,30 @@ static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi) static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi) { + const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; + unsigned int esc_rate; /* in MHz */ + u32 esc_clk_division; + int ret; + /* * The maximum permitted escape clock is 20MHz and it is derived from - * lanebyteclk, which is running at "lane_mbps / 8". Thus we want: - * - * (lane_mbps >> 3) / esc_clk_division < 20 + * lanebyteclk, which is running at "lane_mbps / 8". 
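+	 *
+	 * Worked example: with lane_mbps = 500, lanebyteclk runs at
+	 * 62.5 MHz, so for a 20 MHz limit the code below computes
+	 * (500 >> 3) / 20 + 1 = 4, i.e. an escape clock of ~15.6 MHz.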
+ */ + if (phy_ops->get_esc_clk_rate) { + ret = phy_ops->get_esc_clk_rate(dsi->plat_data->priv_data, + &esc_rate); + if (ret) + DRM_DEBUG_DRIVER("Phy get_esc_clk_rate() failed\n"); + } else + esc_rate = 20; /* Default to 20MHz */ + + /* + * We want : + * (lane_mbps >> 3) / esc_clk_division < X * which is: - * (lane_mbps >> 3) / 20 > esc_clk_division + * (lane_mbps >> 3) / X > esc_clk_division */ - u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1; + esc_clk_division = (dsi->lane_mbps >> 3) / esc_rate + 1; dsi_write(dsi, DSI_PWR_UP, RESET); @@ -611,14 +669,6 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi, dsi_write(dsi, DSI_DPI_VCID, DPI_VCID(dsi->channel)); dsi_write(dsi, DSI_DPI_COLOR_CODING, color); dsi_write(dsi, DSI_DPI_CFG_POL, val); - /* - * TODO dw drv improvements - * largest packet sizes during hfp or during vsa/vpb/vfp - * should be computed according to byte lane, lane number and only - * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS) - */ - dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4) - | INVACT_LPCMD_TIME(4)); } static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi) @@ -964,6 +1014,66 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { #ifdef CONFIG_DEBUG_FS +static int dw_mipi_dsi_debugfs_write(void *data, u64 val) +{ + struct debugfs_entries *vpg = data; + struct dw_mipi_dsi *dsi; + u32 mode_cfg; + + if (!vpg) + return -ENODEV; + + dsi = vpg->dsi; + + *vpg->reg = (bool)val; + + mode_cfg = dsi_read(dsi, DSI_VID_MODE_CFG); + + if (*vpg->reg) + mode_cfg |= vpg->mask; + else + mode_cfg &= ~vpg->mask; + + dsi_write(dsi, DSI_VID_MODE_CFG, mode_cfg); + + return 0; +} + +static int dw_mipi_dsi_debugfs_show(void *data, u64 *val) +{ + struct debugfs_entries *vpg = data; + + if (!vpg) + return -ENODEV; + + *val = *vpg->reg; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_mipi_dsi_debugfs_show, + dw_mipi_dsi_debugfs_write, "%llu\n"); + +static void debugfs_create_files(void *data) +{ + struct dw_mipi_dsi *dsi = data; + struct debugfs_entries debugfs[] = { + REGISTER(vpg, VID_MODE_VPG_ENABLE, dsi), + REGISTER(vpg_horizontal, VID_MODE_VPG_HORIZONTAL, dsi), + REGISTER(vpg_ber_pattern, VID_MODE_VPG_MODE, dsi), + }; + int i; + + dsi->debugfs_vpg = kmemdup(debugfs, sizeof(debugfs), GFP_KERNEL); + if (!dsi->debugfs_vpg) + return; + + for (i = 0; i < ARRAY_SIZE(debugfs); i++) + debugfs_create_file(dsi->debugfs_vpg[i].name, 0644, + dsi->debugfs, &dsi->debugfs_vpg[i], + &fops_x32); +} + static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) { dsi->debugfs = debugfs_create_dir(dev_name(dsi->dev), NULL); @@ -972,14 +1082,13 @@ static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) return; } - debugfs_create_bool("vpg", 0660, dsi->debugfs, &dsi->vpg); - debugfs_create_bool("vpg_horizontal", 0660, dsi->debugfs, - &dsi->vpg_horizontal); + debugfs_create_files(dsi); } static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi) { debugfs_remove_recursive(dsi->debugfs); + kfree(dsi->debugfs_vpg); } #else diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c index d89394bc5aa4..c1e35bdf9232 100644 --- a/drivers/gpu/drm/bridge/tc358764.c +++ b/drivers/gpu/drm/bridge/tc358764.c @@ -153,9 +153,10 @@ static const char * const tc358764_supplies[] = { struct tc358764 { struct device *dev; struct drm_bridge bridge; + struct drm_connector connector; struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)]; struct gpio_desc *gpio_reset; - struct drm_bridge 
*panel_bridge; + struct drm_panel *panel; int error; }; @@ -209,6 +210,12 @@ static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge) return container_of(bridge, struct tc358764, bridge); } +static inline +struct tc358764 *connector_to_tc358764(struct drm_connector *connector) +{ + return container_of(connector, struct tc358764, connector); +} + static int tc358764_init(struct tc358764 *ctx) { u32 v = 0; @@ -271,11 +278,43 @@ static void tc358764_reset(struct tc358764 *ctx) usleep_range(1000, 2000); } +static int tc358764_get_modes(struct drm_connector *connector) +{ + struct tc358764 *ctx = connector_to_tc358764(connector); + + return drm_panel_get_modes(ctx->panel, connector); +} + +static const +struct drm_connector_helper_funcs tc358764_connector_helper_funcs = { + .get_modes = tc358764_get_modes, +}; + +static const struct drm_connector_funcs tc358764_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static void tc358764_disable(struct drm_bridge *bridge) +{ + struct tc358764 *ctx = bridge_to_tc358764(bridge); + int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel); + + if (ret < 0) + dev_err(ctx->dev, "error disabling panel (%d)\n", ret); +} + static void tc358764_post_disable(struct drm_bridge *bridge) { struct tc358764 *ctx = bridge_to_tc358764(bridge); int ret; + ret = drm_panel_unprepare(ctx->panel); + if (ret < 0) + dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret); tc358764_reset(ctx); usleep_range(10000, 15000); ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); @@ -296,28 +335,71 @@ static void tc358764_pre_enable(struct drm_bridge *bridge) ret = tc358764_init(ctx); if (ret < 0) dev_err(ctx->dev, "error initializing bridge (%d)\n", ret); + ret = drm_panel_prepare(ctx->panel); + if (ret < 0) + dev_err(ctx->dev, "error preparing panel (%d)\n", ret); +} + +static void tc358764_enable(struct drm_bridge *bridge) +{ + struct tc358764 *ctx = bridge_to_tc358764(bridge); + int ret = drm_panel_enable(ctx->panel); + + if (ret < 0) + dev_err(ctx->dev, "error enabling panel (%d)\n", ret); } static int tc358764_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tc358764 *ctx = bridge_to_tc358764(bridge); + struct drm_device *drm = bridge->dev; + int ret; + + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { + DRM_ERROR("Fix bridge driver to make connector optional!"); + return -EINVAL; + } + + ctx->connector.polled = DRM_CONNECTOR_POLL_HPD; + ret = drm_connector_init(drm, &ctx->connector, + &tc358764_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + if (ret) { + DRM_ERROR("Failed to initialize connector\n"); + return ret; + } + + drm_connector_helper_add(&ctx->connector, + &tc358764_connector_helper_funcs); + drm_connector_attach_encoder(&ctx->connector, bridge->encoder); + ctx->connector.funcs->reset(&ctx->connector); + drm_connector_register(&ctx->connector); + + return 0; +} + +static void tc358764_detach(struct drm_bridge *bridge) +{ + struct tc358764 *ctx = bridge_to_tc358764(bridge); - return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, - bridge, flags); + drm_connector_unregister(&ctx->connector); + ctx->panel = NULL; + drm_connector_put(&ctx->connector); } static const struct drm_bridge_funcs tc358764_bridge_funcs = { + .disable = 
tc358764_disable, .post_disable = tc358764_post_disable, + .enable = tc358764_enable, .pre_enable = tc358764_pre_enable, .attach = tc358764_attach, + .detach = tc358764_detach, }; static int tc358764_parse_dt(struct tc358764 *ctx) { - struct drm_bridge *panel_bridge; struct device *dev = ctx->dev; - struct drm_panel *panel; int ret; ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); @@ -326,16 +408,12 @@ static int tc358764_parse_dt(struct tc358764 *ctx) return PTR_ERR(ctx->gpio_reset); } - ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL); - if (ret) - return ret; - - panel_bridge = devm_drm_panel_bridge_add(dev, panel); - if (IS_ERR(panel_bridge)) - return PTR_ERR(panel_bridge); + ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel, + NULL); + if (ret && ret != -EPROBE_DEFER) + dev_err(dev, "cannot find panel (%d)\n", ret); - ctx->panel_bridge = panel_bridge; - return 0; + return ret; } static int tc358764_configure_regulators(struct tc358764 *ctx) @@ -381,7 +459,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi) return ret; ctx->bridge.funcs = &tc358764_bridge_funcs; - ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS; ctx->bridge.of_node = dev->of_node; drm_bridge_add(&ctx->bridge); diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index d951cdc58297..2272adcc5b4a 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -485,7 +485,7 @@ static void tc_bridge_enable(struct drm_bridge *bridge) val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_6); } else { val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_3); - }; + } d2l_write(tc->i2c, LVCFG, val); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ecdf9b01340f..6ca1debd0f88 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -106,6 +106,8 @@ #define SN_NUM_GPIOS 4 #define SN_GPIO_PHYSICAL_OFFSET 1 +#define SN_LINK_TRAINING_TRIES 10 + /** * struct ti_sn_bridge - Platform data for ti-sn65dsi86 driver. * @dev: Pointer to our device. @@ -673,6 +675,7 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx, { unsigned int val; int ret; + int i; /* set dp clk frequency value */ regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG, @@ -689,19 +692,34 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx, goto exit; } - /* Semi auto link training mode */ - regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A); - ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val, - val == ML_TX_MAIN_LINK_OFF || - val == ML_TX_NORMAL_MODE, 1000, - 500 * 1000); - if (ret) { - *last_err_str = "Training complete polling failed"; - } else if (val == ML_TX_MAIN_LINK_OFF) { - *last_err_str = "Link training failed, link is off"; - ret = -EIO; + /* + * We'll try to link train several times. As part of link training + * the bridge chip will write DP_SET_POWER_D0 to DP_SET_POWER. If + * the panel isn't ready quite it might respond NAK here which means + * we need to try again. 
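+	 * The loop below bounds this at SN_LINK_TRAINING_TRIES (10)
+	 * attempts and keeps the error string of the last attempt that
+	 * failed.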
+ */ + for (i = 0; i < SN_LINK_TRAINING_TRIES; i++) { + /* Semi auto link training mode */ + regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A); + ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val, + val == ML_TX_MAIN_LINK_OFF || + val == ML_TX_NORMAL_MODE, 1000, + 500 * 1000); + if (ret) { + *last_err_str = "Training complete polling failed"; + } else if (val == ML_TX_MAIN_LINK_OFF) { + *last_err_str = "Link training failed, link is off"; + ret = -EIO; + continue; + } + + break; } + /* If we saw quite a few retries, add a note about it */ + if (!ret && i > SN_LINK_TRAINING_TRIES / 2) + DRM_DEV_INFO(pdata->dev, "Link training needed %d retries\n", i); + exit: /* Disable the PLL if we failed */ if (ret) @@ -816,8 +834,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge) { struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge); - if (pdata->refclk) - clk_disable_unprepare(pdata->refclk); + clk_disable_unprepare(pdata->refclk); pm_runtime_put_sync(pdata->dev); } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 58527f151984..b2d20eb6c807 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -281,6 +281,10 @@ EXPORT_SYMBOL(__drm_atomic_state_free); * needed. It will also grab the relevant CRTC lock to make sure that the state * is consistent. * + * WARNING: Drivers may only add new CRTC states to a @state if + * drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit + * not created by userspace through an IOCTL call. + * * Returns: * * Either the allocated state or the error code encoded into the pointer. When @@ -1262,10 +1266,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state) struct drm_crtc_state *new_crtc_state; struct drm_connector *conn; struct drm_connector_state *conn_state; + unsigned requested_crtc = 0; + unsigned affected_crtc = 0; int i, ret = 0; DRM_DEBUG_ATOMIC("checking %p\n", state); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) + requested_crtc |= drm_crtc_mask(crtc); + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { ret = drm_atomic_plane_check(old_plane_state, new_plane_state); if (ret) { @@ -1313,6 +1322,26 @@ int drm_atomic_check_only(struct drm_atomic_state *state) } } + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) + affected_crtc |= drm_crtc_mask(crtc); + + /* + * For commits that allow modesets drivers can add other CRTCs to the + * atomic commit, e.g. when they need to reallocate global resources. + * This can cause spurious EBUSY, which robs compositors of a very + * effective sanity check for their drawing loop. Therefor only allow + * drivers to add unrelated CRTC states for modeset commits. + * + * FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output + * so compositors know what's going on. + */ + if (affected_crtc != requested_crtc) { + DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n", + requested_crtc, affected_crtc); + WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n", + requested_crtc, affected_crtc); + } + return 0; } EXPORT_SYMBOL(drm_atomic_check_only); @@ -1613,11 +1642,11 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, * to dmesg in case of error irq's. (Hint, you probably want to * ratelimit this!) * - * The caller must drm_modeset_lock_all(), or if this is called - * from error irq handler, it should not be enabled by default. 
- * (Ie. if you are debugging errors you might not care that this - * is racey. But calling this without all modeset locks held is - * not inherently safe.) + * The caller must wrap this drm_modeset_lock_all_ctx() and + * drm_modeset_drop_locks(). If this is called from error irq handler, it should + * not be enabled by default - if you are debugging errors you might + * not care that this is racey, but calling this without all modeset locks held + * is inherently unsafe. */ void drm_state_dump(struct drm_device *dev, struct drm_printer *p) { diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9e1ad493e689..ddd0e3239150 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -918,7 +918,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_check) continue; - ret = funcs->atomic_check(crtc, new_crtc_state); + ret = funcs->atomic_check(crtc, state); if (ret) { DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", crtc->base.id, crtc->name); @@ -1093,7 +1093,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (new_crtc_state->enable && funcs->prepare) funcs->prepare(crtc); else if (funcs->atomic_disable) - funcs->atomic_disable(crtc, old_crtc_state); + funcs->atomic_disable(crtc, old_state); else if (funcs->disable) funcs->disable(crtc); else if (funcs->dpms) @@ -1115,9 +1115,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) * @old_state: atomic state object with old state structures * * This function updates all the various legacy modeset state pointers in - * connectors, encoders and CRTCs. It also updates the timestamping constants - * used for precise vblank timestamps by calling - * drm_calc_timestamping_constants(). + * connectors, encoders and CRTCs. * * Drivers can use this for building their own atomic commit if they don't have * a pure helper-based modeset implementation. @@ -1186,13 +1184,30 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, crtc->x = new_plane_state->src_x >> 16; crtc->y = new_plane_state->src_y >> 16; } + } +} +EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state); +/** + * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants + * @state: atomic state object + * + * Updates the timestamping constants used for precise vblank timestamps + * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state. 
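+ *
+ * Drivers using the default helpers get this called for them: the
+ * modeset commit path below invokes it right after
+ * drm_atomic_helper_update_legacy_modeset_state(), i.e.
+ *
+ *	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
+ *	drm_atomic_helper_calc_timestamping_constants(old_state);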
+ */ +void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state) +{ + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; + int i; + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->enable) drm_calc_timestamping_constants(crtc, &new_crtc_state->adjusted_mode); } } -EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state); +EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants); static void crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) @@ -1276,6 +1291,7 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, disable_outputs(dev, old_state); drm_atomic_helper_update_legacy_modeset_state(dev, old_state); + drm_atomic_helper_calc_timestamping_constants(old_state); crtc_set_mode(dev, old_state); } @@ -1342,7 +1358,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", crtc->base.id, crtc->name); if (funcs->atomic_enable) - funcs->atomic_enable(crtc, old_crtc_state); + funcs->atomic_enable(crtc, old_state); else if (funcs->commit) funcs->commit(crtc); } @@ -1720,8 +1736,11 @@ int drm_atomic_helper_async_check(struct drm_device *dev, * overridden by a previous synchronous update's state. */ if (old_plane_state->commit && - !try_wait_for_completion(&old_plane_state->commit->hw_done)) + !try_wait_for_completion(&old_plane_state->commit->hw_done)) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n", + plane->base.id, plane->name); return -EBUSY; + } return funcs->atomic_async_check(plane, new_plane_state); } @@ -1939,6 +1958,9 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock) * commit with nonblocking ones. */ if (!completed && nonblock) { spin_unlock(&crtc->commit_lock); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n", + crtc->base.id, crtc->name); + return -EBUSY; } } else if (i == 1) { @@ -2113,8 +2135,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, /* Userspace is not allowed to get ahead of the previous * commit with nonblocking ones. */ if (nonblock && old_conn_state->commit && - !try_wait_for_completion(&old_conn_state->commit->flip_done)) + !try_wait_for_completion(&old_conn_state->commit->flip_done)) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n", + conn->base.id, conn->name); + return -EBUSY; + } /* Always track connectors explicitly for e.g. link retraining. */ commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc); @@ -2128,8 +2154,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, /* Userspace is not allowed to get ahead of the previous * commit with nonblocking ones. */ if (nonblock && old_plane_state->commit && - !try_wait_for_completion(&old_plane_state->commit->flip_done)) + !try_wait_for_completion(&old_plane_state->commit->flip_done)) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n", + plane->base.id, plane->name); + return -EBUSY; + } /* Always track planes explicitly for async pageflip support. 
*/ commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc); @@ -2491,7 +2521,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, if (active_only && !new_crtc_state->active) continue; - funcs->atomic_begin(crtc, old_crtc_state); + funcs->atomic_begin(crtc, old_state); } for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { @@ -2549,7 +2579,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, if (active_only && !new_crtc_state->active) continue; - funcs->atomic_flush(crtc, old_crtc_state); + funcs->atomic_flush(crtc, old_state); } } EXPORT_SYMBOL(drm_atomic_helper_commit_planes); @@ -2587,7 +2617,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) crtc_funcs = crtc->helper_private; if (crtc_funcs && crtc_funcs->atomic_begin) - crtc_funcs->atomic_begin(crtc, old_crtc_state); + crtc_funcs->atomic_begin(crtc, old_state); drm_for_each_plane_mask(plane, crtc->dev, plane_mask) { struct drm_plane_state *old_plane_state = @@ -2613,7 +2643,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) } if (crtc_funcs && crtc_funcs->atomic_flush) - crtc_funcs->atomic_flush(crtc, old_crtc_state); + crtc_funcs->atomic_flush(crtc, old_state); } EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 25c269bc4681..ef82009035e6 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -469,6 +469,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, return -EFAULT; set_out_fence_for_crtc(state->state, crtc, fence_ptr); + } else if (property == crtc->scaling_filter_property) { + state->scaling_filter = val; } else if (crtc->funcs->atomic_set_property) { return crtc->funcs->atomic_set_property(crtc, state, property, val); } else { @@ -503,6 +505,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; else if (property == config->prop_out_fence_ptr) *val = 0; + else if (property == crtc->scaling_filter_property) + *val = state->scaling_filter; else if (crtc->funcs->atomic_get_property) return crtc->funcs->atomic_get_property(crtc, state, property, val); else @@ -585,6 +589,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, sizeof(struct drm_rect), &replaced); return ret; + } else if (property == plane->scaling_filter_property) { + state->scaling_filter = val; } else if (plane->funcs->atomic_set_property) { return plane->funcs->atomic_set_property(plane, state, property, val); @@ -643,6 +649,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, } else if (property == config->prop_fb_damage_clips) { *val = (state->fb_damage_clips) ? state->fb_damage_clips->base.id : 0; + } else if (property == plane->scaling_filter_property) { + *val = state->scaling_filter; } else if (plane->funcs->atomic_get_property) { return plane->funcs->atomic_get_property(plane, state, property, val); } else { diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index f1dcad96f341..ae2234aae93d 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -194,6 +194,19 @@ * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). 
+ * + * SCALING_FILTER: + * + * Indicates scaling filter to be used for plane scaler + * + * The value of this property can be one of the following: + * Default: + * Driver's default scaling filter + * Nearest Neighbor: + * Nearest Neighbor scaling filter + * + * Drivers can set up this property for a plane by calling + * drm_plane_create_scaling_filter_property */ /** diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c index a58cbde59c34..791379816837 100644 --- a/drivers/gpu/drm/drm_bridge_connector.c +++ b/drivers/gpu/drm/drm_bridge_connector.c @@ -241,7 +241,7 @@ static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector, goto no_edid; edid = bridge->funcs->get_edid(bridge, connector); - if (!edid || !drm_edid_is_valid(edid)) { + if (!drm_edid_is_valid(edid)) { kfree(edid); goto no_edid; } diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 03e01b000f7a..0fe3c496002a 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st) struct sg_page_iter sg_iter; mb(); /*CLFLUSH is ordered only by using memory barriers*/ - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) + for_each_sgtable_page(st, &sg_iter, 0) drm_clflush_page(sg_page_iter_page(&sg_iter)); mb(); /*Make sure that all cache line entry is flushed*/ diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 138ff34b31db..3bcabc2f6e0e 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -97,12 +97,12 @@ * &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They * are set up by calling drm_plane_create_color_properties(). * - * "COLOR_ENCODING" + * "COLOR_ENCODING": * Optional plane enum property to support different non RGB * color encodings. The driver can provide a subset of standard * enum values supported by the DRM plane. * - * "COLOR_RANGE" + * "COLOR_RANGE": * Optional plane enum property to support different non RGB * color parameter ranges. The driver can provide a subset of * standard enum values supported by the DRM plane. diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 3d48ad1c3682..1913d8b4e16a 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -960,6 +960,11 @@ static const struct drm_prop_enum_list dp_colorspaces[] = { * drm_connector_update_edid_property(), usually after having parsed * the EDID using drm_add_edid_modes(). Userspace cannot change this * property. + * + * User-space should not parse the EDID to obtain information exposed via + * other KMS properties (because the kernel might apply limits, quirks or + * fixups to the EDID). For instance, user-space should not try to parse + * mode lists from the EDID. * DPMS: * Legacy property for setting the power state of the connector. For atomic * drivers this is only provided for backwards compatibility with existing @@ -2289,7 +2294,7 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *conne static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, - const struct list_head *export_list, + const struct list_head *modes, const struct drm_file *file_priv) { /* @@ -2305,15 +2310,17 @@ drm_mode_expose_to_userspace(const struct drm_display_mode *mode, * while preparing the list of user-modes. 
*/ if (!file_priv->aspect_ratio_allowed) { - struct drm_display_mode *mode_itr; + const struct drm_display_mode *mode_itr; - list_for_each_entry(mode_itr, export_list, export_head) - if (drm_mode_match(mode_itr, mode, + list_for_each_entry(mode_itr, modes, head) { + if (mode_itr->expose_to_userspace && + drm_mode_match(mode_itr, mode, DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_CLOCK | DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS)) return false; + } } return true; @@ -2333,7 +2340,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t __user *encoder_ptr; - LIST_HEAD(export_list); if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; @@ -2377,25 +2383,30 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, out_resp->connection = connector->status; /* delayed so we get modes regardless of pre-fill_modes state */ - list_for_each_entry(mode, &connector->modes, head) - if (drm_mode_expose_to_userspace(mode, &export_list, + list_for_each_entry(mode, &connector->modes, head) { + WARN_ON(mode->expose_to_userspace); + + if (drm_mode_expose_to_userspace(mode, &connector->modes, file_priv)) { - list_add_tail(&mode->export_head, &export_list); + mode->expose_to_userspace = true; mode_count++; } + } /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. - * The modes that need to be exposed to the user are maintained in the - * 'export_list'. When the ioctl is called first time to determine the, - * space, the export_list gets filled, to find the no.of modes. In the - * 2nd time, the user modes are filled, one by one from the export_list. */ if ((out_resp->count_modes >= mode_count) && mode_count) { copied = 0; mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; - list_for_each_entry(mode, &export_list, export_head) { + list_for_each_entry(mode, &connector->modes, head) { + if (!mode->expose_to_userspace) + continue; + + /* Clear the tag for the next time around */ + mode->expose_to_userspace = false; + drm_mode_convert_to_umode(&u_mode, mode); /* * Reset aspect ratio flags of user-mode, if modes with @@ -2406,13 +2417,26 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; + + /* + * Clear the tag for the rest of + * the modes for the next time around. + */ + list_for_each_entry_continue(mode, &connector->modes, head) + mode->expose_to_userspace = false; + mutex_unlock(&dev->mode_config.mutex); goto out; } copied++; } + } else { + /* Clear the tag for the next time around */ + list_for_each_entry(mode, &connector->modes, head) + mode->expose_to_userspace = false; } + out_resp->count_modes = mode_count; mutex_unlock(&dev->mode_config.mutex); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index aecdd7ea26dc..f927976eca50 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -229,6 +229,15 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc) * user-space must set this property to 0. * * Setting MODE_ID to 0 will release reserved resources for the CRTC. 
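+ *
+ *	For instance, userspace typically creates the mode blob first,
+ *	e.g. with libdrm's drmModeCreatePropertyBlob(fd, &mode,
+ *	sizeof(mode), &blob_id), and then sets MODE_ID = blob_id and
+ *	ACTIVE = 1 in a single atomic commit (a sketch of typical
+ *	usage).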
+ * SCALING_FILTER:
+ *	Atomic property for setting the scaling filter for the CRTC scaler.
+ *
+ *	The value of this property can be one of the following:
+ *	Default:
+ *		Driver's default scaling filter
+ *	Nearest Neighbor:
+ *		Nearest Neighbor scaling filter
+ *
 */

/**
@@ -774,3 +783,34 @@ int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,

 	return ret;
 }
+
+/**
+ * drm_crtc_create_scaling_filter_property - create a new scaling filter
+ * property
+ *
+ * @crtc: drm CRTC
+ * @supported_filters: bitmask of supported scaling filters, must include
+ *		       BIT(DRM_SCALING_FILTER_DEFAULT).
+ *
+ * This function lets drivers enable the scaling filter property on a given
+ * CRTC.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
+					    unsigned int supported_filters)
+{
+	struct drm_property *prop =
+		drm_create_scaling_filter_prop(crtc->dev, supported_filters);
+
+	if (IS_ERR(prop))
+		return PTR_ERR(prop);
+
+	drm_object_attach_property(&crtc->base, prop,
+				   DRM_SCALING_FILTER_DEFAULT);
+	crtc->scaling_filter_property = prop;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index da96b2f64d7e..54d4cf1233e9 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -72,6 +72,9 @@ int drm_crtc_force_disable(struct drm_crtc *crtc);

 struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc);

+struct drm_property *
+drm_create_scaling_filter_prop(struct drm_device *dev,
+			       unsigned int supported_filters);
 /* IOCTLs */
 int drm_mode_getcrtc(struct drm_device *dev,
 		     void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 5d67a41f7c3a..3dd70d813f69 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
 	source[len - 1] = '\0';

 	ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
-	if (ret)
+	if (ret) {
+		kfree(source);
 		return ret;
+	}

 	spin_lock_irq(&crc->lock);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 2510717d5a08..e25181bf2c48 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)

 	mutex_lock(&aux_idr_mutex);
 	aux_dev = idr_find(&aux_idr, index);
-	if (!kref_get_unless_zero(&aux_dev->refcount))
+	if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
 		aux_dev = NULL;
 	mutex_unlock(&aux_idr_mutex);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 1e7c638873c8..37ec3b94389c 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -150,11 +150,8 @@ void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);

-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+static void __drm_dp_link_train_channel_eq_delay(unsigned long rd_interval)
 {
-	unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
-					 DP_TRAINING_AUX_RD_MASK;
-
 	if (rd_interval > 4)
 		DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
 			      rd_interval);
@@ -166,8 +163,35 @@ void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 	usleep_range(rd_interval,
		     rd_interval * 2);
 }
+
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	__drm_dp_link_train_channel_eq_delay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+					     DP_TRAINING_AUX_RD_MASK);
+}
 EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);

+void drm_dp_lttpr_link_train_clock_recovery_delay(void)
+{
+	usleep_range(100, 200);
+}
+EXPORT_SYMBOL(drm_dp_lttpr_link_train_clock_recovery_delay);
+
+static u8 dp_lttpr_phy_cap(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE], int r)
+{
+	return phy_cap[r - DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1];
+}
+
+void drm_dp_lttpr_link_train_channel_eq_delay(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE])
+{
+	u8 interval = dp_lttpr_phy_cap(phy_cap,
+				       DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) &
+		      DP_TRAINING_AUX_RD_MASK;
+
+	__drm_dp_link_train_channel_eq_delay(interval);
+}
+EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
+
 u8 drm_dp_link_rate_to_bw_code(int link_rate)
 {
 	/* Spec says link_bw = link_rate / 0.27Gbps */
@@ -364,6 +388,123 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
 EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);

 /**
+ * drm_dp_dpcd_read_phy_link_status - get the link status information for a DP PHY
+ * @aux: DisplayPort AUX channel
+ * @dp_phy: the DP PHY to get the link status for
+ * @link_status: buffer to return the status in
+ *
+ * Fetch the AUX DPCD registers for the DPRX or an LTTPR PHY link status. The
+ * layout of the returned @link_status matches the DPCD register layout of the
+ * DPRX PHY link status.
+ *
+ * Returns 0 if the information was read successfully or a negative error code
+ * on failure.
+ */
+int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
+				     enum drm_dp_phy dp_phy,
+				     u8 link_status[DP_LINK_STATUS_SIZE])
+{
+	int ret;
+
+	if (dp_phy == DP_PHY_DPRX) {
+		ret = drm_dp_dpcd_read(aux,
+				       DP_LANE0_1_STATUS,
+				       link_status,
+				       DP_LINK_STATUS_SIZE);
+
+		if (ret < 0)
+			return ret;
+
+		WARN_ON(ret != DP_LINK_STATUS_SIZE);
+
+		return 0;
+	}
+
+	ret = drm_dp_dpcd_read(aux,
+			       DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
+			       link_status,
+			       DP_LINK_STATUS_SIZE - 1);
+
+	if (ret < 0)
+		return ret;
+
+	WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
+
+	/* Convert the LTTPR to the sink PHY link status layout */
+	memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
+		&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
+		DP_LINK_STATUS_SIZE - (DP_SINK_STATUS - DP_LANE0_1_STATUS) - 1);
+	link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS] = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+
+static bool is_edid_digital_input_dp(const struct edid *edid)
+{
+	return edid && edid->revision >= 4 &&
+		edid->input & DRM_EDID_INPUT_DIGITAL &&
+		(edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_DP;
+}
+
+/**
+ * drm_dp_downstream_is_type() - is the downstream facing port of a certain type?
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @type: port type to be checked. Can be:
+ *	  %DP_DS_PORT_TYPE_DP, %DP_DS_PORT_TYPE_VGA, %DP_DS_PORT_TYPE_DVI,
+ *	  %DP_DS_PORT_TYPE_HDMI, %DP_DS_PORT_TYPE_NON_EDID,
+ *	  %DP_DS_PORT_TYPE_DP_DUALMODE or %DP_DS_PORT_TYPE_WIRELESS.
+ *
+ * Caveat: Only works with DPCD 1.1+ port caps.
+ *
+ * Returns: whether the downstream facing port matches the type.
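+ *
+ * A minimal usage sketch (hypothetical caller code, not from this patch):
+ *
+ * .. code-block:: c
+ *
+ *	bool is_vga = drm_dp_downstream_is_type(dpcd, port_cap,
+ *						DP_DS_PORT_TYPE_VGA);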
+ */ +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], u8 type) +{ + return drm_dp_is_branch(dpcd) && + dpcd[DP_DPCD_REV] >= 0x11 && + (port_cap[0] & DP_DS_PORT_TYPE_MASK) == type; +} +EXPORT_SYMBOL(drm_dp_downstream_is_type); + +/** + * drm_dp_downstream_is_tmds() - is the downstream facing port TMDS? + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * @edid: EDID + * + * Returns: whether the downstream facing port is TMDS (HDMI/DVI). + */ +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid) +{ + if (dpcd[DP_DPCD_REV] < 0x11) { + switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) { + case DP_DWN_STRM_PORT_TYPE_TMDS: + return true; + default: + return false; + } + } + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_DP_DUALMODE: + if (is_edid_digital_input_dp(edid)) + return false; + fallthrough; + case DP_DS_PORT_TYPE_DVI: + case DP_DS_PORT_TYPE_HDMI: + return true; + default: + return false; + } +} +EXPORT_SYMBOL(drm_dp_downstream_is_tmds); + +/** * drm_dp_send_real_edid_checksum() - send back real edid checksum value * @aux: DisplayPort AUX channel * @real_edid_checksum: real edid checksum for the last block @@ -545,79 +686,191 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux, ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); if (ret < 0) return ret; + if (ret != len) + return -EIO; - return ret == len ? 0 : -EIO; + DRM_DEBUG_KMS("%s: DPCD DFP: %*ph\n", + aux->name, len, downstream_ports); + + return 0; } EXPORT_SYMBOL(drm_dp_read_downstream_info); /** - * drm_dp_downstream_max_clock() - extract branch device max - * pixel rate for legacy VGA - * converter or max TMDS clock - * rate for others + * drm_dp_downstream_max_dotclock() - extract downstream facing port max dot clock * @dpcd: DisplayPort configuration data * @port_cap: port capabilities * - * See also: - * drm_dp_read_downstream_info() - * drm_dp_downstream_max_bpc() - * - * Returns: Max clock in kHz on success or 0 if max clock not defined + * Returns: Downstream facing port max dot clock in kHz on success, + * or 0 if max clock not defined */ -int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]) +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) { - int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; - bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DETAILED_CAP_INFO_AVAILABLE; + if (!drm_dp_is_branch(dpcd)) + return 0; - if (!detailed_cap_info) + if (dpcd[DP_DPCD_REV] < 0x11) return 0; - switch (type) { + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { case DP_DS_PORT_TYPE_VGA: - return port_cap[1] * 8 * 1000; - case DP_DS_PORT_TYPE_DVI: - case DP_DS_PORT_TYPE_HDMI: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return 0; + return port_cap[1] * 8000; + default: + return 0; + } +} +EXPORT_SYMBOL(drm_dp_downstream_max_dotclock); + +/** + * drm_dp_downstream_max_tmds_clock() - extract downstream facing port max TMDS clock + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * @edid: EDID + * + * Returns: HDMI/DVI downstream facing port max TMDS clock in kHz on success, + * or 0 if max TMDS clock not defined + */ +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4], + const struct edid *edid) +{ + if (!drm_dp_is_branch(dpcd)) + 
		return 0;
+
+	if (dpcd[DP_DPCD_REV] < 0x11) {
+		switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+		case DP_DWN_STRM_PORT_TYPE_TMDS:
+			return 165000;
+		default:
+			return 0;
+		}
+	}
+
+	switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		if (is_edid_digital_input_dp(edid))
+			return 0;
+		/*
+		 * It's left up to the driver to check the
+		 * DP dual mode adapter's max TMDS clock.
+		 *
+		 * Unfortunately it looks like branch devices
+		 * may not forward the DP dual mode i2c
+		 * access, so we usually just get an i2c nak :(
+		 */
+		fallthrough;
+	case DP_DS_PORT_TYPE_HDMI:
+		/*
+		 * We should perhaps assume 165 MHz when detailed cap
+		 * info is not available. But looks like many typical
+		 * branch devices fall into that category and so we'd
+		 * probably end up with users complaining that they can't
+		 * get high resolution modes with their favorite dongle.
+		 *
+		 * So let's limit to 300 MHz instead since DPCD 1.4
+		 * HDMI 2.0 DFPs are required to have the detailed cap
+		 * info. So it's more likely we're dealing with an HDMI 1.4
+		 * compatible device here.
+		 */
+		if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+			return 300000;
+		return port_cap[1] * 2500;
+	case DP_DS_PORT_TYPE_DVI:
+		if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+			return 165000;
+		/* FIXME what to do about DVI dual link? */
+		return port_cap[1] * 2500;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_tmds_clock);
+
+/**
+ * drm_dp_downstream_min_tmds_clock() - extract downstream facing port min TMDS clock
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @edid: EDID
+ *
+ * Returns: HDMI/DVI downstream facing port min TMDS clock in kHz on success,
+ * or 0 if min TMDS clock not defined
+ */
+int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+				     const u8 port_cap[4],
+				     const struct edid *edid)
+{
+	if (!drm_dp_is_branch(dpcd))
+		return 0;
+
+	if (dpcd[DP_DPCD_REV] < 0x11) {
+		switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+		case DP_DWN_STRM_PORT_TYPE_TMDS:
+			return 25000;
+		default:
+			return 0;
+		}
+	}
+
+	switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+	case DP_DS_PORT_TYPE_DP_DUALMODE:
+		if (is_edid_digital_input_dp(edid))
+			return 0;
+		fallthrough;
+	case DP_DS_PORT_TYPE_DVI:
+	case DP_DS_PORT_TYPE_HDMI:
+		/*
+		 * Unclear whether the protocol converter could
+		 * utilize pixel replication. Assume it won't.
+ */ + return 25000; + default: + return 0; + } +} +EXPORT_SYMBOL(drm_dp_downstream_min_tmds_clock); + +/** + * drm_dp_downstream_max_bpc() - extract downstream facing port max + * bits per component + * @dpcd: DisplayPort configuration data + * @port_cap: downstream facing port capabilities + * @edid: EDID * * Returns: Max bpc on success or 0 if max bpc not defined */ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4]) + const u8 port_cap[4], + const struct edid *edid) { - int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; - bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & - DP_DETAILED_CAP_INFO_AVAILABLE; - int bpc; - - if (!detailed_cap_info) + if (!drm_dp_is_branch(dpcd)) return 0; - switch (type) { - case DP_DS_PORT_TYPE_VGA: - case DP_DS_PORT_TYPE_DVI: - case DP_DS_PORT_TYPE_HDMI: + if (dpcd[DP_DPCD_REV] < 0x11) { + switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) { + case DP_DWN_STRM_PORT_TYPE_DP: + return 0; + default: + return 8; + } + } + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_DP: + return 0; case DP_DS_PORT_TYPE_DP_DUALMODE: - bpc = port_cap[2] & DP_DS_MAX_BPC_MASK; + if (is_edid_digital_input_dp(edid)) + return 0; + fallthrough; + case DP_DS_PORT_TYPE_HDMI: + case DP_DS_PORT_TYPE_DVI: + case DP_DS_PORT_TYPE_VGA: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return 8; - switch (bpc) { + switch (port_cap[2] & DP_DS_MAX_BPC_MASK) { case DP_DS_8BPC: return 8; case DP_DS_10BPC: @@ -626,15 +879,132 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], return 12; case DP_DS_16BPC: return 16; + default: + return 8; } - fallthrough; + break; default: - return 0; + return 8; } } EXPORT_SYMBOL(drm_dp_downstream_max_bpc); /** + * drm_dp_downstream_420_passthrough() - determine downstream facing port + * YCbCr 4:2:0 pass-through capability + * @dpcd: DisplayPort configuration data + * @port_cap: downstream facing port capabilities + * + * Returns: whether the downstream facing port can pass through YCbCr 4:2:0 + */ +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + if (!drm_dp_is_branch(dpcd)) + return false; + + if (dpcd[DP_DPCD_REV] < 0x13) + return false; + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_DP: + return true; + case DP_DS_PORT_TYPE_HDMI: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return false; + + return port_cap[3] & DP_DS_HDMI_YCBCR420_PASS_THROUGH; + default: + return false; + } +} +EXPORT_SYMBOL(drm_dp_downstream_420_passthrough); + +/** + * drm_dp_downstream_444_to_420_conversion() - determine downstream facing port + * YCbCr 4:4:4->4:2:0 conversion capability + * @dpcd: DisplayPort configuration data + * @port_cap: downstream facing port capabilities + * + * Returns: whether the downstream facing port can convert YCbCr 4:4:4 to 4:2:0 + */ +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) +{ + if (!drm_dp_is_branch(dpcd)) + return false; + + if (dpcd[DP_DPCD_REV] < 0x13) + return false; + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_HDMI: + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0) + return false; + + return port_cap[3] & DP_DS_HDMI_YCBCR444_TO_420_CONV; + default: + return false; + } +} +EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion); + +/** + * drm_dp_downstream_mode() - return a 
mode for downstream facing port + * @dev: DRM device + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities + * + * Provides a suitable mode for downstream facing ports without EDID. + * + * Returns: A new drm_display_mode on success or NULL on failure + */ +struct drm_display_mode * +drm_dp_downstream_mode(struct drm_device *dev, + const u8 dpcd[DP_RECEIVER_CAP_SIZE], + const u8 port_cap[4]) + +{ + u8 vic; + + if (!drm_dp_is_branch(dpcd)) + return NULL; + + if (dpcd[DP_DPCD_REV] < 0x11) + return NULL; + + switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) { + case DP_DS_PORT_TYPE_NON_EDID: + switch (port_cap[0] & DP_DS_NON_EDID_MASK) { + case DP_DS_NON_EDID_720x480i_60: + vic = 6; + break; + case DP_DS_NON_EDID_720x480i_50: + vic = 21; + break; + case DP_DS_NON_EDID_1920x1080i_60: + vic = 5; + break; + case DP_DS_NON_EDID_1920x1080i_50: + vic = 20; + break; + case DP_DS_NON_EDID_1280x720_60: + vic = 4; + break; + case DP_DS_NON_EDID_1280x720_50: + vic = 19; + break; + default: + return NULL; + } + return drm_display_mode_from_cea_vic(dev, vic); + default: + return NULL; + } +} +EXPORT_SYMBOL(drm_dp_downstream_mode); + +/** * drm_dp_downstream_id() - identify branch device * @aux: DisplayPort AUX channel * @id: DisplayPort branch device id @@ -652,12 +1022,15 @@ EXPORT_SYMBOL(drm_dp_downstream_id); * @m: pointer for debugfs file * @dpcd: DisplayPort configuration data * @port_cap: port capabilities + * @edid: EDID * @aux: DisplayPort AUX channel * */ void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE], - const u8 port_cap[4], struct drm_dp_aux *aux) + const u8 port_cap[4], + const struct edid *edid, + struct drm_dp_aux *aux) { bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE; @@ -715,16 +1088,19 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); if (detailed_cap_info) { - clk = drm_dp_downstream_max_clock(dpcd, port_cap); + clk = drm_dp_downstream_max_dotclock(dpcd, port_cap); + if (clk > 0) + seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk); - if (clk > 0) { - if (type == DP_DS_PORT_TYPE_VGA) - seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk); - else - seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk); - } + clk = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid); + if (clk > 0) + seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk); - bpc = drm_dp_downstream_max_bpc(dpcd, port_cap); + clk = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, edid); + if (clk > 0) + seq_printf(m, "\t\tMin TMDS clock: %d kHz\n", clk); + + bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid); if (bpc > 0) seq_printf(m, "\t\tMax bpc: %d\n", bpc); @@ -734,7 +1110,8 @@ EXPORT_SYMBOL(drm_dp_downstream_debug); /** * drm_dp_subconnector_type() - get DP branch device type - * + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities */ enum drm_mode_subconnector drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -785,6 +1162,10 @@ EXPORT_SYMBOL(drm_dp_subconnector_type); /** * drm_mode_set_dp_subconnector_property - set subconnector for DP connector + * @connector: connector to set property on + * @status: connector status + * @dpcd: DisplayPort configuration data + * @port_cap: port capabilities * * Called by a driver on every detect event. 
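 *
 * A hedged sketch of the expected call site (illustrative variable names,
 * not from this patch):
 *
 * .. code-block:: c
 *
 *	drm_mode_set_dp_subconnector_property(connector, status,
 *					      dpcd, downstream_ports);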
*/ @@ -1578,6 +1959,7 @@ static const struct edid_quirk edid_quirk_list[] = { { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, + { MFG(0x09, 0xe5), PROD_ID(0xde, 0x08), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) }, }; #undef MFG @@ -1800,6 +2182,153 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs); /** + * drm_dp_read_lttpr_common_caps - read the LTTPR common capabilities + * @aux: DisplayPort AUX channel + * @caps: buffer to return the capability info in + * + * Read capabilities common to all LTTPRs. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux, + u8 caps[DP_LTTPR_COMMON_CAP_SIZE]) +{ + int ret; + + ret = drm_dp_dpcd_read(aux, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + caps, DP_LTTPR_COMMON_CAP_SIZE); + if (ret < 0) + return ret; + + WARN_ON(ret != DP_LTTPR_COMMON_CAP_SIZE); + + return 0; +} +EXPORT_SYMBOL(drm_dp_read_lttpr_common_caps); + +/** + * drm_dp_read_lttpr_phy_caps - read the capabilities for a given LTTPR PHY + * @aux: DisplayPort AUX channel + * @dp_phy: LTTPR PHY to read the capabilities for + * @caps: buffer to return the capability info in + * + * Read the capabilities for the given LTTPR PHY. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux, + enum drm_dp_phy dp_phy, + u8 caps[DP_LTTPR_PHY_CAP_SIZE]) +{ + int ret; + + ret = drm_dp_dpcd_read(aux, + DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy), + caps, DP_LTTPR_PHY_CAP_SIZE); + if (ret < 0) + return ret; + + WARN_ON(ret != DP_LTTPR_PHY_CAP_SIZE); + + return 0; +} +EXPORT_SYMBOL(drm_dp_read_lttpr_phy_caps); + +static u8 dp_lttpr_common_cap(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE], int r) +{ + return caps[r - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; +} + +/** + * drm_dp_lttpr_count - get the number of detected LTTPRs + * @caps: LTTPR common capabilities + * + * Get the number of detected LTTPRs from the LTTPR common capabilities info. + * + * Returns: + * -ERANGE if more than supported number (8) of LTTPRs are detected + * -EINVAL if the DP_PHY_REPEATER_CNT register contains an invalid value + * otherwise the number of detected LTTPRs + */ +int drm_dp_lttpr_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]) +{ + u8 count = dp_lttpr_common_cap(caps, DP_PHY_REPEATER_CNT); + + switch (hweight8(count)) { + case 0: + return 0; + case 1: + return 8 - ilog2(count); + case 8: + return -ERANGE; + default: + return -EINVAL; + } +} +EXPORT_SYMBOL(drm_dp_lttpr_count); + +/** + * drm_dp_lttpr_max_link_rate - get the maximum link rate supported by all LTTPRs + * @caps: LTTPR common capabilities + * + * Returns the maximum link rate supported by all detected LTTPRs. + */ +int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]) +{ + u8 rate = dp_lttpr_common_cap(caps, DP_MAX_LINK_RATE_PHY_REPEATER); + + return drm_dp_bw_code_to_link_rate(rate); +} +EXPORT_SYMBOL(drm_dp_lttpr_max_link_rate); + +/** + * drm_dp_lttpr_max_lane_count - get the maximum lane count supported by all LTTPRs + * @caps: LTTPR common capabilities + * + * Returns the maximum lane count supported by all detected LTTPRs. 
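+ *
+ * A hedged usage sketch (driver-side clamping; variable names are
+ * illustrative, not from this patch):
+ *
+ * .. code-block:: c
+ *
+ *	max_lanes = min_t(int, drm_dp_max_lane_count(dpcd),
+ *			  drm_dp_lttpr_max_lane_count(lttpr_common_caps));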
+ */ +int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]) +{ + u8 max_lanes = dp_lttpr_common_cap(caps, DP_MAX_LANE_COUNT_PHY_REPEATER); + + return max_lanes & DP_MAX_LANE_COUNT_MASK; +} +EXPORT_SYMBOL(drm_dp_lttpr_max_lane_count); + +/** + * drm_dp_lttpr_voltage_swing_level_3_supported - check for LTTPR vswing3 support + * @caps: LTTPR PHY capabilities + * + * Returns true if the @caps for an LTTPR TX PHY indicate support for + * voltage swing level 3. + */ +bool +drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]) +{ + u8 txcap = dp_lttpr_phy_cap(caps, DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1); + + return txcap & DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED; +} +EXPORT_SYMBOL(drm_dp_lttpr_voltage_swing_level_3_supported); + +/** + * drm_dp_lttpr_pre_emphasis_level_3_supported - check for LTTPR preemph3 support + * @caps: LTTPR PHY capabilities + * + * Returns true if the @caps for an LTTPR TX PHY indicate support for + * pre-emphasis level 3. + */ +bool +drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]) +{ + u8 txcap = dp_lttpr_phy_cap(caps, DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1); + + return txcap & DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED; +} +EXPORT_SYMBOL(drm_dp_lttpr_pre_emphasis_level_3_supported); + +/** * drm_dp_get_phy_test_pattern() - get the requested pattern from the sink. * @aux: DisplayPort AUX channel * @data: DP phy compliance test parameters. diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 17dbed0a9800..153b6065ba29 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -20,11 +20,13 @@ * OF THIS SOFTWARE. */ +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/random.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/iopoll.h> @@ -423,6 +425,22 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); idx += req->u.i2c_write.num_bytes; break; + case DP_QUERY_STREAM_ENC_STATUS: { + const struct drm_dp_query_stream_enc_status *msg; + + msg = &req->u.enc_status; + buf[idx] = msg->stream_id; + idx++; + memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id)); + idx += sizeof(msg->client_id); + buf[idx] = 0; + buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event); + buf[idx] |= msg->valid_stream_event ? BIT(2) : 0; + buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior); + buf[idx] |= msg->valid_stream_behavior ? 
BIT(5) : 0; + idx++; + } + break; } raw->cur_len = idx; } @@ -551,6 +569,20 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw, return -ENOMEM; } break; + case DP_QUERY_STREAM_ENC_STATUS: + req->u.enc_status.stream_id = buf[idx++]; + for (i = 0; i < sizeof(req->u.enc_status.client_id); i++) + req->u.enc_status.client_id[i] = buf[idx++]; + + req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0), + buf[idx]); + req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2), + buf[idx]); + req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3), + buf[idx]); + req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5), + buf[idx]); + break; } return 0; @@ -629,6 +661,16 @@ drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes, req->u.i2c_write.bytes); break; + case DP_QUERY_STREAM_ENC_STATUS: + P("stream_id=%u client_id=%*ph stream_event=%x " + "valid_event=%d stream_behavior=%x valid_behavior=%d", + req->u.enc_status.stream_id, + (int)ARRAY_SIZE(req->u.enc_status.client_id), + req->u.enc_status.client_id, req->u.enc_status.stream_event, + req->u.enc_status.valid_stream_event, + req->u.enc_status.stream_behavior, + req->u.enc_status.valid_stream_behavior); + break; default: P("???\n"); break; @@ -936,6 +978,42 @@ static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_ms return true; } +static bool +drm_dp_sideband_parse_query_stream_enc_status( + struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + struct drm_dp_query_stream_enc_status_ack_reply *reply; + + reply = &repmsg->u.enc_status; + + reply->stream_id = raw->msg[3]; + + reply->reply_signed = raw->msg[2] & BIT(0); + + /* + * NOTE: It's my impression from reading the spec that the below parsing + * is correct. However I noticed while testing with an HDCP 1.4 display + * through an HDCP 2.2 hub that only bit 3 was set. In that case, I + * would expect both bits to be set. So keep the parsing following the + * spec, but beware reality might not match the spec (at least for some + * configurations). 
+ */ + reply->hdcp_1x_device_present = raw->msg[2] & BIT(4); + reply->hdcp_2x_device_present = raw->msg[2] & BIT(3); + + reply->query_capable_device_present = raw->msg[2] & BIT(5); + reply->legacy_device_present = raw->msg[2] & BIT(6); + reply->unauthorizable_device_present = raw->msg[2] & BIT(7); + + reply->auth_completed = !!(raw->msg[1] & BIT(3)); + reply->encryption_enabled = !!(raw->msg[1] & BIT(4)); + reply->repeater_present = !!(raw->msg[1] & BIT(5)); + reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6; + + return true; +} + static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, struct drm_dp_sideband_msg_reply_body *msg) { @@ -961,6 +1039,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); case DP_REMOTE_I2C_READ: return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); + case DP_REMOTE_I2C_WRITE: + return true; /* since there's nothing to parse */ case DP_ENUM_PATH_RESOURCES: return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); case DP_ALLOCATE_PAYLOAD: @@ -970,6 +1050,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg); case DP_CLEAR_PAYLOAD_ID_TABLE: return true; /* since there's nothing to parse */ + case DP_QUERY_STREAM_ENC_STATUS: + return drm_dp_sideband_parse_query_stream_enc_status(raw, msg); default: DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); @@ -1121,6 +1203,25 @@ static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg, msg->path_msg = true; } +static int +build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id, + u8 *q_id) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_QUERY_STREAM_ENC_STATUS; + req.u.enc_status.stream_id = stream_id; + memcpy(req.u.enc_status.client_id, q_id, + sizeof(req.u.enc_status.client_id)); + req.u.enc_status.stream_event = 0; + req.u.enc_status.valid_stream_event = false; + req.u.enc_status.stream_behavior = 0; + req.u.enc_status.valid_stream_behavior = false; + + drm_dp_encode_sideband_req(&req, msg); + return 0; +} + static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_vcpi *vcpi) { @@ -3153,6 +3254,57 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, } EXPORT_SYMBOL(drm_dp_send_power_updown_phy); +int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_query_stream_enc_status_ack_reply *status) +{ + struct drm_dp_sideband_msg_tx *txmsg; + u8 nonce[7]; + int len, ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + port = drm_dp_mst_topology_get_port_validated(mgr, port); + if (!port) { + ret = -EINVAL; + goto out_get_port; + } + + get_random_bytes(nonce, sizeof(nonce)); + + /* + * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message + * transaction at the MST Branch device directly connected to the + * Source" + */ + txmsg->dst = mgr->mst_primary; + + len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg); + if (ret < 0) { + goto out; + } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + drm_dbg_kms(mgr->dev, "query encryption status nak received\n"); + ret = -ENXIO; + goto out; + } + + ret = 0; + memcpy(status, 
&txmsg->reply.u.enc_status, sizeof(*status)); + +out: + drm_dp_mst_topology_put_port(port); +out_get_port: + kfree(txmsg); + return ret; +} +EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status); + static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, int id, struct drm_dp_payload *payload) @@ -3534,9 +3686,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms WARN_ON(mgr->mst_primary); /* get dpcd info */ - ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); - if (ret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("failed to read DPCD\n"); + ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); + if (ret < 0) { + drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", + mgr->aux->name, ret); goto out_unlock; } @@ -5349,29 +5502,29 @@ static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num) msgs[num - 1].len <= 0xff; } -/* I2C device */ -static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, - int num) +static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num) +{ + int i; + + for (i = 0; i < num - 1; i++) { + if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) || + msgs[i].len > 0xff) + return false; + } + + return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff; +} + +static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port, + struct i2c_msg *msgs, int num) { - struct drm_dp_aux *aux = adapter->algo_data; - struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); - struct drm_dp_mst_branch *mstb; struct drm_dp_mst_topology_mgr *mgr = port->mgr; unsigned int i; struct drm_dp_sideband_msg_req_body msg; struct drm_dp_sideband_msg_tx *txmsg = NULL; int ret; - mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); - if (!mstb) - return -EREMOTEIO; - - if (!remote_i2c_read_ok(msgs, num)) { - DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); - ret = -EIO; - goto out; - } - memset(&msg, 0, sizeof(msg)); msg.req_type = DP_REMOTE_I2C_READ; msg.u.i2c_read.num_transactions = num - 1; @@ -5412,6 +5565,78 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs } out: kfree(txmsg); + return ret; +} + +static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port, + struct i2c_msg *msgs, int num) +{ + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + unsigned int i; + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_sideband_msg_tx *txmsg = NULL; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto out; + } + for (i = 0; i < num; i++) { + memset(&msg, 0, sizeof(msg)); + msg.req_type = DP_REMOTE_I2C_WRITE; + msg.u.i2c_write.port_number = port->port_num; + msg.u.i2c_write.write_i2c_device_id = msgs[i].addr; + msg.u.i2c_write.num_bytes = msgs[i].len; + msg.u.i2c_write.bytes = msgs[i].buf; + + memset(txmsg, 0, sizeof(*txmsg)); + txmsg->dst = mstb; + + drm_dp_encode_sideband_req(&msg, txmsg); + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + ret = -EREMOTEIO; + goto out; + } + } else { + goto out; + } + } + ret = num; +out: + kfree(txmsg); + return ret; +} + +/* I2C device */ +static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + struct drm_dp_mst_port *port = + 
		container_of(aux, struct drm_dp_mst_port, aux);
+	struct drm_dp_mst_branch *mstb;
+	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+	int ret;
+
+	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
+	if (!mstb)
+		return -EREMOTEIO;
+
+	if (remote_i2c_read_ok(msgs, num)) {
+		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
+	} else if (remote_i2c_write_ok(msgs, num)) {
+		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
+	} else {
+		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+		ret = -EIO;
+	}
+
+	drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
 }
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 13068fdf4331..cd162d406078 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -240,13 +240,13 @@ void drm_minor_release(struct drm_minor *minor)
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device. This
- * is initialized with drm_dev_init(), usually from bus-specific ->probe()
- * callbacks implemented by the driver. The driver then needs to initialize all
- * the various subsystems for the drm device like memory management, vblank
- * handling, modesetting support and intial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * is allocated and initialized with devm_drm_dev_alloc(), usually from
+ * bus-specific ->probe() callbacks implemented by the driver. The driver then
+ * needs to initialize all the various subsystems for the drm device like memory
+ * management, vblank handling, modesetting support and initial output
+ * configuration plus obviously initialize all the corresponding hardware bits.
+ * Finally when everything is up and running and ready for userspace the device
+ * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
@@ -274,7 +274,7 @@ void drm_minor_release(struct drm_minor *minor)
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that are
- * almost always present and serves as a demonstration of devm_drm_dev_init().
+ * almost always present, and serves as a demonstration of devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
@@ -294,22 +294,12 @@ void drm_minor_release(struct drm_minor *minor)
 *		struct drm_device *drm;
 *		int ret;
 *
- *		// devm_kzalloc() can't be used here because the drm_device '
- *		// lifetime can exceed the device lifetime if driver unbind
- *		// happens when userspace still has open file descriptors.
- * priv = kzalloc(sizeof(*priv), GFP_KERNEL); - * if (!priv) - * return -ENOMEM; - * + * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver, + * struct driver_device, drm); + * if (IS_ERR(priv)) + * return PTR_ERR(priv); * drm = &priv->drm; * - * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver); - * if (ret) { - * kfree(priv); - * return ret; - * } - * drmm_add_final_kfree(drm, priv); - * * ret = drmm_mode_config_init(drm); * if (ret) * return ret; @@ -550,9 +540,9 @@ static void drm_fs_inode_free(struct inode *inode) * following guidelines apply: * * - The entire device initialization procedure should be run from the - * &component_master_ops.master_bind callback, starting with drm_dev_init(), - * then binding all components with component_bind_all() and finishing with - * drm_dev_register(). + * &component_master_ops.master_bind callback, starting with + * devm_drm_dev_alloc(), then binding all components with + * component_bind_all() and finishing with drm_dev_register(). * * - The opaque pointer passed to all components through component_bind_all() * should point at &struct drm_device of the device instance, not some driver @@ -583,43 +573,9 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) drm_legacy_destroy_members(dev); } -/** - * drm_dev_init - Initialise new DRM device - * @dev: DRM device - * @driver: DRM driver - * @parent: Parent device object - * - * Initialize a new DRM device. No device registration is done. - * Call drm_dev_register() to advertice the device to user space and register it - * with other core subsystems. This should be done last in the device - * initialization sequence to make sure userspace can't access an inconsistent - * state. - * - * The initial ref-count of the object is 1. Use drm_dev_get() and - * drm_dev_put() to take and drop further ref-counts. - * - * It is recommended that drivers embed &struct drm_device into their own device - * structure. - * - * Drivers that do not want to allocate their own device struct - * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers - * that do embed &struct drm_device it must be placed first in the overall - * structure, and the overall structure must be allocated using kmalloc(): The - * drm core's release function unconditionally calls kfree() on the @dev pointer - * when the final reference is released. To override this behaviour, and so - * allow embedding of the drm_device inside the driver's device struct at an - * arbitrary offset, you must supply a &drm_driver.release callback and control - * the finalization explicitly. - * - * Note that drivers must call drmm_add_final_kfree() after this function has - * completed successfully. - * - * RETURNS: - * 0 on success, or error code on failure. - */ -int drm_dev_init(struct drm_device *dev, - struct drm_driver *driver, - struct device *parent) +static int drm_dev_init(struct drm_device *dev, + struct drm_driver *driver, + struct device *parent) { int ret; @@ -699,31 +655,15 @@ err: return ret; } -EXPORT_SYMBOL(drm_dev_init); static void devm_drm_dev_init_release(void *data) { drm_dev_put(data); } -/** - * devm_drm_dev_init - Resource managed drm_dev_init() - * @parent: Parent device object - * @dev: DRM device - * @driver: DRM driver - * - * Managed drm_dev_init(). The DRM device initialized with this function is - * automatically put on driver detach using drm_dev_put(). - * - * Note that drivers must call drmm_add_final_kfree() after this function has - * completed successfully. 
- * - * RETURNS: - * 0 on success, or error code on failure. - */ -int devm_drm_dev_init(struct device *parent, - struct drm_device *dev, - struct drm_driver *driver) +static int devm_drm_dev_init(struct device *parent, + struct drm_device *dev, + struct drm_driver *driver) { int ret; @@ -737,7 +677,6 @@ int devm_drm_dev_init(struct device *parent, return ret; } -EXPORT_SYMBOL(devm_drm_dev_init); void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, size_t size, size_t offset) @@ -767,19 +706,9 @@ EXPORT_SYMBOL(__devm_drm_dev_alloc); * @driver: DRM driver to allocate device for * @parent: Parent device object * - * Allocate and initialize a new DRM device. No device registration is done. - * Call drm_dev_register() to advertice the device to user space and register it - * with other core subsystems. This should be done last in the device - * initialization sequence to make sure userspace can't access an inconsistent - * state. - * - * The initial ref-count of the object is 1. Use drm_dev_get() and - * drm_dev_put() to take and drop further ref-counts. - * - * Note that for purely virtual devices @parent can be NULL. - * - * Drivers that wish to subclass or embed &struct drm_device into their - * own struct should look at using drm_dev_init() instead. + * This is the deprecated version of devm_drm_dev_alloc(), which does not support + * subclassing through embedding the struct &drm_device in a driver private + * structure, and which does not support automatic cleanup through devres. * * RETURNS: * Pointer to new DRM device, or ERR_PTR on failure. diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6840f0530a38..c7363af731b4 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1844,7 +1844,7 @@ static void connector_bad_edid(struct drm_connector *connector, if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS)) return; - drm_warn(connector->dev, "%s: EDID is invalid:\n", connector->name); + drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name); for (i = 0; i < num_blocks; i++) { u8 *block = edid + i * EDID_LENGTH; char prefix[20]; @@ -1856,7 +1856,7 @@ static void connector_bad_edid(struct drm_connector *connector, else sprintf(prefix, "\t[%02x] GOOD ", i); - print_hex_dump(KERN_WARNING, + print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE, 16, 1, block, EDID_LENGTH, false); } @@ -3738,6 +3738,34 @@ drm_add_cmdb_modes(struct drm_connector *connector, u8 svd) bitmap_set(hdmi->y420_cmdb_modes, vic, 1); } +/** + * drm_display_mode_from_cea_vic() - return a mode for CEA VIC + * @dev: DRM device + * @video_code: CEA VIC of the mode + * + * Creates a new mode matching the specified CEA VIC. 
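+ * For example, VIC 16 yields the CEA-861 1920x1080@60Hz mode.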
+ * + * Returns: A new drm_display_mode on success or NULL on failure + */ +struct drm_display_mode * +drm_display_mode_from_cea_vic(struct drm_device *dev, + u8 video_code) +{ + const struct drm_display_mode *cea_mode; + struct drm_display_mode *newmode; + + cea_mode = cea_mode_for_vic(video_code); + if (!cea_mode) + return NULL; + + newmode = drm_mode_duplicate(dev, cea_mode); + if (!newmode) + return NULL; + + return newmode; +} +EXPORT_SYMBOL(drm_display_mode_from_cea_vic); + static int do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 8697554ccd41..92e0db30fdf7 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -281,18 +281,12 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); #ifdef CONFIG_MAGIC_SYSRQ -/* - * restore fbcon display for all kms driver's using this helper, used for sysrq - * and panic handling. - */ -static bool drm_fb_helper_force_kernel_mode(void) +/* emergency restore, don't bother with error reporting */ +static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) { - bool ret, error = false; struct drm_fb_helper *helper; - if (list_empty(&kernel_fb_helper_list)) - return false; - + mutex_lock(&kernel_fb_helper_lock); list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { struct drm_device *dev = helper->dev; @@ -300,22 +294,12 @@ static bool drm_fb_helper_force_kernel_mode(void) continue; mutex_lock(&helper->lock); - ret = drm_client_modeset_commit_locked(&helper->client); - if (ret) - error = true; + drm_client_modeset_commit_locked(&helper->client); mutex_unlock(&helper->lock); } - return error; + mutex_unlock(&kernel_fb_helper_lock); } -static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) -{ - bool ret; - - ret = drm_fb_helper_force_kernel_mode(); - if (ret == true) - DRM_ERROR("Failed to restore crtc configuration\n"); -} static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); static void drm_fb_helper_sysrq(int dummy1) @@ -325,7 +309,7 @@ static void drm_fb_helper_sysrq(int dummy1) static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { .handler = drm_fb_helper_sysrq, - .help_msg = "force-fb(V)", + .help_msg = "force-fb(v)", .action_msg = "Restore framebuffer console", }; #else diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 722c7ebe4e88..03262472059c 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -202,6 +202,7 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = 
{ 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index df656366a530..2f5b0c2bb0fe 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -176,8 +176,7 @@ static int framebuffer_check(struct drm_device *dev, int i; /* check if the format is supported at all */ - info = __drm_format_info(r->pixel_format); - if (!info) { + if (!__drm_format_info(r->pixel_format)) { struct drm_format_name_buf format_name; DRM_DEBUG_KMS("bad framebuffer format %s\n", @@ -186,9 +185,6 @@ static int framebuffer_check(struct drm_device *dev, return -EINVAL; } - /* now let the driver pick its own format info */ - info = drm_get_format_info(dev, r); - if (r->width == 0) { DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); return -EINVAL; @@ -199,6 +195,9 @@ static int framebuffer_check(struct drm_device *dev, return -EINVAL; } + /* now let the driver pick its own format info */ + info = drm_get_format_info(dev, r); + for (i = 0; i < info->num_planes; i++) { unsigned int width = fb_plane_width(r->width, info, i); unsigned int height = fb_plane_height(r->height, info, i); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 19d73868490e..d586068f5509 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -247,12 +247,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) { struct drm_file *file_priv = data; struct drm_gem_object *obj = ptr; - struct drm_device *dev = obj->dev; - if (obj->funcs && obj->funcs->close) + if (obj->funcs->close) obj->funcs->close(obj, file_priv); - else if (dev->driver->gem_close_object) - dev->driver->gem_close_object(obj, file_priv); drm_gem_remove_prime_handles(obj, file_priv); drm_vma_node_revoke(&obj->vma_node, file_priv); @@ -403,14 +400,10 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, if (ret) goto err_remove; - if (obj->funcs && obj->funcs->open) { + if (obj->funcs->open) { ret = obj->funcs->open(obj, file_priv); if (ret) goto err_revoke; - } else if (dev->driver->gem_open_object) { - ret = dev->driver->gem_open_object(obj, file_priv); - if (ret) - goto err_revoke; } *handlep = handle; @@ -982,12 +975,11 @@ drm_gem_object_free(struct kref *kref) { struct drm_gem_object *obj = container_of(kref, struct drm_gem_object, refcount); - struct drm_device *dev = obj->dev; - if (obj->funcs) - obj->funcs->free(obj); - else if (dev->driver->gem_free_object_unlocked) - dev->driver->gem_free_object_unlocked(obj); + if (WARN_ON(!obj->funcs->free)) + return; + + obj->funcs->free(obj); } EXPORT_SYMBOL(drm_gem_object_free); @@ -1049,9 +1041,9 @@ EXPORT_SYMBOL(drm_gem_vm_close); * @obj_size: the object size to be mapped, in bytes * @vma: VMA for the area to be mapped * - * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops - * provided by the driver. Depending on their requirements, drivers can either - * provide a fault handler in their gem_vm_ops (in which case any accesses to + * Set up the VMA to prepare mapping of the GEM object using the GEM object's + * vm_ops. Depending on their requirements, GEM objects can either + * provide a fault handler in their vm_ops (in which case any accesses to * the object will be trapped, to perform migration, GTT binding, surface * register allocation, or performance monitoring), or mmap the buffer memory * synchronously after calling drm_gem_mmap_obj. 
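 *
 * A minimal sketch of such a per-object setup (hypothetical driver code;
 * my_gem_fault and my_gem_free are assumed driver functions, not part of
 * this patch):
 *
 * .. code-block:: c
 *
 *	static const struct vm_operations_struct my_gem_vm_ops = {
 *		.fault = my_gem_fault,	// assumed fault handler
 *		.open  = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct drm_gem_object_funcs my_gem_funcs = {
 *		.free   = my_gem_free,	// assumed release function
 *		.vm_ops = &my_gem_vm_ops,
 *	};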
@@ -1065,12 +1057,11 @@ EXPORT_SYMBOL(drm_gem_vm_close);
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
- * size, or if no gem_vm_ops are provided.
+ * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
-	struct drm_device *dev = obj->dev;
 	int ret;

 	/* Check for valid size. */
@@ -1085,7 +1076,9 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	 */
 	drm_gem_object_get(obj);

-	if (obj->funcs && obj->funcs->mmap) {
+	vma->vm_private_data = obj;
+
+	if (obj->funcs->mmap) {
 		ret = obj->funcs->mmap(obj, vma);
 		if (ret) {
 			drm_gem_object_put(obj);
@@ -1093,10 +1086,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 		}
 		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
 	} else {
-		if (obj->funcs && obj->funcs->vm_ops)
+		if (obj->funcs->vm_ops)
 			vma->vm_ops = obj->funcs->vm_ops;
-		else if (dev->driver->gem_vm_ops)
-			vma->vm_ops = dev->driver->gem_vm_ops;
 		else {
 			drm_gem_object_put(obj);
 			return -EINVAL;
@@ -1107,8 +1098,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 	}

-	vma->vm_private_data = obj;
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
@@ -1198,36 +1187,30 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
 	drm_printf_indent(p, indent, "imported=%s\n",
 			  obj->import_attach ? "yes" : "no");

-	if (obj->funcs && obj->funcs->print_info)
+	if (obj->funcs->print_info)
 		obj->funcs->print_info(p, indent, obj);
 }

 int drm_gem_pin(struct drm_gem_object *obj)
 {
-	if (obj->funcs && obj->funcs->pin)
+	if (obj->funcs->pin)
 		return obj->funcs->pin(obj);
-	else if (obj->dev->driver->gem_prime_pin)
-		return obj->dev->driver->gem_prime_pin(obj);
 	else
 		return 0;
 }

 void drm_gem_unpin(struct drm_gem_object *obj)
 {
-	if (obj->funcs && obj->funcs->unpin)
+	if (obj->funcs->unpin)
 		obj->funcs->unpin(obj);
-	else if (obj->dev->driver->gem_prime_unpin)
-		obj->dev->driver->gem_prime_unpin(obj);
 }

 void *drm_gem_vmap(struct drm_gem_object *obj)
 {
 	void *vaddr;

-	if (obj->funcs && obj->funcs->vmap)
+	if (obj->funcs->vmap)
 		vaddr = obj->funcs->vmap(obj);
-	else if (obj->dev->driver->gem_prime_vmap)
-		vaddr = obj->dev->driver->gem_prime_vmap(obj);
 	else
 		vaddr = ERR_PTR(-EOPNOTSUPP);

@@ -1242,10 +1225,8 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
 	if (!vaddr)
 		return;

-	if (obj->funcs && obj->funcs->vunmap)
+	if (obj->funcs->vunmap)
 		obj->funcs->vunmap(obj, vaddr);
-	else if (obj->dev->driver->gem_prime_vunmap)
-		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
 }

 /**
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 822edeadbab3..2165633c9b9e 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -171,17 +171,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 * Drivers using the CMA helpers should set this as their
- * &drm_driver.gem_free_object_unlocked callback.
+ * &drm_gem_object_funcs.free callback.
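+ *
+ * For example (an illustrative object-funcs table, not from this patch):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct drm_gem_object_funcs my_cma_funcs = {
+ *		.free = drm_gem_cma_free_object,
+ *		.print_info = drm_gem_cma_print_info,
+ *		.get_sg_table = drm_gem_cma_prime_get_sg_table,
+ *		.vmap = drm_gem_cma_prime_vmap,
+ *	};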
*/ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) { - struct drm_gem_cma_object *cma_obj; - - cma_obj = to_drm_gem_cma_obj(gem_obj); + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj); + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr); if (gem_obj->import_attach) { if (cma_obj->vaddr) - dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr); + dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, cma_obj->sgt); } else if (cma_obj->vaddr) { dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, @@ -419,7 +418,7 @@ EXPORT_SYMBOL(drm_gem_cma_print_info); * * This function exports a scatter/gather table suitable for PRIME usage by * calling the standard DMA mapping API. Drivers using the CMA helpers should - * set this as their &drm_driver.gem_prime_get_sg_table callback. + * set this as their &drm_gem_object_funcs.get_sg_table callback. * * Returns: * A pointer to the scatter/gather table of pinned pages or NULL on failure. @@ -471,26 +470,9 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, { struct drm_gem_cma_object *cma_obj; - if (sgt->nents != 1) { - /* check if the entries in the sg_table are contiguous */ - dma_addr_t next_addr = sg_dma_address(sgt->sgl); - struct scatterlist *s; - unsigned int i; - - for_each_sg(sgt->sgl, s, sgt->nents, i) { - /* - * sg_dma_address(s) is only valid for entries - * that have sg_dma_len(s) != 0 - */ - if (!sg_dma_len(s)) - continue; - - if (sg_dma_address(s) != next_addr) - return ERR_PTR(-EINVAL); - - next_addr = sg_dma_address(s) + sg_dma_len(s); - } - } + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) + return ERR_PTR(-EINVAL); /* Create a CMA GEM buffer. */ cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size); @@ -542,7 +524,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); * virtual address space. Since the CMA buffers are already mapped into the * kernel virtual address space this simply returns the cached virtual * address. Drivers using the CMA helpers should set this as their DRM - * driver's &drm_driver.gem_prime_vmap callback. + * driver's &drm_gem_object_funcs.vmap callback. * * Returns: * The kernel virtual address of the CMA GEM object's backing store. @@ -564,7 +546,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); * This function removes a buffer exported via DRM PRIME from the kernel's * virtual address space. This is a no-op because CMA buffers cannot be * unmapped from kernel space. Drivers using the CMA helpers should set this - * as their &drm_driver.gem_prime_vunmap callback. + * as their &drm_gem_object_funcs.vunmap callback. 
*/ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr) { @@ -634,22 +616,23 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev, { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *obj; - void *vaddr; + struct dma_buf_map map; + int ret; - vaddr = dma_buf_vmap(attach->dmabuf); - if (!vaddr) { + ret = dma_buf_vmap(attach->dmabuf, &map); + if (ret) { DRM_ERROR("Failed to vmap PRIME buffer\n"); - return ERR_PTR(-ENOMEM); + return ERR_PTR(ret); } obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); if (IS_ERR(obj)) { - dma_buf_vunmap(attach->dmabuf, vaddr); + dma_buf_vunmap(attach->dmabuf, &map); return obj; } cma_obj = to_drm_gem_cma_obj(obj); - cma_obj->vaddr = vaddr; + cma_obj->vaddr = map.vaddr; return obj; } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 4b7cfbac4daa..8233bda4692f 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj) drm_prime_gem_destroy(obj, shmem->sgt); } else { if (shmem->sgt) { - dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, - shmem->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(obj->dev->dev, shmem->sgt, + DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); } @@ -261,13 +261,16 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin); static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - int ret; + struct dma_buf_map map; + int ret = 0; if (shmem->vmap_use_count++ > 0) return shmem->vaddr; if (obj->import_attach) { - shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf); + ret = dma_buf_vmap(obj->import_attach->dmabuf, &map); + if (!ret) + shmem->vaddr = map.vaddr; } else { pgprot_t prot = PAGE_KERNEL; @@ -279,11 +282,12 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) prot = pgprot_writecombine(prot); shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot); + if (!shmem->vaddr) + ret = -ENOMEM; } - if (!shmem->vaddr) { - DRM_DEBUG_KMS("Failed to vmap pages\n"); - ret = -ENOMEM; + if (ret) { + DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret); goto err_put_pages; } @@ -333,6 +337,7 @@ EXPORT_SYMBOL(drm_gem_shmem_vmap); static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr); if (WARN_ON_ONCE(!shmem->vmap_use_count)) return; @@ -341,7 +346,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) return; if (obj->import_attach) - dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr); + dma_buf_vunmap(obj->import_attach->dmabuf, &map); else vunmap(shmem->vaddr); @@ -424,8 +429,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); - dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, - shmem->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); shmem->sgt = NULL; @@ -594,8 +598,13 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) /* Remove the fake offset */ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); - if (obj->import_attach) + if (obj->import_attach) { + /* Drop the reference drm_gem_mmap_obj() acquired.*/ + drm_gem_object_put(obj); + vma->vm_private_data = NULL; + return 
dma_buf_mmap(obj->dma_buf, vma, 0); + } shmem = to_drm_gem_shmem_obj(obj); @@ -656,7 +665,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj) WARN_ON(shmem->base.import_attach); - return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT); + return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT); } EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); @@ -697,12 +706,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) goto err_put_pages; } /* Map the pages for use by the h/w. */ - dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) + goto err_free_sgt; shmem->sgt = sgt; return sgt; +err_free_sgt: + sg_free_table(sgt); + kfree(sgt); err_put_pages: drm_gem_shmem_put_pages(shmem); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 892b2288a104..0e4fb9ba43ad 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -43,12 +43,9 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname)); drm_printf(p, "\n"); - if (bo->mem.bus.is_iomem) { - drm_printf_indent(p, indent, "bus.base=%lx\n", - (unsigned long)bo->mem.bus.base); + if (bo->mem.bus.is_iomem) drm_printf_indent(p, indent, "bus.offset=%lx\n", (unsigned long)bo->mem.bus.offset); - } } EXPORT_SYMBOL(drm_gem_ttm_print_info); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 545a877406f4..16d68c04ea5d 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -15,7 +15,6 @@ #include <drm/drm_plane.h> #include <drm/drm_prime.h> #include <drm/drm_simple_kms_helper.h> -#include <drm/ttm/ttm_page_alloc.h> static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; @@ -97,8 +96,8 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; * hardware's drawing engine. * * To access a buffer object's memory from the DRM driver, call - * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address - * space and returns the memory address. Use drm_gem_vram_kunmap() to + * drm_gem_vram_vmap(). It maps the buffer into kernel address + * space and returns the memory address. Use drm_gem_vram_vunmap() to * release the mapping.
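With drm_gem_vram_kmap() and drm_gem_vram_kunmap() on their way out (their removal follows below), mapping a VRAM object always pins it first. A sketch of the replacement pattern at this point in the series, where drm_gem_vram_vmap() still returns a plain pointer:

	static int foo_update_cursor(struct drm_gem_vram_object *gbo,
				     const void *src, size_t len)
	{
		void *vaddr = drm_gem_vram_vmap(gbo);

		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		memcpy(vaddr, src, len);	/* object stays pinned while mapped */

		drm_gem_vram_vunmap(gbo, vaddr);
		return 0;
	}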
*/ @@ -135,28 +134,25 @@ static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo) static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, unsigned long pl_flag) { + u32 invariant_flags = 0; unsigned int i; unsigned int c = 0; - u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN; + + if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN) + invariant_flags = TTM_PL_FLAG_TOPDOWN; gbo->placement.placement = gbo->placements; gbo->placement.busy_placement = gbo->placements; - if (pl_flag & TTM_PL_FLAG_VRAM) - gbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM | - invariant_flags; - - if (pl_flag & TTM_PL_FLAG_SYSTEM) - gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM | - invariant_flags; + if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) { + gbo->placements[c].mem_type = TTM_PL_VRAM; + gbo->placements[c++].flags = invariant_flags; + } - if (!c) - gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM | - invariant_flags; + if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) { + gbo->placements[c].mem_type = TTM_PL_SYSTEM; + gbo->placements[c++].flags = invariant_flags; + } gbo->placement.num_placement = c; gbo->placement.num_busy_placement = c; @@ -167,49 +163,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, } } -static int drm_gem_vram_init(struct drm_device *dev, - struct drm_gem_vram_object *gbo, - size_t size, unsigned long pg_align) -{ - struct drm_vram_mm *vmm = dev->vram_mm; - struct ttm_bo_device *bdev; - int ret; - size_t acc_size; - - if (WARN_ONCE(!vmm, "VRAM MM not initialized")) - return -EINVAL; - bdev = &vmm->bdev; - - gbo->bo.base.funcs = &drm_gem_vram_object_funcs; - - ret = drm_gem_object_init(dev, &gbo->bo.base, size); - if (ret) - return ret; - - acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo)); - - gbo->bo.bdev = bdev; - drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); - - ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device, - &gbo->placement, pg_align, false, acc_size, - NULL, NULL, ttm_buffer_object_destroy); - if (ret) - goto err_drm_gem_object_release; - - return 0; - -err_drm_gem_object_release: - drm_gem_object_release(&gbo->bo.base); - return ret; -} - /** * drm_gem_vram_create() - Creates a VRAM-backed GEM object * @dev: the DRM device * @size: the buffer size in bytes * @pg_align: the buffer's alignment in multiples of the page size * + * GEM objects are allocated by calling struct drm_driver.gem_create_object, + * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM + * object functions in struct drm_driver.gem_create_object. If no functions + * are set, the new GEM object will use the default functions from GEM VRAM + * helpers. + * * Returns: * A new instance of &struct drm_gem_vram_object on success, or * an ERR_PTR()-encoded error code otherwise. 
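Callers of the VRAM helpers stop passing raw TTM_PL_FLAG_* bits; placement is requested with the helper's own DRM_GEM_VRAM_PL_FLAG_* constants, which drm_gem_vram_placement() translates into a TTM mem_type plus the optional top-down flag, as the hunk above shows. A sketch of pinning a scanout buffer under the new constants (hypothetical caller, error handling trimmed):

	static int foo_pin_scanout(struct drm_gem_vram_object *gbo, s64 *gpu_addr)
	{
		int ret;

		ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
					    DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
		if (ret)
			return ret;

		*gpu_addr = drm_gem_vram_offset(gbo);	/* valid while pinned */
		return 0;
	}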
@@ -219,11 +184,17 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, unsigned long pg_align) { struct drm_gem_vram_object *gbo; + struct drm_gem_object *gem; + struct drm_vram_mm *vmm = dev->vram_mm; + struct ttm_bo_device *bdev; int ret; + size_t acc_size; + + if (WARN_ONCE(!vmm, "VRAM MM not initialized")) + return ERR_PTR(-EINVAL); if (dev->driver->gem_create_object) { - struct drm_gem_object *gem = - dev->driver->gem_create_object(dev, size); + gem = dev->driver->gem_create_object(dev, size); if (!gem) return ERR_PTR(-ENOMEM); gbo = drm_gem_vram_of_gem(gem); @@ -231,17 +202,35 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, gbo = kzalloc(sizeof(*gbo), GFP_KERNEL); if (!gbo) return ERR_PTR(-ENOMEM); + gem = &gbo->bo.base; } - ret = drm_gem_vram_init(dev, gbo, size, pg_align); - if (ret < 0) - goto err_kfree; + if (!gem->funcs) + gem->funcs = &drm_gem_vram_object_funcs; - return gbo; + ret = drm_gem_object_init(dev, gem, size); + if (ret) { + kfree(gbo); + return ERR_PTR(ret); + } -err_kfree: - kfree(gbo); - return ERR_PTR(ret); + bdev = &vmm->bdev; + acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo)); + + gbo->bo.bdev = bdev; + drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM); + + /* + * A failing ttm_bo_init will call ttm_buffer_object_destroy + * to release gbo->bo.base and kfree gbo. + */ + ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device, + &gbo->placement, pg_align, false, acc_size, + NULL, NULL, ttm_buffer_object_destroy); + if (ret) + return ERR_PTR(ret); + + return gbo; } EXPORT_SYMBOL(drm_gem_vram_create); @@ -296,7 +285,7 @@ static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo) */ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo) { - if (WARN_ON_ONCE(!gbo->pin_count)) + if (WARN_ON_ONCE(!gbo->bo.pin_count)) return (s64)-ENODEV; return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT; } @@ -305,24 +294,21 @@ EXPORT_SYMBOL(drm_gem_vram_offset); static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo, unsigned long pl_flag) { - int i, ret; struct ttm_operation_ctx ctx = { false, false }; + int ret; - if (gbo->pin_count) + if (gbo->bo.pin_count) goto out; if (pl_flag) drm_gem_vram_placement(gbo, pl_flag); - for (i = 0; i < gbo->placement.num_placement; ++i) - gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; - ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx); if (ret < 0) return ret; out: - ++gbo->pin_count; + ttm_bo_pin(&gbo->bo); return 0; } @@ -364,26 +350,9 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag) } EXPORT_SYMBOL(drm_gem_vram_pin); -static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo) +static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo) { - int i, ret; - struct ttm_operation_ctx ctx = { false, false }; - - if (WARN_ON_ONCE(!gbo->pin_count)) - return 0; - - --gbo->pin_count; - if (gbo->pin_count) - return 0; - - for (i = 0; i < gbo->placement.num_placement ; ++i) - gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; - - ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx); - if (ret < 0) - return ret; - - return 0; + ttm_bo_unpin(&gbo->bo); } /** @@ -401,10 +370,11 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); if (ret) return ret; - ret = drm_gem_vram_unpin_locked(gbo); + + drm_gem_vram_unpin_locked(gbo); ttm_bo_unreserve(&gbo->bo); - return ret; + return 0; } EXPORT_SYMBOL(drm_gem_vram_unpin); @@ -436,39 +406,6 @@ out: return 
kmap->virtual; } -/** - * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space - * @gbo: the GEM VRAM object - * @map: establish a mapping if necessary - * @is_iomem: returns true if the mapped memory is I/O memory, or false \ - otherwise; can be NULL - * - * This function maps the buffer object into the kernel's address space - * or returns the current mapping. If the parameter map is false, the - * function only queries the current mapping, but does not establish a - * new one. - * - * Returns: - * The buffers virtual address if mapped, or - * NULL if not mapped, or - * an ERR_PTR()-encoded error code otherwise. - */ -void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, - bool *is_iomem) -{ - int ret; - void *virtual; - - ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); - if (ret) - return ERR_PTR(ret); - virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem); - ttm_bo_unreserve(&gbo->bo); - - return virtual; -} -EXPORT_SYMBOL(drm_gem_vram_kmap); - static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) { if (WARN_ON_ONCE(!gbo->kmap_use_count)) @@ -485,22 +422,6 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) } /** - * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object - * @gbo: the GEM VRAM object - */ -void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo) -{ - int ret; - - ret = ttm_bo_reserve(&gbo->bo, false, false, NULL); - if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) - return; - drm_gem_vram_kunmap_locked(gbo); - ttm_bo_unreserve(&gbo->bo); -} -EXPORT_SYMBOL(drm_gem_vram_kunmap); - -/** * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address * space * @gbo: The GEM VRAM object to map @@ -511,9 +432,6 @@ EXPORT_SYMBOL(drm_gem_vram_kunmap); * permanently. Call drm_gem_vram_vunmap() with the returned address to * unmap and unpin the GEM VRAM object. * - * If you have special requirements for the pinning or mapping operations, - * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly. - * * Returns: * The buffer's virtual address on success, or * an ERR_PTR()-encoded error code otherwise. 
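The conversions above replace the helper's private pin_count and the TTM_PL_FLAG_NO_EVICT round-trips through ttm_bo_validate() with TTM's own reference-counted pinning, which only asks that the reservation lock be held. The whole locked path shrinks to the shape below (a sketch of the pattern, not the literal helper):

	static int foo_pin(struct ttm_buffer_object *bo)
	{
		int ret = ttm_bo_reserve(bo, true, false, NULL);

		if (ret)
			return ret;
		ttm_bo_pin(bo);		/* increments bo->pin_count */
		ttm_bo_unreserve(bo);
		return 0;
	}

	static void foo_unpin(struct ttm_buffer_object *bo)
	{
		if (ttm_bo_reserve(bo, true, false, NULL))
			return;
		ttm_bo_unpin(bo);	/* may make the BO evictable again */
		ttm_bo_unreserve(bo);
	}

This is also why drm_gem_vram_unpin() above can unconditionally return 0: ttm_bo_unpin() cannot fail.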
@@ -647,7 +565,7 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo) static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, struct ttm_placement *pl) { - drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM); + drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM); *pl = gbo->placement; } @@ -666,6 +584,23 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, kmap->virtual = NULL; } +static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, + bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem) +{ + int ret; + + drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); + ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem); + if (ret) { + swap(*new_mem, gbo->bo.mem); + drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem); + swap(*new_mem, gbo->bo.mem); + } + return ret; +} + /* * Helpers for struct drm_gem_object_funcs */ @@ -967,16 +902,13 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { * TTM TT */ -static void backend_func_destroy(struct ttm_tt *tt) +static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt) { + ttm_tt_destroy_common(bdev, tt); ttm_tt_fini(tt); kfree(tt); } -static struct ttm_backend_func backend_func = { - .destroy = backend_func_destroy -}; - /* * TTM BO device */ @@ -991,9 +923,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, if (!tt) return NULL; - tt->func = &backend_func; - - ret = ttm_tt_init(tt, bo, page_flags); + ret = ttm_tt_init(tt, bo, page_flags, ttm_cached); if (ret < 0) goto err_ttm_tt_init; @@ -1018,9 +948,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo, drm_gem_vram_bo_driver_evict_flags(gbo, placement); } -static void bo_driver_move_notify(struct ttm_buffer_object *bo, - bool evict, - struct ttm_resource *new_mem) +static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo) { struct drm_gem_vram_object *gbo; @@ -1030,7 +958,19 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo, gbo = drm_gem_vram_of_bo(bo); - drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); + drm_gem_vram_bo_driver_move_notify(gbo, false, NULL); +} + +static int bo_driver_move(struct ttm_buffer_object *bo, + bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem) +{ + struct drm_gem_vram_object *gbo; + + gbo = drm_gem_vram_of_bo(bo); + + return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem); } static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, @@ -1042,9 +982,9 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, case TTM_PL_SYSTEM: /* nothing to do */ break; case TTM_PL_VRAM: - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = vmm->vram_base; + mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base; mem->bus.is_iomem = true; + mem->bus.caching = ttm_write_combined; break; default: return -EINVAL; @@ -1055,9 +995,11 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, static struct ttm_bo_driver bo_driver = { .ttm_tt_create = bo_driver_ttm_tt_create, + .ttm_tt_destroy = bo_driver_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = bo_driver_evict_flags, - .move_notify = bo_driver_move_notify, + .move = bo_driver_move, + .delete_mem_notify = bo_driver_delete_mem_notify, .io_mem_reserve = bo_driver_io_mem_reserve, }; @@ -1102,17 +1044,15 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, vmm->vram_base = vram_base; 
vmm->vram_size = vram_size; - ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, + ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev, dev->anon_inode->i_mapping, dev->vma_offset_manager, - true); + false, true); if (ret) return ret; ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM, - TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, - TTM_PL_FLAG_WC, false, - vram_size >> PAGE_SHIFT); + false, vram_size >> PAGE_SHIFT); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 8e01caaf95cc..2bdac3557765 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -53,15 +53,15 @@ void drm_lastclose(struct drm_device *dev); #ifdef CONFIG_PCI /* drm_pci.c */ -int drm_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv); +int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv); void drm_pci_agp_destroy(struct drm_device *dev); int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); #else -static inline int drm_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) +static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv) { return -EINVAL; } @@ -95,6 +95,7 @@ void drm_minor_release(struct drm_minor *minor); /* drm_managed.c */ void drm_managed_release(struct drm_device *dev); +void drmm_add_final_kfree(struct drm_device *dev, void *container); /* drm_vblank.c */ static inline bool drm_vblank_passed(u64 seq, u64 ref) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 789ee65ac1f5..d273d1a8603a 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -578,7 +578,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), - DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0), diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c index 1e1356560c2e..37d7db6223be 100644 --- a/drivers/gpu/drm/drm_managed.c +++ b/drivers/gpu/drm/drm_managed.c @@ -27,7 +27,7 @@ * be done directly with drmm_kmalloc() and the related functions. Everything * will be released on the final drm_dev_put() in reverse order of how the * release actions have been added and memory has been allocated since driver - * loading started with drm_dev_init(). + * loading started with devm_drm_dev_alloc(). * * Note that release actions and managed memory can also be added and removed * during the lifetime of the driver, all the functions are fully concurrent @@ -125,18 +125,6 @@ static void add_dr(struct drm_device *dev, struct drmres *dr) dr, dr->node.name, (unsigned long) dr->node.size); } -/** - * drmm_add_final_kfree - add release action for the final kfree() - * @dev: DRM device - * @container: pointer to the kmalloc allocation containing @dev - * - * Since the allocation containing the struct &drm_device must be allocated - * before it can be initialized with drm_dev_init() there's no way to allocate - * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the - * pointer for this final kfree() must be specified by calling this function. 
It - * will be released in the final drm_dev_put() for @dev, after all other release - * actions installed through drmm_add_action() have been processed. - */ void drmm_add_final_kfree(struct drm_device *dev, void *container) { WARN_ON(dev->managed.final_kfree); @@ -144,7 +132,6 @@ void drmm_add_final_kfree(struct drm_device *dev, void *container) WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container))); dev->managed.final_kfree = container; } -EXPORT_SYMBOL(drmm_add_final_kfree); int __drmm_add_action(struct drm_device *dev, drmres_release_t action, diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index c250fb5a88ca..6dba4b8ce4fe 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -139,7 +139,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) } /** - * drm_irq_by_busid - Get interrupt from bus ID + * drm_legacy_irq_by_busid - Get interrupt from bus ID * @dev: DRM device * @data: IOCTL parameter pointing to a drm_irq_busid structure * @file_priv: DRM file private. @@ -150,8 +150,8 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) * * Return: 0 on success or a negative error code on failure. */ -int drm_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_irq_busid *p = data; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index affe1cfed009..e6231947f987 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -1231,3 +1231,76 @@ out: return ret; } + +struct drm_property * +drm_create_scaling_filter_prop(struct drm_device *dev, + unsigned int supported_filters) +{ + struct drm_property *prop; + static const struct drm_prop_enum_list props[] = { + { DRM_SCALING_FILTER_DEFAULT, "Default" }, + { DRM_SCALING_FILTER_NEAREST_NEIGHBOR, "Nearest Neighbor" }, + }; + unsigned int valid_mode_mask = BIT(DRM_SCALING_FILTER_DEFAULT) | + BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR); + int i; + + if (WARN_ON((supported_filters & ~valid_mode_mask) || + ((supported_filters & BIT(DRM_SCALING_FILTER_DEFAULT)) == 0))) + return ERR_PTR(-EINVAL); + + prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, + "SCALING_FILTER", + hweight32(supported_filters)); + if (!prop) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + if (!(BIT(props[i].type) & supported_filters)) + continue; + + ret = drm_property_add_enum(prop, props[i].type, + props[i].name); + + if (ret) { + drm_property_destroy(dev, prop); + + return ERR_PTR(ret); + } + } + + return prop; +} + +/** + * drm_plane_create_scaling_filter_property - create a new scaling filter + * property + * + * @plane: drm plane + * @supported_filters: bitmask of supported scaling filters, must include + * BIT(DRM_SCALING_FILTER_DEFAULT). + * + * This function lets driver to enable the scaling filter property on a given + * plane. 
+ * + * RETURNS: + * Zero for success or -errno + */ +int drm_plane_create_scaling_filter_property(struct drm_plane *plane, + unsigned int supported_filters) +{ + struct drm_property *prop = + drm_create_scaling_filter_prop(plane->dev, supported_filters); + + if (IS_ERR(prop)) + return PTR_ERR(prop); + + drm_object_attach_property(&plane->base, prop, + DRM_SCALING_FILTER_DEFAULT); + plane->scaling_filter_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_scaling_filter_property); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 1693aa7c14b5..a7b61c2d9190 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -386,8 +386,6 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, if (obj->funcs && obj->funcs->export) dmabuf = obj->funcs->export(obj, flags); - else if (dev->driver->gem_prime_export) - dmabuf = dev->driver->gem_prime_export(obj, flags); else dmabuf = drm_gem_prime_export(obj, flags); if (IS_ERR(dmabuf)) { @@ -419,7 +417,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the - * &drm_driver.gem_prime_export driver callback. + * &drm_gem_object_funcs.export callback. */ int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, @@ -617,20 +615,24 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, { struct drm_gem_object *obj = attach->dmabuf->priv; struct sg_table *sgt; + int ret; if (WARN_ON(dir == DMA_NONE)) return ERR_PTR(-EINVAL); - if (obj->funcs) - sgt = obj->funcs->get_sg_table(obj); - else - sgt = obj->dev->driver->gem_prime_get_sg_table(obj); + if (WARN_ON(!obj->funcs->get_sg_table)) + return ERR_PTR(-ENOSYS); - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { + sgt = obj->funcs->get_sg_table(obj); + if (IS_ERR(sgt)) + return sgt; + + ret = dma_map_sgtable(attach->dev, sgt, dir, + DMA_ATTR_SKIP_CPU_SYNC); + if (ret) { sg_free_table(sgt); kfree(sgt); - sgt = ERR_PTR(-ENOMEM); + sgt = ERR_PTR(ret); } return sgt; @@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, if (!sgt) return; - dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); kfree(sgt); } @@ -662,38 +663,41 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf); /** * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM * @dma_buf: buffer to be mapped + * @map: the virtual address of the buffer * * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. * * Returns the kernel virtual address or NULL on failure. 
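The drm_gem_map_dma_buf() conversion above leans on a property of dma_map_sgtable(): besides returning a proper errno, on success it stores the number of DMA segments in sgt->nents while sgt->orig_nents keeps the CPU-side entry count. Consumers therefore walk the mapped table with the sgtable iterators rather than open-coding for_each_sg() over the wrong count. A sketch, with foo_program_iova() standing in for whatever the driver does per DMA chunk:

	static int foo_map_for_hw(struct device *dev, struct sg_table *sgt)
	{
		struct scatterlist *sg;
		int ret, i;

		ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		if (ret)
			return ret;

		/* walks sgt->nents (mapped segments), not sgt->orig_nents */
		for_each_sgtable_dma_sg(sgt, sg, i)
			foo_program_iova(sg_dma_address(sg), sg_dma_len(sg));

		return 0;
	}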
*/ -void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_gem_object *obj = dma_buf->priv; void *vaddr; vaddr = drm_gem_vmap(obj); if (IS_ERR(vaddr)) - vaddr = NULL; + return PTR_ERR(vaddr); + + dma_buf_map_set_vaddr(map, vaddr); - return vaddr; + return 0; } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); /** * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM * @dma_buf: buffer to be unmapped - * @vaddr: the virtual address of the buffer + * @map: the virtual address of the buffer * * Releases a kernel virtual mapping. This can be used as the * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. */ -void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_gem_object *obj = dma_buf->priv; - drm_gem_vunmap(obj, vaddr); + drm_gem_vunmap(obj, map->vaddr); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); @@ -793,6 +797,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { /** * drm_prime_pages_to_sg - converts a page array into an sg list + * @dev: DRM device * @pages: pointer to the array of page pointers to convert * @nr_pages: length of the page vector * @@ -802,30 +807,65 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { * * This is useful for implementing &drm_gem_object_funcs.get_sg_table. */ -struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages) +struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages) { - struct sg_table *sg = NULL; - int ret; + struct sg_table *sg; + struct scatterlist *sge; + size_t max_segment = 0; sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!sg) { - ret = -ENOMEM; - goto out; + if (!sg) + return ERR_PTR(-ENOMEM); + + if (dev) + max_segment = dma_max_mapping_size(dev->dev); + if (max_segment == 0) + max_segment = UINT_MAX; + sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, + nr_pages << PAGE_SHIFT, + max_segment, + NULL, 0, GFP_KERNEL); + if (IS_ERR(sge)) { + kfree(sg); + sg = ERR_CAST(sge); } - - ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, GFP_KERNEL); - if (ret) - goto out; - return sg; -out: - kfree(sg); - return ERR_PTR(ret); } EXPORT_SYMBOL(drm_prime_pages_to_sg); /** + * drm_prime_get_contiguous_size - returns the contiguous size of the buffer + * @sgt: sg_table describing the buffer to check + * + * This helper calculates the contiguous size in the DMA address space + * of the buffer described by the provided sg_table. + * + * This is useful for implementing + * &drm_gem_object_funcs.gem_prime_import_sg_table.
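drm_prime_get_contiguous_size(), defined just below, gives import paths one test to replace the open-coded scatterlist walks deleted from the CMA and Exynos helpers in this series. A sketch of the shared pattern in a hypothetical import callback:

	static struct drm_gem_object *
	foo_import_sg_table(struct drm_device *dev,
			    struct dma_buf_attachment *attach,
			    struct sg_table *sgt)
	{
		if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
			return ERR_PTR(-EINVAL);	/* not contiguous in IOVA space */

		/* the whole buffer is one DMA chunk at sg_dma_address(sgt->sgl) */
		return foo_wrap_dma_addr(dev, sg_dma_address(sgt->sgl),
					 attach->dmabuf->size);	/* hypothetical */
	}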
+ */ +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt) +{ + dma_addr_t expected = sg_dma_address(sgt->sgl); + struct scatterlist *sg; + unsigned long size = 0; + int i; + + for_each_sgtable_dma_sg(sgt, sg, i) { + unsigned int len = sg_dma_len(sg); + + if (!len) + break; + if (sg_dma_address(sg) != expected) + break; + expected += len; + size += len; + } + return size; +} +EXPORT_SYMBOL(drm_prime_get_contiguous_size); + +/** * drm_gem_prime_export - helper library implementation of the export callback * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR @@ -959,45 +999,26 @@ EXPORT_SYMBOL(drm_gem_prime_import); int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, dma_addr_t *addrs, int max_entries) { - unsigned count; - struct scatterlist *sg; - struct page *page; - u32 page_len, page_index; - dma_addr_t addr; - u32 dma_len, dma_index; - - /* - * Scatterlist elements contains both pages and DMA addresses, but - * one shoud not assume 1:1 relation between them. The sg->length is - * the size of the physical memory chunk described by the sg->page, - * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk - * described by the sg_dma_address(sg). - */ - page_index = 0; - dma_index = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - page_len = sg->length; - page = sg_page(sg); - dma_len = sg_dma_len(sg); - addr = sg_dma_address(sg); - - while (pages && page_len > 0) { - if (WARN_ON(page_index >= max_entries)) + struct sg_dma_page_iter dma_iter; + struct sg_page_iter page_iter; + struct page **p = pages; + dma_addr_t *a = addrs; + + if (pages) { + for_each_sgtable_page(sgt, &page_iter, 0) { + if (WARN_ON(p - pages >= max_entries)) return -1; - pages[page_index] = page; - page++; - page_len -= PAGE_SIZE; - page_index++; + *p++ = sg_page_iter_page(&page_iter); } - while (addrs && dma_len > 0) { - if (WARN_ON(dma_index >= max_entries)) + } + if (addrs) { + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + if (WARN_ON(a - addrs >= max_entries)) return -1; - addrs[dma_index] = addr; - addr += PAGE_SIZE; - dma_len -= PAGE_SIZE; - dma_index++; + *a++ = sg_page_iter_dma_address(&dma_iter); } } + return 0; } EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 74946690aba4..743e57c1b44f 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -86,20 +86,22 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc, } static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - bool has_primary = state->plane_mask & + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + bool has_primary = crtc_state->plane_mask & drm_plane_mask(crtc->primary); /* We always want to have an active plane with an active CRTC */ - if (has_primary != state->enable) + if (has_primary != crtc_state->enable) return -EINVAL; - return drm_atomic_add_affected_planes(state->state, crtc); + return drm_atomic_add_affected_planes(state, crtc); } static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_plane *plane; struct drm_simple_display_pipe *pipe; @@ -113,7 +115,7 @@ static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc, } static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct 
drm_atomic_state *state) { struct drm_simple_display_pipe *pipe; diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index b18e1efbbae1..f135b79593dd 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -674,7 +674,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in - * drm_atomic_helper_update_legacy_modeset_state(). + * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * @@ -819,7 +819,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal); * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in - * drm_atomic_helper_update_legacy_modeset_state(). + * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index a9a3afaef9a1..aa270b79e585 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -468,12 +468,6 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = { ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW), }; -static const struct vm_operations_struct vm_ops = { - .fault = etnaviv_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct file_operations fops = { .owner = THIS_MODULE, .open = drm_open, @@ -490,16 +484,9 @@ static struct drm_driver etnaviv_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, .open = etnaviv_open, .postclose = etnaviv_postclose, - .gem_free_object_unlocked = etnaviv_gem_free_object, - .gem_vm_ops = &vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_pin = etnaviv_gem_prime_pin, - .gem_prime_unpin = etnaviv_gem_prime_unpin, - .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table, - .gem_prime_vmap = etnaviv_gem_prime_vmap, - .gem_prime_vunmap = etnaviv_gem_prime_vunmap, .gem_prime_mmap = etnaviv_gem_prime_mmap, #ifdef CONFIG_DEBUG_FS .debugfs_init = etnaviv_debugfs_init, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 4d8dc9236e5f..914f0867ff71 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -49,7 +49,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); -vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf); int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index f06e19e7be04..67d9a2b9ea6a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj) * because display controller, GPU, etc. are not coherent. 
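etnaviv's cache maintenance moves to the table-level helpers as well; they bracket CPU access to a cached BO exactly like the old dma_sync_sg_for_*() calls, minus the easy-to-get-wrong nents argument. A sketch of the bracket, assuming an already-mapped sg_table:

	static void foo_begin_cpu_access(struct device *dev, struct sg_table *sgt)
	{
		dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
		/* CPU may now read/write the pages */
	}

	static void foo_end_cpu_access(struct device *dev, struct sg_table *sgt)
	{
		dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
	}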
*/ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) - dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0); } static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj) @@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj * discard those writes. */ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) - dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0); } /* called with etnaviv_obj->lock held */ @@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj) int npages = etnaviv_obj->base.size >> PAGE_SHIFT; struct sg_table *sgt; - sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages); + sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev, + etnaviv_obj->pages, npages); if (IS_ERR(sgt)) { dev_err(dev->dev, "failed to allocate sgt: %ld\n", PTR_ERR(sgt)); @@ -170,7 +171,7 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma) return obj->ops->mmap(obj, vma); } -vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf) +static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; @@ -404,9 +405,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (etnaviv_obj->flags & ETNA_BO_CACHED) { - dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, - etnaviv_obj->sgt->nents, - etnaviv_op_to_dma_dir(op)); + dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt, + etnaviv_op_to_dma_dir(op)); etnaviv_obj->last_cpu_prep_op = op; } @@ -421,8 +421,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj) if (etnaviv_obj->flags & ETNA_BO_CACHED) { /* fini without a prep is almost certainly a userspace error */ WARN_ON(etnaviv_obj->last_cpu_prep_op == 0); - dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl, - etnaviv_obj->sgt->nents, + dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt, etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op)); etnaviv_obj->last_cpu_prep_op = 0; } @@ -560,6 +559,22 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj) mutex_unlock(&priv->gem_lock); } +static const struct vm_operations_struct vm_ops = { + .fault = etnaviv_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = { + .free = etnaviv_gem_free_object, + .pin = etnaviv_gem_prime_pin, + .unpin = etnaviv_gem_prime_unpin, + .get_sg_table = etnaviv_gem_prime_get_sg_table, + .vmap = etnaviv_gem_prime_vmap, + .vunmap = etnaviv_gem_prime_vunmap, + .vm_ops = &vm_ops, +}; + static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj) { @@ -594,6 +609,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, INIT_LIST_HEAD(&etnaviv_obj->vram_list); *obj = &etnaviv_obj->base; + (*obj)->funcs = &etnaviv_gem_object_funcs; return 0; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 6d9e5c3c4dd5..135fbff6fecf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! 
*/ return ERR_PTR(-EINVAL); - return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); + return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); } void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) @@ -70,9 +70,10 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) { + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr); + if (etnaviv_obj->vaddr) - dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, - etnaviv_obj->vaddr); + dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map); /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: @@ -85,9 +86,15 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) { + struct dma_buf_map map; + int ret; + lockdep_assert_held(&etnaviv_obj->lock); - return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf); + ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); + if (ret) + return NULL; + return map.vaddr; } static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 3607d348c298..15d9fa3879e5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, struct sg_table *sgt, unsigned len, int prot) { struct scatterlist *sg; unsigned int da = iova; - unsigned int i, j; + unsigned int i; int ret; if (!context || !sgt) return -EINVAL; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_dma_sg(sgt, sg, i) { u32 pa = sg_dma_address(sg) - sg->offset; size_t bytes = sg_dma_len(sg) + sg->offset; @@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, return 0; fail: - da = iova; - - for_each_sg(sgt->sgl, sg, i, j) { - size_t bytes = sg_dma_len(sg) + sg->offset; - - etnaviv_context_unmap(context, da, bytes); - da += bytes; - } + etnaviv_context_unmap(context, iova, da - iova); return ret; } @@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, unsigned int da = iova; int i; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_dma_sg(sgt, sg, i) { size_t bytes = sg_dma_len(sg) + sg->offset; etnaviv_context_unmap(context, da, bytes); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 1c03485676ef..4153f302de7c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -19,7 +19,7 @@ #include "exynos_drm_plane.h" static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); @@ -30,7 +30,7 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc, } static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); @@ -49,21 +49,23 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc, } static int exynos_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state 
*crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - if (!state->enable) + if (!crtc_state->enable) return 0; if (exynos_crtc->ops->atomic_check) - return exynos_crtc->ops->atomic_check(exynos_crtc, state); + return exynos_crtc->ops->atomic_check(exynos_crtc, crtc_state); return 0; } static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); @@ -72,7 +74,7 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, } static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index 58b89ec11b0e..0644936afee2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -5,7 +5,7 @@ // Author: Andrzej Hajda <a.hajda@samsung.com> #include <linux/dma-iommu.h> -#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> #include <linux/iommu.h> #include <linux/platform_device.h> @@ -31,23 +31,6 @@ #define EXYNOS_DEV_ADDR_START 0x20000000 #define EXYNOS_DEV_ADDR_SIZE 0x40000000 -static inline int configure_dma_max_seg_size(struct device *dev) -{ - if (!dev->dma_parms) - dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; - - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - return 0; -} - -static inline void clear_dma_max_seg_size(struct device *dev) -{ - kfree(dev->dma_parms); - dev->dma_parms = NULL; -} - /* * drm_iommu_attach_device- attach device to iommu mapping * @@ -69,10 +52,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, return -EINVAL; } - ret = configure_dma_max_seg_size(subdrv_dev); - if (ret) - return ret; - + dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32)); if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) { /* * Keep the original DMA mapping of the sub-device and @@ -89,9 +69,6 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev, ret = iommu_attach_device(priv->mapping, subdrv_dev); } - if (ret) - clear_dma_max_seg_size(subdrv_dev); - return ret; } @@ -114,8 +91,6 @@ static void drm_iommu_detach_device(struct drm_device *drm_dev, arm_iommu_attach_device(subdrv_dev, *dma_priv); } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) iommu_detach_device(priv->mapping, subdrv_dev); - - clear_dma_max_seg_size(subdrv_dev); } int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index dbd80f1e4c78..fe46680ca208 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -75,11 +75,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) file->driver_priv = NULL; } -static const struct vm_operations_struct exynos_drm_gem_vm_ops = { - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct drm_ioctl_desc exynos_ioctls[] = { DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, DRM_RENDER_ALLOW), @@ -124,16 +119,11 @@ static struct drm_driver exynos_drm_driver = { .open = exynos_drm_open, .lastclose = drm_fb_helper_lastclose, .postclose = exynos_drm_postclose, - .gem_free_object_unlocked = exynos_drm_gem_free_object, - .gem_vm_ops = 
&exynos_drm_gem_vm_ops, .dumb_create = exynos_drm_gem_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = exynos_drm_gem_prime_import, - .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table, .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, - .gem_prime_vmap = exynos_drm_gem_prime_vmap, - .gem_prime_vunmap = exynos_drm_gem_prime_vunmap, .gem_prime_mmap = exynos_drm_gem_prime_mmap, .ioctls = exynos_ioctls, .num_ioctls = ARRAY_SIZE(exynos_ioctls), diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 1a1a2853a842..5b9666fc7af1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1760,11 +1760,8 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->supplies[1].supply = "vddio"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), dsi->supplies); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_info(dev, "failed to get regulators: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks, sizeof(*dsi->clks), diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 03be31427181..967a5cdc120e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d, return; out: - dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl, - g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt, + DMA_BIDIRECTIONAL, 0); pages = frame_vector_pages(g2d_userptr->vec); if (!IS_ERR(pages)) { @@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, g2d_userptr->sgt = sgt; - if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents, - DMA_BIDIRECTIONAL)) { + ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt, + DMA_BIDIRECTIONAL, 0); + if (ret) { DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n"); - ret = -ENOMEM; goto err_sg_free_table; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index efa476858db5..4afbf5109cbf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -42,8 +42,6 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap) if (exynos_gem->flags & EXYNOS_BO_WC || !(exynos_gem->flags & EXYNOS_BO_CACHABLE)) attr |= DMA_ATTR_WRITE_COMBINE; - else - attr |= DMA_ATTR_NON_CONSISTENT; /* FBDev emulation requires kernel mapping */ if (!kvmap) @@ -129,6 +127,19 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem) kfree(exynos_gem); } +static const struct vm_operations_struct exynos_drm_gem_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = { + .free = exynos_drm_gem_free_object, + .get_sg_table = exynos_drm_gem_prime_get_sg_table, + .vmap = exynos_drm_gem_prime_vmap, + .vunmap = exynos_drm_gem_prime_vunmap, + .vm_ops = &exynos_drm_gem_vm_ops, +}; + static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { @@ -143,6 +154,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, exynos_gem->size = size; obj = &exynos_gem->base; + obj->funcs = 
&exynos_drm_gem_object_funcs; + ret = drm_gem_object_init(dev, obj, size); if (ret < 0) { DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n"); @@ -431,27 +444,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, { struct exynos_drm_gem *exynos_gem; - if (sgt->nents < 1) + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) { + DRM_ERROR("buffer chunks must be mapped contiguously"); return ERR_PTR(-EINVAL); - - /* - * Check if the provided buffer has been mapped as contiguous - * into DMA address space. - */ - if (sgt->nents > 1) { - dma_addr_t next_addr = sg_dma_address(sgt->sgl); - struct scatterlist *s; - unsigned int i; - - for_each_sg(sgt->sgl, s, sgt->nents, i) { - if (!sg_dma_len(s)) - break; - if (sg_dma_address(s) != next_addr) { - DRM_ERROR("buffer chunks must be mapped contiguously"); - return ERR_PTR(-EINVAL); - } - next_addr = sg_dma_address(s) + sg_dma_len(s); - } } exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index c5ba32fca5f3..dc01c188c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1797,11 +1797,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata) hdata->regul_bulk[i].supply = supply[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk); - if (ret) { - if (ret != -EPROBE_DEFER) - DRM_DEV_ERROR(dev, "failed to get regulators\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index b9ca81a6f80f..2af60d98f48f 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -21,7 +21,7 @@ #include "fsl_dcu_drm_plane.h" static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; @@ -43,8 +43,10 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, } static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; @@ -62,7 +64,7 @@ static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, } static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 720a767118c9..bfd9a15d63b1 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -1501,8 +1501,7 @@ cdv_intel_dp_start_link_train(struct gma_encoder *encoder) clock_recovery = false; DRM_DEBUG_KMS("Start train\n"); - reg = DP | DP_LINK_TRAIN_PAT_1; - + reg = DP | DP_LINK_TRAIN_PAT_1; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ @@ -1575,7 +1574,7 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder) 
cr_tries = 0; DRM_DEBUG_KMS("\n"); - reg = DP | DP_LINK_TRAIN_PAT_2; + reg = DP | DP_LINK_TRAIN_PAT_2; for (;;) { @@ -2083,7 +2082,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev DRM_INFO("failed to retrieve link info, disabling eDP\n"); drm_encoder_cleanup(encoder); cdv_intel_dp_destroy(connector); - goto err_priv; + goto err_connector; } else { DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", intel_dp->dpcd[0], intel_dp->dpcd[1], diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index da02d7e8a8f5..5ede24fb44ae 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -24,6 +24,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include "framebuffer.h" +#include "gem.h" #include "gtt.h" #include "psb_drv.h" #include "psb_intel_drv.h" @@ -164,7 +165,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) return 0; } -static struct fb_ops psbfb_ops = { +static const struct fb_ops psbfb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_setcolreg = psbfb_setcolreg, @@ -175,7 +176,7 @@ static struct fb_ops psbfb_ops = { .fb_sync = psbfb_sync, }; -static struct fb_ops psbfb_roll_ops = { +static const struct fb_ops psbfb_roll_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_setcolreg = psbfb_setcolreg, @@ -186,7 +187,7 @@ static struct fb_ops psbfb_roll_ops = { .fb_mmap = psbfb_mmap, }; -static struct fb_ops psbfb_unaccel_ops = { +static const struct fb_ops psbfb_unaccel_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_setcolreg = psbfb_setcolreg, @@ -285,6 +286,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size) /* Begin by trying to use stolen memory backing */ backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE); if (backing) { + backing->gem.funcs = &psb_gem_object_funcs; drm_gem_private_object_init(dev, &backing->gem, aligned_size); return backing; } diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index f9c4b1d76f56..8f07de83b6fb 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -18,7 +18,9 @@ #include "psb_drv.h" -void psb_gem_free_object(struct drm_gem_object *obj) +static vm_fault_t psb_gem_fault(struct vm_fault *vmf); + +static void psb_gem_free_object(struct drm_gem_object *obj) { struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); @@ -36,6 +38,17 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data, return -EINVAL; } +static const struct vm_operations_struct psb_gem_vm_ops = { + .fault = psb_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +const struct drm_gem_object_funcs psb_gem_object_funcs = { + .free = psb_gem_free_object, + .vm_ops = &psb_gem_vm_ops, +}; + /** * psb_gem_create - create a mappable object * @file: the DRM file of the client @@ -63,6 +76,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, dev_err(dev->dev, "no memory for %lld byte GEM object\n", size); return -ENOSPC; } + r->gem.funcs = &psb_gem_object_funcs; /* Initialize the extra goodies GEM needs to do all the hard work */ if (drm_gem_object_init(dev, &r->gem, size) != 0) { psb_gtt_free_range(dev, r); @@ -123,7 +137,7 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, * vma->vm_private_data points to the GEM object that is backing this * mapping. 
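Both the etnaviv and gma500 fault handlers become static because they are now reachable only through drm_gem_object_funcs.vm_ops. The shape such a handler takes, sketched with hypothetical foo_* names for a page-array-backed object:

	static vm_fault_t foo_gem_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct drm_gem_object *obj = vma->vm_private_data;
		struct foo_gem_object *bo = to_foo_gem(obj);	/* hypothetical */
		pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

		if (pgoff >= obj->size >> PAGE_SHIFT)
			return VM_FAULT_SIGBUS;

		return vmf_insert_page(vma, vmf->address, bo->pages[pgoff]);
	}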
*/ -vm_fault_t psb_gem_fault(struct vm_fault *vmf) +static vm_fault_t psb_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj; diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h index 4a74dc623b6b..3741a711b9fd 100644 --- a/drivers/gpu/drm/gma500/gem.h +++ b/drivers/gpu/drm/gma500/gem.h @@ -8,6 +8,9 @@ #ifndef _GEM_H #define _GEM_H +extern const struct drm_gem_object_funcs psb_gem_object_funcs; + extern int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, u32 *handlep, int stolen, u32 align); + #endif diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 34b4aae9a15e..b13376a6fb91 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -480,12 +480,6 @@ static const struct dev_pm_ops psb_pm_ops = { .runtime_idle = psb_runtime_idle, }; -static const struct vm_operations_struct psb_gem_vm_ops = { - .fault = psb_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct file_operations psb_gem_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -507,9 +501,6 @@ static struct drm_driver driver = { .irq_uninstall = psb_irq_uninstall, .irq_handler = psb_irq_handler, - .gem_free_object_unlocked = psb_gem_free_object, - .gem_vm_ops = &psb_gem_vm_ops, - .dumb_create = psb_gem_dumb_create, .ioctls = psb_ioctls, .fops = &psb_gem_fops, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 956926341316..c71a5a4e912c 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -735,12 +735,10 @@ extern const struct drm_connector_helper_funcs extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; /* gem.c */ -extern void psb_gem_free_object(struct drm_gem_object *obj); extern int psb_gem_get_aperture(struct drm_device *dev, void *data, struct drm_file *file); extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); -extern vm_fault_t psb_gem_fault(struct vm_fault *vmf); /* psb_device.c */ extern const struct psb_ops psb_chip_ops; diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile index f99132715597..684ef794eb7c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Makefile +++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o +hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index 4d57ec688f82..ea962acfeae0 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -23,15 +23,15 @@ #include "hibmc_drm_regs.h" struct hibmc_display_panel_pll { - unsigned long M; - unsigned long N; - unsigned long OD; - unsigned long POD; + u64 M; + u64 N; + u64 OD; + u64 POD; }; struct hibmc_dislay_pll_config { - unsigned long hdisplay; - unsigned long vdisplay; + u64 hdisplay; + u64 vdisplay; u32 pll1_config_value; u32 pll2_config_value; }; @@ -52,8 +52,6 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = { {1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ}, }; -#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1))) - static int hibmc_plane_atomic_check(struct 
drm_plane *plane, struct drm_plane_state *state) { @@ -104,8 +102,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *state = plane->state; u32 reg; s64 gpu_addr = 0; - unsigned int line_l; - struct hibmc_drm_private *priv = plane->dev->dev_private; + u32 line_l; + struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev); struct drm_gem_vram_object *gbo; if (!state->fb) @@ -141,7 +139,7 @@ static const u32 channel_formats1[] = { DRM_FORMAT_ABGR8888 }; -static struct drm_plane_funcs hibmc_plane_funcs = { +static const struct drm_plane_funcs hibmc_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, @@ -157,10 +155,10 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = { .atomic_update = hibmc_plane_atomic_update, }; -static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms) +static void hibmc_crtc_dpms(struct drm_crtc *crtc, u32 dpms) { - struct hibmc_drm_private *priv = crtc->dev->dev_private; - unsigned int reg; + struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev); + u32 reg; reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL); reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK; @@ -172,10 +170,10 @@ static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms) } static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { - unsigned int reg; - struct hibmc_drm_private *priv = crtc->dev->dev_private; + u32 reg; + struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev); hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0); @@ -191,10 +189,10 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc, } static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { - unsigned int reg; - struct hibmc_drm_private *priv = crtc->dev->dev_private; + u32 reg; + struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev); hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF); drm_crtc_vblank_off(crtc); @@ -214,7 +212,7 @@ static enum drm_mode_status hibmc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) { - int i = 0; + size_t i = 0; int vrefresh = drm_mode_vrefresh(mode); if (vrefresh < 59 || vrefresh > 61) @@ -229,9 +227,9 @@ hibmc_crtc_mode_valid(struct drm_crtc *crtc, return MODE_BAD; } -static unsigned int format_pll_reg(void) +static u32 format_pll_reg(void) { - unsigned int pllreg = 0; + u32 pllreg = 0; struct hibmc_display_panel_pll pll = {0}; /* @@ -251,10 +249,10 @@ static unsigned int format_pll_reg(void) return pllreg; } -static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll) +static void set_vclock_hisilicon(struct drm_device *dev, u64 pll) { u32 val; - struct hibmc_drm_private *priv = dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); val = readl(priv->mmio + CRT_PLL1_HS); val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1)); @@ -281,11 +279,10 @@ static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll) writel(val, priv->mmio + CRT_PLL1_HS); } -static void get_pll_config(unsigned long x, unsigned long y, - u32 *pll1, u32 *pll2) +static void get_pll_config(u64 x, u64 y, u32 *pll1, u32 *pll2) { - int i; - int count = ARRAY_SIZE(hibmc_pll_table); + size_t i; + size_t count = ARRAY_SIZE(hibmc_pll_table); for (i = 0; i < count; i++) { if (hibmc_pll_table[i].hdisplay == x && @@ -308,14 +305,14 @@ static void 
get_pll_config(unsigned long x, unsigned long y, * FPGA only supports 7 predefined pixel clocks, and clock select is * in bit 4:0 of new register 0x802a8. */ -static unsigned int display_ctrl_adjust(struct drm_device *dev, - struct drm_display_mode *mode, - unsigned int ctrl) +static u32 display_ctrl_adjust(struct drm_device *dev, + struct drm_display_mode *mode, + u32 ctrl) { - unsigned long x, y; + u64 x, y; u32 pll1; /* bit[31:0] of PLL */ u32 pll2; /* bit[63:32] of PLL */ - struct hibmc_drm_private *priv = dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); x = mode->hdisplay; y = mode->vdisplay; @@ -360,12 +357,12 @@ static unsigned int display_ctrl_adjust(struct drm_device *dev, static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc) { - unsigned int val; + u32 val; struct drm_display_mode *mode = &crtc->state->mode; struct drm_device *dev = crtc->dev; - struct hibmc_drm_private *priv = dev->dev_private; - int width = mode->hsync_end - mode->hsync_start; - int height = mode->vsync_end - mode->vsync_start; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); + u32 width = mode->hsync_end - mode->hsync_start; + u32 height = mode->vsync_end - mode->vsync_start; writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL); writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) | @@ -393,11 +390,11 @@ static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { - unsigned int reg; + u32 reg; struct drm_device *dev = crtc->dev; - struct hibmc_drm_private *priv = dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0); @@ -413,7 +410,7 @@ static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc, } static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { unsigned long flags; @@ -427,7 +424,7 @@ static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc, static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc) { - struct hibmc_drm_private *priv = crtc->dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev); writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1), priv->mmio + HIBMC_RAW_INTERRUPT_EN); @@ -437,7 +434,7 @@ static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc) static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc) { - struct hibmc_drm_private *priv = crtc->dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev); writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0), priv->mmio + HIBMC_RAW_INTERRUPT_EN); @@ -445,18 +442,18 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc) static void hibmc_crtc_load_lut(struct drm_crtc *crtc) { - struct hibmc_drm_private *priv = crtc->dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev); void __iomem *mmio = priv->mmio; u16 *r, *g, *b; - unsigned int reg; - int i; + u32 reg; + u32 i; r = crtc->gamma_store; g = r + crtc->gamma_size; b = g + crtc->gamma_size; for (i = 0; i < crtc->gamma_size; i++) { - unsigned int offset = i << 2; + u32 offset = i << 2; u8 red = *r++ >> 8; u8 green = *g++ >> 8; u8 blue = *b++ >> 8; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 085d1b2fa8c0..fee6fe810e74 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ 
b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -29,8 +29,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops); static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) { struct drm_device *dev = (struct drm_device *)arg; - struct hibmc_drm_private *priv = - (struct hibmc_drm_private *)dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); u32 status; status = readl(priv->mmio + HIBMC_RAW_INTERRUPT); @@ -122,12 +121,11 @@ static void hibmc_kms_fini(struct hibmc_drm_private *priv) /* * It can operate in one of three modes: 0, 1 or Sleep. */ -void hibmc_set_power_mode(struct hibmc_drm_private *priv, - unsigned int power_mode) +void hibmc_set_power_mode(struct hibmc_drm_private *priv, u32 power_mode) { - unsigned int control_value = 0; + u32 control_value = 0; void __iomem *mmio = priv->mmio; - unsigned int input = 1; + u32 input = 1; if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP) return; @@ -145,8 +143,8 @@ void hibmc_set_power_mode(struct hibmc_drm_private *priv, void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate) { - unsigned int gate_reg; - unsigned int mode; + u32 gate_reg; + u32 mode; void __iomem *mmio = priv->mmio; /* Get current power mode. */ @@ -171,7 +169,7 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate) static void hibmc_hw_config(struct hibmc_drm_private *priv) { - unsigned int reg; + u32 reg; /* On hardware reset, power mode 0 is default. */ hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0); @@ -244,7 +242,7 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv) static int hibmc_unload(struct drm_device *dev) { - struct hibmc_drm_private *priv = dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); drm_atomic_helper_shutdown(dev); @@ -371,7 +369,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev) drm_dev_put(dev); } -static struct pci_device_id hibmc_pci_table[] = { +static const struct pci_device_id hibmc_pci_table[] = { { PCI_VDEVICE(HUAWEI, 0x1711) }, {0,} }; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index 197485e2fe0b..f310a83d9c48 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -14,31 +14,51 @@ #ifndef HIBMC_DRM_DRV_H #define HIBMC_DRM_DRV_H +#include <linux/gpio/consumer.h> +#include <linux/i2c-algo-bit.h> +#include <linux/i2c.h> + +#include <drm/drm_edid.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> -struct drm_device; +struct hibmc_connector { + struct drm_connector base; + + struct i2c_adapter adapter; + struct i2c_algo_bit_data bit_data; +}; struct hibmc_drm_private { /* hw */ void __iomem *mmio; void __iomem *fb_map; - unsigned long fb_base; - unsigned long fb_size; + resource_size_t fb_base; + resource_size_t fb_size; /* drm */ struct drm_device *dev; struct drm_plane primary_plane; struct drm_crtc crtc; struct drm_encoder encoder; - struct drm_connector connector; + struct hibmc_connector connector; bool mode_config_initialized; }; +static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector) +{ + return container_of(connector, struct hibmc_connector, base); +} + +static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev) +{ + return dev->dev_private; +} + void hibmc_set_power_mode(struct hibmc_drm_private *priv, - unsigned int power_mode); + u32 power_mode); void hibmc_set_current_gate(struct hibmc_drm_private *priv, - unsigned int 
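/*
 * Annotation (not part of the patch): the new to_hibmc_drm_private()
 * and to_hibmc_connector() helpers replace scattered dev->dev_private
 * casts with typed accessors. The former keeps the cast in one place;
 * the latter is the usual container_of() upcast, which works because
 * struct hibmc_connector embeds its drm_connector as the base member.
 */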
gate); + u32 gate); int hibmc_de_init(struct hibmc_drm_private *priv); int hibmc_vdac_init(struct hibmc_drm_private *priv); @@ -47,6 +67,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc); void hibmc_mm_fini(struct hibmc_drm_private *hibmc); int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); +int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector); extern const struct drm_mode_config_funcs hibmc_mode_funcs; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c new file mode 100644 index 000000000000..86d712090d87 --- /dev/null +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Hisilicon Hibmc SoC drm driver + * + * Based on the bochs drm driver. + * + * Copyright (c) 2016 Huawei Limited. + * + * Author: + * Tian Tao <tiantao6@hisilicon.com> + */ + +#include <linux/delay.h> +#include <linux/pci.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_probe_helper.h> + +#include "hibmc_drm_drv.h" + +#define GPIO_DATA 0x0802A0 +#define GPIO_DATA_DIRECTION 0x0802A4 + +#define I2C_SCL_MASK BIT(0) +#define I2C_SDA_MASK BIT(1) + +static void hibmc_set_i2c_signal(void *data, u32 mask, int value) +{ + struct hibmc_connector *hibmc_connector = data; + struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev); + u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION); + + if (value) { + tmp_dir &= ~mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } else { + u32 tmp_data = readl(priv->mmio + GPIO_DATA); + + tmp_data &= ~mask; + writel(tmp_data, priv->mmio + GPIO_DATA); + + tmp_dir |= mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } +} + +static int hibmc_get_i2c_signal(void *data, u32 mask) +{ + struct hibmc_connector *hibmc_connector = data; + struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev); + u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION); + + if ((tmp_dir & mask) != mask) { + tmp_dir &= ~mask; + writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION); + } + + return (readl(priv->mmio + GPIO_DATA) & mask) ? 
1 : 0; +} + +static void hibmc_ddc_setsda(void *data, int state) +{ + hibmc_set_i2c_signal(data, I2C_SDA_MASK, state); +} + +static void hibmc_ddc_setscl(void *data, int state) +{ + hibmc_set_i2c_signal(data, I2C_SCL_MASK, state); +} + +static int hibmc_ddc_getsda(void *data) +{ + return hibmc_get_i2c_signal(data, I2C_SDA_MASK); +} + +static int hibmc_ddc_getscl(void *data) +{ + return hibmc_get_i2c_signal(data, I2C_SCL_MASK); +} + +int hibmc_ddc_create(struct drm_device *drm_dev, + struct hibmc_connector *connector) +{ + connector->adapter.owner = THIS_MODULE; + connector->adapter.class = I2C_CLASS_DDC; + snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus"); + connector->adapter.dev.parent = &drm_dev->pdev->dev; + i2c_set_adapdata(&connector->adapter, connector); + connector->adapter.algo_data = &connector->bit_data; + + connector->bit_data.udelay = 20; + connector->bit_data.timeout = usecs_to_jiffies(2000); + connector->bit_data.data = connector; + connector->bit_data.setsda = hibmc_ddc_setsda; + connector->bit_data.setscl = hibmc_ddc_setscl; + connector->bit_data.getsda = hibmc_ddc_getsda; + connector->bit_data.getscl = hibmc_ddc_getscl; + + return i2c_bit_add_bus(&connector->adapter); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index 376a05ddbc2f..74e26c27d878 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -21,21 +21,41 @@ static int hibmc_connector_get_modes(struct drm_connector *connector) { int count; + void *edid; + struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector); + + edid = drm_get_edid(connector, &hibmc_connector->adapter); + if (edid) { + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + if (count) + goto out; + } count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width, connector->dev->mode_config.max_height); drm_set_preferred_mode(connector, 1024, 768); +out: + kfree(edid); return count; } static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { return MODE_OK; } +static void hibmc_connector_destroy(struct drm_connector *connector) +{ + struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector); + + i2c_del_adapter(&hibmc_connector->adapter); + drm_connector_cleanup(connector); +} + static const struct drm_connector_helper_funcs hibmc_connector_helper_funcs = { .get_modes = hibmc_connector_get_modes, @@ -44,7 +64,7 @@ static const struct drm_connector_helper_funcs static const struct drm_connector_funcs hibmc_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, + .destroy = hibmc_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -56,7 +76,7 @@ static void hibmc_encoder_mode_set(struct drm_encoder *encoder, { u32 reg; struct drm_device *dev = encoder->dev; - struct hibmc_drm_private *priv = dev->dev_private; + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE); reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1); @@ -77,10 +97,17 @@ static const struct drm_encoder_funcs hibmc_encoder_funcs = { int hibmc_vdac_init(struct hibmc_drm_private *priv) { struct 
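/*
 * Annotation (not part of the patch): hibmc_set_i2c_signal() above
 * emulates an open-drain line with the GPIO direction/data registers:
 * a "1" is produced by switching the pin to input and letting the bus
 * pull-up raise the line, a "0" by latching zero in the data register
 * and switching the pin to output. With bit_data.udelay = 20 (the
 * half-period in microseconds used by i2c-algo-bit), the SCL rate
 * works out to roughly 1 / (2 * 20 us) = 25 kHz.
 */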
drm_device *dev = priv->dev; + struct hibmc_connector *hibmc_connector = &priv->connector; struct drm_encoder *encoder = &priv->encoder; - struct drm_connector *connector = &priv->connector; + struct drm_connector *connector = &hibmc_connector->base; int ret; + ret = hibmc_ddc_create(dev, hibmc_connector); + if (ret) { + drm_err(dev, "failed to create ddc: %d\n", ret); + return ret; + } + encoder->possible_crtcs = 0x1; ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL); @@ -91,12 +118,15 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs); - ret = drm_connector_init(dev, connector, &hibmc_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + ret = drm_connector_init_with_ddc(dev, connector, + &hibmc_connector_funcs, + DRM_MODE_CONNECTOR_VGA, + &hibmc_connector->adapter); if (ret) { drm_err(dev, "failed to init connector: %d\n", ret); return ret; } + drm_connector_helper_add(connector, &hibmc_connector_helper_funcs); drm_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index e1108c1735ad..d84d41f3e78f 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -436,7 +436,7 @@ static void ade_dump_regs(void __iomem *base) { } #endif static void ade_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct kirin_crtc *kcrtc = to_kirin_crtc(crtc); struct ade_hw_ctx *ctx = kcrtc->hw_ctx; @@ -459,7 +459,7 @@ static void ade_crtc_atomic_enable(struct drm_crtc *crtc, } static void ade_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct kirin_crtc *kcrtc = to_kirin_crtc(crtc); struct ade_hw_ctx *ctx = kcrtc->hw_ctx; @@ -485,7 +485,7 @@ static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void ade_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct kirin_crtc *kcrtc = to_kirin_crtc(crtc); struct ade_hw_ctx *ctx = kcrtc->hw_ctx; @@ -498,7 +498,7 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc, } static void ade_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct kirin_crtc *kcrtc = to_kirin_crtc(crtc); diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 303c2d483c6e..88250860f8e4 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -853,11 +853,11 @@ static void i810_dma_quiescent(struct drm_device *dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); } -static int i810_flush_queue(struct drm_device *dev) +static void i810_flush_queue(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; - int i, ret = 0; + int i; RING_LOCALS; i810_kernel_lost_context(dev); @@ -882,7 +882,7 @@ static int i810_flush_queue(struct drm_device *dev) DRM_DEBUG("still on client\n"); } - return ret; + return; } /* Must be called with the lock held */ diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 9afa5c4a6bf0..1e1cb245fca7 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -25,6 +25,7 @@ config DRM_I915 select CRC32 select SND_HDA_I915 if SND_HDA_CORE select CEC_CORE if CEC_NOTIFIER + select VMAP_PFN 
help Choose this option if you have a system that has "Intel Graphics Media Accelerator" or "HD Graphics" integrated graphics, diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 1cb28c20807c..25cd9788a4d5 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -153,6 +153,7 @@ config DRM_I915_SELFTEST select DRM_EXPORT_FOR_TESTS if m select FAULT_INJECTION select PRIME_NUMBERS + select CRC32 help Choose this option to allow the driver to perform selftests upon loading; also requires the i915.selftest=1 module parameter. To diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index bda4c0e408f8..e5574e506a5c 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -234,6 +234,7 @@ i915-y += \ display/intel_ddi.o \ display/intel_dp.o \ display/intel_dp_aux_backlight.o \ + display/intel_dp_hdcp.o \ display/intel_dp_link_training.o \ display/intel_dp_mst.o \ display/intel_dsi.o \ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index f4053dd6bde9..096652921453 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -205,6 +205,32 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host, return 0; } +void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 tmp, mode_flags; + enum port port; + + mode_flags = crtc_state->mode_flags; + + /* + * case 1 also covers dual link + * In case of dual link, frame update should be set on + * DSI_0 + */ + if (mode_flags & I915_MODE_FLAG_DSI_USE_TE0) + port = PORT_A; + else if (mode_flags & I915_MODE_FLAG_DSI_USE_TE1) + port = PORT_B; + else + return; + + tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); + tmp |= DSI_FRAME_UPDATE_REQUEST; + intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); +} + static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -429,7 +455,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ - if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) { + if (IS_JSL_EHL(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) { tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy)); tmp &= ~LATENCY_OPTIM_MASK; @@ -586,7 +612,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, } } - if (IS_ELKHARTLAKE(dev_priv)) { + if (IS_JSL_EHL(dev_priv)) { for_each_dsi_phy(phy, intel_dsi->phys) { tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy)); tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; @@ -1447,6 +1473,18 @@ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi) return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE); } +static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi, + struct intel_crtc_state *pipe_config) +{ + if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A))) + pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1 | + I915_MODE_FLAG_DSI_USE_TE0; + else if (intel_dsi->ports == BIT(PORT_B)) + pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1; + else + pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE0; +} + static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ 
-1468,6 +1506,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); + /* Get the details on which TE should be enabled */ + if (is_cmd_mode(intel_dsi)) + gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config); + if (gen11_dsi_is_periodic_cmd_mode(intel_dsi)) pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; } @@ -1562,18 +1604,8 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, * receive TE from the slave if * dual link is enabled */ - if (is_cmd_mode(intel_dsi)) { - if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A))) - pipe_config->mode_flags |= - I915_MODE_FLAG_DSI_USE_TE1 | - I915_MODE_FLAG_DSI_USE_TE0; - else if (intel_dsi->ports == BIT(PORT_B)) - pipe_config->mode_flags |= - I915_MODE_FLAG_DSI_USE_TE1; - else - pipe_config->mode_flags |= - I915_MODE_FLAG_DSI_USE_TE0; - } + if (is_cmd_mode(intel_dsi)) + gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config); return 0; } @@ -1636,6 +1668,19 @@ out: return ret; } +static bool gen11_dsi_initial_fastset_check(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + if (crtc_state->dsc.compression_enable) { + drm_dbg_kms(encoder->base.dev, "Forcing full modeset due to DSC being enabled\n"); + crtc_state->uapi.mode_changed = true; + + return false; + } + + return true; +} + static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder) { intel_encoder_destroy(encoder); @@ -1646,6 +1691,7 @@ static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = { }; static const struct drm_connector_funcs gen11_dsi_connector_funcs = { + .detect = intel_panel_detect, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, @@ -1890,6 +1936,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->update_pipe = intel_panel_update_backlight; encoder->compute_config = gen11_dsi_compute_config; encoder->get_hw_state = gen11_dsi_get_hw_state; + encoder->initial_fastset_check = gen11_dsi_initial_fastset_check; encoder->type = INTEL_OUTPUT_DSI; encoder->cloneable = 0; encoder->pipe_mask = ~0; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 630f49b7aa01..86be032bcf96 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -527,8 +527,6 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) intel_atomic_clear_global_state(state); state->dpll_set = state->modeset = false; - state->global_state_changed = false; - state->active_pipes = 0; } struct intel_crtc_state * @@ -542,40 +540,3 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, return to_intel_crtc_state(crtc_state); } - -int _intel_atomic_lock_global_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - state->global_state_changed = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - int ret; - - ret = drm_modeset_lock(&crtc->base.mutex, - state->base.acquire_ctx); - if (ret) - return ret; - } - - return 0; -} - -int _intel_atomic_serialize_global_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - state->global_state_changed = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; - - crtc_state = 
intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - } - - return 0; -} diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 11146292b06f..285de07011dc 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -56,8 +56,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state); -int _intel_atomic_lock_global_state(struct intel_atomic_state *state); - -int _intel_atomic_serialize_global_state(struct intel_atomic_state *state); - #endif /* __INTEL_ATOMIC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 79032701873a..3334ff253600 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -262,6 +262,7 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, plane_state->hw.rotation = from_plane_state->uapi.rotation; plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding; plane_state->hw.color_range = from_plane_state->uapi.color_range; + plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter; } void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, @@ -408,7 +409,11 @@ void intel_update_plane(struct intel_plane *plane, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); trace_intel_update_plane(&plane->base, crtc); - plane->update_plane(plane, crtc_state, plane_state); + + if (crtc_state->uapi.async_flip && plane->async_flip) + plane->async_flip(plane, crtc_state, plane_state); + else + plane->update_plane(plane, crtc_state, plane_state); } void intel_disable_plane(struct intel_plane *plane, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index ad4aa66fd676..f7de55707746 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -958,13 +958,8 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state, if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); - cdclk_state->force_min_cdclk_changed = true; cdclk_state->force_min_cdclk = enable ? 
2 * 96000 : 0; - ret = intel_atomic_lock_global_state(&cdclk_state->base); - if (ret) - return ret; - return drm_atomic_commit(&state->base); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index a0a41ec5c341..4cc949b228f2 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -425,6 +425,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, const struct bdb_lfp_backlight_data *backlight_data; const struct lfp_backlight_data_entry *entry; int panel_type = dev_priv->vbt.panel_type; + u16 level; backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); if (!backlight_data) @@ -459,14 +460,39 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; - dev_priv->vbt.backlight.min_brightness = entry->min_brightness; + + if (bdb->version >= 234) { + u16 min_level; + bool scale; + + level = backlight_data->brightness_level[panel_type].level; + min_level = backlight_data->brightness_min_level[panel_type].level; + + if (bdb->version >= 236) + scale = backlight_data->brightness_precision_bits[panel_type] == 16; + else + scale = level > 255; + + if (scale) + min_level = min_level / 255; + + if (min_level > 255) { + drm_warn(&dev_priv->drm, "Brightness min level > 255\n"); + level = 255; + } + dev_priv->vbt.backlight.min_brightness = min_level; + } else { + level = backlight_data->level[panel_type]; + dev_priv->vbt.backlight.min_brightness = entry->min_brightness; + } + drm_dbg_kms(&dev_priv->drm, "VBT backlight PWM modulation frequency %u Hz, " "active %s, min brightness %u, level %u, controller %u\n", dev_priv->vbt.backlight.pwm_freq_hz, dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", dev_priv->vbt.backlight.min_brightness, - backlight_data->level[panel_type], + level, dev_priv->vbt.backlight.controller); } @@ -1602,7 +1628,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) const u8 *ddc_pin_map; int n_entries; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) { + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) { + return vbt_pin; + } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) { ddc_pin_map = icp_ddc_pin_map; n_entries = ARRAY_SIZE(icp_ddc_pin_map); } else if (HAS_PCH_CNP(dev_priv)) { @@ -1656,22 +1684,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv, [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT }, [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 }, [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 }, + [PORT_H] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 }, + [PORT_I] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 }, }; /* - * Bspec lists the ports as A, B, C, D - however internally in our - * driver we keep them as PORT_A, PORT_B, PORT_D and PORT_E so the - * registers in Display Engine match the right offsets. Apply the - * mapping here to translate from VBT to internal convention. + * RKL VBT uses PHY based mapping. Combo PHYs A,B,C,D + * map to DDI A,B,TC1,TC2 respectively. 
*/ static const int rkl_port_mapping[][3] = { [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, [PORT_C] = { -1 }, - [PORT_D] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, - [PORT_E] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, + [PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, + [PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, }; - if (IS_ROCKETLAKE(dev_priv)) + if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping), ARRAY_SIZE(rkl_port_mapping[0]), rkl_port_mapping, @@ -1887,7 +1915,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv, expected_size = 37; } else if (bdb->version <= 215) { expected_size = 38; - } else if (bdb->version <= 229) { + } else if (bdb->version <= 237) { expected_size = 39; } else { expected_size = sizeof(*child); @@ -2133,7 +2161,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv) INIT_LIST_HEAD(&dev_priv->vbt.display_devices); - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) { + if (!HAS_DISPLAY(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "Skipping VBT init due to disabled display.\n"); return; @@ -2636,10 +2664,16 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, aux_ch = AUX_CH_B; break; case DP_AUX_C: - aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_D : AUX_CH_C; + /* + * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D + * map to DDI A,B,TC1,TC2 respectively. + */ + aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ? + AUX_CH_USBC1 : AUX_CH_C; break; case DP_AUX_D: - aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_E : AUX_CH_D; + aux_ch = (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) ? + AUX_CH_USBC2 : AUX_CH_D; break; case DP_AUX_E: aux_ch = AUX_CH_E; @@ -2650,6 +2684,12 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, case DP_AUX_G: aux_ch = AUX_CH_G; break; + case DP_AUX_H: + aux_ch = AUX_CH_H; + break; + case DP_AUX_I: + aux_ch = AUX_CH_I; + break; default: MISSING_CASE(info->alternate_aux_channel); aux_ch = AUX_CH_A; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 577c4441f32d..c449d28d0560 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1233,6 +1233,30 @@ static const struct intel_cdclk_vals icl_cdclk_table[] = { {} }; +static const struct intel_cdclk_vals rkl_cdclk_table[] = { + { .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 }, + { .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 }, + { .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 }, + { .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 }, + { .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 }, + { .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 }, + + { .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 }, + { .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 }, + { .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 }, + { .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 }, + { .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 }, + { .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 }, + + { .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 }, + { .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 }, + { .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 }, + { .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 }, + { .refclk = 38400, .cdclk 
= 556800, .divider = 4, .ratio = 58 }, + { .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 }, + {} +}; + static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) { const struct intel_cdclk_vals *table = dev_priv->cdclk.table; @@ -2426,7 +2450,6 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa if (!cdclk_state) return NULL; - cdclk_state->force_min_cdclk_changed = false; cdclk_state->pipe = INVALID_PIPE; return &cdclk_state->base; @@ -2501,6 +2524,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) if (ret) return ret; } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes || + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk || intel_cdclk_changed(&old_cdclk_state->logical, &new_cdclk_state->logical)) { ret = intel_atomic_lock_global_state(&new_cdclk_state->base); @@ -2588,7 +2612,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) */ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { - if (IS_ELKHARTLAKE(dev_priv)) { + if (IS_JSL_EHL(dev_priv)) { if (dev_priv->cdclk.hw.ref == 24000) dev_priv->max_cdclk_freq = 552000; else @@ -2680,6 +2704,18 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv) DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); } +static int dg1_rawclk(struct drm_i915_private *dev_priv) +{ + /* + * DG1 always uses a 38.4 MHz rawclk. The bspec tells us + * "Program Numerator=2, Denominator=4, Divider=37 decimal." + */ + I915_WRITE(PCH_RAWCLK_FREQ, + CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); + + return 38400; +} + static int cnp_rawclk(struct drm_i915_private *dev_priv) { u32 rawclk; @@ -2788,7 +2824,9 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) { u32 freq; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + freq = dg1_rawclk(dev_priv); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) freq = pch_rawclk(dev_priv); @@ -2809,13 +2847,19 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) */ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 12) { + if (IS_ROCKETLAKE(dev_priv)) { + dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; + dev_priv->cdclk.table = rkl_cdclk_table; + } else if (INTEL_GEN(dev_priv) >= 12) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; - } else if (IS_ELKHARTLAKE(dev_priv)) { + } else if (IS_JSL_EHL(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 6b31fde4be16..b34eb00fb327 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -49,7 +49,6 @@ struct intel_cdclk_state { /* forced minimum cdclk for glk+ audio w/a */ int force_min_cdclk; - bool force_min_cdclk_changed; /* bitmask of active pipes */ u8 active_pipes; diff 
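/*
 * Annotation (not part of the patch): the rkl_cdclk_table rows above
 * satisfy cdclk = refclk * ratio / divider, e.g.
 * 19200 * 36 / 4 = 172800, 24000 * 92 / 4 = 552000 and
 * 38400 * 68 / 4 = 652800. A table self-check could look like the
 * sketch below (field names as used by this table's initializers;
 * example_* is illustrative, not an in-tree function):
 */
static void example_check_cdclk_table(const struct intel_cdclk_vals *t)
{
	for (; t->refclk; t++)	/* table ends with an all-zero sentinel */
		WARN_ON(t->cdclk * t->divider != t->refclk * t->ratio);
}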
--git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 945bb03bdd4d..172d398081ee 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -638,10 +638,17 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; - if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: ilk_load_lut_8(crtc, gamma_lut); - else + break; + case GAMMA_MODE_MODE_10BIT: ilk_load_lut_10(crtc, gamma_lut); + break; + default: + MISSING_CASE(crtc_state->gamma_mode); + break; + } } static int ivb_lut_10_size(u32 prec_index) @@ -745,21 +752,27 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; + const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; - if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - ilk_load_lut_8(crtc, gamma_lut); - } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + ilk_load_lut_8(crtc, blob); + break; + case GAMMA_MODE_MODE_SPLIT: ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); - } else { - const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; - + break; + case GAMMA_MODE_MODE_10BIT: ivb_load_lut_10(crtc, blob, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); + break; + default: + MISSING_CASE(crtc_state->gamma_mode); + break; } } @@ -768,21 +781,28 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; + const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; - if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - ilk_load_lut_8(crtc, gamma_lut); - } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + ilk_load_lut_8(crtc, blob); + break; + case GAMMA_MODE_MODE_SPLIT: bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); - } else { - const struct drm_property_blob *blob = gamma_lut ?: degamma_lut; + break; + case GAMMA_MODE_MODE_10BIT: bdw_load_lut_10(crtc, blob, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); + break; + default: + MISSING_CASE(crtc_state->gamma_mode); + break; } } @@ -818,12 +838,14 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * as compared to just 16 to achieve this. */ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), - lut[i].green); + lut[i].green); } /* Clamp values > 1.0. 
*/ while (i++ < 35) intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + + intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) @@ -851,6 +873,8 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat /* Clamp values > 1.0. */ while (i++ < 35) intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + + intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_luts(const struct intel_crtc_state *crtc_state) @@ -871,11 +895,17 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state) else glk_load_degamma_lut_linear(crtc_state); - if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: ilk_load_lut_8(crtc, gamma_lut); - } else { + break; + case GAMMA_MODE_MODE_10BIT: bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); + break; + default: + MISSING_CASE(crtc_state->gamma_mode); + break; } } @@ -1007,9 +1037,13 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) icl_program_gamma_superfine_segment(crtc_state); icl_program_gamma_multi_segment(crtc_state); break; - default: + case GAMMA_MODE_MODE_10BIT: bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc_state); + break; + default: + MISSING_CASE(crtc_state->gamma_mode); + break; } intel_dsb_commit(crtc_state); @@ -1026,13 +1060,6 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color) return drm_color_lut_extract(color->red, 14); } -static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) -{ - entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10); - entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10); - entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10); -} - static void chv_load_cgm_degamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -1060,6 +1087,13 @@ static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color) return drm_color_lut_extract(color->red, 10); } +static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) +{ + entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10); + entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10); + entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10); +} + static void chv_load_cgm_gamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -1733,7 +1767,7 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, break; default: MISSING_CASE(gamma_mode); - return false; + return false; } return true; @@ -1913,23 +1947,34 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state) if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0) return; - if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); - else + break; + case GAMMA_MODE_MODE_10BIT: crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc); + break; + default: + MISSING_CASE(crtc_state->gamma_mode); + break; + } } -static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc, +/* On BDW+ the index auto increment mode actually works */ +static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc, u32 prec_index) { 
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int i, hw_lut_size = ivb_lut_10_size(prec_index); + int lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; struct drm_color_lut *lut; + drm_WARN_ON(&dev_priv->drm, lut_size != hw_lut_size); + blob = drm_property_create_blob(&dev_priv->drm, - sizeof(struct drm_color_lut) * hw_lut_size, + sizeof(struct drm_color_lut) * lut_size, NULL); if (IS_ERR(blob)) return NULL; @@ -1939,7 +1984,7 @@ static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc, intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index | PAL_PREC_AUTO_INCREMENT); - for (i = 0; i < hw_lut_size; i++) { + for (i = 0; i < lut_size; i++) { u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); @@ -1957,10 +2002,17 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state) if (!crtc_state->gamma_enable) return; - if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); - else - crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); + break; + case GAMMA_MODE_MODE_10BIT: + crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); + break; + default: + MISSING_CASE(crtc_state->gamma_mode); + break; + } } static struct drm_property_blob * @@ -2012,11 +2064,15 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state) case GAMMA_MODE_MODE_8BIT: crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); break; + case GAMMA_MODE_MODE_10BIT: + crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); + break; case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc); break; default: - crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); + MISSING_CASE(crtc_state->gamma_mode); + break; } } diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 157d8c8c605a..d5ad61e4083e 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -188,8 +188,9 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy) * PHY-B and may not even have instances of the register for the * other combo PHY's. */ - if (IS_ELKHARTLAKE(i915) || - IS_ROCKETLAKE(i915)) + if (IS_JSL_EHL(i915) || + IS_ROCKETLAKE(i915) || + IS_DG1(i915)) return phy < PHY_C; return true; @@ -242,14 +243,14 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy) * * ICL,TGL: * A(master) -> B(slave), C(slave) - * RKL: + * RKL,DG1: * A(master) -> B(slave) * C(master) -> D(slave) * * We must set the IREFGEN bit for any PHY acting as a master * to another PHY. */ - if (IS_ROCKETLAKE(dev_priv) && phy == PHY_C) + if ((IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) && phy == PHY_C) return true; return phy == PHY_A; @@ -282,7 +283,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy), IREFGEN, IREFGEN); - if (IS_ELKHARTLAKE(dev_priv)) { + if (IS_JSL_EHL(dev_priv)) { if (ehl_vbt_ddi_d_present(dev_priv)) expected_val = ICL_PHY_MISC_MUX_DDID; @@ -376,7 +377,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) * "internal" child devices. 
*/ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); - if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) { + if (IS_JSL_EHL(dev_priv) && phy == PHY_A) { val &= ~ICL_PHY_MISC_MUX_DDID; if (ehl_vbt_ddi_d_present(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 5b4510ce5693..4934edd51cb0 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -833,6 +833,9 @@ intel_crt_detect(struct drm_connector *connector, connector->base.id, connector->name, force); + if (!INTEL_DISPLAY_ENABLED(dev_priv)) + return connector_status_disconnected; + if (dev_priv->params.load_detect_test) { wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c index d5db16764619..67dc64df78a5 100644 --- a/drivers/gpu/drm/i915/display/intel_csr.c +++ b/drivers/gpu/drm/i915/display/intel_csr.c @@ -40,13 +40,16 @@ #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE +#define DG1_CSR_PATH "i915/dg1_dmc_ver2_02.bin" +#define DG1_CSR_VERSION_REQUIRED CSR_VERSION(2, 2) +MODULE_FIRMWARE(DG1_CSR_PATH); + #define RKL_CSR_PATH "i915/rkl_dmc_ver2_02.bin" #define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 2) MODULE_FIRMWARE(RKL_CSR_PATH); #define TGL_CSR_PATH "i915/tgl_dmc_ver2_08.bin" #define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 8) -#define TGL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(TGL_CSR_PATH); #define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin" @@ -686,14 +689,17 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) */ intel_csr_runtime_pm_get(dev_priv); - if (IS_ROCKETLAKE(dev_priv)) { + if (IS_DG1(dev_priv)) { + csr->fw_path = DG1_CSR_PATH; + csr->required_version = DG1_CSR_VERSION_REQUIRED; + csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; + } else if (IS_ROCKETLAKE(dev_priv)) { csr->fw_path = RKL_CSR_PATH; csr->required_version = RKL_CSR_VERSION_REQUIRED; csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; } else if (INTEL_GEN(dev_priv) >= 12) { csr->fw_path = TGL_CSR_PATH; csr->required_version = TGL_CSR_VERSION_REQUIRED; - /* Allow to load fw via parameter using the last known size */ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; } else if (IS_GEN(dev_priv, 11)) { csr->fw_path = ICL_CSR_PATH; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 19ac6b2a664c..19b16517a502 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -572,13 +572,13 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = { /* NT mV Trans mV db */ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ - { 0xC, 0x64, 0x30, 0x00, 0x0F }, /* 350 700 6.0 */ - { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ + { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */ { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ - { 0xC, 0x64, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ - { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */ { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ - { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; @@ -1034,103 +1034,235 @@ cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) } static const 
struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, - int *n_entries) +icl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; +} + +static const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (type == INTEL_OUTPUT_HDMI) { - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); - return icl_combo_phy_ddi_translations_hdmi; - } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { + if (crtc_state->port_clock > 540000) { *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); return icl_combo_phy_ddi_translations_edp_hbr3; - } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { + } else if (dev_priv->vbt.edp.low_vswing) { *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); return icl_combo_phy_ddi_translations_edp_hbr2; } - *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); - return icl_combo_phy_ddi_translations_dp_hbr2; + return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return icl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct icl_mg_phy_ddi_buf_trans * -icl_get_mg_buf_trans(struct intel_encoder *encoder, int type, int rate, - int *n_entries) +icl_get_mg_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi); + return icl_mg_phy_ddi_translations_hdmi; +} + +static const struct icl_mg_phy_ddi_buf_trans * +icl_get_mg_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - if (type == INTEL_OUTPUT_HDMI) { - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi); - return icl_mg_phy_ddi_translations_hdmi; - } else if (rate > 270000) { + if (crtc_state->port_clock > 270000) { *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3); return icl_mg_phy_ddi_translations_hbr2_hbr3; + } else { + *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr); + return icl_mg_phy_ddi_translations_rbr_hbr; } +} - *n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr); - return icl_mg_phy_ddi_translations_rbr_hbr; +static const struct icl_mg_phy_ddi_buf_trans * +icl_get_mg_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return icl_get_mg_buf_trans_hdmi(encoder, crtc_state, n_entries); + 
else + return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, - int *n_entries) +ehl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) { - *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp); - return ehl_combo_phy_ddi_translations_dp; + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp); + return ehl_combo_phy_ddi_translations_dp; +} + +static const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; } - return icl_get_combo_buf_trans(encoder, type, rate, n_entries); + return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct cnl_ddi_buf_trans * -tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, +ehl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return ehl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return ehl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} - if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.hobl) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} - if (!intel_dp->hobl_failed && rate <= 540000) { - /* Same table applies to TGL, RKL and DG1 */ - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); - return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; - } - } +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) { - return icl_get_combo_buf_trans(encoder, type, rate, n_entries); - } else if (rate > 270000) { + if (crtc_state->port_clock > 270000) { if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); return tgl_uy_combo_phy_ddi_translations_dp_hbr2; + } else { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); + return tgl_combo_phy_ddi_translations_dp_hbr2; } + } else { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); + return tgl_combo_phy_ddi_translations_dp_hbr; + } +} 
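The same conversion repeats for each platform in this hunk: the old (type, rate) arguments give way to the crtc_state, a top-level helper dispatches on intel_crtc_has_type(), and the per-type helpers pick a translation table from crtc_state->port_clock. A minimal, self-contained sketch of that dispatch shape (stand-in tables and a simplified state struct, not driver code):

#include <stdio.h>
#include <stdbool.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct buf_trans { int vswing, deemph; };

/* Stand-in tables; the real entries live in the tables patched above. */
static const struct buf_trans hdmi_tbl[]           = { { 6, 6 } };
static const struct buf_trans dp_hbr_tbl[]         = { { 0, 0 }, { 1, 1 } };
static const struct buf_trans dp_hbr2_tbl[]        = { { 2, 2 }, { 3, 3 }, { 4, 4 } };
static const struct buf_trans edp_low_vswing_tbl[] = { { 5, 5 } };

struct fake_crtc_state {
	bool is_hdmi, is_edp, low_vswing;
	int port_clock; /* kHz; 270000 == HBR */
};

static const struct buf_trans *
get_buf_trans_dp(const struct fake_crtc_state *cs, int *n_entries)
{
	/* Above HBR, use the HBR2/HBR3 table, as in tgl_get_combo_buf_trans_dp(). */
	if (cs->port_clock > 270000) {
		*n_entries = ARRAY_SIZE(dp_hbr2_tbl);
		return dp_hbr2_tbl;
	}
	*n_entries = ARRAY_SIZE(dp_hbr_tbl);
	return dp_hbr_tbl;
}

static const struct buf_trans *
get_buf_trans(const struct fake_crtc_state *cs, int *n_entries)
{
	if (cs->is_hdmi) {
		*n_entries = ARRAY_SIZE(hdmi_tbl);
		return hdmi_tbl;
	} else if (cs->is_edp && cs->low_vswing) {
		*n_entries = ARRAY_SIZE(edp_low_vswing_tbl);
		return edp_low_vswing_tbl;
	}
	/* eDP without low-vswing falls through to the DP tables. */
	return get_buf_trans_dp(cs, n_entries);
}

int main(void)
{
	const struct fake_crtc_state cs = { .port_clock = 540000 }; /* HBR2 */
	int n;
	const struct buf_trans *t = get_buf_trans(&cs, &n);

	printf("%d entries, first entry vswing=%d\n", n, t[0].vswing);
	return 0;
}

Keying the tables off the atomic crtc_state, rather than the encoder's cached type and link_rate, is what lets the later hunks pass the same state into link training, retraining, and the HDMI level selection.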
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); - return tgl_combo_phy_ddi_translations_dp_hbr2; +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (crtc_state->port_clock > 540000) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl); + return tgl_combo_phy_ddi_translations_edp_hbr2_hobl; + } else if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; } - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); - return tgl_combo_phy_ddi_translations_dp_hbr; + return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return tgl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } static const struct tgl_dkl_phy_ddi_buf_trans * -tgl_get_dkl_buf_trans(struct intel_encoder *encoder, int type, int rate, - int *n_entries) +tgl_get_dkl_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); + return tgl_dkl_phy_hdmi_ddi_trans; +} + +static const struct tgl_dkl_phy_ddi_buf_trans * +tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - if (type == INTEL_OUTPUT_HDMI) { - *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); - return tgl_dkl_phy_hdmi_ddi_trans; - } else if (rate > 270000) { + if (crtc_state->port_clock > 270000) { *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2); return tgl_dkl_phy_dp_ddi_trans_hbr2; + } else { + *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); + return tgl_dkl_phy_dp_ddi_trans; } +} - *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); - return tgl_dkl_phy_dp_ddi_trans; +static const struct tgl_dkl_phy_ddi_buf_trans * +tgl_get_dkl_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, n_entries); + else + return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); } -static int intel_ddi_hdmi_level(struct intel_encoder *encoder) +static int intel_ddi_hdmi_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int n_entries, level, default_entry; @@ -1138,19 +1270,15 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder) if (INTEL_GEN(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) - tgl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI, - 0, &n_entries); + tgl_get_combo_buf_trans_hdmi(encoder, 
crtc_state, &n_entries); else - tgl_get_dkl_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0, - &n_entries); + tgl_get_dkl_buf_trans_hdmi(encoder, crtc_state, &n_entries); default_entry = n_entries - 1; } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI, - 0, &n_entries); + icl_get_combo_buf_trans_hdmi(encoder, crtc_state, &n_entries); else - icl_get_mg_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0, - &n_entries); + icl_get_mg_buf_trans_hdmi(encoder, crtc_state, &n_entries); default_entry = n_entries - 1; } else if (IS_CANNONLAKE(dev_priv)) { cnl_get_buf_trans_hdmi(encoder, &n_entries); @@ -1477,14 +1605,15 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, DP_TP_CTL_ENABLE); } -static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) +static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_dp->DP = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); - intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); + intel_dp->DP |= DDI_PORT_WIDTH(crtc_state->lane_count); } static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, @@ -1791,6 +1920,8 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); + drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING); + ctl &= ~TRANS_DDI_FUNC_ENABLE; if (IS_GEN_RANGE(dev_priv, 8, 10)) @@ -1818,12 +1949,12 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state } int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + enum transcoder cpu_transcoder, bool enable) { struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; - enum pipe pipe = 0; int ret = 0; u32 tmp; @@ -1832,19 +1963,12 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, if (drm_WARN_ON(dev, !wakeref)) return -ENXIO; - if (drm_WARN_ON(dev, - !intel_encoder->get_hw_state(intel_encoder, &pipe))) { - ret = -EIO; - goto out; - } - - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe)); + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (enable) tmp |= TRANS_DDI_HDCP_SIGNALLING; else tmp &= ~TRANS_DDI_HDCP_SIGNALLING; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp); -out: + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp); intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } @@ -2153,13 +2277,14 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, } static void skl_ddi_set_iboost(struct intel_encoder *encoder, - int level, enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u8 iboost; - if (type == INTEL_OUTPUT_HDMI) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) iboost = intel_bios_hdmi_boost_level(encoder); else iboost = intel_bios_dp_boost_level(encoder); @@ -2168,14 +2293,12 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, const struct ddi_buf_trans *ddi_translations; int n_entries; - if (type == INTEL_OUTPUT_HDMI) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 
ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); - else if (type == INTEL_OUTPUT_EDP) - ddi_translations = intel_ddi_get_buf_trans_edp(encoder, - &n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + ddi_translations = intel_ddi_get_buf_trans_edp(encoder, &n_entries); else - ddi_translations = intel_ddi_get_buf_trans_dp(encoder, - &n_entries); + ddi_translations = intel_ddi_get_buf_trans_dp(encoder, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; @@ -2198,16 +2321,17 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, } static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, - int level, enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct bxt_ddi_buf_trans *ddi_translations; enum port port = encoder->port; int n_entries; - if (type == INTEL_OUTPUT_HDMI) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries); - else if (type == INTEL_OUTPUT_EDP) + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries); else ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries); @@ -2224,7 +2348,8 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, ddi_translations[level].deemphasis); } -static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp) +static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -2234,33 +2359,28 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) - tgl_get_combo_buf_trans(encoder, encoder->type, - intel_dp->link_rate, &n_entries); + tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries); else - tgl_get_dkl_buf_trans(encoder, encoder->type, - intel_dp->link_rate, &n_entries); + tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); } else if (INTEL_GEN(dev_priv) == 11) { - if (IS_ELKHARTLAKE(dev_priv)) - ehl_get_combo_buf_trans(encoder, encoder->type, - intel_dp->link_rate, &n_entries); + if (IS_JSL_EHL(dev_priv)) + ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries); else if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans(encoder, encoder->type, - intel_dp->link_rate, &n_entries); + icl_get_combo_buf_trans(encoder, crtc_state, &n_entries); else - icl_get_mg_buf_trans(encoder, encoder->type, - intel_dp->link_rate, &n_entries); + icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); } else if (IS_CANNONLAKE(dev_priv)) { - if (encoder->type == INTEL_OUTPUT_EDP) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) cnl_get_buf_trans_edp(encoder, &n_entries); else cnl_get_buf_trans_dp(encoder, &n_entries); } else if (IS_GEN9_LP(dev_priv)) { - if (encoder->type == INTEL_OUTPUT_EDP) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) bxt_get_buf_trans_edp(encoder, &n_entries); else bxt_get_buf_trans_dp(encoder, &n_entries); } else { - if (encoder->type == INTEL_OUTPUT_EDP) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) intel_ddi_get_buf_trans_edp(encoder, &n_entries); else intel_ddi_get_buf_trans_dp(encoder, &n_entries); @@ -2287,7 +2407,8 @@ static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp) } static void 
cnl_ddi_vswing_program(struct intel_encoder *encoder, - int level, enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct cnl_ddi_buf_trans *ddi_translations; @@ -2295,9 +2416,9 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, int n_entries, ln; u32 val; - if (type == INTEL_OUTPUT_HDMI) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries); - else if (type == INTEL_OUTPUT_EDP) + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries); else ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries); @@ -2351,22 +2472,16 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, } static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, - int level, enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; int width, rate, ln; u32 val; - if (type == INTEL_OUTPUT_HDMI) { - width = 4; - rate = 0; /* Rate is always < than 6GHz for HDMI */ - } else { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - width = intel_dp->lane_count; - rate = intel_dp->link_rate; - } + width = crtc_state->lane_count; + rate = crtc_state->port_clock; /* * 1. If port type is eDP or DP, @@ -2374,10 +2489,10 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, * else clear to 0b. */ val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port)); - if (type != INTEL_OUTPUT_HDMI) - val |= COMMON_KEEPER_EN; - else + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) val &= ~COMMON_KEEPER_EN; + else + val |= COMMON_KEEPER_EN; intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val); /* 2. Program loadgen select */ @@ -2409,7 +2524,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val); /* 5. Program swing and de-emphasis */ - cnl_ddi_vswing_program(encoder, level, type); + cnl_ddi_vswing_program(encoder, crtc_state, level); /* 6. 
Set training enable to trigger update */ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port)); @@ -2418,23 +2533,21 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, - u32 level, int type, int rate) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct cnl_ddi_buf_trans *ddi_translations; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - const struct cnl_ddi_buf_trans *ddi_translations = NULL; - u32 n_entries, val; - int ln; + int n_entries, ln; + u32 val; if (INTEL_GEN(dev_priv) >= 12) - ddi_translations = tgl_get_combo_buf_trans(encoder, type, rate, - &n_entries); - else if (IS_ELKHARTLAKE(dev_priv)) - ddi_translations = ehl_get_combo_buf_trans(encoder, type, rate, - &n_entries); + ddi_translations = tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries); + else if (IS_JSL_EHL(dev_priv)) + ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries); else - ddi_translations = icl_get_combo_buf_trans(encoder, type, rate, - &n_entries); + ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries); if (!ddi_translations) return; @@ -2445,7 +2558,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, level = n_entries - 1; } - if (type == INTEL_OUTPUT_EDP) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED; @@ -2493,25 +2606,16 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, } static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - u32 level, - enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - int width = 0; - int rate = 0; + int width, rate, ln; u32 val; - int ln = 0; - if (type == INTEL_OUTPUT_HDMI) { - width = 4; - /* Rate is always < than 6GHz for HDMI */ - } else { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - width = intel_dp->lane_count; - rate = intel_dp->link_rate; - } + width = crtc_state->lane_count; + rate = crtc_state->port_clock; /* * 1. If port type is eDP or DP, @@ -2519,7 +2623,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * else clear to 0b. */ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); - if (type == INTEL_OUTPUT_HDMI) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) val &= ~COMMON_KEEPER_EN; else val |= COMMON_KEEPER_EN; @@ -2554,7 +2658,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(encoder, level, type, rate); + icl_ddi_combo_vswing_program(encoder, crtc_state, level); /* 6. 
Set training enable to trigger update */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); @@ -2563,23 +2667,16 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - int link_clock, u32 level, - enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct icl_mg_phy_ddi_buf_trans *ddi_translations; - u32 n_entries, val; - int ln, rate = 0; - - if (type != INTEL_OUTPUT_HDMI) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - rate = intel_dp->link_rate; - } + int n_entries, ln; + u32 val; - ddi_translations = icl_get_mg_buf_trans(encoder, type, rate, - &n_entries); + ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); /* The table does not have values for level 3 and level 9. */ if (level >= n_entries || level == 3 || level == 9) { drm_dbg_kms(&dev_priv->drm, @@ -2646,7 +2743,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, */ for (ln = 0; ln < 2; ln++) { val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port)); - if (link_clock < 300000) + if (crtc_state->port_clock < 300000) val |= CFG_LOW_RATE_LKREN_EN; else val &= ~CFG_LOW_RATE_LKREN_EN; @@ -2657,7 +2754,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, for (ln = 0; ln < 2; ln++) { val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (link_clock <= 500000) { + if (crtc_state->port_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; } else { val |= CFG_AMI_CK_DIV_OVERRIDE_EN | @@ -2667,7 +2764,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (link_clock <= 500000) { + if (crtc_state->port_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; } else { val |= CFG_AMI_CK_DIV_OVERRIDE_EN | @@ -2693,38 +2790,30 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, - int link_clock, - u32 level, - enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (intel_phy_is_combo(dev_priv, phy)) - icl_combo_phy_ddi_vswing_sequence(encoder, level, type); + icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level); else - icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level, - type); + icl_mg_phy_ddi_vswing_sequence(encoder, crtc_state, level); } static void -tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, - u32 level, enum intel_output_type type) +tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; - u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; - int rate = 0; - - if (type == INTEL_OUTPUT_HDMI) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - rate = intel_dp->link_rate; - } + u32 val, dpcnt_mask, dpcnt_val; + int n_entries, ln; - ddi_translations = 
tgl_get_dkl_buf_trans(encoder, encoder->type, rate, - &n_entries); + ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); if (level >= n_entries) level = n_entries - 1; @@ -2760,20 +2849,20 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, } static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder, - int link_clock, - u32 level, - enum intel_output_type type) + const struct intel_crtc_state *crtc_state, + int level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (intel_phy_is_combo(dev_priv, phy)) - icl_combo_phy_ddi_vswing_sequence(encoder, level, type); + icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level); else - tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level, type); + tgl_dkl_phy_ddi_vswing_sequence(encoder, crtc_state, level); } -static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels) +static int translate_signal_level(struct intel_dp *intel_dp, + u8 signal_levels) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int i; @@ -2790,55 +2879,58 @@ static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels) return 0; } -static u32 intel_ddi_dp_level(struct intel_dp *intel_dp) +static int intel_ddi_dp_level(struct intel_dp *intel_dp) { u8 train_set = intel_dp->train_set[0]; - int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); + u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); return translate_signal_level(intel_dp, signal_levels); } static void -tgl_set_signal_levels(struct intel_dp *intel_dp) +tgl_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int level = intel_ddi_dp_level(intel_dp); - tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate, - level, encoder->type); + tgl_ddi_vswing_sequence(encoder, crtc_state, level); } static void -icl_set_signal_levels(struct intel_dp *intel_dp) +icl_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int level = intel_ddi_dp_level(intel_dp); - icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, - level, encoder->type); + icl_ddi_vswing_sequence(encoder, crtc_state, level); } static void -cnl_set_signal_levels(struct intel_dp *intel_dp) +cnl_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int level = intel_ddi_dp_level(intel_dp); - cnl_ddi_vswing_sequence(encoder, level, encoder->type); + cnl_ddi_vswing_sequence(encoder, crtc_state, level); } static void -bxt_set_signal_levels(struct intel_dp *intel_dp) +bxt_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int level = intel_ddi_dp_level(intel_dp); - bxt_ddi_vswing_sequence(encoder, level, encoder->type); + bxt_ddi_vswing_sequence(encoder, crtc_state, level); } static void -hsw_set_signal_levels(struct intel_dp *intel_dp) +hsw_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -2855,7 +2947,7 @@ 
hsw_set_signal_levels(struct intel_dp *intel_dp) intel_dp->DP |= signal_levels; if (IS_GEN9_BC(dev_priv)) - skl_ddi_set_iboost(encoder, level, encoder->type); + skl_ddi_set_iboost(encoder, crtc_state, level); intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); @@ -3043,7 +3135,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, if (!intel_phy_is_combo(dev_priv, phy)) intel_de_write(dev_priv, DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); - else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C) + else if (IS_JSL_EHL(dev_priv) && port >= PORT_C) /* * MG does not exist but the programming is required * to ungate DDIC and DDID @@ -3092,7 +3184,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) if (INTEL_GEN(dev_priv) >= 11) { if (!intel_phy_is_combo(dev_priv, phy) || - (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)) + (IS_JSL_EHL(dev_priv) && port >= PORT_C)) intel_de_write(dev_priv, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } else if (IS_CANNONLAKE(dev_priv)) { @@ -3198,6 +3290,37 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, } } +static enum transcoder +tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) + return crtc_state->mst_master_transcoder; + else + return crtc_state->cpu_transcoder; +} + +i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (INTEL_GEN(dev_priv) >= 12) + return TGL_DP_TP_CTL(tgl_dp_tp_transcoder(crtc_state)); + else + return DP_TP_CTL(encoder->port); +} + +i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (INTEL_GEN(dev_priv) >= 12) + return TGL_DP_TP_STATUS(tgl_dp_tp_transcoder(crtc_state)); + else + return DP_TP_STATUS(encoder->port); +} + static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -3222,11 +3345,12 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, return; intel_dp = enc_to_intel_dp(encoder); - val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); val |= DP_TP_CTL_FEC_ENABLE; - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); - if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, + if (intel_de_wait_for_set(dev_priv, + dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) drm_err(&dev_priv->drm, "Timed out waiting for FEC Enable Status\n"); @@ -3243,10 +3367,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, return; intel_dp = enc_to_intel_dp(encoder); - val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); val &= ~DP_TP_CTL_FEC_ENABLE; - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); - intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); + intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); } static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -3260,13 +3384,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct 
intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); - enum transcoder transcoder = crtc_state->cpu_transcoder; - - intel_dp_set_link_params(intel_dp, crtc_state->port_clock, - crtc_state->lane_count, is_mst); - intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder); - intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder); + intel_dp_set_link_params(intel_dp, + crtc_state->port_clock, + crtc_state->lane_count); /* * 1. Enable Power Wells @@ -3335,8 +3456,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, */ /* 7.e Configure voltage swing and related IO settings */ - tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, - encoder->type); + tgl_ddi_vswing_sequence(encoder, crtc_state, level); /* * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up @@ -3359,10 +3479,10 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * We only configure what the register value will be here. Actual * enabling happens during link training farther down. */ - intel_ddi_init_dp_buf_reg(encoder); + intel_ddi_init_dp_buf_reg(encoder, crtc_state); if (!is_mst) - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); /* @@ -3379,11 +3499,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent) * (timeout after 800 us) */ - intel_dp_start_link_train(intel_dp); + intel_dp_start_link_train(intel_dp, crtc_state); /* 7.k Set DP_TP_CTL link training to Normal */ if (!is_trans_port_sync_mode(crtc_state)) - intel_dp_stop_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp, crtc_state); /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); @@ -3409,8 +3529,9 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, else drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A); - intel_dp_set_link_params(intel_dp, crtc_state->port_clock, - crtc_state->lane_count, is_mst); + intel_dp_set_link_params(intel_dp, + crtc_state->port_clock, + crtc_state->lane_count); intel_edp_panel_on(intel_dp); @@ -3424,12 +3545,11 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, icl_program_mg_dp_mode(dig_port, crtc_state); if (INTEL_GEN(dev_priv) >= 11) - icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, - level, encoder->type); + icl_ddi_vswing_sequence(encoder, crtc_state, level); else if (IS_CANNONLAKE(dev_priv)) - cnl_ddi_vswing_sequence(encoder, level, encoder->type); + cnl_ddi_vswing_sequence(encoder, crtc_state, level); else if (IS_GEN9_LP(dev_priv)) - bxt_ddi_vswing_sequence(encoder, level, encoder->type); + bxt_ddi_vswing_sequence(encoder, crtc_state, level); else intel_prepare_dp_ddi_buffers(encoder, crtc_state); @@ -3442,16 +3562,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, lane_reversal); } - intel_ddi_init_dp_buf_reg(encoder); + intel_ddi_init_dp_buf_reg(encoder, crtc_state); if (!is_mst) - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_set_power(intel_dp, DP_SET_POWER_D0); + intel_dp_configure_protocol_converter(intel_dp); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); intel_dp_sink_set_fec_ready(intel_dp, crtc_state); - intel_dp_start_link_train(intel_dp); + intel_dp_start_link_train(intel_dp, crtc_state); if ((port != 
PORT_A || INTEL_GEN(dev_priv) >= 9) && !is_trans_port_sync_mode(crtc_state)) - intel_dp_stop_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp, crtc_state); intel_ddi_enable_fec(encoder, crtc_state); @@ -3491,7 +3612,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int level = intel_ddi_hdmi_level(encoder); + int level = intel_ddi_hdmi_level(encoder, crtc_state); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); intel_ddi_clk_select(encoder, crtc_state); @@ -3501,20 +3622,18 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, icl_program_mg_dp_mode(dig_port, crtc_state); if (INTEL_GEN(dev_priv) >= 12) - tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, - level, INTEL_OUTPUT_HDMI); + tgl_ddi_vswing_sequence(encoder, crtc_state, level); else if (INTEL_GEN(dev_priv) == 11) - icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, - level, INTEL_OUTPUT_HDMI); + icl_ddi_vswing_sequence(encoder, crtc_state, level); else if (IS_CANNONLAKE(dev_priv)) - cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); + cnl_ddi_vswing_sequence(encoder, crtc_state, level); else if (IS_GEN9_LP(dev_priv)) - bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); + bxt_ddi_vswing_sequence(encoder, crtc_state, level); else intel_prepare_hdmi_ddi_buffers(encoder, level); if (IS_GEN9_BC(dev_priv)) - skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); + skl_ddi_set_iboost(encoder, crtc_state, level); intel_ddi_enable_pipe_clock(encoder, crtc_state); @@ -3556,19 +3675,17 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, intel_ddi_pre_enable_hdmi(state, encoder, crtc_state, conn_state); } else { - struct intel_lspcon *lspcon = - enc_to_intel_lspcon(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); - if (lspcon->active) { - struct intel_digital_port *dig_port = - enc_to_dig_port(encoder); + /* FIXME precompute everything properly */ + /* FIXME how do we turn infoframes off again? */ + if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink) dig_port->set_infoframes(encoder, crtc_state->has_infoframe, crtc_state, conn_state); - } } } @@ -3588,12 +3705,10 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, } if (intel_crtc_has_dp_encoder(crtc_state)) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); val |= DP_TP_CTL_LINK_TRAIN_PAT1; - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); } /* Disable FEC in DP Sink */ @@ -3623,7 +3738,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, * Power down sink before disabling the port, otherwise we end * up getting interrupts from the sink on detecting link loss. 
*/ - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + intel_dp_set_power(intel_dp, DP_SET_POWER_D3); if (INTEL_GEN(dev_priv) >= 12) { if (is_mst) { @@ -3806,12 +3921,14 @@ static void trans_port_sync_stop_link_train(struct intel_atomic_state *state, crtc_state->cpu_transcoder) continue; - intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder)); + intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder), + slave_crtc_state); } usleep_range(200, 400); - intel_dp_stop_link_train(enc_to_intel_dp(encoder)); + intel_dp_stop_link_train(enc_to_intel_dp(encoder), + crtc_state); } static void intel_enable_ddi_dp(struct intel_atomic_state *state, @@ -3824,7 +3941,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, enum port port = encoder->port; if (port == PORT_A && INTEL_GEN(dev_priv) < 9) - intel_dp_stop_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp, crtc_state); intel_edp_backlight_on(crtc_state, conn_state); intel_psr_enable(intel_dp, crtc_state, conn_state); @@ -4012,18 +4129,19 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, intel_psr_update(intel_dp, crtc_state, conn_state); intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); - intel_edp_drrs_enable(intel_dp, crtc_state); + intel_edp_drrs_update(intel_dp, crtc_state); intel_panel_update_backlight(state, encoder, crtc_state, conn_state); } -static void intel_ddi_update_pipe(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +void intel_ddi_update_pipe(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { - if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && + !intel_encoder_is_mst(encoder)) intel_ddi_update_pipe_dp(state, encoder, crtc_state, conn_state); @@ -4084,15 +4202,16 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, crtc_state->lane_lat_optim_mask); } -static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) +static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; u32 dp_tp_ctl, ddi_buf_ctl; bool wait = false; - dp_tp_ctl = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); if (dp_tp_ctl & DP_TP_CTL_ENABLE) { ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port)); @@ -4104,23 +4223,23 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1; - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl); - intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); + intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); } dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1; - if (intel_dp->link_mst) + if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { dp_tp_ctl |= DP_TP_CTL_MODE_MST; - else { + } else { dp_tp_ctl |= DP_TP_CTL_MODE_SST; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl); - intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); + intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); intel_dp->DP |= DDI_BUF_CTL_ENABLE; intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); @@ -4130,16 +4249,17 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) } static void intel_ddi_set_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 temp; - temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - switch (dp_train_pat & train_pat_mask) { + switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; break; @@ -4157,20 +4277,21 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp, break; } - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp); + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp); } -static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp) +static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u32 val; - val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); val &= ~DP_TP_CTL_LINK_TRAIN_MASK; val |= DP_TP_CTL_LINK_TRAIN_IDLE; - intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); + intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); /* * Until TGL on PORT_A we can have only eDP in SST mode. 
There the only @@ -4182,7 +4303,8 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp) if (port == PORT_A && INTEL_GEN(dev_priv) < 12) return; - if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, + if (intel_de_wait_for_set(dev_priv, + dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_IDLE_DONE, 1)) drm_err(&dev_priv->drm, "Timed out waiting for DP idle patterns\n"); @@ -4206,7 +4328,7 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, { if (INTEL_GEN(dev_priv) >= 12 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 2; - else if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000) + else if (IS_JSL_EHL(dev_priv) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 3; else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 1; @@ -4280,7 +4402,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 temp, flags = 0; /* XXX: DSI transcoder paranoia */ @@ -4350,12 +4471,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_dp_get_m_n(intel_crtc, pipe_config); if (INTEL_GEN(dev_priv) >= 11) { - i915_reg_t dp_tp_ctl; - - if (IS_GEN(dev_priv, 11)) - dp_tp_ctl = DP_TP_CTL(encoder->port); - else - dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder); + i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config); pipe_config->fec_enable = intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; @@ -4388,16 +4504,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; } - if (INTEL_GEN(dev_priv) >= 12) { - enum transcoder transcoder = - intel_dp_mst_is_slave_trans(pipe_config) ? 
- pipe_config->mst_master_transcoder : - pipe_config->cpu_transcoder; - - intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder); - intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder); - } - pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); @@ -4452,6 +4558,22 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC); } +static void intel_ddi_sync_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + if (intel_crtc_has_dp_encoder(crtc_state)) + intel_dp_sync_state(encoder, crtc_state); +} + +static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + if (intel_crtc_has_dp_encoder(crtc_state)) + return intel_dp_initial_fastset_check(encoder, crtc_state); + + return true; +} + static enum intel_output_type intel_ddi_compute_output_type(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, @@ -4662,11 +4784,6 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) dig_port->dp.voltage_max = intel_ddi_dp_voltage_max; dig_port->dp.preemph_max = intel_ddi_dp_preemph_max; - if (INTEL_GEN(dev_priv) < 12) { - dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); - dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); - } - if (!intel_dp_init_connector(dig_port, connector)) { kfree(connector); return NULL; @@ -4949,11 +5066,71 @@ static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy) i915->hti_state & HDPORT_PHY_USED_HDMI(phy)); } +static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port >= PORT_TC1) + return HPD_PORT_C + port - PORT_TC1; + else + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port >= PORT_TC1) + return HPD_PORT_TC1 + port - PORT_TC1; + else + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (HAS_PCH_TGP(dev_priv)) + return tgl_hpd_pin(dev_priv, port); + + if (port >= PORT_TC1) + return HPD_PORT_C + port - PORT_TC1; + else + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port >= PORT_C) + return HPD_PORT_TC1 + port - PORT_C; + else + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port == PORT_D) + return HPD_PORT_A; + + if (HAS_PCH_MCC(dev_priv)) + return icl_hpd_pin(dev_priv, port); + + return HPD_PORT_A + port - PORT_A; +} + +static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + if (port == PORT_F) + return HPD_PORT_E; + + return HPD_PORT_A + port - PORT_A; +} + void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { struct intel_digital_port *dig_port; struct intel_encoder *encoder; - bool init_hdmi, init_dp, init_lspcon = false; + bool init_hdmi, init_dp; enum phy phy = intel_port_to_phy(dev_priv, port); /* @@ -4979,7 +5156,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) * is initialized before lspcon. 
*/ init_dp = true; - init_lspcon = true; init_hdmi = false; drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n", port_name(port)); @@ -4998,8 +5174,34 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder = &dig_port->base; - drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); + if (INTEL_GEN(dev_priv) >= 12) { + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + + drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, + DRM_MODE_ENCODER_TMDS, + "DDI %s%c/PHY %s%c", + port >= PORT_TC1 ? "TC" : "", + port >= PORT_TC1 ? port - PORT_TC1 + '1' : port_name(port), + tc_port != TC_PORT_NONE ? "TC" : "", + tc_port != TC_PORT_NONE ? tc_port - TC_PORT_1 + '1' : phy_name(phy)); + } else if (INTEL_GEN(dev_priv) >= 11) { + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + + drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, + DRM_MODE_ENCODER_TMDS, + "DDI %c%s/PHY %s%c", + port_name(port), + port >= PORT_C ? " (TC)" : "", + tc_port != TC_PORT_NONE ? "TC" : "", + tc_port != TC_PORT_NONE ? tc_port - TC_PORT_1 + '1' : phy_name(phy)); + } else { + drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, + DRM_MODE_ENCODER_TMDS, + "DDI %c/PHY %c", port_name(port), phy_name(phy)); + } + + mutex_init(&dig_port->hdcp_mutex); + dig_port->num_hdcp_streams = 0; encoder->hotplug = intel_ddi_hotplug; encoder->compute_output_type = intel_ddi_compute_output_type; @@ -5013,7 +5215,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->update_pipe = intel_ddi_update_pipe; encoder->get_hw_state = intel_ddi_get_hw_state; encoder->get_config = intel_ddi_get_config; + encoder->sync_state = intel_ddi_sync_state; + encoder->initial_fastset_check = intel_ddi_initial_fastset_check; encoder->suspend = intel_dp_encoder_suspend; + encoder->shutdown = intel_dp_encoder_shutdown; encoder->get_power_domains = intel_ddi_get_power_domains; encoder->type = INTEL_OUTPUT_DDI; @@ -5022,6 +5227,21 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->cloneable = 0; encoder->pipe_mask = ~0; + if (IS_DG1(dev_priv)) + encoder->hpd_pin = dg1_hpd_pin(dev_priv, port); + else if (IS_ROCKETLAKE(dev_priv)) + encoder->hpd_pin = rkl_hpd_pin(dev_priv, port); + else if (INTEL_GEN(dev_priv) >= 12) + encoder->hpd_pin = tgl_hpd_pin(dev_priv, port); + else if (IS_JSL_EHL(dev_priv)) + encoder->hpd_pin = ehl_hpd_pin(dev_priv, port); + else if (IS_GEN(dev_priv, 11)) + encoder->hpd_pin = icl_hpd_pin(dev_priv, port); + else if (IS_GEN(dev_priv, 10)) + encoder->hpd_pin = cnl_hpd_pin(dev_priv, port); + else + encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); + if (INTEL_GEN(dev_priv) >= 11) dig_port->saved_port_bits = intel_de_read(dev_priv, DDI_BUF_CTL(port)) @@ -5064,22 +5284,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) goto err; } - if (init_lspcon) { - if (lspcon_init(dig_port)) - /* TODO: handle hdmi info frame part */ - drm_dbg_kms(&dev_priv->drm, - "LSPCON init success on port %c\n", - port_name(port)); - else - /* - * LSPCON init faied, but DP init was success, so - * lets try to drive as DP++ port.
- */ - drm_err(&dev_priv->drm, - "LSPCON init failed on port %c\n", - port_name(port)); - } - if (INTEL_GEN(dev_priv) >= 11) { if (intel_phy_is_tc(dev_priv, phy)) dig_port->connected = intel_tc_port_connected; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 077e9dbbe367..dcc711cfe4fe 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -7,6 +7,7 @@ #define __INTEL_DDI_H__ #include "intel_display.h" +#include "i915_reg.h" struct drm_connector_state; struct drm_i915_private; @@ -16,7 +17,12 @@ struct intel_crtc_state; struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; +enum transcoder; +i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, struct intel_encoder *intel_encoder, const struct intel_crtc_state *old_crtc_state, @@ -40,9 +46,12 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, bool state); void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); -u32 bxt_signal_levels(struct intel_dp *intel_dp); -u32 ddi_signal_levels(struct intel_dp *intel_dp); +u32 bxt_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +u32 ddi_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + enum transcoder cpu_transcoder, bool enable); void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ddb588f863f8..cddbda5303ff 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -67,6 +67,7 @@ #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_color.h" +#include "intel_csr.h" #include "intel_display_types.h" #include "intel_dp_link_training.h" #include "intel_fbc.h" @@ -153,7 +154,7 @@ static void ilk_pch_clock_get(struct intel_crtc *crtc, static int intel_framebuffer_init(struct intel_framebuffer *ifb, struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); -static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state); +static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, const struct intel_link_m_n *m_n, @@ -1807,6 +1808,17 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + u32 mode_flags = crtc->mode_flags; + + /* + * From Gen 11, in case of DSI cmd mode, the frame counter wouldn't + * have been updated at the beginning of TE. If we want to use + * the hw counter, we would only find it updated at + * the next TE, hence switch to the sw counter.
+ */ + if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1)) + return 0; /* * On i965gm the hardware frame counter reads @@ -1989,13 +2001,17 @@ static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) return ccs_plane - fb->format->num_planes / 2; } -/* Return either the main plane's CCS or - if not a CCS FB - UV plane */ int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) { + struct drm_i915_private *i915 = to_i915(fb->dev); + if (is_ccs_modifier(fb->modifier)) return main_to_ccs_plane(fb, main_plane); - - return 1; + else if (INTEL_GEN(i915) < 11 && + intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 1; + else + return 0; } bool @@ -3433,6 +3449,14 @@ initial_plane_vma(struct drm_i915_private *i915, if (IS_ERR(obj)) return NULL; + /* + * Mark it WT ahead of time to avoid changing the + * cache_level during fbdev initialization. The + * unbind there would get stuck waiting for rcu. + */ + i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ? + I915_CACHE_WT : I915_CACHE_NONE); + switch (plane_config->tiling) { case I915_TILING_NONE: break; @@ -3921,7 +3945,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * main surface offset, and it must be non-negative. Make * sure that is what we will get. */ - if (offset > aux_offset) + if (aux_plane && offset > aux_offset) offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, offset, aux_offset & ~(alignment - 1)); @@ -4010,8 +4034,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) if (is_ccs_modifier(fb->modifier)) { int ccs_plane = main_to_ccs_plane(fb, uv_plane); - int aux_offset = plane_state->color_plane[ccs_plane].offset; - int alignment = intel_surf_alignment(fb, uv_plane); + u32 aux_offset = plane_state->color_plane[ccs_plane].offset; + u32 alignment = intel_surf_alignment(fb, uv_plane); if (offset > aux_offset) offset = intel_plane_adjust_aligned_offset(&x, &y, @@ -4092,8 +4116,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) int skl_check_plane_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; - int ret; - bool needs_aux = false; + int ret, i; ret = intel_plane_compute_gtt(plane_state); if (ret) @@ -4107,7 +4130,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) * it. 
*/ if (is_ccs_modifier(fb->modifier)) { - needs_aux = true; ret = skl_check_ccs_aux_surface(plane_state); if (ret) return ret; @@ -4115,20 +4137,15 @@ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { - needs_aux = true; ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; } - if (!needs_aux) { - int i; - - for (i = 1; i < fb->format->num_planes; i++) { - plane_state->color_plane[i].offset = ~0xfff; - plane_state->color_plane[i].x = 0; - plane_state->color_plane[i].y = 0; - } + for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) { + plane_state->color_plane[i].offset = 0; + plane_state->color_plane[i].x = 0; + plane_state->color_plane[i].y = 0; } ret = skl_check_main_surface(plane_state); @@ -4784,6 +4801,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 plane_ctl = 0; + if (crtc_state->uapi.async_flip) + plane_ctl |= PLANE_CTL_ASYNC_FLIP; + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return plane_ctl; @@ -5021,18 +5041,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); - - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); + intel_hpd_init(dev_priv); ret = __intel_display_resume(dev, state, ctx); if (ret) drm_err(&dev_priv->drm, "Restoring old state failed with %i\n", ret); - intel_hpd_init(dev_priv); + intel_hpd_poll_disable(dev_priv); } drm_atomic_state_put(state); @@ -6273,6 +6289,105 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state) skl_detach_scaler(crtc, i); } +static int cnl_coef_tap(int i) +{ + return i % 7; +} + +static u16 cnl_nearest_filter_coef(int t) +{ + return t == 3 ? 0x0800 : 0x3000; +} + +/* + * Theory behind setting nearest-neighbor integer scaling: + * + * 17 phases of 7 taps require 119 coefficients in 60 dwords per set. + * The letter represents the filter tap (D is the center tap) and the number + * represents the coefficient set for a phase (0-16). + * + * +------------+------------------------+------------------------+ + * |Index value | Data value coefficient 1 | Data value coefficient 2 | + * +------------+------------------------+------------------------+ + * | 00h | B0 | A0 | + * +------------+------------------------+------------------------+ + * | 01h | D0 | C0 | + * +------------+------------------------+------------------------+ + * | 02h | F0 | E0 | + * +------------+------------------------+------------------------+ + * | 03h | A1 | G0 | + * +------------+------------------------+------------------------+ + * | 04h | C1 | B1 | + * +------------+------------------------+------------------------+ + * | ... | ... | ... | + * +------------+------------------------+------------------------+ + * | 38h | B16 | A16 | + * +------------+------------------------+------------------------+ + * | 39h | D16 | C16 | + * +------------+------------------------+------------------------+ + * | 3Ah | F16 | E16 | + * +------------+------------------------+------------------------+ + * | 3Bh | Reserved | G16 | + * +------------+------------------------+------------------------+ + * + * To enable nearest-neighbor scaling: program scaler coefficients with + * the center tap (Dxx) values set to 1 and all other values set to 0 as per + * SCALER_COEFFICIENT_FORMAT + * + */ + +static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv, + enum pipe pipe, int id, int set) +{ + int i; + + intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), + PS_COEE_INDEX_AUTO_INC); + + for (i = 0; i < 17 * 7; i += 2) { + u32 tmp; + int t; + + t = cnl_coef_tap(i); + tmp = cnl_nearest_filter_coef(t); + + t = cnl_coef_tap(i + 1); + tmp |= cnl_nearest_filter_coef(t) << 16; + + intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set), + tmp); + } + + intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0); +} + +inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set) +{ + if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) { + return (PS_FILTER_PROGRAMMED | + PS_Y_VERT_FILTER_SELECT(set) | + PS_Y_HORZ_FILTER_SELECT(set) | + PS_UV_VERT_FILTER_SELECT(set) | + PS_UV_HORZ_FILTER_SELECT(set)); + } + + return PS_FILTER_MEDIUM; +} + +void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe, + int id, int set, enum drm_scaling_filter filter) +{ + switch (filter) { + case DRM_SCALING_FILTER_DEFAULT: + break; + case DRM_SCALING_FILTER_NEAREST_NEIGHBOR: + cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set); + break; + default: + MISSING_CASE(filter); + } +} + static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -6293,6 +6408,7 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) int hscale, vscale; unsigned long irqflags; int id; + u32 ps_ctrl; if (!crtc_state->pch_pfit.enabled) return; @@ -6309,10 +6425,16 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) id = scaler_state->scaler_id; + ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0); + ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode; + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN | - PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); + skl_scaler_setup_filter(dev_priv, pipe, id, 0, + crtc_state->hw.scaling_filter); + + intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl); + intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), @@ -6558,6 +6680,43 @@ static void intel_post_plane_update(struct intel_atomic_state *state, icl_wa_scalerclkgating(dev_priv, pipe, false); } +static void skl_disable_async_flip_wa(struct intel_atomic_state *state, + struct intel_crtc *crtc, + const struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_plane *plane; + struct intel_plane_state *new_plane_state; + int i; + + for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { + u32 update_mask =
new_crtc_state->update_planes; + u32 plane_ctl, surf_addr; + enum plane_id plane_id; + unsigned long irqflags; + enum pipe pipe; + + if (crtc->pipe != plane->pipe || + !(update_mask & BIT(plane->id))) + continue; + + plane_id = plane->id; + pipe = plane->pipe; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id)); + surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id)); + + plane_ctl &= ~PLANE_CTL_ASYNC_FLIP; + + intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + } + + intel_wait_for_vblank(dev_priv, crtc->pipe); +} + static void intel_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -6643,6 +6802,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + + /* + * WA for platforms where async address update enable bit + * is double buffered and only latched at start of vblank. + */ + if (old_crtc_state->uapi.async_flip && + !new_crtc_state->uapi.async_flip && + IS_GEN_RANGE(dev_priv, 9, 10)) + skl_disable_async_flip_wa(state, crtc, new_crtc_state); } static void intel_crtc_disable_planes(struct intel_atomic_state *state, @@ -6942,7 +7110,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, if (intel_crtc_has_dp_encoder(new_crtc_state)) intel_dp_set_m_n(new_crtc_state, M1_N1); - intel_set_pipe_timings(new_crtc_state); + intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); if (new_crtc_state->has_pch_encoder) @@ -7087,7 +7255,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_enable(state, crtc); if (!transcoder_is_dsi(cpu_transcoder)) - intel_set_pipe_timings(new_crtc_state); + intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); @@ -7273,7 +7441,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) return false; else if (IS_ROCKETLAKE(dev_priv)) return phy <= PHY_D; - else if (IS_ELKHARTLAKE(dev_priv)) + else if (IS_JSL_EHL(dev_priv)) return phy <= PHY_C; else if (INTEL_GEN(dev_priv) >= 11) return phy <= PHY_B; @@ -7287,7 +7455,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) return false; else if (INTEL_GEN(dev_priv) >= 12) return phy >= PHY_D && phy <= PHY_I; - else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) + else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv)) return phy >= PHY_C && phy <= PHY_F; else return false; @@ -7295,23 +7463,23 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) { - if (IS_ROCKETLAKE(i915) && port >= PORT_D) - return (enum phy)port - 1; - else if (IS_ELKHARTLAKE(i915) && port == PORT_D) + if (IS_ROCKETLAKE(i915) && port >= PORT_TC1) + return PHY_C + port - PORT_TC1; + else if (IS_JSL_EHL(i915) && port == PORT_D) return PHY_A; - return (enum phy)port; + return PHY_A + port - PORT_A; } enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) { if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) - return PORT_TC_NONE; + return TC_PORT_NONE; if (INTEL_GEN(dev_priv) >= 12) - return port - PORT_D; - - return port - PORT_C; + return 
TC_PORT_1 + port - PORT_TC1; + else + return TC_PORT_1 + port - PORT_C; } enum intel_display_power_domain intel_port_to_power_domain(enum port port) @@ -7331,6 +7499,10 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) return POWER_DOMAIN_PORT_DDI_F_LANES; case PORT_G: return POWER_DOMAIN_PORT_DDI_G_LANES; + case PORT_H: + return POWER_DOMAIN_PORT_DDI_H_LANES; + case PORT_I: + return POWER_DOMAIN_PORT_DDI_I_LANES; default: MISSING_CASE(port); return POWER_DOMAIN_PORT_OTHER; @@ -7356,6 +7528,10 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) return POWER_DOMAIN_AUX_F_TBT; case AUX_CH_G: return POWER_DOMAIN_AUX_G_TBT; + case AUX_CH_H: + return POWER_DOMAIN_AUX_H_TBT; + case AUX_CH_I: + return POWER_DOMAIN_AUX_I_TBT; default: MISSING_CASE(dig_port->aux_ch); return POWER_DOMAIN_AUX_C_TBT; @@ -7387,6 +7563,10 @@ intel_legacy_aux_to_power_domain(enum aux_ch aux_ch) return POWER_DOMAIN_AUX_F; case AUX_CH_G: return POWER_DOMAIN_AUX_G; + case AUX_CH_H: + return POWER_DOMAIN_AUX_H; + case AUX_CH_I: + return POWER_DOMAIN_AUX_I; default: MISSING_CASE(aux_ch); return POWER_DOMAIN_AUX_A; @@ -7470,7 +7650,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, if (intel_crtc_has_dp_encoder(new_crtc_state)) intel_dp_set_m_n(new_crtc_state, M1_N1); - intel_set_pipe_timings(new_crtc_state); + intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { @@ -7538,7 +7718,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, if (intel_crtc_has_dp_encoder(new_crtc_state)) intel_dp_set_m_n(new_crtc_state, M1_N1); - intel_set_pipe_timings(new_crtc_state); + intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); i9xx_set_pipeconf(new_crtc_state); @@ -8155,7 +8335,7 @@ static void compute_m_n(unsigned int m, unsigned int n, * which the devices expect also in synchronous clock mode. 
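 *
 * Editor's worked example (an illustration, not part of the original
 * comment): with constant_n, N is pinned to DP_LINK_CONSTANT_N_VALUE =
 * 0x8000 = 32768. For a 148.5 MHz, 24 bpp stream over 4 lanes at a
 * 270 MHz link clock, data M/N = (148500 * 24) / (270000 * 4 * 8) =
 * 0.4125, so M = 0.4125 * 32768 ~= 13517, i.e. M/N = 13517/32768.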
*/ if (constant_n) - *ret_n = 0x8000; + *ret_n = DP_LINK_CONSTANT_N_VALUE; else *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); @@ -8792,7 +8972,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, crtc_state->dpll_hw_state.dpll = dpll; } -static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) +static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -8878,8 +9058,8 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; } -static void intel_get_pipe_timings(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static void intel_get_transcoder_timings(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -9502,7 +9682,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (INTEL_GEN(dev_priv) < 4) pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; - intel_get_pipe_timings(crtc, pipe_config); + intel_get_transcoder_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); i9xx_get_pfit_config(pipe_config); @@ -10622,6 +10802,10 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, val & PLANE_CTL_FLIP_HORIZONTAL) plane_config->rotation |= DRM_MODE_REFLECT_X; + /* 90/270 degree rotation would require extra work */ + if (drm_rotation_90_or_270(plane_config->rotation)) + goto error; + base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000; plane_config->base = base; @@ -10783,7 +10967,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } - intel_get_pipe_timings(crtc, pipe_config); + intel_get_transcoder_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); ilk_get_pfit_config(pipe_config); @@ -11200,7 +11384,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || INTEL_GEN(dev_priv) >= 11) { hsw_get_ddi_port_state(crtc, pipe_config); - intel_get_pipe_timings(crtc, pipe_config); + intel_get_transcoder_timings(crtc, pipe_config); } intel_get_pipe_src_size(crtc, pipe_config); @@ -11216,18 +11400,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } else { pipe_config->output_format = bdw_get_pipemisc_output_format(crtc); - - /* - * Currently there is no interface defined to - * check user preference between RGB/YCBCR444 - * or YCBCR420. So the only possible case for - * YCBCR444 usage is driving YCBCR420 output - * with LSPCON, when pipe is configured for - * YCBCR444 output and LSPCON takes care of - * downsampling it. 
- */ - pipe_config->lspcon_downsampling = - pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; } pipe_config->gamma_mode = intel_de_read(dev_priv, @@ -11799,6 +11971,9 @@ static void i9xx_update_cursor(struct intel_plane *plane, if (INTEL_GEN(dev_priv) >= 9) skl_write_cursor_wm(plane, crtc_state); + if (!needs_modeset(crtc_state)) + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); + if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || plane->cursor.cntl != cntl) { @@ -12810,8 +12985,11 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, } - if (!mode_changed) - intel_psr2_sel_fetch_update(state, crtc); + if (!mode_changed) { + ret = intel_psr2_sel_fetch_update(state, crtc); + if (ret) + return ret; + } return 0; } @@ -13084,6 +13262,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, transcoder_name(pipe_config->cpu_transcoder), pipe_config->pipe_bpp, pipe_config->dither); + drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n", + transcoder_name(pipe_config->mst_master_transcoder)); + drm_dbg_kms(&dev_priv->drm, "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n", transcoder_name(pipe_config->master_transcoder), @@ -13181,8 +13362,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, pipe_config->csc_mode, pipe_config->gamma_mode, pipe_config->gamma_enable, pipe_config->csc_enable); - drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n", - transcoder_name(pipe_config->mst_master_transcoder)); + drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n", + pipe_config->hw.degamma_lut ? + drm_color_lut_size(pipe_config->hw.degamma_lut) : 0, + pipe_config->hw.gamma_lut ? + drm_color_lut_size(pipe_config->hw.gamma_lut) : 0); dump_planes: if (!state) @@ -13276,6 +13460,7 @@ intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state) crtc_state->hw.active = crtc_state->uapi.active; crtc_state->hw.mode = crtc_state->uapi.mode; crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; + crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state); } @@ -13287,6 +13472,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0); crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; + crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter; /* copy color blobs to uapi */ drm_property_replace_blob(&crtc_state->uapi.degamma_lut, @@ -13471,12 +13657,6 @@ encoder_retry: "hw max bpp: %i, pipe bpp: %i, dithering: %i\n", base_bpp, pipe_config->pipe_bpp, pipe_config->dither); - /* - * Make drm_calc_timestamping_constants in - * drm_atomic_helper_update_legacy_modeset_state() happy - */ - pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode; - return 0; } @@ -14297,7 +14477,6 @@ verify_crtc_state(struct intel_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc_state *pipe_config = old_crtc_state; struct drm_atomic_state *state = old_crtc_state->uapi.state; - bool active; __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); intel_crtc_free_hw_state(old_crtc_state); @@ -14307,16 +14486,19 @@ verify_crtc_state(struct intel_crtc *crtc, drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); - active = dev_priv->display.get_pipe_config(crtc, pipe_config); + pipe_config->hw.enable = 
new_crtc_state->hw.enable; + + pipe_config->hw.active = + dev_priv->display.get_pipe_config(crtc, pipe_config); /* we keep both pipes enabled on 830 */ - if (IS_I830(dev_priv)) - active = new_crtc_state->hw.active; + if (IS_I830(dev_priv) && pipe_config->hw.active) + pipe_config->hw.active = new_crtc_state->hw.active; - I915_STATE_WARN(new_crtc_state->hw.active != active, + I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", - new_crtc_state->hw.active, active); + new_crtc_state->hw.active, pipe_config->hw.active); I915_STATE_WARN(crtc->active != new_crtc_state->hw.active, "transitional active state does not match atomic hw state " @@ -14325,6 +14507,7 @@ verify_crtc_state(struct intel_crtc *crtc, for_each_encoder_on_crtc(dev, &crtc->base, encoder) { enum pipe pipe; + bool active; active = encoder->get_hw_state(encoder, &pipe); I915_STATE_WARN(active != new_crtc_state->hw.active, @@ -14636,16 +14819,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state, static int intel_modeset_checks(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int ret; state->modeset = true; - state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes); - - if (state->active_pipes != dev_priv->active_pipes) { - ret = _intel_atomic_lock_global_state(state); - if (ret) - return ret; - } if (IS_HASWELL(dev_priv)) return hsw_mode_set_planes_workaround(state); @@ -14789,7 +14964,8 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_cdclk_state *new_cdclk_state; + const struct intel_cdclk_state *old_cdclk_state; + const struct intel_cdclk_state *new_cdclk_state; struct intel_plane_state *plane_state; struct intel_bw_state *new_bw_state; struct intel_plane *plane; @@ -14808,9 +14984,11 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state, return ret; } + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed) + if (new_cdclk_state && + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) *need_cdclk_calc = true; ret = dev_priv->display.bw_calc_min_cdclk(state); @@ -14842,8 +15020,10 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state) int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - int ret = intel_crtc_atomic_check(state, crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); + int ret; + + ret = intel_crtc_atomic_check(state, crtc); if (ret) { drm_dbg_atomic(&i915->drm, "[CRTC:%d:%s] atomic driver check failed\n", @@ -14873,6 +15053,139 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, } /** + * DOC: asynchronous flip implementation + * + * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC + * flag. Currently async flip is only supported via the drmModePageFlip IOCTL. + * Correspondingly, support is currently added for primary plane only. + * + * Async flip can only change the plane surface address, so anything else + * changing is rejected from the intel_atomic_check_async() function. + * Once this check is cleared, flip done interrupt is enabled using + * the skl_enable_flip_done() function. 
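+ *
+ * For reference, a minimal userspace trigger is sketched below
+ * (illustrative only; fd, crtc_id, fb_id and user_data are assumed
+ * to have been set up via libdrm beforehand):
+ *
+ *	drmModePageFlip(fd, crtc_id, fb_id,
+ *			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
+ *			user_data);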
+ * + * As soon as the surface address register is written, flip done interrupt is + * generated and the requested events are sent to the userspace in the interrupt + * handler itself. The timestamp and sequence sent during the flip done event + * correspond to the last vblank and have no relation to the actual time when + * the flip done event was sent. + */ +static int intel_atomic_check_async(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state, *new_crtc_state; + const struct intel_plane_state *new_plane_state, *old_plane_state; + struct intel_crtc *crtc; + struct intel_plane *plane; + int i; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (needs_modeset(new_crtc_state)) { + drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); + return -EINVAL; + } + + if (!new_crtc_state->hw.active) { + drm_dbg_kms(&i915->drm, "CRTC inactive\n"); + return -EINVAL; + } + if (old_crtc_state->active_planes != new_crtc_state->active_planes) { + drm_dbg_kms(&i915->drm, + "Active planes cannot be changed during async flip\n"); + return -EINVAL; + } + } + + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) { + /* + * TODO: Async flip is only supported through the page flip IOCTL + * as of now. So support is currently added for the primary plane only. + * Support for other planes on platforms that support + * this (vlv/chv and icl+) should be added when async flip is + * enabled in the atomic IOCTL path. + */ + if (plane->id != PLANE_PRIMARY) + return -EINVAL; + + /* + * FIXME: This check is kept generic for all platforms. + * Need to verify this for all gen9 and gen10 platforms to enable + * this selectively if required.
+ */ + switch (new_plane_state->hw.fb->modifier) { + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + break; + default: + drm_dbg_kms(&i915->drm, + "Linear memory/CCS does not support async flips\n"); + return -EINVAL; + } + + if (old_plane_state->color_plane[0].stride != + new_plane_state->color_plane[0].stride) { + drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n"); + return -EINVAL; + } + + if (old_plane_state->hw.fb->modifier != + new_plane_state->hw.fb->modifier) { + drm_dbg_kms(&i915->drm, + "Framebuffer modifiers cannot be changed in async flip\n"); + return -EINVAL; + } + + if (old_plane_state->hw.fb->format != + new_plane_state->hw.fb->format) { + drm_dbg_kms(&i915->drm, + "Framebuffer format cannot be changed in async flip\n"); + return -EINVAL; + } + + if (old_plane_state->hw.rotation != + new_plane_state->hw.rotation) { + drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n"); + return -EINVAL; + } + + if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || + !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { + drm_dbg_kms(&i915->drm, + "Plane size/co-ordinates cannot be changed in async flip\n"); + return -EINVAL; + } + + if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { + drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n"); + return -EINVAL; + } + + if (old_plane_state->hw.pixel_blend_mode != + new_plane_state->hw.pixel_blend_mode) { + drm_dbg_kms(&i915->drm, + "Pixel blend mode cannot be changed in async flip\n"); + return -EINVAL; + } + + if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { + drm_dbg_kms(&i915->drm, + "Color encoding cannot be changed in async flip\n"); + return -EINVAL; + } + + if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { + drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n"); + return -EINVAL; + } + } + + return 0; +} + +/** * intel_atomic_check - validate state object * @dev: drm device * @_state: state to validate @@ -15040,6 +15353,12 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (new_crtc_state->uapi.async_flip) { + ret = intel_atomic_check_async(state); + if (ret) + goto fail; + } + if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) continue; @@ -15605,6 +15924,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_dbuf_pre_plane_update(state); + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (new_crtc_state->uapi.async_flip) + skl_enable_flip_done(crtc); + } + /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display.commit_modeset_enables(state); @@ -15626,6 +15950,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) drm_atomic_helper_wait_for_flip_done(dev, &state->base); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (new_crtc_state->uapi.async_flip) + skl_disable_flip_done(crtc); + if (new_crtc_state->hw.active && !needs_modeset(new_crtc_state) && !new_crtc_state->preload_luts && @@ -15757,14 +16084,6 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state) plane->frontbuffer_bit); } -static void assert_global_state_locked(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - - for_each_intel_crtc(&dev_priv->drm, crtc) - drm_modeset_lock_assert_held(&crtc->base.mutex); -} - static int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, bool nonblock) @@ -15840,12 +16159,6 @@ static int intel_atomic_commit(struct drm_device *dev, intel_shared_dpll_swap_state(state); intel_atomic_track_fbs(state); - if (state->global_state_changed) { - assert_global_state_locked(dev_priv); - - dev_priv->active_pipes = state->active_pipes; - } - drm_atomic_state_get(&state->base); INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); @@ -16754,6 +17067,11 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc; } + if (INTEL_GEN(dev_priv) >= 10) + drm_crtc_create_scaling_filter_property(&crtc->base, + BIT(DRM_SCALING_FILTER_DEFAULT) | + BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); + intel_color_init(crtc); intel_crtc_crc_init(crtc); @@ -16892,25 +17210,25 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_pps_init(dev_priv); - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) + if (!HAS_DISPLAY(dev_priv)) return; if (IS_ROCKETLAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_D); /* DDI TC1 */ - intel_ddi_init(dev_priv, PORT_E); /* DDI TC2 */ + intel_ddi_init(dev_priv, PORT_TC1); + intel_ddi_init(dev_priv, PORT_TC2); } else if (INTEL_GEN(dev_priv) >= 12) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); - intel_ddi_init(dev_priv, PORT_D); - intel_ddi_init(dev_priv, PORT_E); - intel_ddi_init(dev_priv, PORT_F); - intel_ddi_init(dev_priv, PORT_G); - intel_ddi_init(dev_priv, PORT_H); - intel_ddi_init(dev_priv, PORT_I); + intel_ddi_init(dev_priv, PORT_TC1); + intel_ddi_init(dev_priv, PORT_TC2); + intel_ddi_init(dev_priv, PORT_TC3); + intel_ddi_init(dev_priv, PORT_TC4); + intel_ddi_init(dev_priv, PORT_TC5); + intel_ddi_init(dev_priv, PORT_TC6); icl_dsi_init(dev_priv); - } else if (IS_ELKHARTLAKE(dev_priv)) { + } else if (IS_JSL_EHL(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); @@ -17745,6 +18063,8 @@ retry: } if (crtc_state->hw.active) { + struct intel_encoder *encoder; + /* * We've not yet detected sink capabilities * (audio,infoframes,etc.) and thus we don't want to @@ -17766,22 +18086,15 @@ retry: */ crtc_state->uapi.color_mgmt_changed = true; - /* - * FIXME hack to force full modeset when DSC is being - * used. - * - * As long as we do not have full state readout and - * config comparison of crtc_state->dsc, we have no way - * to ensure reliable fastset. Remove once we have * readout for DSC.
- */ - if (crtc_state->dsc.compression_enable) { - ret = drm_atomic_add_affected_connectors(state, - &crtc->base); - if (ret) - goto out; - crtc_state->uapi.mode_changed = true; - drm_dbg_kms(dev, "Force full modeset for DSC\n"); + for_each_intel_encoder_mask(dev, encoder, + crtc_state->uapi.encoder_mask) { + if (encoder->initial_fastset_check && + !encoder->initial_fastset_check(encoder, crtc_state)) { + ret = drm_atomic_add_affected_connectors(state, + &crtc->base); + if (ret) + goto out; + } } } } @@ -17820,6 +18133,9 @@ static void intel_mode_config_init(struct drm_i915_private *i915) mode_config->funcs = &intel_mode_funcs; + if (INTEL_GEN(i915) >= 9) + mode_config->async_page_flip = true; + /* * Maximum framebuffer dimensions, chosen to match * the maximum render engine surface size on gen4+. @@ -17878,6 +18194,27 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) { int ret; + if (i915_inject_probe_failure(i915)) + return -ENODEV; + + if (HAS_DISPLAY(i915)) { + ret = drm_vblank_init(&i915->drm, + INTEL_NUM_PIPES(i915)); + if (ret) + return ret; + } + + intel_bios_init(i915); + + ret = intel_vga_register(i915); + if (ret) + goto cleanup_bios; + + /* FIXME: completely on the wrong abstraction layer */ + intel_power_domains_init_hw(i915, false); + + intel_csr_ucode_init(i915); + i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); @@ -17886,15 +18223,15 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) ret = intel_cdclk_init(i915); if (ret) - return ret; + goto cleanup_vga_client_pw_domain_csr; ret = intel_dbuf_init(i915); if (ret) - return ret; + goto cleanup_vga_client_pw_domain_csr; ret = intel_bw_init(i915); if (ret) - return ret; + goto cleanup_vga_client_pw_domain_csr; init_llist_head(&i915->atomic_helper.free_list); INIT_WORK(&i915->atomic_helper.free_work, @@ -17905,10 +18242,19 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) intel_fbc_init(i915); return 0; + +cleanup_vga_client_pw_domain_csr: + intel_csr_ucode_fini(i915); + intel_power_domains_driver_remove(i915); + intel_vga_unregister(i915); +cleanup_bios: + intel_bios_driver_remove(i915); + + return ret; } -/* part #2: call after irq install */ -int intel_modeset_init(struct drm_i915_private *i915) +/* part #2: call after irq install, but before gem init */ +int intel_modeset_init_nogem(struct drm_i915_private *i915) { struct drm_device *dev = &i915->drm; enum pipe pipe; @@ -17925,7 +18271,7 @@ int intel_modeset_init(struct drm_i915_private *i915) INTEL_NUM_PIPES(i915), INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); - if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { + if (HAS_DISPLAY(i915)) { for_each_pipe(i915, pipe) { ret = intel_crtc_init(i915, pipe); if (ret) { @@ -18007,6 +18353,31 @@ int intel_modeset_init(struct drm_i915_private *i915) return 0; } +/* part #3: call after gem init */ +int intel_modeset_init(struct drm_i915_private *i915) +{ + int ret; + + intel_overlay_setup(i915); + + if (!HAS_DISPLAY(i915)) + return 0; + + ret = intel_fbdev_init(&i915->drm); + if (ret) + return ret; + + /* Only enable hotplug handling once the fbdev is fully set up. 
*/ + intel_hpd_init(i915); + intel_hpd_poll_disable(i915); + + intel_init_ipc(i915); + + intel_psr_set_force_mode_changed(i915->psr.dp); + + return 0; +} + void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); @@ -18465,6 +18836,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) encoder->base.crtc = &crtc->base; encoder->get_config(encoder, crtc_state); + if (encoder->sync_state) + encoder->sync_state(encoder, crtc_state); } else { encoder->base.crtc = NULL; } @@ -18635,6 +19008,15 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, CHICKEN_PAR1_1, intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); } + + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) { + /* Display WA #1142:kbl,cfl,cml */ + intel_de_rmw(dev_priv, CHICKEN_PAR1_1, + KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22); + intel_de_rmw(dev_priv, CHICKEN_MISC_2, + KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14, + KBL_ARB_FILL_SPARE_14); + } } static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, @@ -18891,6 +19273,18 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) intel_fbc_cleanup_cfb(i915); } +/* part #3: call after gem init */ +void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) +{ + intel_csr_ucode_fini(i915); + + intel_power_domains_driver_remove(i915); + + intel_vga_unregister(i915); + + intel_bios_driver_remove(i915); +} + #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct intel_display_error_state { @@ -18951,7 +19345,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) + if (!HAS_DISPLAY(dev_priv)) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index e890c8fb779b..be774f216065 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -28,6 +28,7 @@ #include <drm/drm_util.h> enum link_m_n_set; +enum drm_scaling_filter; struct dpll; struct drm_connector; struct drm_device; @@ -207,6 +208,14 @@ enum port { PORT_H, PORT_I, + /* tgl+ */ + PORT_TC1 = PORT_D, + PORT_TC2, + PORT_TC3, + PORT_TC4, + PORT_TC5, + PORT_TC6, + I915_MAX_PORTS }; @@ -243,14 +252,14 @@ static inline const char *port_identifier(enum port port) } enum tc_port { - PORT_TC_NONE = -1, + TC_PORT_NONE = -1, - PORT_TC1 = 0, - PORT_TC2, - PORT_TC3, - PORT_TC4, - PORT_TC5, - PORT_TC6, + TC_PORT_1 = 0, + TC_PORT_2, + TC_PORT_3, + TC_PORT_4, + TC_PORT_5, + TC_PORT_6, I915_MAX_TC_PORTS }; @@ -272,8 +281,6 @@ enum dpio_phy { DPIO_PHY2, }; -#define I915_NUM_PHYS_VLV 2 - enum aux_ch { AUX_CH_A, AUX_CH_B, @@ -282,6 +289,16 @@ enum aux_ch { AUX_CH_E, /* ICL+ */ AUX_CH_F, AUX_CH_G, + AUX_CH_H, + AUX_CH_I, + + /* tgl+ */ + AUX_CH_USBC1 = AUX_CH_D, + AUX_CH_USBC2, + AUX_CH_USBC3, + AUX_CH_USBC4, + AUX_CH_USBC5, + AUX_CH_USBC6, }; #define aux_ch_name(a) ((a) + 'A') @@ -599,6 +616,9 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); +u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set); +void skl_scaler_setup_filter(struct 
drm_i915_private *dev_priv, enum pipe pipe, + int id, int set, enum drm_scaling_filter filter); void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); @@ -629,9 +649,11 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, /* modesetting */ void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init_noirq(struct drm_i915_private *i915); +int intel_modeset_init_nogem(struct drm_i915_private *i915); int intel_modeset_init(struct drm_i915_private *i915); void intel_modeset_driver_remove(struct drm_i915_private *i915); void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); +void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915); void intel_display_resume(struct drm_device *dev); void intel_init_pch_refclk(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index f549381048b3..cfb4c1474982 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -518,8 +518,13 @@ static int i915_dmc_info(struct seq_file *m, void *unused) CSR_VERSION_MINOR(csr->version)); if (INTEL_GEN(dev_priv) >= 12) { - dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; - dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; + if (IS_DGFX(dev_priv)) { + dc5_reg = DG1_DMC_DEBUG_DC5_COUNT; + } else { + dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; + dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; + } + /* * NOTE: DMC_DEBUG3 is a general purpose reg. * According to B.Specs:49196 DMC f/w reuses DC5/6 counter @@ -601,6 +606,11 @@ static void intel_hdcp_info(struct seq_file *m, { bool hdcp_cap, hdcp2_cap; + if (!intel_connector->hdcp.shim) { + seq_puts(m, "No Connector Support"); + goto out; + } + hdcp_cap = intel_hdcp_capable(intel_connector); hdcp2_cap = intel_hdcp2_capable(intel_connector); @@ -612,6 +622,7 @@ static void intel_hdcp_info(struct seq_file *m, if (!hdcp_cap && !hdcp2_cap) seq_puts(m, "None"); +out: seq_puts(m, "\n"); } @@ -620,6 +631,7 @@ static void intel_dp_info(struct seq_file *m, { struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); + const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr; seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); @@ -627,11 +639,7 @@ static void intel_dp_info(struct seq_file *m, intel_panel_info(m, &intel_connector->panel); drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, - &intel_dp->aux); - if (intel_connector->hdcp.shim) { - seq_puts(m, "\tHDCP version: "); - intel_hdcp_info(m, intel_connector); - } + edid ? 
edid->data : NULL, &intel_dp->aux); } static void intel_dp_mst_info(struct seq_file *m, @@ -649,10 +657,6 @@ static void intel_hdmi_info(struct seq_file *m, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); - if (intel_connector->hdcp.shim) { - seq_puts(m, "\tHDCP version: "); - intel_hdcp_info(m, intel_connector); - } } static void intel_lvds_info(struct seq_file *m, @@ -708,6 +712,9 @@ static void intel_connector_info(struct seq_file *m, break; } + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + seq_printf(m, "\tmodes:\n"); list_for_each_entry(mode, &connector->modes, head) intel_seq_print_mode(m, 2, mode); @@ -1069,10 +1076,18 @@ static void drrs_status_per_crtc(struct seq_file *m, drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + bool supported = false; + if (connector->state->crtc != &intel_crtc->base) continue; seq_printf(m, "%s:\n", connector->name); + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && + drrs->type == SEAMLESS_DRRS_SUPPORT) + supported = true; + + seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported)); } drm_connector_list_iter_end(&conn_iter); @@ -1083,7 +1098,7 @@ static void drrs_status_per_crtc(struct seq_file *m, mutex_lock(&drrs->mutex); /* DRRS Supported */ - seq_puts(m, "\tDRRS Supported: Yes\n"); + seq_puts(m, "\tDRRS Enabled: Yes\n"); /* disable_drrs() will make drrs->dp NULL */ if (!drrs->dp) { @@ -1118,7 +1133,7 @@ static void drrs_status_per_crtc(struct seq_file *m, mutex_unlock(&drrs->mutex); } else { /* DRRS not supported. Print the VBT parameter*/ - seq_puts(m, "\tDRRS Supported : No"); + seq_puts(m, "\tDRRS Enabled : No"); } seq_puts(m, "\n"); } @@ -2029,10 +2044,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) if (connector->status != connector_status_connected) return -ENODEV; - /* HDCP is supported by connector */ - if (!intel_connector->hdcp.shim) - return -EINVAL; - seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); intel_hdcp_info(m, intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 7946c6af4b1e..689922480661 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1424,6 +1424,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) return; intel_hpd_init(dev_priv); + intel_hpd_poll_disable(dev_priv); /* Re-enable the ADPA, if we have one */ for_each_intel_encoder(&dev_priv->drm, encoder) { @@ -1449,7 +1450,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) /* Prevent us from re-enabling polling on accident in late suspend */ if (!dev_priv->drm.dev->power.is_suspended) - intel_hpd_poll_init(dev_priv); + intel_hpd_poll_enable(dev_priv); } static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, @@ -3650,7 +3651,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DDI F IO power well", .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = CNL_DISP_PW_DDI_F_IO, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = CNL_PW_CTL_IDX_DDI_F, @@ -3660,7 +3661,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "AUX F", .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = 
DISP_PW_ID_NONE, + .id = CNL_DISP_PW_DDI_F_AUX, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = CNL_PW_CTL_IDX_AUX_F, @@ -4150,7 +4151,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .name = "TC cold off", .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, .ops = &tgl_tc_cold_off_ops, - .id = DISP_PW_ID_NONE, + .id = TGL_DISP_PW_TC_COLD_OFF, }, { .name = "AUX A", @@ -4492,7 +4493,10 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, int max_dc; if (INTEL_GEN(dev_priv) >= 12) { - max_dc = 4; + if (IS_DG1(dev_priv)) + max_dc = 3; + else + max_dc = 4; /* * DC9 has a separate HW flow from the rest of the DC states, * not depending on the DMC firmware. It's needed by system @@ -4554,13 +4558,18 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, static int __set_power_wells(struct i915_power_domains *power_domains, const struct i915_power_well_desc *power_well_descs, - int power_well_count) + int power_well_descs_sz, u64 skip_mask) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); u64 power_well_ids = 0; - int i; + int power_well_count = 0; + int i, plt_idx = 0; + + for (i = 0; i < power_well_descs_sz; i++) + if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) + power_well_count++; power_domains->power_well_count = power_well_count; power_domains->power_wells = @@ -4570,10 +4579,14 @@ __set_power_wells(struct i915_power_domains *power_domains, if (!power_domains->power_wells) return -ENOMEM; - for (i = 0; i < power_well_count; i++) { + for (i = 0; i < power_well_descs_sz; i++) { enum i915_power_well_id id = power_well_descs[i].id; - power_domains->power_wells[i].desc = &power_well_descs[i]; + if (BIT_ULL(id) & skip_mask) + continue; + + power_domains->power_wells[plt_idx++].desc = + &power_well_descs[i]; if (id == DISP_PW_ID_NONE) continue; @@ -4586,9 +4599,12 @@ __set_power_wells(struct i915_power_domains *power_domains, return 0; } -#define set_power_wells(power_domains, __power_well_descs) \ +#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ __set_power_wells(power_domains, __power_well_descs, \ - ARRAY_SIZE(__power_well_descs)) + ARRAY_SIZE(__power_well_descs), skip_mask) + +#define set_power_wells(power_domains, __power_well_descs) \ + set_power_wells_mask(power_domains, __power_well_descs, 0) /** * intel_power_domains_init - initializes the power domain structures @@ -4622,23 +4638,21 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. */ - if (IS_ROCKETLAKE(dev_priv)) { + if (IS_DG1(dev_priv)) { + err = set_power_wells_mask(power_domains, tgl_power_wells, + BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); + } else if (IS_ROCKETLAKE(dev_priv)) { err = set_power_wells(power_domains, rkl_power_wells); } else if (IS_GEN(dev_priv, 12)) { err = set_power_wells(power_domains, tgl_power_wells); } else if (IS_GEN(dev_priv, 11)) { err = set_power_wells(power_domains, icl_power_wells); - } else if (IS_CANNONLAKE(dev_priv)) { + } else if (IS_CNL_WITH_PORT_F(dev_priv)) { err = set_power_wells(power_domains, cnl_power_wells); - - /* - * DDI and Aux IO are getting enabled for all ports - * regardless the presence or use. So, in order to avoid - * timeouts, lets remove them from the list - * for the SKUs without port F. 
- */ - if (!IS_CNL_WITH_PORT_F(dev_priv)) - power_domains->power_well_count -= 2; + } else if (IS_CANNONLAKE(dev_priv)) { + err = set_power_wells_mask(power_domains, cnl_power_wells, + BIT_ULL(CNL_DISP_PW_DDI_F_IO) | + BIT_ULL(CNL_DISP_PW_DDI_F_AUX)); } else if (IS_GEMINILAKE(dev_priv)) { err = set_power_wells(power_domains, glk_power_wells); } else if (IS_BROXTON(dev_priv)) { @@ -4758,6 +4772,17 @@ static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) gen9_dbuf_slices_update(dev_priv, 0); } +static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) +{ + const int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; + enum dbuf_slice slice; + + for (slice = DBUF_S1; slice < (DBUF_S1 + num_slices); slice++) + intel_de_rmw(dev_priv, DBUF_CTL_S(slice), + DBUF_TRACKER_STATE_SERVICE_MASK, + DBUF_TRACKER_STATE_SERVICE(8)); +} + static void icl_mbus_init(struct drm_i915_private *dev_priv) { unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; @@ -5263,8 +5288,9 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; int config, i; - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) - /* Wa_1409767108: tgl */ + if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) || + IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) + /* Wa_1409767108:tgl,dg1 */ table = wa_1409767108_buddy_page_masks; else table = tgl_buddy_page_masks; @@ -5326,6 +5352,9 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, /* 4. Enable CDCLK. */ intel_cdclk_init_hw(dev_priv); + if (INTEL_GEN(dev_priv) >= 12) + gen12_dbuf_slices_config(dev_priv); + /* 5. Enable DBUF. */ gen9_dbuf_enable(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 54c20c76057e..4aa0a09cf14f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -101,8 +101,11 @@ enum i915_power_well_id { SKL_DISP_PW_MISC_IO, SKL_DISP_PW_1, SKL_DISP_PW_2, + CNL_DISP_PW_DDI_F_IO, + CNL_DISP_PW_DDI_F_AUX, ICL_DISP_PW_3, SKL_DISP_DC_OFF, + TGL_DISP_PW_TC_COLD_OFF, }; #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 9349b15afff6..f6f0626649e0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -28,6 +28,7 @@ #include <linux/async.h> #include <linux/i2c.h> +#include <linux/pwm.h> #include <linux/sched/clock.h> #include <drm/drm_atomic.h> @@ -186,6 +187,21 @@ struct intel_encoder { * be set correctly before calling this function. */ void (*get_config)(struct intel_encoder *, struct intel_crtc_state *pipe_config); + + /* + * Optional hook called during init/resume to sync any state + * stored in the encoder (eg. DP link parameters) wrt. the HW state. + */ + void (*sync_state)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); + + /* + * Optional hook, returning true if this encoder allows a fastset + * during the initial commit, false otherwise. + */ + bool (*initial_fastset_check)(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); + /* * Acquires the power domains needed for an active encoder during * hardware state readout. @@ -198,6 +214,11 @@ struct intel_encoder { * device interrupts are disabled. 
*/ void (*suspend)(struct intel_encoder *); + /* + * Called during system reboot/shutdown after all the + * encoders have been disabled and suspended. + */ + void (*shutdown)(struct intel_encoder *encoder); enum hpd_pin hpd_pin; enum intel_display_power_domain power_domain; /* for communication with audio component; protected by av_mutex */ @@ -223,6 +244,7 @@ struct intel_panel { bool util_pin_active_low; /* bxt+ */ u8 controller; /* bxt+ only */ struct pwm_device *pwm; + struct pwm_state pwm_state; /* DPCD backlight */ u8 pwmgen_bit_count; @@ -314,10 +336,12 @@ struct intel_hdcp_shim { /* Enables HDCP signalling on the port */ int (*toggle_signalling)(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, bool enable); /* Ensures the link is still protected */ - bool (*check_link)(struct intel_digital_port *dig_port); + bool (*check_link)(struct intel_digital_port *dig_port, + struct intel_connector *connector); /* Detects panel's hdcp capability. This is optional for HDMI. */ int (*hdcp_capable)(struct intel_digital_port *dig_port, @@ -479,8 +503,6 @@ struct intel_atomic_state { bool dpll_set, modeset; - u8 active_pipes; - struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; /* @@ -491,11 +513,6 @@ struct intel_atomic_state { bool rps_interactive; - /* - * active_pipes - */ - bool global_state_changed; - struct i915_sw_fence commit_ready; struct llist_node freed; @@ -518,6 +535,7 @@ struct intel_plane_state { unsigned int rotation; enum drm_color_encoding color_encoding; enum drm_color_range color_range; + enum drm_scaling_filter scaling_filter; } hw; struct i915_ggtt_view view; @@ -808,6 +826,7 @@ struct intel_crtc_state { bool active, enable; struct drm_property_blob *degamma_lut, *gamma_lut, *ctm; struct drm_display_mode mode, adjusted_mode; + enum drm_scaling_filter scaling_filter; } hw; /** @@ -1038,9 +1057,6 @@ struct intel_crtc_state { /* Output format RGB/YCBCR etc */ enum intel_output_format output_format; - /* Output down scaling is done in LSPCON device */ - bool lspcon_downsampling; - /* enable pipe gamma? */ bool gamma_enable; @@ -1186,6 +1202,9 @@ struct intel_plane { struct intel_plane_state *plane_state); int (*min_cdclk)(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); + void (*async_flip)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); }; struct intel_watermark_params { @@ -1273,8 +1292,8 @@ struct intel_dp { int link_rate; u8 lane_count; u8 sink_count; - bool link_mst; bool link_trained; + bool has_hdmi_sink; bool has_audio; bool reset_link_params; u8 dpcd[DP_RECEIVER_CAP_SIZE]; @@ -1282,6 +1301,8 @@ struct intel_dp { u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; + u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE]; + u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE]; u8 fec_capable; /* source rates */ int num_source_rates; @@ -1314,8 +1335,6 @@ struct intel_dp { unsigned long last_backlight_off; ktime_t panel_power_off_time; - struct notifier_block edp_notifier; - /* * Pipe whose power sequencer is currently locked into * this port. Only relevant on VLV/CHV. @@ -1338,14 +1357,6 @@ struct intel_dp { bool is_mst; int active_mst_links; - /* - * DP_TP_* registers may be either on port or transcoder register space. 
- */ - struct { - i915_reg_t dp_tp_ctl; - i915_reg_t dp_tp_status; - } regs; - /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; @@ -1365,17 +1376,31 @@ struct intel_dp { i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index); /* This is called before a link training is starterd */ - void (*prepare_link_retrain)(struct intel_dp *intel_dp); - void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat); - void (*set_idle_link_train)(struct intel_dp *intel_dp); - void (*set_signal_levels)(struct intel_dp *intel_dp); + void (*prepare_link_retrain)(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); + void (*set_link_train)(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + u8 dp_train_pat); + void (*set_idle_link_train)(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); + void (*set_signal_levels)(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); u8 (*preemph_max)(struct intel_dp *intel_dp); - u8 (*voltage_max)(struct intel_dp *intel_dp); + u8 (*voltage_max)(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); /* Displayport compliance testing */ struct intel_dp_compliance compliance; + /* Downstream facing port caps */ + struct { + int min_tmds_clock, max_tmds_clock; + int max_dotclock; + u8 max_bpc; + bool ycbcr_444_to_420; + } dfp; + /* Display stream compression testing */ bool force_dsc_en; @@ -1415,6 +1440,11 @@ struct intel_digital_port { enum phy_fia tc_phy_fia; u8 tc_phy_fia_idx; + /* protects num_hdcp_streams reference count */ + struct mutex hdcp_mutex; + /* the number of pipes using HDCP signalling out of this port */ + unsigned int num_hdcp_streams; + void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, @@ -1525,6 +1555,18 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) } } +static inline bool intel_encoder_is_mst(struct intel_encoder *encoder) +{ + return encoder->type == INTEL_OUTPUT_DP_MST; +} + +static inline struct intel_dp_mst_encoder * +enc_to_mst(struct intel_encoder *encoder) +{ + return container_of(&encoder->base, struct intel_dp_mst_encoder, + base.base); +} + static inline struct intel_digital_port * enc_to_dig_port(struct intel_encoder *encoder) { @@ -1533,6 +1575,8 @@ enc_to_dig_port(struct intel_encoder *encoder) if (intel_encoder_is_dig_port(intel_encoder)) return container_of(&encoder->base, struct intel_digital_port, base.base); + else if (intel_encoder_is_mst(intel_encoder)) + return enc_to_mst(encoder)->primary; else return NULL; } @@ -1543,13 +1587,6 @@ intel_attached_dig_port(struct intel_connector *connector) return enc_to_dig_port(intel_attached_encoder(connector)); } -static inline struct intel_dp_mst_encoder * -enc_to_mst(struct intel_encoder *encoder) -{ - return container_of(&encoder->base, struct intel_dp_mst_encoder, - base.base); -} - static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder) { return &enc_to_dig_port(encoder)->dp; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 284b15f84592..cf09aca7607b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -28,7 +28,6 @@ #include <linux/export.h> #include <linux/i2c.h> #include <linux/notifier.h> -#include <linux/reboot.h> #include <linux/slab.h> #include <linux/types.h> @@ -38,7 +37,6 @@ #include 
<drm/drm_crtc.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_hdcp.h> #include <drm/drm_probe_helper.h> #include "i915_debugfs.h" @@ -162,6 +160,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 162000, 270000, 540000, 810000 }; int i, max_rate; + int max_lttpr_rate; if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { @@ -175,6 +174,9 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) } max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); + max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); + if (max_lttpr_rate) + max_rate = min(max_rate, max_lttpr_rate); for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { if (dp_rates[i] > max_rate) @@ -220,6 +222,10 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) int source_max = dig_port->max_lanes; int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); int fia_max = intel_tc_port_fia_max_lane_count(dig_port); + int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); + + if (lttpr_max) + sink_max = min(sink_max, lttpr_max); return min3(source_max, sink_max, fia_max); } @@ -248,29 +254,6 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) return max_link_clock * max_lanes; } -static int -intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct intel_encoder *encoder = &dig_port->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int max_dotclk = dev_priv->max_dotclk_freq; - int ds_max_dotclk; - - int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; - - if (type != DP_DS_PORT_TYPE_VGA) - return max_dotclk; - - ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd, - intel_dp->downstream_ports); - - if (ds_max_dotclk != 0) - max_dotclk = min(max_dotclk, ds_max_dotclk); - - return max_dotclk; -} - static int cnl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -301,13 +284,20 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); if (intel_phy_is_combo(dev_priv, phy) && - !IS_ELKHARTLAKE(dev_priv) && !intel_dp_is_edp(intel_dp)) return 540000; return 810000; } +static int ehl_max_source_rate(struct intel_dp *intel_dp) +{ + if (intel_dp_is_edp(intel_dp)) + return 540000; + + return 810000; +} + static void intel_dp_set_source_rates(struct intel_dp *intel_dp) { @@ -342,6 +332,8 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) size = ARRAY_SIZE(cnl_rates); if (IS_GEN(dev_priv, 10)) max_rate = cnl_max_source_rate(intel_dp); + else if (IS_JSL_EHL(dev_priv)) + max_rate = ehl_max_source_rate(intel_dp); else max_rate = icl_max_source_rate(intel_dp); } else if (IS_GEN9_LP(dev_priv)) { @@ -616,6 +608,54 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } +static enum intel_output_format +intel_dp_output_format(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); + const struct drm_display_info *info = &connector->display_info; + + if (!connector->ycbcr_420_allowed || + !drm_mode_is_420_only(info, mode)) + return INTEL_OUTPUT_FORMAT_RGB; + + if (intel_dp->dfp.ycbcr_444_to_420) + return INTEL_OUTPUT_FORMAT_YCBCR444; + else + return INTEL_OUTPUT_FORMAT_YCBCR420; +} + +int intel_dp_min_bpp(enum 
intel_output_format output_format) +{ + if (output_format == INTEL_OUTPUT_FORMAT_RGB) + return 6 * 3; + else + return 8 * 3; +} + +static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) +{ + /* + * The bpp value is assumed to be for the RGB format. For YCbCr 4:2:0 + * output the number of bytes per pixel will be half that of RGB. + */ + if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + bpp /= 2; + + return bpp; +} + +static int +intel_dp_mode_min_output_bpp(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + enum intel_output_format output_format = + intel_dp_output_format(connector, mode); + + return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); +} + static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, int hdisplay) { @@ -636,6 +676,34 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, } static enum drm_mode_status +intel_dp_mode_valid_downstream(struct intel_connector *connector, + const struct drm_display_mode *mode, + int target_clock) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + const struct drm_display_info *info = &connector->base.display_info; + int tmds_clock; + + if (intel_dp->dfp.max_dotclock && + target_clock > intel_dp->dfp.max_dotclock) + return MODE_CLOCK_HIGH; + + /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ + tmds_clock = target_clock; + if (drm_mode_is_420_only(info, mode)) + tmds_clock /= 2; + + if (intel_dp->dfp.min_tmds_clock && + tmds_clock < intel_dp->dfp.min_tmds_clock) + return MODE_CLOCK_LOW; + if (intel_dp->dfp.max_tmds_clock && + tmds_clock > intel_dp->dfp.max_tmds_clock) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { @@ -645,15 +713,14 @@ intel_dp_mode_valid(struct drm_connector *connector, struct drm_i915_private *dev_priv = to_i915(connector->dev); int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; - int max_dotclk; + int max_dotclk = dev_priv->max_dotclk_freq; u16 dsc_max_output_bpp = 0; u8 dsc_slice_count = 0; + enum drm_mode_status status; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); - if (intel_dp_is_edp(intel_dp) && fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; @@ -668,7 +735,8 @@ intel_dp_mode_valid(struct drm_connector *connector, max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); - mode_rate = intel_dp_link_required(target_clock, 18); + mode_rate = intel_dp_link_required(target_clock, + intel_dp_mode_min_output_bpp(connector, mode)); if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) return MODE_H_ILLEGAL; @@ -709,6 +777,11 @@ intel_dp_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; + status = intel_dp_mode_valid_downstream(intel_connector, + mode, target_clock); + if (status != MODE_OK) + return status; + return intel_mode_valid_max_plane_size(dev_priv, mode); } @@ -1135,41 +1208,6 @@ _pp_stat_reg(struct intel_dp *intel_dp) return regs.pp_stat; } -/* Reboot notifier handler to shutdown panel power to guarantee T12 timing This function only applicable when panel PM state is not to be tracked */ -static int edp_notify_handler(struct notifier_block *this, unsigned long code, - void *unused) -{ - struct intel_dp
*intel_dp = container_of(this, typeof(* intel_dp), - edp_notifier); - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - intel_wakeref_t wakeref; - - if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) - return 0; - - with_pps_lock(intel_dp, wakeref) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); - i915_reg_t pp_ctrl_reg, pp_div_reg; - u32 pp_div; - - pp_ctrl_reg = PP_CONTROL(pipe); - pp_div_reg = PP_DIVISOR(pipe); - pp_div = intel_de_read(dev_priv, pp_div_reg); - pp_div &= PP_REFERENCE_DIVIDER_MASK; - - /* 0x1F write to PP_DIV_REG sets max cycle delay */ - intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F); - intel_de_write(dev_priv, pp_ctrl_reg, - PANEL_UNLOCK_REGS); - msleep(intel_dp->panel_power_cycle_delay); - } - } - - return 0; -} - static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -1563,6 +1601,20 @@ intel_dp_aux_header(u8 txbuf[HEADER_SIZE], txbuf[3] = msg->size - 1; } +static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg) +{ + /* + * If we're trying to send the HDCP Aksv, we need to set the Aksv + * select bit to inform the hardware to send the Aksv after our header + * since we can't access that data from software. + */ + if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE && + msg->address == DP_AUX_HDCP_AKSV) + return DP_AUX_CH_CTL_AUX_AKSV_SELECT; + + return 0; +} + static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -1570,6 +1622,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 txbuf[20], rxbuf[20]; size_t txsize, rxsize; + u32 flags = intel_dp_aux_xfer_flags(msg); int ret; intel_dp_aux_header(txbuf, msg); @@ -1590,7 +1643,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, - rxbuf, rxsize, 0); + rxbuf, rxsize, flags); if (ret > 0) { msg->reply = rxbuf[0] >> 4; @@ -1613,7 +1666,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) return -E2BIG; ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, - rxbuf, rxsize, 0); + rxbuf, rxsize, flags); if (ret > 0) { msg->reply = rxbuf[0] >> 4; /* @@ -1721,7 +1774,6 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) case AUX_CH_D: case AUX_CH_E: case AUX_CH_F: - case AUX_CH_G: return DP_AUX_CH_CTL(aux_ch); default: MISSING_CASE(aux_ch); @@ -1742,7 +1794,52 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) case AUX_CH_D: case AUX_CH_E: case AUX_CH_F: - case AUX_CH_G: + return DP_AUX_CH_DATA(aux_ch, index); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_DATA(AUX_CH_A, index); + } +} + +static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_USBC1: + case AUX_CH_USBC2: + case AUX_CH_USBC3: + case AUX_CH_USBC4: + case AUX_CH_USBC5: + case AUX_CH_USBC6: + return DP_AUX_CH_CTL(aux_ch); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_CTL(AUX_CH_A); + } +} + +static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv =
dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_USBC1: + case AUX_CH_USBC2: + case AUX_CH_USBC3: + case AUX_CH_USBC4: + case AUX_CH_USBC5: + case AUX_CH_USBC6: return DP_AUX_CH_DATA(aux_ch, index); default: MISSING_CASE(aux_ch); @@ -1762,8 +1859,12 @@ intel_dp_aux_init(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; + enum aux_ch aux_ch = dig_port->aux_ch; - if (INTEL_GEN(dev_priv) >= 9) { + if (INTEL_GEN(dev_priv) >= 12) { + intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg; + intel_dp->aux_ch_data_reg = tgl_aux_data_reg; + } else if (INTEL_GEN(dev_priv) >= 9) { intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; intel_dp->aux_ch_data_reg = skl_aux_data_reg; } else if (HAS_PCH_SPLIT(dev_priv)) { @@ -1791,9 +1892,15 @@ intel_dp_aux_init(struct intel_dp *intel_dp) drm_dp_aux_init(&intel_dp->aux); /* Failure to allocate our preferred name is not critical */ - intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c", - aux_ch_name(dig_port->aux_ch), - port_name(encoder->port)); + if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1) + intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s", + aux_ch - AUX_CH_USBC1 + '1', + encoder->base.name); + else + intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s", + aux_ch_name(aux_ch), + encoder->base.name); + intel_dp->aux.transfer = intel_dp_aux_transfer; } @@ -1954,19 +2061,72 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); } -static int intel_dp_compute_bpp(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config) +static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || + (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && + intel_dp->dfp.ycbcr_444_to_420); +} + +static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, int bpc) +{ + int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; + + if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) + clock /= 2; + + return clock; +} + +static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, int bpc) +{ + int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc); + + if (intel_dp->dfp.min_tmds_clock && + tmds_clock < intel_dp->dfp.min_tmds_clock) + return false; + + if (intel_dp->dfp.max_tmds_clock && + tmds_clock > intel_dp->dfp.max_tmds_clock) + return false; + + return true; +} + +static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int bpc) +{ + + return intel_hdmi_deep_color_possible(crtc_state, bpc, + intel_dp->has_hdmi_sink, + intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && + intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); +} + +static int intel_dp_max_bpp(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_connector *intel_connector = intel_dp->attached_connector; int bpp, bpc; - bpp = pipe_config->pipe_bpp; - bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); + bpc = 
crtc_state->pipe_bpp / 3; + + if (intel_dp->dfp.max_bpc) + bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); - if (bpc > 0) - bpp = min(bpp, 3*bpc); + if (intel_dp->dfp.min_tmds_clock) { + for (; bpc >= 10; bpc -= 2) { + if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) + break; + } + } + bpp = bpc * 3; if (intel_dp_is_edp(intel_dp)) { /* Get bpp from vbt only for panels that dont have bpp in edid */ if (intel_connector->base.display_info.bpc == 0 && @@ -2019,19 +2179,6 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, } } -static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) -{ - /* - * bpp value was assumed to RGB format. And YCbCr 4:2:0 output - * format of the number of bytes per pixel will be half the number - * of bytes of RGB pixel. - */ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - bpp /= 2; - - return bpp; -} - /* Optimize link config in order: max bpp, min clock, min lanes */ static int intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, @@ -2043,7 +2190,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, int mode_rate, link_clock, link_avail; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { - int output_bpp = intel_dp_output_bpp(pipe_config, bpp); + int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, output_bpp); @@ -2254,14 +2401,6 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, return 0; } -int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) -{ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) - return 6 * 3; - else - return 8 * 3; -} - static int intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2287,8 +2426,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, limits.min_lane_count = 1; limits.max_lane_count = intel_dp_max_lane_count(intel_dp); - limits.min_bpp = intel_dp_min_bpp(pipe_config); - limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); + limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); + limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); if (intel_dp_is_edp(intel_dp)) { /* @@ -2353,26 +2492,6 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, return 0; } -static int -intel_dp_ycbcr420_config(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct drm_connector *connector = conn_state->connector; - const struct drm_display_info *info = &connector->display_info; - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - - if (!drm_mode_is_420_only(info, adjusted_mode) || - !intel_dp_get_colorimetry_status(intel_dp) || - !connector->ycbcr_420_allowed) - return 0; - - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - - return intel_pch_panel_fitting(crtc_state, conn_state); -} - bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -2575,6 +2694,34 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); } +static void +intel_dp_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_i915_private *dev_priv = 
dp_to_i915(intel_dp); + + /* + * DRRS and PSR can't be enabled together, so PSR is given preference, + * as it allows more power savings by completely shutting down the + * display. To guarantee this, intel_dp_drrs_compute_config() must be + * called after intel_psr_compute_config(). + */ + if (pipe_config->has_psr) + return; + + if (!intel_connector->panel.downclock_mode || + dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + pipe_config->has_drrs = true; + intel_link_compute_m_n(output_bpp, pipe_config->lane_count, + intel_connector->panel.downclock_mode->clock, + pipe_config->port_clock, &pipe_config->dp_m2_n2, + constant_n, pipe_config->fec_enable); +} + int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2583,7 +2730,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); enum port port = encoder->port; struct intel_connector *intel_connector = intel_dp->attached_connector; struct intel_digital_connector_state *intel_conn_state = @@ -2595,17 +2741,15 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) pipe_config->has_pch_encoder = true; - pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + pipe_config->output_format = intel_dp_output_format(&intel_connector->base, + adjusted_mode); - if (lspcon->active) - lspcon_ycbcr420_config(&intel_connector->base, pipe_config); - else - ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, - conn_state); - if (ret) - return ret; + if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + ret = intel_pch_panel_fitting(pipe_config, conn_state); + if (ret) + return ret; + } - pipe_config->has_drrs = false; if (!intel_dp_port_has_audio(dev_priv, port)) pipe_config->has_audio = false; else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) @@ -2648,7 +2792,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (pipe_config->dsc.compression_enable) output_bpp = pipe_config->dsc.compressed_bpp; else - output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); + output_bpp = intel_dp_output_bpp(pipe_config->output_format, + pipe_config->pipe_bpp); intel_link_compute_m_n(output_bpp, pipe_config->lane_count, @@ -2657,21 +2802,12 @@ intel_dp_compute_config(struct intel_encoder *encoder, &pipe_config->dp_m_n, constant_n, pipe_config->fec_enable); - if (intel_connector->panel.downclock_mode != NULL && - dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { - pipe_config->has_drrs = true; - intel_link_compute_m_n(output_bpp, - pipe_config->lane_count, - intel_connector->panel.downclock_mode->clock, - pipe_config->port_clock, - &pipe_config->dp_m2_n2, - constant_n, pipe_config->fec_enable); - } - if (!HAS_DDI(dev_priv)) intel_dp_set_clock(encoder, pipe_config); intel_psr_compute_config(intel_dp, pipe_config); + intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp, + constant_n); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); @@ -2679,13 +2815,11 @@ } void intel_dp_set_link_params(struct intel_dp *intel_dp, - int link_rate, u8 lane_count, - bool link_mst) + int link_rate, int lane_count) {
intel_dp->link_trained = false; intel_dp->link_rate = link_rate; intel_dp->lane_count = lane_count; - intel_dp->link_mst = link_mst; } static void intel_dp_prepare(struct intel_encoder *encoder, @@ -2697,10 +2831,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - intel_dp_set_link_params(intel_dp, pipe_config->port_clock, - pipe_config->lane_count, - intel_crtc_has_type(pipe_config, - INTEL_OUTPUT_DP_MST)); + intel_dp_set_link_params(intel_dp, + pipe_config->port_clock, + pipe_config->lane_count); /* * There are four kinds of DP registers: @@ -3392,32 +3525,33 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, enable ? "enable" : "disable"); } -/* If the sink supports it, try to set the power state appropriately */ -void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) +/* If the device supports it, try to set the power state appropriately */ +void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); int ret, i; /* Should have a valid DPCD by this point */ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) return; - if (mode != DRM_MODE_DPMS_ON) { + if (mode != DP_SET_POWER_D0) { if (downstream_hpd_needs_d0(intel_dp)) return; - ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, - DP_SET_POWER_D3); + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); } else { struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + lspcon_resume(dp_to_dig_port(intel_dp)); + /* * When turning on, we need to retry for 1ms to give the sink * time to wake up. */ for (i = 0; i < 3; i++) { - ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, - DP_SET_POWER_D0); + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); if (ret == 1) break; msleep(1); @@ -3428,8 +3562,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) } if (ret != 1) - drm_dbg_kms(&i915->drm, "failed to %s sink power state\n", - mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", + encoder->base.base.id, encoder->base.name, + mode == DP_SET_POWER_D0 ? "D0" : "D3"); } static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, @@ -3586,6 +3721,66 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } } +static bool +intel_dp_get_dpcd(struct intel_dp *intel_dp); + +/** + * intel_dp_sync_state - sync the encoder state during init/resume + * @encoder: intel encoder to sync + * @crtc_state: state for the CRTC connected to the encoder + * + * Sync any state stored in the encoder wrt. HW state during driver init + * and system resume. + */ +void intel_dp_sync_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + /* + * Don't clobber DPCD if it's been already read out during output + * setup (eDP) or detect. 
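The D0 branch of intel_dp_set_power() above encodes the DP spec's allowance of up to 1 ms for a sink to wake before it answers AUX writes; a condensed sketch of that retry pattern, with the helper name invented for illustration:

/* Sketch of the DPCD power-up retry used above (hypothetical helper). */
static int example_dp_wake_sink(struct drm_dp_aux *aux)
{
	int i, ret = -EIO;

	for (i = 0; i < 3; i++) {
		/* drm_dp_dpcd_writeb() returns 1 when the byte was written. */
		ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, DP_SET_POWER_D0);
		if (ret == 1)
			return 0;
		msleep(1);
	}

	return ret < 0 ? ret : -EIO;
}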
+ */ + if (intel_dp->dpcd[DP_DPCD_REV] == 0) + intel_dp_get_dpcd(intel_dp); + + intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); + intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); +} + +bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + /* + * If BIOS has set an unsupported or non-standard link rate for some + * reason force an encoder recompute and full modeset. + */ + if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates, + crtc_state->port_clock) < 0) { + drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n"); + crtc_state->uapi.connectors_changed = true; + return false; + } + + /* + * FIXME hack to force full modeset when DSC is being used. + * + * As long as we do not have full state readout and config comparison + * of crtc_state->dsc, we have no way to ensure reliable fastset. + * Remove once we have readout for DSC. + */ + if (crtc_state->dsc.compression_enable) { + drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n"); + crtc_state->uapi.mode_changed = true; + return false; + } + + return true; +} + static void intel_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -3603,7 +3798,7 @@ static void intel_disable_dp(struct intel_atomic_state *state, * ensure that we have vdd while we switch off the panel. */ intel_edp_panel_vdd_on(intel_dp); intel_edp_backlight_off(old_conn_state); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + intel_dp_set_power(intel_dp, DP_SET_POWER_D3); intel_edp_panel_off(intel_dp); } @@ -3671,6 +3866,7 @@ static void chv_post_disable_dp(struct intel_atomic_state *state, static void cpt_set_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -3678,7 +3874,7 @@ cpt_set_link_train(struct intel_dp *intel_dp, *DP &= ~DP_LINK_TRAIN_MASK_CPT; - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: *DP |= DP_LINK_TRAIN_OFF_CPT; break; @@ -3701,6 +3897,7 @@ cpt_set_link_train(struct intel_dp *intel_dp, static void g4x_set_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -3708,7 +3905,7 @@ g4x_set_link_train(struct intel_dp *intel_dp, *DP &= ~DP_LINK_TRAIN_MASK; - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: *DP |= DP_LINK_TRAIN_OFF; break; @@ -3730,13 +3927,14 @@ g4x_set_link_train(struct intel_dp *intel_dp, } static void intel_dp_enable_port(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* enable with pattern 1 (as per spec) */ - intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); + intel_dp_program_link_training_pattern(intel_dp, crtc_state, + DP_TRAINING_PATTERN_1); /* * Magic for VLV/CHV. 
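intel_dp_initial_fastset_check() above reduces the BIOS link-rate validation to a membership test on the source rate table; a standalone sketch of the same check (hypothetical helper, the driver uses intel_dp_rate_index() for this):

/* Hypothetical standalone form of the fastset link-rate check. */
static bool example_link_rate_supported(const int *source_rates,
					int num_rates, int port_clock)
{
	int i;

	/* The BIOS-programmed rate must be one we would pick ourselves. */
	for (i = 0; i < num_rates; i++) {
		if (source_rates[i] == port_clock)
			return true;
	}

	return false;
}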
We _must_ first set up the register @@ -3745,13 +3943,50 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, * fail when the power sequencer is freshly used for this port. */ intel_dp->DP |= DP_PORT_EN; - if (old_crtc_state->has_audio) + if (crtc_state->has_audio) intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); } +void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 tmp; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) + return; + + if (!drm_dp_is_branch(intel_dp->dpcd)) + return; + + tmp = intel_dp->has_hdmi_sink ? + DP_HDMI_DVI_OUTPUT_CONFIG : 0; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) + drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n", + enableddisabled(intel_dp->has_hdmi_sink)); + + tmp = intel_dp->dfp.ycbcr_444_to_420 ? + DP_CONVERSION_TO_YCBCR420_ENABLE : 0; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) + drm_dbg_kms(&i915->drm, + "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n", + enableddisabled(intel_dp->dfp.ycbcr_444_to_420)); + + tmp = 0; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0) + drm_dbg_kms(&i915->drm, + "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n", + enableddisabled(false)); +} + static void intel_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, @@ -3788,9 +4023,10 @@ static void intel_enable_dp(struct intel_atomic_state *state, lane_mask); } - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); - intel_dp_start_link_train(intel_dp); - intel_dp_stop_link_train(intel_dp); + intel_dp_set_power(intel_dp, DP_SET_POWER_D0); + intel_dp_configure_protocol_converter(intel_dp); + intel_dp_start_link_train(intel_dp, pipe_config); + intel_dp_stop_link_train(intel_dp, pipe_config); if (pipe_config->has_audio) { drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", @@ -3988,38 +4224,30 @@ static void chv_dp_post_pll_disable(struct intel_atomic_state *state, chv_phy_post_pll_disable(encoder, old_crtc_state); } -/* - * Fetch AUX CH registers 0x202 - 0x207 which contain - * link status information - */ -bool -intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) -{ - return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, - DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; -} - -static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp) +static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; } -static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp) +static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; } -static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp) +static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp) { return DP_TRAIN_PRE_EMPH_LEVEL_2; } -static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp) +static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp) { return DP_TRAIN_PRE_EMPH_LEVEL_3; } -static void vlv_set_signal_levels(struct intel_dp *intel_dp) +static void vlv_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state 
*crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; unsigned long demph_reg_value, preemph_reg_value, @@ -4099,11 +4327,13 @@ static void vlv_set_signal_levels(struct intel_dp *intel_dp) return; } - vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, + vlv_set_phy_signal_level(encoder, crtc_state, + demph_reg_value, preemph_reg_value, uniqtranscale_reg_value, 0); } -static void chv_set_signal_levels(struct intel_dp *intel_dp) +static void chv_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u32 deemph_reg_value, margin_reg_value; @@ -4180,8 +4410,9 @@ static void chv_set_signal_levels(struct intel_dp *intel_dp) return; } - chv_set_phy_signal_level(encoder, deemph_reg_value, - margin_reg_value, uniq_trans_scale); + chv_set_phy_signal_level(encoder, crtc_state, + deemph_reg_value, margin_reg_value, + uniq_trans_scale); } static u32 g4x_signal_levels(u8 train_set) @@ -4222,7 +4453,8 @@ static u32 g4x_signal_levels(u8 train_set) } static void -g4x_set_signal_levels(struct intel_dp *intel_dp) +g4x_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 train_set = intel_dp->train_set[0]; @@ -4269,7 +4501,8 @@ static u32 snb_cpu_edp_signal_levels(u8 train_set) } static void -snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) +snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 train_set = intel_dp->train_set[0]; @@ -4320,7 +4553,8 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set) } static void -ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) +ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 train_set = intel_dp->train_set[0]; @@ -4338,7 +4572,8 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) intel_de_posting_read(dev_priv, intel_dp->output_reg); } -void intel_dp_set_signal_levels(struct intel_dp *intel_dp) +void intel_dp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 train_set = intel_dp->train_set[0]; @@ -4352,28 +4587,23 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp) train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
" (max)" : ""); - intel_dp->set_signal_levels(intel_dp); + intel_dp->set_signal_levels(intel_dp, crtc_state); } void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); - if (dp_train_pat & train_pat_mask) + if ((intel_dp_training_pattern_symbol(dp_train_pat)) != + DP_TRAINING_PATTERN_DISABLE) drm_dbg_kms(&dev_priv->drm, "Using DP training pattern TPS%d\n", - dp_train_pat & train_pat_mask); - - intel_dp->set_link_train(intel_dp, dp_train_pat); -} + intel_dp_training_pattern_symbol(dp_train_pat)); -void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) -{ - if (intel_dp->set_idle_link_train) - intel_dp->set_idle_link_train(intel_dp); + intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); } static void @@ -4594,6 +4824,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) { int ret; + intel_dp_lttpr_init(intel_dp); + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) return false; @@ -5266,33 +5498,14 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) return test_result; } -static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp) -{ - struct drm_dp_phy_test_params *data = - &intel_dp->compliance.test_data.phytest; - - if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { - DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); - return DP_TEST_NAK; - } - - /* - * link_mst is set to false to avoid executing mst related code - * during compliance testing. - */ - intel_dp->link_mst = false; - - return DP_TEST_ACK; -} - -static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) +static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; - struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; u32 pattern_val; @@ -5352,7 +5565,8 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) } static void -intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) +intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; @@ -5378,7 +5592,8 @@ intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) } static void -intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) +intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; @@ -5404,27 +5619,30 @@ intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) trans_ddi_func_ctl_value); } -void intel_dp_process_phy_request(struct intel_dp *intel_dp) +static void intel_dp_process_phy_request(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; u8 link_status[DP_LINK_STATUS_SIZE]; - if (!intel_dp_get_link_status(intel_dp, link_status)) { + if 
(drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, + link_status) < 0) { DRM_DEBUG_KMS("failed to get link status\n"); return; } /* retrieve vswing & pre-emphasis setting */ - intel_dp_get_adjust_train(intel_dp, link_status); + intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, + link_status); - intel_dp_autotest_phy_ddi_disable(intel_dp); + intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); - intel_dp_set_signal_levels(intel_dp); + intel_dp_set_signal_levels(intel_dp, crtc_state); - intel_dp_phy_pattern_update(intel_dp); + intel_dp_phy_pattern_update(intel_dp, crtc_state); - intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); + intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); drm_dp_set_phy_test_pattern(&intel_dp->aux, data, link_status[DP_DPCD_REV]); @@ -5432,15 +5650,18 @@ void intel_dp_process_phy_request(struct intel_dp *intel_dp) static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) { - u8 test_result; + struct drm_dp_phy_test_params *data = + &intel_dp->compliance.test_data.phytest; - test_result = intel_dp_prepare_phytest(intel_dp); - if (test_result != DP_TEST_ACK) - DRM_ERROR("Phy test preparation failed\n"); + if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { + DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); + return DP_TEST_NAK; + } - intel_dp_process_phy_request(intel_dp); + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = true; - return test_result; + return DP_TEST_ACK; } static void intel_dp_handle_test_request(struct intel_dp *intel_dp) @@ -5571,12 +5792,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) if (intel_psr_enabled(intel_dp)) return false; - if (!intel_dp_get_link_status(intel_dp, link_status)) + if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, + link_status) < 0) return false; /* * Validate the cached values of intel_dp->link_rate and * intel_dp->lane_count before attempting to retrain. + * + * FIXME would be nice to use the crtc state here, but since + * we need to call this from the short HPD handler that seems + * a bit hard.
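The retrain check being reworked here ultimately asks whether clock recovery and channel equalization still hold on the DPRX-facing link; a minimal sketch of that core question using the DRM DP helpers (the wrapper itself is invented for illustration):

/* Sketch: decide on retraining from the DPRX link status. */
static bool example_needs_retrain(struct drm_dp_aux *aux, int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false; /* can't tell, so don't thrash the link */

	/* Retrain only if clock recovery or channel EQ was lost. */
	return !drm_dp_channel_eq_ok(link_status, lane_count);
}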
*/ if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, intel_dp->lane_count)) @@ -5710,8 +5936,20 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, intel_crtc_pch_transcoder(crtc), false); } - intel_dp_start_link_train(intel_dp); - intel_dp_stop_link_train(intel_dp); + for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + /* retrain on the MST master transcoder */ + if (INTEL_GEN(dev_priv) >= 12 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && + !intel_dp_mst_is_master_trans(crtc_state)) + continue; + + intel_dp_start_link_train(intel_dp, crtc_state); + intel_dp_stop_link_train(intel_dp, crtc_state); + break; + } for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { const struct intel_crtc_state *crtc_state = @@ -5729,6 +5967,118 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, return 0; } +static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u32 *crtc_mask) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct drm_connector_list_iter conn_iter; + struct intel_connector *connector; + int ret = 0; + + *crtc_mask = 0; + + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + struct drm_connector_state *conn_state = + connector->base.state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (!intel_dp_has_connector(intel_dp, conn_state)) + continue; + + crtc = to_intel_crtc(conn_state->crtc); + if (!crtc) + continue; + + ret = drm_modeset_lock(&crtc->base.mutex, ctx); + if (ret) + break; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); + + if (!crtc_state->hw.active) + continue; + + if (conn_state->commit && + !try_wait_for_completion(&conn_state->commit->hw_done)) + continue; + + *crtc_mask |= drm_crtc_mask(&crtc->base); + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static int intel_dp_do_phy_test(struct intel_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_crtc *crtc; + u32 crtc_mask; + int ret; + + ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, + ctx); + if (ret) + return ret; + + ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask); + if (ret) + return ret; + + if (crtc_mask == 0) + return 0; + + drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", + encoder->base.base.id, encoder->base.name); + + for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + /* test on the MST master transcoder */ + if (INTEL_GEN(dev_priv) >= 12 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && + !intel_dp_mst_is_master_trans(crtc_state)) + continue; + + intel_dp_process_phy_request(intel_dp, crtc_state); + break; + } + + return 0; +} + +static void intel_dp_phy_test(struct intel_encoder *encoder) +{ + struct drm_modeset_acquire_ctx ctx; + int ret; + + drm_modeset_acquire_init(&ctx, 0); + + for (;;) { + ret = intel_dp_do_phy_test(encoder, &ctx); + + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + continue; + } + + break; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + drm_WARN(encoder->base.dev, ret, + "Acquiring 
modeset locks failed with %i\n", ret); +} + /* * If the display is now connected, check link status; there have * been known issues of link loss triggering a long pulse. @@ -5745,10 +6095,18 @@ static enum intel_hotplug_state intel_dp_hotplug(struct intel_encoder *encoder, struct intel_connector *connector) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; + if (intel_dp->compliance.test_active && + intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { + intel_dp_phy_test(encoder); + /* just do the PHY test and nothing else */ + return INTEL_HOTPLUG_UNCHANGED; + } + state = intel_encoder_hotplug(encoder, connector); drm_modeset_acquire_init(&ctx, 0); @@ -5853,11 +6211,23 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) intel_psr_short_pulse(intel_dp); - if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { + switch (intel_dp->compliance.test_type) { + case DP_TEST_LINK_TRAINING: drm_dbg_kms(&dev_priv->drm, "Link Training Compliance Test requested\n"); /* Send a Hotplug Uevent to userspace to start modeset */ drm_kms_helper_hotplug_event(&dev_priv->drm); + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + drm_dbg_kms(&dev_priv->drm, + "PHY test pattern Compliance Test requested\n"); + /* + * Schedule long hpd to do the test + * + * FIXME get rid of the ad-hoc phy test modeset code + * and properly incorporate it into the normal modeset. + */ + return false; } return true; @@ -5868,15 +6238,14 @@ static enum drm_connector_status intel_dp_detect_dpcd(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u8 *dpcd = intel_dp->dpcd; u8 type; if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) return connector_status_connected; - if (lspcon->active) - lspcon_resume(lspcon); + lspcon_resume(dig_port); if (!intel_dp_get_dpcd(intel_dp)) return connector_status_disconnected; @@ -6028,16 +6397,105 @@ intel_dp_get_edid(struct intel_dp *intel_dp) } static void +intel_dp_update_dfp(struct intel_dp *intel_dp, + const struct edid *edid) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + + intel_dp->dfp.max_bpc = + drm_dp_downstream_max_bpc(intel_dp->dpcd, + intel_dp->downstream_ports, edid); + + intel_dp->dfp.max_dotclock = + drm_dp_downstream_max_dotclock(intel_dp->dpcd, + intel_dp->downstream_ports); + + intel_dp->dfp.min_tmds_clock = + drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, + intel_dp->downstream_ports, + edid); + intel_dp->dfp.max_tmds_clock = + drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, + intel_dp->downstream_ports, + edid); + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n", + connector->base.base.id, connector->base.name, + intel_dp->dfp.max_bpc, + intel_dp->dfp.max_dotclock, + intel_dp->dfp.min_tmds_clock, + intel_dp->dfp.max_tmds_clock); +} + +static void +intel_dp_update_420(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420; + + /* No YCbCr output support on gmch platforms */ + if (HAS_GMCH(i915)) + return; + + /* + * ILK doesn't seem capable of DP YCbCr output. The + * displayed image is severely corrupted. SNB+ is fine.
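The 4:2:0 policy that follows can be stated compactly: on gen11+ prefer native 4:2:0 passthrough and use 4:4:4 to 4:2:0 protocol conversion only as a fallback, while older pipes must rely on the converter; a hedged condensed sketch of that decision (all names hypothetical):

/* Hypothetical condensed form of the 4:2:0 output policy below. */
static void example_pick_420_policy(bool is_branch, bool passthrough,
				    bool conversion, bool gen11_plus,
				    bool *use_conversion, bool *allow_420)
{
	if (gen11_plus) {
		/* Prefer passthrough; convert only when we must. */
		*use_conversion = conversion && !passthrough;
		*allow_420 = !is_branch || passthrough || conversion;
	} else {
		/* Pre-gen11 the pipe can't output 4:2:0 itself. */
		*use_conversion = conversion;
		*allow_420 = conversion;
	}
}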
+ */ + if (IS_GEN(i915, 5)) + return; + + is_branch = drm_dp_is_branch(intel_dp->dpcd); + ycbcr_420_passthrough = + drm_dp_downstream_420_passthrough(intel_dp->dpcd, + intel_dp->downstream_ports); + /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ + ycbcr_444_to_420 = + dp_to_dig_port(intel_dp)->lspcon.active || + drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, + intel_dp->downstream_ports); + + if (INTEL_GEN(i915) >= 11) { + /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ + intel_dp->dfp.ycbcr_444_to_420 = + ycbcr_444_to_420 && !ycbcr_420_passthrough; + + connector->base.ycbcr_420_allowed = + !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; + } else { + /* 4:4:4->4:2:0 conversion is the only way */ + intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; + + connector->base.ycbcr_420_allowed = ycbcr_444_to_420; + } + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", + connector->base.base.id, connector->base.name, + yesno(connector->base.ycbcr_420_allowed), + yesno(intel_dp->dfp.ycbcr_444_to_420)); +} + +static void intel_dp_set_edid(struct intel_dp *intel_dp) { - struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_connector *connector = intel_dp->attached_connector; struct edid *edid; intel_dp_unset_edid(intel_dp); edid = intel_dp_get_edid(intel_dp); - intel_connector->detect_edid = edid; + connector->detect_edid = edid; + + intel_dp_update_dfp(intel_dp, edid); + intel_dp_update_420(intel_dp); + + if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { + intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + intel_dp->has_audio = drm_detect_monitor_audio(edid); + } - intel_dp->has_audio = drm_detect_monitor_audio(edid); drm_dp_cec_set_edid(&intel_dp->aux, edid); intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); } @@ -6045,14 +6503,23 @@ intel_dp_set_edid(struct intel_dp *intel_dp) static void intel_dp_unset_edid(struct intel_dp *intel_dp) { - struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_connector *connector = intel_dp->attached_connector; drm_dp_cec_unset_edid(&intel_dp->aux); - kfree(intel_connector->detect_edid); - intel_connector->detect_edid = NULL; + kfree(connector->detect_edid); + connector->detect_edid = NULL; + intel_dp->has_hdmi_sink = false; intel_dp->has_audio = false; intel_dp->edid_quirks = 0; + + intel_dp->dfp.max_bpc = 0; + intel_dp->dfp.max_dotclock = 0; + intel_dp->dfp.min_tmds_clock = 0; + intel_dp->dfp.max_tmds_clock = 0; + + intel_dp->dfp.ycbcr_444_to_420 = false; + connector->base.ycbcr_420_allowed = false; } static int @@ -6071,6 +6538,9 @@ intel_dp_detect(struct drm_connector *connector, drm_WARN_ON(&dev_priv->drm, !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); + if (!INTEL_DISPLAY_ENABLED(dev_priv)) + return connector_status_disconnected; + /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); @@ -6211,7 +6681,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, fall back to fixed mode */ - if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && + if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && intel_connector->panel.fixed_mode) { struct drm_display_mode *mode; @@ -6223,6 +6693,19 @@ static int intel_dp_get_modes(struct drm_connector *connector) } } + if (!edid) { + struct intel_dp *intel_dp = intel_attached_dp(intel_connector); + struct drm_display_mode 
*mode; + + mode = drm_dp_downstream_mode(connector->dev, + intel_dp->dpcd, + intel_dp->downstream_ports); + if (mode) { + drm_mode_probed_add(connector, mode); + return 1; + } + } + return 0; } @@ -6273,11 +6756,6 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder) */ with_pps_lock(intel_dp, wakeref) edp_panel_vdd_off_sync(intel_dp); - - if (intel_dp->edp_notifier.notifier_call) { - unregister_reboot_notifier(&intel_dp->edp_notifier); - intel_dp->edp_notifier.notifier_call = NULL; - } } intel_dp_aux_fini(intel_dp); @@ -6308,628 +6786,18 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) edp_panel_vdd_off_sync(intel_dp); } -static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) -{ - long ret; - -#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) - ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, - msecs_to_jiffies(timeout)); - - if (!ret) - DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); -} - -static -int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, - u8 *an) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base)); - static const struct drm_dp_aux_msg msg = { - .request = DP_AUX_NATIVE_WRITE, - .address = DP_AUX_HDCP_AKSV, - .size = DRM_HDCP_KSV_LEN, - }; - u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; - ssize_t dpcd_ret; - int ret; - - /* Output An first, that's easy */ - dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, - an, DRM_HDCP_AN_LEN); - if (dpcd_ret != DRM_HDCP_AN_LEN) { - drm_dbg_kms(&i915->drm, - "Failed to write An over DP/AUX (%zd)\n", - dpcd_ret); - return dpcd_ret >= 0 ? -EIO : dpcd_ret; - } - - /* - * Since Aksv is Oh-So-Secret, we can't access it in software. So in - * order to get it on the wire, we need to create the AUX header as if - * we were writing the data, and then tickle the hardware to output the - * data once the header is sent out. - */ - intel_dp_aux_header(txbuf, &msg); - - ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, - rxbuf, sizeof(rxbuf), - DP_AUX_CH_CTL_AUX_AKSV_SELECT); - if (ret < 0) { - drm_dbg_kms(&i915->drm, - "Write Aksv over DP/AUX failed (%d)\n", ret); - return ret; - } else if (ret == 0) { - drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n"); - return -EIO; - } - - reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; - if (reply != DP_AUX_NATIVE_REPLY_ACK) { - drm_dbg_kms(&i915->drm, - "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", - reply); - return -EIO; - } - return 0; -} - -static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, - u8 *bksv) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, - DRM_HDCP_KSV_LEN); - if (ret != DRM_HDCP_KSV_LEN) { - drm_dbg_kms(&i915->drm, - "Read Bksv from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - return 0; -} - -static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, - u8 *bstatus) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - /* - * For some reason the HDMI and DP HDCP specs call this register - * definition by different names. In the HDMI spec, it's called BSTATUS, - * but in DP it's called BINFO. 
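Every HDCP helper removed below follows one DPCD-read idiom: a short transfer is reported as -EIO while a negative return is passed through unchanged; a minimal sketch of that convention (standalone helper invented for illustration):

/* Sketch of the DPCD read convention used by the removed helpers. */
static int example_dpcd_read_exact(struct drm_dp_aux *aux,
				   unsigned int offset, void *buf, size_t len)
{
	ssize_t ret = drm_dp_dpcd_read(aux, offset, buf, len);

	/* Short transfers are errors too: map them to -EIO. */
	if (ret != (ssize_t)len)
		return ret >= 0 ? -EIO : ret;

	return 0;
}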
- */ - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, - bstatus, DRM_HDCP_BSTATUS_LEN); - if (ret != DRM_HDCP_BSTATUS_LEN) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - return 0; -} - -static -int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, - u8 *bcaps) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, - bcaps, 1); - if (ret != 1) { - drm_dbg_kms(&i915->drm, - "Read bcaps from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - - return 0; -} - -static -int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, - bool *repeater_present) -{ - ssize_t ret; - u8 bcaps; - - ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); - if (ret) - return ret; - - *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; - return 0; -} - -static -int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, - u8 *ri_prime) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, - ri_prime, DRM_HDCP_RI_LEN); - if (ret != DRM_HDCP_RI_LEN) { - drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", - ret); - return ret >= 0 ? -EIO : ret; - } - return 0; -} - -static -int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, - bool *ksv_ready) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - u8 bstatus; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, - &bstatus, 1); - if (ret != 1) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? -EIO : ret; - } - *ksv_ready = bstatus & DP_BSTATUS_READY; - return 0; -} - -static -int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, - int num_downstream, u8 *ksv_fifo) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - int i; - - /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ - for (i = 0; i < num_downstream; i += 3) { - size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_AUX_HDCP_KSV_FIFO, - ksv_fifo + i * DRM_HDCP_KSV_LEN, - len); - if (ret != len) { - drm_dbg_kms(&i915->drm, - "Read ksv[%d] from DP/AUX failed (%zd)\n", - i, ret); - return ret >= 0 ? -EIO : ret; - } - } - return 0; -} - -static -int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, - int i, u32 *part) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) - return -EINVAL; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_AUX_HDCP_V_PRIME(i), part, - DRM_HDCP_V_PRIME_PART_LEN); - if (ret != DRM_HDCP_V_PRIME_PART_LEN) { - drm_dbg_kms(&i915->drm, - "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); - return ret >= 0 ? 
-EIO : ret; - } - return 0; -} - -static -int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, - bool enable) -{ - /* Not used for single stream DisplayPort setups */ - return 0; -} - -static -bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - u8 bstatus; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, - &bstatus, 1); - if (ret != 1) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return false; - } - - return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); -} - -static -int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, - bool *hdcp_capable) -{ - ssize_t ret; - u8 bcaps; - - ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); - if (ret) - return ret; - - *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; - return 0; -} - -struct hdcp2_dp_errata_stream_type { - u8 msg_id; - u8 stream_type; -} __packed; - -struct hdcp2_dp_msg_data { - u8 msg_id; - u32 offset; - bool msg_detectable; - u32 timeout; - u32 timeout2; /* Added for non_paired situation */ -}; - -static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { - { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, - { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, - false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, - { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, - false, 0, 0 }, - { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, - false, 0, 0 }, - { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, - true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, - HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, - { HDCP_2_2_AKE_SEND_PAIRING_INFO, - DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, - HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, - { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, - { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, - false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, - { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, - 0, 0 }, - { HDCP_2_2_REP_SEND_RECVID_LIST, - DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, - HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, - { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, - 0, 0 }, - { HDCP_2_2_REP_STREAM_MANAGE, - DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, - 0, 0 }, - { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, - false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, -/* local define to shovel this through the write_2_2 interface */ -#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 - { HDCP_2_2_ERRATA_DP_STREAM_TYPE, - DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, - 0, 0 }, -}; - -static int -intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, - u8 *rx_status) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, - HDCP_2_2_DP_RXSTATUS_LEN); - if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { - drm_dbg_kms(&i915->drm, - "Read bstatus from DP/AUX failed (%zd)\n", ret); - return ret >= 0 ? 
-EIO : ret; - } - - return 0; -} - -static -int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, - u8 msg_id, bool *msg_ready) -{ - u8 rx_status; - int ret; - - *msg_ready = false; - ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); - if (ret < 0) - return ret; - - switch (msg_id) { - case HDCP_2_2_AKE_SEND_HPRIME: - if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) - *msg_ready = true; - break; - case HDCP_2_2_AKE_SEND_PAIRING_INFO: - if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) - *msg_ready = true; - break; - case HDCP_2_2_REP_SEND_RECVID_LIST: - if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) - *msg_ready = true; - break; - default: - DRM_ERROR("Unidentified msg_id: %d\n", msg_id); - return -EINVAL; - } - - return 0; -} - -static ssize_t -intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, - const struct hdcp2_dp_msg_data *hdcp2_msg_data) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_dp *dp = &dig_port->dp; - struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; - u8 msg_id = hdcp2_msg_data->msg_id; - int ret, timeout; - bool msg_ready = false; - - if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) - timeout = hdcp2_msg_data->timeout2; - else - timeout = hdcp2_msg_data->timeout; - - /* - * There is no way to detect the CERT, LPRIME and STREAM_READY - * availability. So Wait for timeout and read the msg. - */ - if (!hdcp2_msg_data->msg_detectable) { - mdelay(timeout); - ret = 0; - } else { - /* - * As we want to check the msg availability at timeout, Ignoring - * the timeout at wait for CP_IRQ. - */ - intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); - ret = hdcp2_detect_msg_availability(dig_port, - msg_id, &msg_ready); - if (!msg_ready) - ret = -ETIMEDOUT; - } - - if (ret) - drm_dbg_kms(&i915->drm, - "msg_id %d, ret %d, timeout(mSec): %d\n", - hdcp2_msg_data->msg_id, ret, timeout); - - return ret; -} - -static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) - if (hdcp2_dp_msg_data[i].msg_id == msg_id) - return &hdcp2_dp_msg_data[i]; - - return NULL; -} - -static -int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, - void *buf, size_t size) -{ - struct intel_dp *dp = &dig_port->dp; - struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; - unsigned int offset; - u8 *byte = buf; - ssize_t ret, bytes_to_write, len; - const struct hdcp2_dp_msg_data *hdcp2_msg_data; - - hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); - if (!hdcp2_msg_data) - return -EINVAL; - - offset = hdcp2_msg_data->offset; - - /* No msg_id in DP HDCP2.2 msgs */ - bytes_to_write = size - 1; - byte++; - - hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); - - while (bytes_to_write) { - len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? - DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; - - ret = drm_dp_dpcd_write(&dig_port->dp.aux, - offset, (void *)byte, len); - if (ret < 0) - return ret; - - bytes_to_write -= ret; - byte += ret; - offset += ret; - } - - return size; -} - -static -ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) -{ - u8 rx_info[HDCP_2_2_RXINFO_LEN]; - u32 dev_cnt; - ssize_t ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_HDCP_2_2_REG_RXINFO_OFFSET, - (void *)rx_info, HDCP_2_2_RXINFO_LEN); - if (ret != HDCP_2_2_RXINFO_LEN) - return ret >= 0 ? 
-EIO : ret; - - dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | - HDCP_2_2_DEV_COUNT_LO(rx_info[1])); - - if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) - dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; - - ret = sizeof(struct hdcp2_rep_send_receiverid_list) - - HDCP_2_2_RECEIVER_IDS_MAX_LEN + - (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); - - return ret; -} - -static -int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, - u8 msg_id, void *buf, size_t size) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - unsigned int offset; - u8 *byte = buf; - ssize_t ret, bytes_to_recv, len; - const struct hdcp2_dp_msg_data *hdcp2_msg_data; - - hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); - if (!hdcp2_msg_data) - return -EINVAL; - offset = hdcp2_msg_data->offset; - - ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data); - if (ret < 0) - return ret; - - if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { - ret = get_receiver_id_list_size(dig_port); - if (ret < 0) - return ret; - - size = ret; - } - bytes_to_recv = size - 1; - - /* DP adaptation msgs has no msg_id */ - byte++; - - while (bytes_to_recv) { - len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? - DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset, - (void *)byte, len); - if (ret < 0) { - drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", - msg_id, ret); - return ret; - } - - bytes_to_recv -= ret; - byte += ret; - offset += ret; - } - byte = buf; - *byte = msg_id; - - return size; -} - -static -int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port, - bool is_repeater, u8 content_type) -{ - int ret; - struct hdcp2_dp_errata_stream_type stream_type_msg; - - if (is_repeater) - return 0; - - /* - * Errata for DP: As Stream type is used for encryption, Receiver - * should be communicated with stream type for the decryption of the - * content. - * Repeater will be communicated with stream type as a part of it's - * auth later in time. - */ - stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; - stream_type_msg.stream_type = content_type; - - ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg, - sizeof(stream_type_msg)); - - return ret < 0 ? ret : 0; - -} - -static -int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port) -{ - u8 rx_status; - int ret; - - ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); - if (ret) - return ret; - - if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) - ret = HDCP_REAUTH_REQUEST; - else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) - ret = HDCP_LINK_INTEGRITY_FAILURE; - else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) - ret = HDCP_TOPOLOGY_CHANGE; - - return ret; -} - -static -int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, - bool *capable) +void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) { - u8 rx_caps[3]; - int ret; - - *capable = false; - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_HDCP_2_2_REG_RX_CAPS_OFFSET, - rx_caps, HDCP_2_2_RXCAPS_LEN); - if (ret != HDCP_2_2_RXCAPS_LEN) - return ret >= 0 ? 
-EIO : ret; + struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); + intel_wakeref_t wakeref; - if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && - HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) - *capable = true; + if (!intel_dp_is_edp(intel_dp)) + return; - return 0; + with_pps_lock(intel_dp, wakeref) + wait_panel_power_cycle(intel_dp); } -static const struct intel_hdcp_shim intel_dp_hdcp_shim = { - .write_an_aksv = intel_dp_hdcp_write_an_aksv, - .read_bksv = intel_dp_hdcp_read_bksv, - .read_bstatus = intel_dp_hdcp_read_bstatus, - .repeater_present = intel_dp_hdcp_repeater_present, - .read_ri_prime = intel_dp_hdcp_read_ri_prime, - .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, - .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, - .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, - .toggle_signalling = intel_dp_hdcp_toggle_signalling, - .check_link = intel_dp_hdcp_check_link, - .hdcp_capable = intel_dp_hdcp_capable, - .write_2_2_msg = intel_dp_hdcp2_write_msg, - .read_2_2_msg = intel_dp_hdcp2_read_msg, - .config_stream_type = intel_dp_hdcp2_config_stream_type, - .check_2_2_link = intel_dp_hdcp2_check_link, - .hdcp_2_2_capable = intel_dp_hdcp2_capable, - .protocol = HDCP_PROTOCOL_DP, -}; - static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -6970,15 +6838,11 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); - struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); intel_wakeref_t wakeref; if (!HAS_DDI(dev_priv)) intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); - if (lspcon->active) - lspcon_resume(lspcon); - intel_dp->reset_link_params = true; if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && @@ -7640,6 +7504,15 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, refresh_rate); } +static void +intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + dev_priv->drrs.busy_frontbuffer_bits = 0; + dev_priv->drrs.dp = intel_dp; +} + /** * intel_edp_drrs_enable - init drrs struct if supported * @intel_dp: DP struct @@ -7652,31 +7525,40 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (!crtc_state->has_drrs) { - drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); + if (!crtc_state->has_drrs) return; - } - if (dev_priv->psr.enabled) { - drm_dbg_kms(&dev_priv->drm, - "PSR enabled. 
Not enabling DRRS.\n"); - return; - } + drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); mutex_lock(&dev_priv->drrs.mutex); + if (dev_priv->drrs.dp) { - drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); + drm_warn(&dev_priv->drm, "DRRS already enabled\n"); goto unlock; } - dev_priv->drrs.busy_frontbuffer_bits = 0; - - dev_priv->drrs.dp = intel_dp; + intel_edp_drrs_enable_locked(intel_dp); unlock: mutex_unlock(&dev_priv->drrs.mutex); } +static void +intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { + int refresh; + + refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); + intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); + } + + dev_priv->drrs.dp = NULL; +} + /** * intel_edp_drrs_disable - Disable DRRS * @intel_dp: DP struct @@ -7697,16 +7579,45 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp, return; } - if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, old_crtc_state, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - dev_priv->drrs.dp = NULL; + intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); mutex_unlock(&dev_priv->drrs.mutex); cancel_delayed_work_sync(&dev_priv->drrs.work); } +/** + * intel_edp_drrs_update - Update DRRS state + * @intel_dp: Intel DP + * @crtc_state: new CRTC state + * + * This function will update DRRS states, disabling or enabling DRRS when + * executing fastsets. For full modeset, intel_edp_drrs_disable() and + * intel_edp_drrs_enable() should be called instead. + */ +void +intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + mutex_lock(&dev_priv->drrs.mutex); + + /* New state matches current one? */ + if (crtc_state->has_drrs == !!dev_priv->drrs.dp) + goto unlock; + + if (crtc_state->has_drrs) + intel_edp_drrs_enable_locked(intel_dp); + else + intel_edp_drrs_disable_locked(intel_dp, crtc_state); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + static void intel_edp_drrs_downclock_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -7996,9 +7907,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, mutex_unlock(&dev->mode_config.mutex); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_dp->edp_notifier.notifier_call = edp_notify_handler; - register_reboot_notifier(&intel_dp->edp_notifier); - /* * Figure out the current pipe for the initial backlight setup. 
* If the current pipe isn't valid, try the PPS pipe, and if that @@ -8138,10 +8046,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, connector->interlace_allowed = true; connector->doublescan_allowed = 0; - if (INTEL_GEN(dev_priv) >= 11) - connector->ycbcr_420_allowed = true; - - intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_dp_aux_init(intel_dp); @@ -8166,7 +8070,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_add_properties(intel_dp, connector); if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { - int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); + int ret = intel_dp_init_hdcp(dig_port, intel_connector); if (ret) drm_dbg_kms(&dev_priv->drm, "HDCP init failed, skipping.\n"); @@ -8210,6 +8114,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; encoder = &intel_encoder->base; + mutex_init(&dig_port->hdcp_mutex); + if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) @@ -8219,8 +8125,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->compute_config = intel_dp_compute_config; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; + intel_encoder->sync_state = intel_dp_sync_state; + intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check; intel_encoder->update_pipe = intel_panel_update_backlight; intel_encoder->suspend = intel_dp_encoder_suspend; + intel_encoder->shutdown = intel_dp_encoder_shutdown; if (IS_CHERRYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; intel_encoder->pre_enable = chv_pre_enable_dp; @@ -8260,17 +8169,15 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) { - dig_port->dp.preemph_max = intel_dp_pre_empemph_max_3; + dig_port->dp.preemph_max = intel_dp_preemph_max_3; dig_port->dp.voltage_max = intel_dp_voltage_max_3; } else { - dig_port->dp.preemph_max = intel_dp_pre_empemph_max_2; + dig_port->dp.preemph_max = intel_dp_preemph_max_2; dig_port->dp.voltage_max = intel_dp_voltage_max_2; } dig_port->dp.output_reg = output_reg; dig_port->max_lanes = 4; - dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); - dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); intel_encoder->type = INTEL_OUTPUT_DP; intel_encoder->power_domain = intel_port_to_power_domain(port); @@ -8284,6 +8191,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, } intel_encoder->cloneable = 0; intel_encoder->port = port; + intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); dig_port->hpd_pulse = intel_dp_hpd_pulse; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 0a3af3410d52..3f862b4fd34f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -10,6 +10,7 @@ #include "i915_reg.h" +enum intel_output_format; enum pipe; enum port; struct drm_connector_state; @@ -17,6 +18,7 @@ struct drm_encoder; struct drm_i915_private; struct drm_modeset_acquire_ctx; struct drm_dp_vsc_sdp; +struct intel_atomic_state; struct intel_connector; struct intel_crtc_state; struct intel_digital_port; @@ -34,7 +36,7 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, struct link_config_limits *limits); bool 
intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state); +int intel_dp_min_bpp(enum intel_output_format output_format); bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t dp_reg, enum port port, enum pipe *pipe); @@ -43,18 +45,19 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, - int link_rate, u8 lane_count, - bool link_mst); + int link_rate, int lane_count); int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, u8 lane_count); int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); -void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); +void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); +void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp); void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable); void intel_dp_encoder_reset(struct drm_encoder *encoder); void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); +void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder); void intel_dp_encoder_flush_work(struct drm_encoder *encoder); int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -81,6 +84,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_edp_drrs_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, @@ -88,16 +93,15 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, u8 dp_train_pat); void -intel_dp_set_signal_levels(struct intel_dp *intel_dp); -void intel_dp_set_idle_link_train(struct intel_dp *intel_dp); +intel_dp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); -bool -intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); @@ -118,7 +122,6 @@ void intel_read_dp_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, unsigned int type); bool intel_digital_port_connected(struct intel_encoder *encoder); -void intel_dp_process_phy_request(struct intel_dp *intel_dp); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) { @@ -127,4 +130,17 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count) u32 intel_dp_mode_to_fec_clock(u32 mode_clock); +void intel_ddi_update_pipe(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct 
drm_connector_state *conn_state); + +int intel_dp_init_hdcp(struct intel_digital_port *dig_port, + struct intel_connector *intel_connector); + +bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); +void intel_dp_sync_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index acbd7eb66cbe..51d27fc98d48 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -52,17 +52,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) } } -/* - * Read the current backlight value from DPCD register(s) based - * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported - */ -static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) +static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 read_val[2] = { 0x0 }; u8 mode_reg; - u16 level = 0; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, @@ -70,15 +64,29 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) drm_dbg_kms(&i915->drm, "Failed to read the DPCD register 0x%x\n", DP_EDP_BACKLIGHT_MODE_SET_REGISTER); - return 0; + return false; } + return (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) == + DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; +} + +/* + * Read the current backlight value from DPCD register(s) based + * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported + */ +static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 read_val[2] = { 0x0 }; + u16 level = 0; + /* * If we're not in DPCD control mode yet, the programmed brightness * value is meaningless and we should assume max brightness */ - if ((mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) != - DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) + if (!intel_dp_aux_backlight_dpcd_mode(connector)) return connector->panel.backlight.max; if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, @@ -319,7 +327,8 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, panel->backlight.min = 0; panel->backlight.level = intel_dp_aux_get_backlight(connector); - panel->backlight.enabled = panel->backlight.level != 0; + panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) && + panel->backlight.level != 0; return 0; } @@ -334,8 +343,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector) * the panel can support backlight control over the aux channel */ if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && - (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && - !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) { drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); return true; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c new file mode 100644 index 000000000000..03424d20e9f7 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -0,0 +1,703 @@ +/* 
SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2020 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ + +#include <drm/drm_dp_helper.h> +#include <drm/drm_dp_mst_helper.h> +#include <drm/drm_hdcp.h> +#include <drm/drm_print.h> + +#include "intel_display_types.h" +#include "intel_ddi.h" +#include "intel_dp.h" +#include "intel_hdcp.h" + +static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) +{ + long ret; + +#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) + ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, + msecs_to_jiffies(timeout)); + + if (!ret) + DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); +} + +static +int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, + u8 *an) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + u8 aksv[DRM_HDCP_KSV_LEN] = {}; + ssize_t dpcd_ret; + + /* Output An first, that's easy */ + dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, + an, DRM_HDCP_AN_LEN); + if (dpcd_ret != DRM_HDCP_AN_LEN) { + drm_dbg_kms(&i915->drm, + "Failed to write An over DP/AUX (%zd)\n", + dpcd_ret); + return dpcd_ret >= 0 ? -EIO : dpcd_ret; + } + + /* + * Since Aksv is Oh-So-Secret, we can't access it in software. So we + * send an empty buffer of the correct length through the DP helpers. On + * the other side, in the transfer hook, we'll generate a flag based on + * the destination address which will tickle the hardware to output the + * Aksv on our behalf after the header is sent. + */ + dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV, + aksv, DRM_HDCP_KSV_LEN); + if (dpcd_ret != DRM_HDCP_KSV_LEN) { + drm_dbg_kms(&i915->drm, + "Failed to write Aksv over DP/AUX (%zd)\n", + dpcd_ret); + return dpcd_ret >= 0 ? -EIO : dpcd_ret; + } + return 0; +} + +static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, + u8 *bksv) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, + DRM_HDCP_KSV_LEN); + if (ret != DRM_HDCP_KSV_LEN) { + drm_dbg_kms(&i915->drm, + "Read Bksv from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, + u8 *bstatus) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + /* + * For some reason the HDMI and DP HDCP specs call this register + * definition by different names. In the HDMI spec, it's called BSTATUS, + * but in DP it's called BINFO. + */ + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, + bstatus, DRM_HDCP_BSTATUS_LEN); + if (ret != DRM_HDCP_BSTATUS_LEN) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, + u8 *bcaps) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, + bcaps, 1); + if (ret != 1) { + drm_dbg_kms(&i915->drm, + "Read bcaps from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret; + } + + return 0; +} + +static +int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, + bool *repeater_present) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); + if (ret) + return ret; + + *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; + return 0; +} + +static +int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, + u8 *ri_prime) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, + ri_prime, DRM_HDCP_RI_LEN); + if (ret != DRM_HDCP_RI_LEN) { + drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", + ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, + bool *ksv_ready) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + u8 bstatus; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + *ksv_ready = bstatus & DP_BSTATUS_READY; + return 0; +} + +static +int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, + int num_downstream, u8 *ksv_fifo) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + int i; + + /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ + for (i = 0; i < num_downstream; i += 3) { + size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_AUX_HDCP_KSV_FIFO, + ksv_fifo + i * DRM_HDCP_KSV_LEN, + len); + if (ret != len) { + drm_dbg_kms(&i915->drm, + "Read ksv[%d] from DP/AUX failed (%zd)\n", + i, ret); + return ret >= 0 ? -EIO : ret; + } + } + return 0; +} + +static +int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, + int i, u32 *part) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) + return -EINVAL; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_AUX_HDCP_V_PRIME(i), part, + DRM_HDCP_V_PRIME_PART_LEN); + if (ret != DRM_HDCP_V_PRIME_PART_LEN) { + drm_dbg_kms(&i915->drm, + "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); + return ret >= 0 ? 
-EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, + bool enable) +{ + /* Not used for single stream DisplayPort setups */ + return 0; +} + +static +bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port, + struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + u8 bstatus; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return false; + } + + return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); +} + +static +int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, + bool *hdcp_capable) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); + if (ret) + return ret; + + *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; + return 0; +} + +struct hdcp2_dp_errata_stream_type { + u8 msg_id; + u8 stream_type; +} __packed; + +struct hdcp2_dp_msg_data { + u8 msg_id; + u32 offset; + bool msg_detectable; + u32 timeout; + u32 timeout2; /* Added for non_paired situation */ +}; + +static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { + { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, + { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, + false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, + { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, + false, 0, 0 }, + { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, + false, 0, 0 }, + { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, + true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, + HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, + { HDCP_2_2_AKE_SEND_PAIRING_INFO, + DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, + HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, + { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, + { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, + false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, + { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, + 0, 0 }, + { HDCP_2_2_REP_SEND_RECVID_LIST, + DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, + HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, + { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, + 0, 0 }, + { HDCP_2_2_REP_STREAM_MANAGE, + DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, + 0, 0 }, + { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, + false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, +/* local define to shovel this through the write_2_2 interface */ +#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 + { HDCP_2_2_ERRATA_DP_STREAM_TYPE, + DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, + 0, 0 }, +}; + +static int +intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, + u8 *rx_status) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, + HDCP_2_2_DP_RXSTATUS_LEN); + if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { + drm_dbg_kms(&i915->drm, + "Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret; + } + + return 0; +} + +static +int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, + u8 msg_id, bool *msg_ready) +{ + u8 rx_status; + int ret; + + *msg_ready = false; + ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); + if (ret < 0) + return ret; + + switch (msg_id) { + case HDCP_2_2_AKE_SEND_HPRIME: + if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_AKE_SEND_PAIRING_INFO: + if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_REP_SEND_RECVID_LIST: + if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + *msg_ready = true; + break; + default: + DRM_ERROR("Unidentified msg_id: %d\n", msg_id); + return -EINVAL; + } + + return 0; +} + +static ssize_t +intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, + const struct hdcp2_dp_msg_data *hdcp2_msg_data) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + u8 msg_id = hdcp2_msg_data->msg_id; + int ret, timeout; + bool msg_ready = false; + + if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) + timeout = hdcp2_msg_data->timeout2; + else + timeout = hdcp2_msg_data->timeout; + + /* + * There is no way to detect the CERT, LPRIME and STREAM_READY + * availability. So Wait for timeout and read the msg. + */ + if (!hdcp2_msg_data->msg_detectable) { + mdelay(timeout); + ret = 0; + } else { + /* + * As we want to check the msg availability at timeout, Ignoring + * the timeout at wait for CP_IRQ. + */ + intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); + ret = hdcp2_detect_msg_availability(dig_port, + msg_id, &msg_ready); + if (!msg_ready) + ret = -ETIMEDOUT; + } + + if (ret) + drm_dbg_kms(&i915->drm, + "msg_id %d, ret %d, timeout(mSec): %d\n", + hdcp2_msg_data->msg_id, ret, timeout); + + return ret; +} + +static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) + if (hdcp2_dp_msg_data[i].msg_id == msg_id) + return &hdcp2_dp_msg_data[i]; + + return NULL; +} + +static +int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, + void *buf, size_t size) +{ + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_write, len; + const struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); + if (!hdcp2_msg_data) + return -EINVAL; + + offset = hdcp2_msg_data->offset; + + /* No msg_id in DP HDCP2.2 msgs */ + bytes_to_write = size - 1; + byte++; + + hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); + + while (bytes_to_write) { + len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; + + ret = drm_dp_dpcd_write(&dig_port->dp.aux, + offset, (void *)byte, len); + if (ret < 0) + return ret; + + bytes_to_write -= ret; + byte += ret; + offset += ret; + } + + return size; +} + +static +ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) +{ + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u32 dev_cnt; + ssize_t ret; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_HDCP_2_2_REG_RXINFO_OFFSET, + (void *)rx_info, HDCP_2_2_RXINFO_LEN); + if (ret != HDCP_2_2_RXINFO_LEN) + return ret >= 0 ? 
-EIO : ret; + + dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | + HDCP_2_2_DEV_COUNT_LO(rx_info[1])); + + if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) + dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; + + ret = sizeof(struct hdcp2_rep_send_receiverid_list) - + HDCP_2_2_RECEIVER_IDS_MAX_LEN + + (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); + + return ret; +} + +static +int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, + u8 msg_id, void *buf, size_t size) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_recv, len; + const struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); + if (!hdcp2_msg_data) + return -EINVAL; + offset = hdcp2_msg_data->offset; + + ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data); + if (ret < 0) + return ret; + + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { + ret = get_receiver_id_list_size(dig_port); + if (ret < 0) + return ret; + + size = ret; + } + bytes_to_recv = size - 1; + + /* DP adaptation msgs has no msg_id */ + byte++; + + while (bytes_to_recv) { + len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; + + ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset, + (void *)byte, len); + if (ret < 0) { + drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", + msg_id, ret); + return ret; + } + + bytes_to_recv -= ret; + byte += ret; + offset += ret; + } + byte = buf; + *byte = msg_id; + + return size; +} + +static +int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port, + bool is_repeater, u8 content_type) +{ + int ret; + struct hdcp2_dp_errata_stream_type stream_type_msg; + + if (is_repeater) + return 0; + + /* + * Errata for DP: As Stream type is used for encryption, Receiver + * should be communicated with stream type for the decryption of the + * content. + * Repeater will be communicated with stream type as a part of it's + * auth later in time. + */ + stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; + stream_type_msg.stream_type = content_type; + + ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg, + sizeof(stream_type_msg)); + + return ret < 0 ? ret : 0; + +} + +static +int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port) +{ + u8 rx_status; + int ret; + + ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); + if (ret) + return ret; + + if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) + ret = HDCP_REAUTH_REQUEST; + else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) + ret = HDCP_LINK_INTEGRITY_FAILURE; + else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + ret = HDCP_TOPOLOGY_CHANGE; + + return ret; +} + +static +int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, + bool *capable) +{ + u8 rx_caps[3]; + int ret; + + *capable = false; + ret = drm_dp_dpcd_read(&dig_port->dp.aux, + DP_HDCP_2_2_REG_RX_CAPS_OFFSET, + rx_caps, HDCP_2_2_RXCAPS_LEN); + if (ret != HDCP_2_2_RXCAPS_LEN) + return ret >= 0 ? 
-EIO : ret; + + if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && + HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) + *capable = true; + + return 0; +} + +static const struct intel_hdcp_shim intel_dp_hdcp_shim = { + .write_an_aksv = intel_dp_hdcp_write_an_aksv, + .read_bksv = intel_dp_hdcp_read_bksv, + .read_bstatus = intel_dp_hdcp_read_bstatus, + .repeater_present = intel_dp_hdcp_repeater_present, + .read_ri_prime = intel_dp_hdcp_read_ri_prime, + .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, + .toggle_signalling = intel_dp_hdcp_toggle_signalling, + .check_link = intel_dp_hdcp_check_link, + .hdcp_capable = intel_dp_hdcp_capable, + .write_2_2_msg = intel_dp_hdcp2_write_msg, + .read_2_2_msg = intel_dp_hdcp2_read_msg, + .config_stream_type = intel_dp_hdcp2_config_stream_type, + .check_2_2_link = intel_dp_hdcp2_check_link, + .hdcp_2_2_capable = intel_dp_hdcp2_capable, + .protocol = HDCP_PROTOCOL_DP, +}; + +static int +intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, + bool enable) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + int ret; + + if (!enable) + usleep_range(6, 60); /* Bspec says >= 6us */ + + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, + cpu_transcoder, enable); + if (ret) + drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n", + enable ? "Enable" : "Disable", ret); + return ret; +} + +static +bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port, + struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *intel_dp = &dig_port->dp; + struct drm_dp_query_stream_enc_status_ack_reply reply; + int ret; + + if (!intel_dp_hdcp_check_link(dig_port, connector)) + return false; + + ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr, + connector->port, &reply); + if (ret) { + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] failed QSES ret=%d\n", + connector->base.base.id, connector->base.name, ret); + return false; + } + + return reply.auth_completed && reply.encryption_enabled; +} + +static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { + .write_an_aksv = intel_dp_hdcp_write_an_aksv, + .read_bksv = intel_dp_hdcp_read_bksv, + .read_bstatus = intel_dp_hdcp_read_bstatus, + .repeater_present = intel_dp_hdcp_repeater_present, + .read_ri_prime = intel_dp_hdcp_read_ri_prime, + .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, + .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling, + .check_link = intel_dp_mst_hdcp_check_link, + .hdcp_capable = intel_dp_hdcp_capable, + + .protocol = HDCP_PROTOCOL_DP, +}; + +int intel_dp_init_hdcp(struct intel_digital_port *dig_port, + struct intel_connector *intel_connector) +{ + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_encoder *intel_encoder = &dig_port->base; + enum port port = intel_encoder->port; + struct intel_dp *intel_dp = &dig_port->dp; + + if (!is_hdcp_supported(dev_priv, port)) + return 0; + + if (intel_connector->mst_port) + return intel_hdcp_init(intel_connector, port, + &intel_dp_mst_hdcp_shim); + else if (!intel_dp_is_edp(intel_dp)) + return intel_hdcp_init(intel_connector, port, + &intel_dp_hdcp_shim); + + return 0; +} diff --git 
a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index f2c8b56be9ea..91d3979902d0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -34,6 +34,152 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) link_status[3], link_status[4], link_status[5]); } +static int intel_dp_lttpr_count(struct intel_dp *intel_dp) +{ + int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps); + + /* + * Pretend no LTTPRs in case of LTTPR detection error, or + * if too many (>8) LTTPRs are detected. This translates to link + * training in transparent mode. + */ + return count <= 0 ? 0 : count; +} + +static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) +{ + intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0; +} + +static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy, + char *buf, size_t buf_size) +{ + if (dp_phy == DP_PHY_DPRX) + snprintf(buf, buf_size, "DPRX"); + else + snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1); + + return buf; +} + +static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1]; +} + +static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); + char phy_name[10]; + + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); + + if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) { + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "failed to read the PHY caps for %s\n", + phy_name); + return; + } + + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "%s PHY capabilities: %*ph\n", + phy_name, + (int)sizeof(intel_dp->lttpr_phy_caps[0]), + phy_caps); +} + +static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp) +{ + if (drm_dp_read_lttpr_common_caps(&intel_dp->aux, + intel_dp->lttpr_common_caps) < 0) { + memset(intel_dp->lttpr_common_caps, 0, + sizeof(intel_dp->lttpr_common_caps)); + return false; + } + + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "LTTPR common capabilities: %*ph\n", + (int)sizeof(intel_dp->lttpr_common_caps), + intel_dp->lttpr_common_caps); + + return true; +} + +static bool +intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) +{ + u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT : + DP_PHY_REPEATER_MODE_NON_TRANSPARENT; + + return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1; +} + +/** + * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode + * @intel_dp: Intel DP struct + * + * Read the LTTPR common capabilities, switch to non-transparent link training + * mode if any is detected and read the PHY capabilities for all detected + * LTTPRs. In case of an LTTPR detection error or if the number of + * LTTPRs is more than is supported (8), fall back to the no-LTTPR, + * transparent mode link training mode. + * + * Returns: + * >0 if LTTPRs were detected and the non-transparent LT mode was set + * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a + * detection failure and the transparent LT mode was set + */ +int intel_dp_lttpr_init(struct intel_dp *intel_dp) +{ + int lttpr_count; + bool ret; + int i; + + if (intel_dp_is_edp(intel_dp)) + return 0; + + ret = intel_dp_read_lttpr_common_caps(intel_dp); + + /* + * See DP Standard v2.0 3.6.6.1. 
about the explicit disabling of + * non-transparent mode and the disable->enable non-transparent mode + * sequence. + */ + intel_dp_set_lttpr_transparent_mode(intel_dp, true); + + if (!ret) + return 0; + + lttpr_count = intel_dp_lttpr_count(intel_dp); + + /* + * In case of unsupported number of LTTPRs or failing to switch to + * non-transparent mode fall-back to transparent link training mode, + * still taking into account any LTTPR common lane- rate/count limits. + */ + if (lttpr_count == 0) + return 0; + + if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) { + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n"); + + intel_dp_set_lttpr_transparent_mode(intel_dp, true); + intel_dp_reset_lttpr_count(intel_dp); + + return 0; + } + + for (i = 0; i < lttpr_count; i++) + intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i)); + + return lttpr_count; +} +EXPORT_SYMBOL(intel_dp_lttpr_init); + static u8 dp_voltage_max(u8 preemph) { switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) { @@ -49,36 +195,109 @@ static u8 dp_voltage_max(u8 preemph) } } -void intel_dp_get_adjust_train(struct intel_dp *intel_dp, - const u8 link_status[DP_LINK_STATUS_SIZE]) +static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); + + if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps)) + return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + else + return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; +} + +static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); + + if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps)) + return DP_TRAIN_PRE_EMPH_LEVEL_3; + else + return DP_TRAIN_PRE_EMPH_LEVEL_2; +} + +static bool +intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int lttpr_count = intel_dp_lttpr_count(intel_dp); + + drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX); + + return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1); +} + +static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 voltage_max; + + /* + * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from + * the DPRX_PHY we train. + */ + if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) + voltage_max = intel_dp->voltage_max(intel_dp, crtc_state); + else + voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1); + + drm_WARN_ON_ONCE(&i915->drm, + voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 && + voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3); + + return voltage_max; +} + +static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 preemph_max; + + /* + * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from + * the DPRX_PHY we train. 
+ */ + if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) + preemph_max = intel_dp->preemph_max(intel_dp); + else + preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1); + + drm_WARN_ON_ONCE(&i915->drm, + preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 && + preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3); + + return preemph_max; +} + +void +intel_dp_get_adjust_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, + const u8 link_status[DP_LINK_STATUS_SIZE]) +{ u8 v = 0; u8 p = 0; int lane; u8 voltage_max; u8 preemph_max; - for (lane = 0; lane < intel_dp->lane_count; lane++) { + for (lane = 0; lane < crtc_state->lane_count; lane++) { v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane)); p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane)); } - preemph_max = intel_dp->preemph_max(intel_dp); - drm_WARN_ON_ONCE(&i915->drm, - preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 && - preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3); - + preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy); if (p >= preemph_max) p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; v = min(v, dp_voltage_max(p)); - voltage_max = intel_dp->voltage_max(intel_dp); - drm_WARN_ON_ONCE(&i915->drm, - voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 && - voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3); - + voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy); if (v >= voltage_max) v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; @@ -86,59 +305,70 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp, intel_dp->train_set[lane] = v | p; } +static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + return dp_phy == DP_PHY_DPRX ? + DP_TRAINING_PATTERN_SET : + DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy); +} + static bool intel_dp_set_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, u8 dp_train_pat) { + int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy); u8 buf[sizeof(intel_dp->train_set) + 1]; - int ret, len; + int len; - intel_dp_program_link_training_pattern(intel_dp, dp_train_pat); + intel_dp_program_link_training_pattern(intel_dp, crtc_state, + dp_train_pat); buf[0] = dp_train_pat; - if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == - DP_TRAINING_PATTERN_DISABLE) { - /* don't write DP_TRAINING_LANEx_SET on disable */ - len = 1; - } else { - /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ - memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count); - len = intel_dp->lane_count + 1; - } - - ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, - buf, len); + /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ + memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count); + len = crtc_state->lane_count + 1; - return ret == len; + return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len; } static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, - u8 dp_train_pat) + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, + u8 dp_train_pat) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); - intel_dp_set_signal_levels(intel_dp); - return intel_dp_set_link_train(intel_dp, dp_train_pat); + intel_dp_set_signal_levels(intel_dp, crtc_state); + return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat); } static bool -intel_dp_update_link_train(struct intel_dp *intel_dp) +intel_dp_update_link_train(struct intel_dp *intel_dp, + const struct 
intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) { + int reg = dp_phy == DP_PHY_DPRX ? + DP_TRAINING_LANE0_SET : + DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy); int ret; - intel_dp_set_signal_levels(intel_dp); + intel_dp_set_signal_levels(intel_dp, crtc_state); - ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, - intel_dp->train_set, intel_dp->lane_count); + ret = drm_dp_dpcd_write(&intel_dp->aux, reg, + intel_dp->train_set, crtc_state->lane_count); - return ret == intel_dp->lane_count; + return ret == crtc_state->lane_count; } -static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp) +static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { int lane; - for (lane = 0; lane < intel_dp->lane_count; lane++) + for (lane = 0; lane < crtc_state->lane_count; lane++) if ((intel_dp->train_set[lane] & DP_TRAIN_MAX_SWING_REACHED) == 0) return false; @@ -146,21 +376,22 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp) return true; } -/* Enable corresponding port and start training pattern 1 */ +/* + * Prepare link training by configuring the link parameters. On DDI platforms + * also enable the port here. + */ static bool -intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) +intel_dp_prepare_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 voltage; - int voltage_tries, cr_tries, max_cr_tries; - bool max_vswing_reached = false; u8 link_config[2]; u8 link_bw, rate_select; if (intel_dp->prepare_link_retrain) - intel_dp->prepare_link_retrain(intel_dp); + intel_dp->prepare_link_retrain(intel_dp, crtc_state); - intel_dp_compute_rate(intel_dp, intel_dp->link_rate, + intel_dp_compute_rate(intel_dp, crtc_state->port_clock, &link_bw, &rate_select); if (link_bw) @@ -172,7 +403,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) /* Write the link configuration data */ link_config[0] = link_bw; - link_config[1] = intel_dp->lane_count; + link_config[1] = crtc_state->lane_count; if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); @@ -188,8 +419,34 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) intel_dp->DP |= DP_PORT_EN; + return true; +} + +static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + if (dp_phy == DP_PHY_DPRX) + drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); + else + drm_dp_lttpr_link_train_clock_recovery_delay(); +} + +/* + * Perform the link training clock recovery phase on the given DP PHY using + * training pattern 1. 
+ */ +static bool +intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 voltage; + int voltage_tries, cr_tries, max_cr_tries; + bool max_vswing_reached = false; + /* clock recovery */ - if (!intel_dp_reset_link_train(intel_dp, + if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) { drm_err(&i915->drm, "failed to enable link training\n"); @@ -213,14 +470,15 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { u8 link_status[DP_LINK_STATUS_SIZE]; - drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); + intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy); - if (!intel_dp_get_link_status(intel_dp, link_status)) { + if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, + link_status) < 0) { drm_err(&i915->drm, "failed to get link status\n"); return false; } - if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { drm_dbg_kms(&i915->drm, "clock recovery OK\n"); return true; } @@ -239,8 +497,9 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Update training set as requested by target */ - intel_dp_get_adjust_train(intel_dp, link_status); - if (!intel_dp_update_link_train(intel_dp)) { + intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, + link_status); + if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, "failed to update link training\n"); return false; @@ -252,7 +511,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) else voltage_tries = 1; - if (intel_dp_link_max_vswing_reached(intel_dp)) + if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state)) max_vswing_reached = true; } @@ -266,7 +525,9 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) * or for 1.4 devices that support it, training Pattern 3 for HBR2 * or 1.2 devices that support it, Training Pattern 2 otherwise. */ -static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) +static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) { bool source_tps3, sink_tps3, source_tps4, sink_tps4; @@ -275,12 +536,14 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) * for all downstream devices that support HBR3. There are no known eDP * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 * specification. + * LTTPRs must support TPS4. */ source_tps4 = intel_dp_source_supports_hbr3(intel_dp); - sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd); + sink_tps4 = dp_phy != DP_PHY_DPRX || + drm_dp_tps4_supported(intel_dp->dpcd); if (source_tps4 && sink_tps4) { return DP_TRAINING_PATTERN_4; - } else if (intel_dp->link_rate == 810000) { + } else if (crtc_state->port_clock == 810000) { if (!source_tps4) drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "8.1 Gbps link rate without source HBR3/TPS4 support\n"); @@ -294,10 +557,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) * all sinks follow the spec. 
*/ source_tps3 = intel_dp_source_supports_hbr2(intel_dp); - sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd); + sink_tps3 = dp_phy != DP_PHY_DPRX || + drm_dp_tps3_supported(intel_dp->dpcd); if (source_tps3 && sink_tps3) { return DP_TRAINING_PATTERN_3; - } else if (intel_dp->link_rate >= 540000) { + } else if (crtc_state->port_clock >= 540000) { if (!source_tps3) drm_dbg_kms(&dp_to_i915(intel_dp)->drm, ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n"); @@ -309,8 +573,28 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) return DP_TRAINING_PATTERN_2; } +static void +intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + if (dp_phy == DP_PHY_DPRX) { + drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); + } else { + const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); + + drm_dp_lttpr_link_train_channel_eq_delay(phy_caps); + } +} + +/* + * Perform the link training channel equalization phase on the given DP PHY + * using one of training pattern 2, 3 or 4 depending on the source and + * sink capabilities. + */ static bool -intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) +intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); int tries; @@ -318,22 +602,23 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; - training_pattern = intel_dp_training_pattern(intel_dp); + training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy); /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ if (training_pattern != DP_TRAINING_PATTERN_4) training_pattern |= DP_LINK_SCRAMBLING_DISABLE; /* channel equalization */ - if (!intel_dp_set_link_train(intel_dp, + if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, training_pattern)) { drm_err(&i915->drm, "failed to start channel equalization\n"); return false; } for (tries = 0; tries < 5; tries++) { - - drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); - if (!intel_dp_get_link_status(intel_dp, link_status)) { + intel_dp_link_training_channel_equalization_delay(intel_dp, + dp_phy); + if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, + link_status) < 0) { drm_err(&i915->drm, "failed to get link status\n"); break; @@ -341,7 +626,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) /* Make sure clock is still ok */ if (!drm_dp_clock_recovery_ok(link_status, - intel_dp->lane_count)) { + crtc_state->lane_count)) { intel_dp_dump_link_status(link_status); drm_dbg_kms(&i915->drm, "Clock recovery check failed, cannot " @@ -350,7 +635,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) } if (drm_dp_channel_eq_ok(link_status, - intel_dp->lane_count)) { + crtc_state->lane_count)) { channel_eq = true; drm_dbg_kms(&i915->drm, "Channel EQ done. 
DP Training " "successful\n"); @@ -358,8 +643,9 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) } /* Update training set as requested by target */ - intel_dp_get_adjust_train(intel_dp, link_status); - if (!intel_dp_update_link_train(intel_dp)) { + intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, + link_status); + if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, "failed to update link training\n"); break; @@ -373,54 +659,142 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) "Channel equalization failed 5 times\n"); } - intel_dp_set_idle_link_train(intel_dp); - return channel_eq; +} +static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy); + u8 val = DP_TRAINING_PATTERN_DISABLE; + + return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1; } -void intel_dp_stop_link_train(struct intel_dp *intel_dp) +/** + * intel_dp_stop_link_train - stop link training + * @intel_dp: DP struct + * @crtc_state: state for CRTC attached to the encoder + * + * Stop the link training of the @intel_dp port, disabling the test pattern + * symbol generation on the port and disabling the training pattern in + * the sink's DPCD. + * + * What symbols are output on the port after this point is + * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern + * with the pipe being disabled, on older platforms it's HW specific if/how an + * idle pattern is generated, as the pipe is already enabled here for those. + * + * This function must be called after intel_dp_start_link_train(). + */ +void intel_dp_stop_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { intel_dp->link_trained = true; - intel_dp_set_link_train(intel_dp, - DP_TRAINING_PATTERN_DISABLE); + intel_dp_program_link_training_pattern(intel_dp, + crtc_state, + DP_TRAINING_PATTERN_DISABLE); + intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); } -void -intel_dp_start_link_train(struct intel_dp *intel_dp) +static bool +intel_dp_link_train_phy(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) { struct intel_connector *intel_connector = intel_dp->attached_connector; + char phy_name[10]; + bool ret = false; - if (!intel_dp_link_training_clock_recovery(intel_dp)) - goto failure_handling; - if (!intel_dp_link_training_channel_equalization(intel_dp)) - goto failure_handling; + if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) + goto out; - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); - return; + if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy)) + goto out; + + ret = true; - failure_handling: +out: drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", + "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s", intel_connector->base.base.id, intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); + ret ? 
"passed" : "failed", + crtc_state->port_clock, crtc_state->lane_count, + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name))); + + return ret; +} + +static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; if (intel_dp->hobl_active) { drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "Link Training failed with HOBL active, not enabling it from now on"); intel_dp->hobl_failed = true; } else if (intel_dp_get_link_train_fallback_values(intel_dp, - intel_dp->link_rate, - intel_dp->lane_count)) { + crtc_state->port_clock, + crtc_state->lane_count)) { return; } /* Schedule a Hotplug Uevent to userspace to start modeset */ schedule_work(&intel_connector->modeset_retry_work); } + +/* Perform the link training on all LTTPRs and the DPRX on a link. */ +static bool +intel_dp_link_train_all_phys(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int lttpr_count) +{ + bool ret = true; + int i; + + intel_dp_prepare_link_train(intel_dp, crtc_state); + + for (i = lttpr_count - 1; i >= 0; i--) { + enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i); + + ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy); + intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy); + + if (!ret) + break; + } + + if (ret) + intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); + + if (intel_dp->set_idle_link_train) + intel_dp->set_idle_link_train(intel_dp, crtc_state); + + return ret; +} + +/** + * intel_dp_start_link_train - start link training + * @intel_dp: DP struct + * @crtc_state: state for CRTC attached to the encoder + * + * Start the link training of the @intel_dp port, scheduling a fallback + * retraining with reduced link rate/lane parameters if the link training + * fails. + * After calling this function intel_dp_stop_link_train() must be called. + */ +void intel_dp_start_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + /* + * TODO: Reiniting LTTPRs here won't be needed once proper connector + * HW state readout is added. 
+ */ + int lttpr_count = intel_dp_lttpr_init(intel_dp); + + if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count)) + intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 01f1dabbb060..86905aa24db7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -8,11 +8,24 @@ #include <drm/drm_dp_helper.h> +struct intel_crtc_state; struct intel_dp; +int intel_dp_lttpr_init(struct intel_dp *intel_dp); + void intel_dp_get_adjust_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]); -void intel_dp_start_link_train(struct intel_dp *intel_dp); -void intel_dp_stop_link_train(struct intel_dp *intel_dp); +void intel_dp_start_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_dp_stop_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); + +/* Get the TPSx symbol type of the value programmed to DP_TRAINING_PATTERN_SET */ +static inline u8 intel_dp_training_pattern_symbol(u8 pattern) +{ + return pattern & ~DP_LINK_SCRAMBLING_DISABLE; +} #endif /* __INTEL_DP_LINK_TRAINING_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index a2d91a499700..c8fcec4d0788 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -37,6 +37,7 @@ #include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" +#include "intel_hdcp.h" static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, @@ -129,7 +130,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, limits.min_lane_count = limits.max_lane_count = intel_dp_max_lane_count(intel_dp); - limits.min_bpp = intel_dp_min_bpp(pipe_config); + limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); /* * FIXME: If all the streams can't fit into the link with * their current pipe_bpp we should reduce pipe_bpp across @@ -317,19 +318,23 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, return ret; } -static void clear_act_sent(struct intel_dp *intel_dp) +static void clear_act_sent(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); - intel_de_write(i915, intel_dp->regs.dp_tp_status, + intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_ACT_SENT); } -static void wait_for_act_sent(struct intel_dp *intel_dp) +static void wait_for_act_sent(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + struct intel_dp *intel_dp = &intel_mst->primary->dp; - if (intel_de_wait_for_set(i915, intel_dp->regs.dp_tp_status, + if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state), DP_TP_STATUS_ACT_SENT, 1)) drm_err(&i915->drm, "Timed out waiting for ACT sent\n"); @@ -352,6 +357,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, drm_dbg_kms(&i915->drm, "active links %d\n", intel_dp->active_mst_links); + 
intel_hdcp_disable(intel_mst->connector); + drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); @@ -389,7 +396,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, drm_dp_update_payload_part2(&intel_dp->mst_mgr); - clear_act_sent(intel_dp); + clear_act_sent(encoder, old_crtc_state); val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder)); @@ -398,7 +405,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val); - wait_for_act_sent(intel_dp); + wait_for_act_sent(encoder, old_crtc_state); drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); @@ -485,7 +492,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_dp->active_mst_links); if (first_mst_stream) - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_set_power(intel_dp, DP_SET_POWER_D0); drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); @@ -532,7 +539,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); - clear_act_sent(intel_dp); + clear_act_sent(encoder, pipe_config); intel_ddi_enable_transcoder_func(encoder, pipe_config); @@ -546,7 +553,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, drm_dbg_kms(&dev_priv->drm, "active links %d\n", intel_dp->active_mst_links); - wait_for_act_sent(intel_dp); + wait_for_act_sent(encoder, pipe_config); drm_dp_update_payload_part2(&intel_dp->mst_mgr); @@ -556,6 +563,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, if (pipe_config->has_audio) intel_audio_codec_enable(encoder, pipe_config, conn_state); + + /* Enable hdcp if it's desired */ + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector), + pipe_config->cpu_transcoder, + (u8)conn_state->hdcp_content_type); } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, @@ -577,6 +591,15 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, intel_ddi_get_config(&dig_port->base, pipe_config); } +static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + struct intel_digital_port *dig_port = intel_mst->primary; + + return intel_dp_initial_fastset_check(&dig_port->base, crtc_state); +} + static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -709,9 +732,13 @@ static int intel_dp_mst_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; @@ -799,6 +826,14 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); + + /* TODO: Figure out how to make HDCP work on GEN12+ */ + if (INTEL_GEN(dev_priv) < 12) { + ret = intel_dp_init_hdcp(dig_port, 
intel_connector); + if (ret) + DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + } + /* * Reuse the prop from the SST connector because we're * not allowed to create new props after device registration. @@ -865,11 +900,13 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; + intel_encoder->update_pipe = intel_ddi_update_pipe; intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; intel_encoder->pre_enable = intel_mst_pre_enable_dp; intel_encoder->enable = intel_mst_enable_dp; intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; intel_encoder->get_config = intel_dp_mst_enc_get_config; + intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check; return intel_mst; diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 7910522273b2..514c4a7adffc 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -644,16 +644,16 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) return mask; } - void chv_set_phy_signal_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, u32 deemph_reg_value, u32 margin_reg_value, bool uniq_trans_scale) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); - enum pipe pipe = intel_crtc->pipe; + enum pipe pipe = crtc->pipe; u32 val; int i; @@ -666,7 +666,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - if (intel_crtc->config->lane_count > 2) { + if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); @@ -679,7 +679,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); - if (intel_crtc->config->lane_count > 2) { + if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; @@ -687,7 +687,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, } /* Program swing deemph */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { + for (i = 0; i < crtc_state->lane_count; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); val &= ~DPIO_SWING_DEEMPH9P5_MASK; val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; @@ -695,7 +695,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, } /* Program swing margin */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { + for (i = 0; i < crtc_state->lane_count; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); val &= ~DPIO_SWING_MARGIN000_MASK; @@ -718,7 +718,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, * For now, for this unique transition scale selection, set 
bit * 27 for ch0 and ch1. */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { + for (i = 0; i < crtc_state->lane_count; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); if (uniq_trans_scale) val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; @@ -732,7 +732,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - if (intel_crtc->config->lane_count > 2) { + if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); @@ -992,14 +992,15 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder, } void vlv_set_phy_signal_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, u32 demph_reg_value, u32 preemph_reg_value, u32 uniqtranscale_reg_value, u32 tx3_demph) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); - enum pipe pipe = intel_crtc->pipe; + enum pipe pipe = crtc->pipe; vlv_dpio_get(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h index f418aab90b7e..6473440e7457 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h @@ -32,6 +32,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); void chv_set_phy_signal_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, u32 deemph_reg_value, u32 margin_reg_value, bool uniq_trans_scale); void chv_data_lane_soft_reset(struct intel_encoder *encoder, @@ -46,6 +47,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state); void vlv_set_phy_signal_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, u32 demph_reg_value, u32 preemph_reg_value, u32 uniqtranscale_reg_value, u32 tx3_demph); void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index c9013f8f766f..eaef7a2d041f 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -147,6 +147,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, pll->info->name, onoff(state), onoff(cur_state)); } +static i915_reg_t +intel_combo_pll_enable_reg(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + if (IS_DG1(i915)) + return DG1_DPLL_ENABLE(pll->info->id); + else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4)) + return MG_PLL_ENABLE(0); + + return CNL_DPLL_ENABLE(pll->info->id); +} + /** * intel_prepare_shared_dpll - call a dpll's prepare hook * @crtc_state: CRTC, and its state, which has a shared dpll @@ -1590,9 +1602,19 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, case DPLL_CFGCR2_PDIV_3: p0 = 3; break; + case DPLL_CFGCR2_PDIV_7_INVALID: + /* + * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0, + * handling it the same way as PDIV_7. 
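intel_combo_pll_enable_reg() above centralizes a choice that the rest of this patch then removes from three call sites. The decision tree on its own, with an enum standing in for the real register macros and a placeholder DPLL4 id (the real value lives in intel_dpll_mgr.h):

enum combo_enable_reg {
        REG_DG1_DPLL_ENABLE,    /* DG1_DPLL_ENABLE(id) */
        REG_MG_PLL_ENABLE_0,    /* MG_PLL_ENABLE(0), JSL/EHL DPLL4 only */
        REG_CNL_DPLL_ENABLE,    /* CNL_DPLL_ENABLE(id), everything else */
};

enum platform { PLAT_OTHER, PLAT_JSL_EHL, PLAT_DG1 };

#define DPLL_ID_EHL_DPLL4 2     /* placeholder id for illustration */

static enum combo_enable_reg combo_pll_enable_reg(enum platform p, int pll_id)
{
        if (p == PLAT_DG1)
                return REG_DG1_DPLL_ENABLE;
        if (p == PLAT_JSL_EHL && pll_id == DPLL_ID_EHL_DPLL4)
                return REG_MG_PLL_ENABLE_0;
        return REG_CNL_DPLL_ENABLE;
}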
+ */ + drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n"); + fallthrough; case DPLL_CFGCR2_PDIV_7: p0 = 7; break; + default: + MISSING_CASE(p0); + return 0; } switch (p2) { @@ -1608,6 +1630,9 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, case DPLL_CFGCR2_KDIV_1: p2 = 1; break; + default: + MISSING_CASE(p2); + return 0; } dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) * @@ -2610,11 +2635,22 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) return true; } +/* + * Display WA #22010492432: tgl + * Program half of the nominal DCO divider fraction value. + */ +static bool +tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) +{ + return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400; +} + static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, const struct intel_shared_dpll *pll, int ref_clock) { const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + u32 dco_fraction; u32 p0, p1, p2, dco_freq; p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; @@ -2657,8 +2693,13 @@ static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; - dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> - DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; + dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + DPLL_CFGCR0_DCO_FRACTION_SHIFT; + + if (tgl_combo_pll_div_frac_wa_needed(dev_priv)) + dco_fraction *= 2; + + dco_freq += (dco_fraction * ref_clock) / 0x8000; if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0)) return 0; @@ -2936,16 +2977,6 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { /* the following params are unused */ }; -/* - * Display WA #22010492432: tgl - * Divide the nominal .dco_fraction value by 2. 
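The two halves of Wa_22010492432 stay consistent: icl_calc_dpll_state() halves the nominal DCO fraction before programming (in the hunk that follows), and the readout path above doubles the programmed value back. The readout arithmetic as a standalone sketch; the register values in main() are made up for illustration:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static uint32_t dco_freq_khz(uint32_t dco_integer, uint32_t dco_fraction,
                             uint32_t ref_khz, bool frac_wa)
{
        if (frac_wa)            /* TGL with a 38.4 MHz non-SSC reference */
                dco_fraction *= 2;

        /* fraction is in units of ref/0x8000, as in the patch above */
        return dco_integer * ref_khz +
               (uint32_t)(((uint64_t)dco_fraction * ref_khz) / 0x8000);
}

int main(void)
{
        /* illustrative values: integer 0x151, programmed fraction 0x2000 */
        printf("%u kHz\n", (unsigned)dco_freq_khz(0x151, 0x2000, 38400, true));
        return 0;
}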
- */ -static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = { - .dco_integer = 0x54, .dco_fraction = 0x1800, - /* the following params are unused */ - .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0, -}; - static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { @@ -2979,14 +3010,12 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, MISSING_CASE(dev_priv->dpll.ref_clks.nssc); fallthrough; case 19200: + case 38400: *pll_params = tgl_tbt_pll_19_2MHz_values; break; case 24000: *pll_params = tgl_tbt_pll_24MHz_values; break; - case 38400: - *pll_params = tgl_tbt_pll_38_4MHz_values; - break; } } else { switch (dev_priv->dpll.ref_clks.nssc) { @@ -3053,9 +3082,14 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, const struct skl_wrpll_params *pll_params, struct intel_dpll_hw_state *pll_state) { + u32 dco_fraction = pll_params->dco_fraction; + memset(pll_state, 0, sizeof(*pll_state)); - pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) | + if (tgl_combo_pll_div_frac_wa_needed(i915)) + dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2); + + pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) | pll_params->dco_integer; pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) | @@ -3512,12 +3546,22 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); - if (IS_ROCKETLAKE(dev_priv)) { + if (IS_DG1(dev_priv)) { + if (port == PORT_D || port == PORT_E) { + dpll_mask = + BIT(DPLL_ID_DG1_DPLL2) | + BIT(DPLL_ID_DG1_DPLL3); + } else { + dpll_mask = + BIT(DPLL_ID_DG1_DPLL0) | + BIT(DPLL_ID_DG1_DPLL1); + } + } else if (IS_ROCKETLAKE(dev_priv)) { dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); - } else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) { + } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) { dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | BIT(DPLL_ID_ICL_DPLL1) | @@ -3808,7 +3852,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!(val & PLL_ENABLE)) goto out; - if (IS_ROCKETLAKE(dev_priv)) { + if (IS_DG1(dev_priv)) { + hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id)); + } else if (IS_ROCKETLAKE(dev_priv)) { hw_state->cfgcr0 = intel_de_read(dev_priv, RKL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, @@ -3819,7 +3866,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->cfgcr1 = intel_de_read(dev_priv, TGL_DPLL_CFGCR1(id)); } else { - if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { + if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) { hw_state->cfgcr0 = intel_de_read(dev_priv, ICL_DPLL_CFGCR0(4)); hw_state->cfgcr1 = intel_de_read(dev_priv, @@ -3842,12 +3889,7 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); - - if (IS_ELKHARTLAKE(dev_priv) && - pll->info->id == DPLL_ID_EHL_DPLL4) { - enable_reg = MG_PLL_ENABLE(0); - } + i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg); } @@ -3866,14 +3908,17 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; i915_reg_t cfgcr0_reg, cfgcr1_reg; - if 
(IS_ROCKETLAKE(dev_priv)) { + if (IS_DG1(dev_priv)) { + cfgcr0_reg = DG1_DPLL_CFGCR0(id); + cfgcr1_reg = DG1_DPLL_CFGCR1(id); + } else if (IS_ROCKETLAKE(dev_priv)) { cfgcr0_reg = RKL_DPLL_CFGCR0(id); cfgcr1_reg = RKL_DPLL_CFGCR1(id); } else if (INTEL_GEN(dev_priv) >= 12) { cfgcr0_reg = TGL_DPLL_CFGCR0(id); cfgcr1_reg = TGL_DPLL_CFGCR1(id); } else { - if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { + if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) { cfgcr0_reg = ICL_DPLL_CFGCR0(4); cfgcr1_reg = ICL_DPLL_CFGCR1(4); } else { @@ -4045,11 +4090,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, static void combo_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); - if (IS_ELKHARTLAKE(dev_priv) && + if (IS_JSL_EHL(dev_priv) && pll->info->id == DPLL_ID_EHL_DPLL4) { - enable_reg = MG_PLL_ENABLE(0); /* * We need to disable DC states when this DPLL is enabled. @@ -4157,19 +4201,14 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, static void combo_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll); - if (IS_ELKHARTLAKE(dev_priv) && - pll->info->id == DPLL_ID_EHL_DPLL4) { - enable_reg = MG_PLL_ENABLE(0); - icl_pll_disable(dev_priv, pll, enable_reg); + icl_pll_disable(dev_priv, pll, enable_reg); + if (IS_JSL_EHL(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, pll->wakeref); - return; - } - - icl_pll_disable(dev_priv, pll, enable_reg); } static void tbt_pll_disable(struct drm_i915_private *dev_priv, @@ -4316,6 +4355,22 @@ static const struct intel_dpll_mgr rkl_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct dpll_info dg1_plls[] = { + { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 }, + { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 }, + { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 }, + { }, +}; + +static const struct intel_dpll_mgr dg1_pll_mgr = { + .dpll_info = dg1_plls, + .get_dplls = icl_get_dplls, + .put_dplls = icl_put_dplls, + .update_ref_clks = icl_update_dpll_ref_clks, + .dump_hw_state = icl_dump_hw_state, +}; + /** * intel_shared_dpll_init - Initialize shared DPLLs * @dev: drm device @@ -4329,11 +4384,13 @@ void intel_shared_dpll_init(struct drm_device *dev) const struct dpll_info *dpll_info; int i; - if (IS_ROCKETLAKE(dev_priv)) + if (IS_DG1(dev_priv)) + dpll_mgr = &dg1_pll_mgr; + else if (IS_ROCKETLAKE(dev_priv)) dpll_mgr = &rkl_pll_mgr; else if (INTEL_GEN(dev_priv) >= 12) dpll_mgr = &tgl_pll_mgr; - else if (IS_ELKHARTLAKE(dev_priv)) + else if (IS_JSL_EHL(dev_priv)) dpll_mgr = &ehl_pll_mgr; else if (INTEL_GEN(dev_priv) >= 11) dpll_mgr = &icl_pll_mgr; @@ -4475,7 +4532,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, pll->on = pll->info->funcs->get_hw_state(i915, pll, &pll->state.hw_state); - if (IS_ELKHARTLAKE(i915) && pll->on && + if (IS_JSL_EHL(i915) && pll->on && pll->info->id == DPLL_ID_EHL_DPLL4) { pll->wakeref = intel_display_power_get(i915, POWER_DOMAIN_DPLL_DC_OFF); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 5d9a2bc371e7..205542fb8dc7 100644 --- 
a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -154,6 +154,23 @@ enum intel_dpll_id { * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6) */ DPLL_ID_TGL_MGPLL6 = 8, + + /** + * @DPLL_ID_DG1_DPLL0: DG1 combo PHY DPLL0 + */ + DPLL_ID_DG1_DPLL0 = 0, + /** + * @DPLL_ID_DG1_DPLL1: DG1 combo PHY DPLL1 + */ + DPLL_ID_DG1_DPLL1 = 1, + /** + * @DPLL_ID_DG1_DPLL2: DG1 combo PHY DPLL2 + */ + DPLL_ID_DG1_DPLL2 = 2, + /** + * @DPLL_ID_DG1_DPLL3: DG1 combo PHY DPLL3 + */ + DPLL_ID_DG1_DPLL3 = 3, }; #define I915_NUM_PLLS 9 diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index 19f78a4022d3..625f2f1ae061 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -167,6 +167,7 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) /* icl_dsi.c */ void icl_dsi_init(struct drm_i915_private *dev_priv); +void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); /* intel_dsi.c */ int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 307ed8ae9a19..237dbb1ba0ee 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -313,9 +313,15 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state, static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector, bool force) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 135f5e8a4d70..a5b072816a7b 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -907,6 +907,13 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } + /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ + if (INTEL_GEN(dev_priv) >= 11 && + (cache->plane.src_h + cache->plane.adjusted_y) % 4) { + fbc->no_fbc_reason = "plane height + offset is non-modulo of 4"; + return false; + } + return true; } diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index bd39eb6a21b8..842c04e63214 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -451,8 +451,7 @@ int intel_fbdev_init(struct drm_device *dev) struct intel_fbdev *ifbdev; int ret; - if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) || - !INTEL_DISPLAY_ENABLED(dev_priv))) + if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv))) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index a8d119b6b45c..b0d71bbbf2ad 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -90,11 +90,20 @@ static const struct gmbus_pin gmbus_pins_icp[] = { [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO }, }; +static const struct gmbus_pin gmbus_pins_dg1[] = { + [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, + [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, + [GMBUS_PIN_4_CNP] = { "dpd", 
GPIOE }, +}; + /* pin is expected to be valid */ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, unsigned int pin) { - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + return &gmbus_pins_dg1[pin]; + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) return &gmbus_pins_icp[pin]; else if (HAS_PCH_CNP(dev_priv)) return &gmbus_pins_cnp[pin]; @@ -113,7 +122,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, { unsigned int size; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + size = ARRAY_SIZE(gmbus_pins_dg1); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) size = ARRAY_SIZE(gmbus_pins_icp); else if (HAS_PCH_CNP(dev_priv)) size = ARRAY_SIZE(gmbus_pins_cnp); @@ -834,7 +845,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) unsigned int pin; int ret; - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) + if (!HAS_DISPLAY(dev_priv)) return 0; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 1a0d49af2a08..b2a4bbcfdcd2 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -148,9 +148,8 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *power_well; enum i915_power_well_id id; + intel_wakeref_t wakeref; bool enabled = false; /* @@ -162,17 +161,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) else id = SKL_DISP_PW_1; - mutex_lock(&power_domains->lock); - /* PG1 (power well #1) needs to be enabled */ - for_each_power_well(dev_priv, power_well) { - if (power_well->desc->id == id) { - enabled = power_well->desc->ops->is_enabled(dev_priv, - power_well); - break; - } - } - mutex_unlock(&power_domains->lock); + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) + enabled = intel_display_power_well_is_enabled(dev_priv, id); /* * Another req for hdcp key loadability is enabled state of pll for @@ -713,7 +704,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) intel_de_write(dev_priv, HDCP_REP_CTL, intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port)); - ret = shim->toggle_signalling(dig_port, true); + ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); if (ret) return ret; @@ -801,6 +792,19 @@ static int _intel_hdcp_disable(struct intel_connector *connector) drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); + /* + * If there are other connectors on this port using HDCP, don't disable + * it. Instead, toggle the HDCP signalling off on that particular + * connector/pipe and exit. 
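get_gmbus_pin() above depends on testing the newest PCH first: a DG1 PCH also satisfies the >= PCH_ICP comparison, so ordering the checks the other way would pick the wrong table. A compact model of the lookup plus the companion validity test; the table contents and GPIO values are abbreviated:

#include <stddef.h>
#include <stdbool.h>

struct gmbus_pin { const char *name; int gpio_ctl; };

/* Abbreviated stand-in for gmbus_pins_dg1[]; holes stay zero-filled. */
static const struct gmbus_pin pins_dg1[] = {
        [1] = { "dpa", 0 }, [2] = { "dpb", 0 },
        [3] = { "dpc", 0 }, [4] = { "dpd", 0 },
};

enum pch_type { PCH_CNP = 1, PCH_ICP, PCH_DG1 };    /* ascending age order */

static bool gmbus_pin_valid(enum pch_type pch, unsigned int pin)
{
        const struct gmbus_pin *table = NULL;
        size_t size = 0;

        if (pch >= PCH_DG1) {           /* newest first, as in the patch */
                table = pins_dg1;
                size = sizeof(pins_dg1) / sizeof(pins_dg1[0]);
        }
        /* ... ICP/CNP and older tables elided ... */

        /* A pin is valid only if it is in range and actually populated. */
        return table && pin < size && table[pin].name;
}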
+ */ + if (dig_port->num_hdcp_streams > 0) { + ret = hdcp->shim->toggle_signalling(dig_port, + cpu_transcoder, false); + if (ret) + DRM_ERROR("Failed to disable HDCP signalling\n"); + return ret; + } + hdcp->hdcp_encrypted = false; intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0); if (intel_de_wait_for_clear(dev_priv, @@ -816,7 +820,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) intel_de_write(dev_priv, HDCP_REP_CTL, intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl); - ret = hdcp->shim->toggle_signalling(dig_port, false); + ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n"); return ret; @@ -876,6 +880,34 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) return container_of(hdcp, struct intel_connector, hdcp); } +static void intel_hdcp_update_value(struct intel_connector *connector, + u64 value, bool update_property) +{ + struct drm_device *dev = connector->base.dev; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + + drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex)); + + if (hdcp->value == value) + return; + + drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex)); + + if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0)) + dig_port->num_hdcp_streams--; + } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + dig_port->num_hdcp_streams++; + } + + hdcp->value = value; + if (update_property) { + drm_connector_get(&connector->base); + schedule_work(&hdcp->prop_work); + } +} + /* Implements Part 3 of the HDCP authorization procedure */ static int intel_hdcp_check_link(struct intel_connector *connector) { @@ -887,6 +919,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector) int ret = 0; mutex_lock(&hdcp->mutex); + mutex_lock(&dig_port->hdcp_mutex); + cpu_transcoder = hdcp->cpu_transcoder; /* Check_link valid only when HDCP1.4 is enabled */ @@ -903,15 +937,16 @@ static int intel_hdcp_check_link(struct intel_connector *connector) connector->base.name, connector->base.base.id, intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port))); ret = -ENXIO; - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } - if (hdcp->shim->check_link(dig_port)) { + if (hdcp->shim->check_link(dig_port, connector)) { if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, true); } goto out; } @@ -923,20 +958,23 @@ static int intel_hdcp_check_link(struct intel_connector *connector) ret = _intel_hdcp_disable(connector); if (ret) { drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } ret = _intel_hdcp_enable(connector); if (ret) { drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } out: 
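intel_hdcp_update_value() above adds per-port accounting so that streams sharing a digital port do not tear down each other's encryption: the port counts connectors currently in the ENABLED state. The transition rule in isolation, with types simplified from the i915 structs:

#include <stdbool.h>

enum cp_value { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

struct port_state { int num_hdcp_streams; };
struct conn_state { enum cp_value value; };

static void hdcp_update_value(struct port_state *port,
                              struct conn_state *conn, enum cp_value value)
{
        if (conn->value == value)
                return;

        if (conn->value == CP_ENABLED)
                port->num_hdcp_streams--;       /* leaving ENABLED */
        else if (value == CP_ENABLED)
                port->num_hdcp_streams++;       /* entering ENABLED */

        conn->value = value;
}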
+ mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); return ret; } @@ -962,6 +1000,8 @@ static void intel_hdcp_prop_work(struct work_struct *work) mutex_unlock(&hdcp->mutex); drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); + + drm_connector_put(&connector->base); } bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) @@ -1405,10 +1445,9 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector) } static -int hdcp2_propagate_stream_management_info(struct intel_connector *connector) +int _hdcp2_propagate_stream_management_info(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_stream_manage stream_manage; @@ -1417,6 +1456,9 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector) const struct intel_hdcp_shim *shim = hdcp->shim; int ret; + if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) + return -ERANGE; + /* Prepare RepeaterAuth_Stream_Manage msg */ msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); @@ -1432,28 +1474,21 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector) ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage, sizeof(msgs.stream_manage)); if (ret < 0) - return ret; + goto out; ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY, &msgs.stream_ready, sizeof(msgs.stream_ready)); if (ret < 0) - return ret; + goto out; hdcp->port_data.seq_num_m = hdcp->seq_num_m; hdcp->port_data.streams[0].stream_type = hdcp->content_type; - ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); - if (ret < 0) - return ret; +out: hdcp->seq_num_m++; - if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { - drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n"); - return -1; - } - - return 0; + return ret; } static @@ -1524,17 +1559,6 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) return 0; } -static int hdcp2_authenticate_repeater(struct intel_connector *connector) -{ - int ret; - - ret = hdcp2_authenticate_repeater_topology(connector); - if (ret < 0) - return ret; - - return hdcp2_propagate_stream_management_info(connector); -} - static int hdcp2_authenticate_sink(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); @@ -1571,7 +1595,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) } if (hdcp->is_repeater) { - ret = hdcp2_authenticate_repeater(connector); + ret = hdcp2_authenticate_repeater_topology(connector); if (ret < 0) { drm_dbg_kms(&i915->drm, "Repeater Auth Failed. Err: %d\n", ret); @@ -1579,11 +1603,6 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) } } - hdcp->port_data.streams[0].stream_type = hdcp->content_type; - ret = hdcp2_authenticate_port(connector); - if (ret < 0) - return ret; - return ret; } @@ -1600,7 +1619,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { - ret = hdcp->shim->toggle_signalling(dig_port, true); + ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, + true); if (ret) { drm_err(&dev_priv->drm, "Failed to enable HDCP signalling. 
%d\n", @@ -1650,7 +1670,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { - ret = hdcp->shim->toggle_signalling(dig_port, false); + ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, + false); if (ret) { drm_err(&dev_priv->drm, "Failed to disable HDCP signalling. %d\n", @@ -1662,15 +1683,59 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) return ret; } +static int +hdcp2_propagate_stream_management_info(struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + int i, tries = 3, ret; + + if (!connector->hdcp.is_repeater) + return 0; + + for (i = 0; i < tries; i++) { + ret = _hdcp2_propagate_stream_management_info(connector); + if (!ret) + break; + + /* Lets restart the auth incase of seq_num_m roll over */ + if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { + drm_dbg_kms(&i915->drm, + "seq_num_m roll over.(%d)\n", ret); + break; + } + + drm_dbg_kms(&i915->drm, + "HDCP2 stream management %d of %d Failed.(%d)\n", + i + 1, tries, ret); + } + + return ret; +} + static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; int ret, i, tries = 3; for (i = 0; i < tries; i++) { ret = hdcp2_authenticate_sink(connector); - if (!ret) - break; + if (!ret) { + ret = hdcp2_propagate_stream_management_info(connector); + if (ret) { + drm_dbg_kms(&i915->drm, + "Stream management failed.(%d)\n", + ret); + break; + } + hdcp->port_data.streams[0].stream_type = + hdcp->content_type; + ret = hdcp2_authenticate_port(connector); + if (!ret) + break; + drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n", + ret); + } /* Clearing the mei hdcp session */ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", @@ -1679,7 +1744,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); } - if (i != tries) { + if (!ret) { /* * Ensuring the required 200mSec min time interval between * Session Key Exchange and encryption. 
@@ -1766,16 +1831,18 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) "HDCP2.2 link stopped the encryption, %x\n", intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port))); ret = -ENXIO; - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } ret = hdcp->shim->check_2_2_link(dig_port); if (ret == HDCP_LINK_PROTECTED) { if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, + true); } goto out; } @@ -1788,8 +1855,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) "HDCP2.2 Downstream topology change\n"); ret = hdcp2_authenticate_repeater_topology(connector); if (!ret) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, + true); goto out; } drm_dbg_kms(&dev_priv->drm, @@ -1807,8 +1875,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) drm_err(&dev_priv->drm, "[%s:%d] Failed to disable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, true); goto out; } @@ -1818,8 +1886,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) "[%s:%d] Failed to enable hdcp2.2 (%d)\n", connector->base.name, connector->base.base.id, ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED, + true); goto out; } @@ -1835,6 +1904,9 @@ static void intel_hdcp_check_work(struct work_struct *work) check_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); + if (drm_connector_is_unregistered(&connector->base)) + return; + if (!intel_hdcp2_check_link(connector)) schedule_delayed_work(&hdcp->check_work, DRM_HDCP2_CHECK_PERIOD_MS); @@ -1896,6 +1968,7 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) } static int initialize_hdcp_port_data(struct intel_connector *connector, + enum port port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1903,8 +1976,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, struct hdcp_port_data *data = &hdcp->port_data; if (INTEL_GEN(dev_priv) < 12) - data->fw_ddi = - intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port); + data->fw_ddi = intel_get_mei_fw_ddi_index(port); else /* * As per ME FW API expectation, for GEN 12+, fw_ddi is filled @@ -1974,14 +2046,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv) } } -static void intel_hdcp2_init(struct intel_connector *connector, +static void intel_hdcp2_init(struct intel_connector *connector, enum port port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; - ret = initialize_hdcp_port_data(connector, shim); + ret = initialize_hdcp_port_data(connector, port, shim); if (ret) { drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n"); return; @@ -1991,6 +2063,7 @@ static void 
intel_hdcp2_init(struct intel_connector *connector, } int intel_hdcp_init(struct intel_connector *connector, + enum port port, const struct intel_hdcp_shim *shim) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -2000,8 +2073,8 @@ int intel_hdcp_init(struct intel_connector *connector, if (!shim) return -EINVAL; - if (is_hdcp2_supported(dev_priv)) - intel_hdcp2_init(connector, shim); + if (is_hdcp2_supported(dev_priv) && !connector->mst_port) + intel_hdcp2_init(connector, port, shim); ret = drm_connector_attach_content_protection_property(&connector->base, @@ -2025,6 +2098,7 @@ int intel_hdcp_enable(struct intel_connector *connector, enum transcoder cpu_transcoder, u8 content_type) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; int ret = -EINVAL; @@ -2033,14 +2107,14 @@ int intel_hdcp_enable(struct intel_connector *connector, return -ENOENT; mutex_lock(&hdcp->mutex); + mutex_lock(&dig_port->hdcp_mutex); drm_WARN_ON(&dev_priv->drm, hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = content_type; + hdcp->cpu_transcoder = cpu_transcoder; - if (INTEL_GEN(dev_priv) >= 12) { - hdcp->cpu_transcoder = cpu_transcoder; + if (INTEL_GEN(dev_priv) >= 12) hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); - } /* * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup @@ -2063,16 +2137,19 @@ int intel_hdcp_enable(struct intel_connector *connector, if (!ret) { schedule_delayed_work(&hdcp->check_work, check_link_interval); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED, + true); } + mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); return ret; } int intel_hdcp_disable(struct intel_connector *connector) { + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; int ret = 0; @@ -2080,15 +2157,20 @@ int intel_hdcp_disable(struct intel_connector *connector) return -ENOENT; mutex_lock(&hdcp->mutex); + mutex_lock(&dig_port->hdcp_mutex); - if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; - if (hdcp->hdcp2_encrypted) - ret = _intel_hdcp2_disable(connector); - else if (hdcp->hdcp_encrypted) - ret = _intel_hdcp_disable(connector); - } + if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + goto out; + intel_hdcp_update_value(connector, + DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); + if (hdcp->hdcp2_encrypted) + ret = _intel_hdcp2_disable(connector); + else if (hdcp->hdcp_encrypted) + ret = _intel_hdcp_disable(connector); + +out: + mutex_unlock(&dig_port->hdcp_mutex); mutex_unlock(&hdcp->mutex); cancel_delayed_work_sync(&hdcp->check_work); return ret; @@ -2102,11 +2184,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_hdcp *hdcp = &connector->hdcp; - bool content_protection_type_changed = + bool content_protection_type_changed, desired_and_not_enabled = false; + + if (!connector->hdcp.shim) + return; + + content_protection_type_changed = (conn_state->hdcp_content_type != hdcp->content_type && conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED); - bool 
desired_and_not_enabled = false; /* * During the HDCP encryption session if Type change is requested, @@ -2159,12 +2245,39 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) void intel_hdcp_cleanup(struct intel_connector *connector) { - if (!connector->hdcp.shim) + struct intel_hdcp *hdcp = &connector->hdcp; + + if (!hdcp->shim) return; - mutex_lock(&connector->hdcp.mutex); - kfree(connector->hdcp.port_data.streams); - mutex_unlock(&connector->hdcp.mutex); + /* + * If the connector is registered, it's possible userspace could kick + * off another HDCP enable, which would re-spawn the workers. + */ + drm_WARN_ON(connector->base.dev, + connector->base.registration_state == DRM_CONNECTOR_REGISTERED); + + /* + * Now that the connector is not registered, check_work won't be run, + * but cancel any outstanding instances of it + */ + cancel_delayed_work_sync(&hdcp->check_work); + + /* + * We don't cancel prop_work in the same way as check_work since it + * requires connection_mutex which could be held while calling this + * function. Instead, we rely on the connector references grabbed before + * scheduling prop_work to ensure the connector is alive when prop_work + * is run. So if we're in the destroy path (which is where this + * function should be called), we're "guaranteed" that prop_work is not + * active (tl;dr This Should Never Happen). + */ + drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); + + mutex_lock(&hdcp->mutex); + kfree(hdcp->port_data.streams); + hdcp->shim = NULL; + mutex_unlock(&hdcp->mutex); } void intel_hdcp_atomic_check(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 86bbaec120cc..1bbf5b67ed0a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -22,7 +22,7 @@ enum transcoder; void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); -int intel_hdcp_init(struct intel_connector *connector, +int intel_hdcp_init(struct intel_connector *connector, enum port port, const struct intel_hdcp_shim *hdcp_shim); int intel_hdcp_enable(struct intel_connector *connector, enum transcoder cpu_transcoder, u8 content_type); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index de2ce5632b94..f90838bc74fb 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1477,7 +1477,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, return ret; } -static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) +static int kbl_repositioning_enc_en_signal(struct intel_connector *connector, + enum transcoder cpu_transcoder) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); @@ -1494,13 +1495,15 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) usleep_range(25, 50); } - ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder, + false); if (ret) { drm_err(&dev_priv->drm, "Disable HDCP signalling failed (%d)\n", ret); return ret; } - ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder, + true); if (ret) { drm_err(&dev_priv->drm, 
"Enable HDCP signalling failed (%d)\n", ret); @@ -1512,6 +1515,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) static int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, + enum transcoder cpu_transcoder, bool enable) { struct intel_hdmi *hdmi = &dig_port->hdmi; @@ -1522,7 +1526,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, if (!enable) usleep_range(6, 60); /* Bspec says >= 6us */ - ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder, + enable); if (ret) { drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n", enable ? "Enable" : "Disable", ret); @@ -1534,17 +1539,17 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, * opportunity and enc_en signalling in KABYLAKE. */ if (IS_KABYLAKE(dev_priv) && enable) - return kbl_repositioning_enc_en_signal(connector); + return kbl_repositioning_enc_en_signal(connector, + cpu_transcoder); return 0; } static -bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port) +bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port, + struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_connector *connector = - dig_port->hdmi.attached_connector; enum port port = dig_port->base.port; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; int ret; @@ -1572,13 +1577,14 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port) } static -bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port) +bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port, + struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int retry; for (retry = 0; retry < 3; retry++) - if (intel_hdmi_hdcp_check_link_once(dig_port)) + if (intel_hdmi_hdcp_check_link_once(dig_port, connector)) return true; drm_err(&i915->drm, "Link check failed\n"); @@ -2271,35 +2277,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return intel_mode_valid_max_plane_size(dev_priv, mode); } -static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, - int bpc) +bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, + int bpc, bool has_hdmi_sink, bool ycbcr420_output) { - struct drm_i915_private *dev_priv = - to_i915(crtc_state->uapi.crtc->dev); struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_connector_state *connector_state; struct drm_connector *connector; - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; int i; - if (HAS_GMCH(dev_priv)) - return false; - - if (bpc == 10 && INTEL_GEN(dev_priv) < 11) - return false; - if (crtc_state->pipe_bpp < bpc * 3) return false; - if (!crtc_state->has_hdmi_sink) - return false; - - /* - * HDMI deep color affects the clocks, so it's only possible - * when not cloning with other encoder types. 
- */ - if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI) + if (!has_hdmi_sink) return false; for_each_new_connector_in_state(state, connector, connector_state, i) { @@ -2308,7 +2297,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, if (connector_state->crtc != crtc_state->uapi.crtc) continue; - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + if (ycbcr420_output) { const struct drm_hdmi_info *hdmi = &info->hdmi; if (bpc == 12 && !(hdmi->y420_dc_modes & @@ -2327,6 +2316,30 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, } } + return true; +} + +static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, + int bpc) +{ + struct drm_i915_private *dev_priv = + to_i915(crtc_state->uapi.crtc->dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (HAS_GMCH(dev_priv)) + return false; + + if (bpc == 10 && INTEL_GEN(dev_priv) < 11) + return false; + + /* + * HDMI deep color affects the clocks, so it's only possible + * when not cloning with other encoder types. + */ + if (crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI)) + return false; + /* Display Wa_1405510057:icl,ehl */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && bpc == 10 && IS_GEN(dev_priv, 11) && @@ -2334,7 +2347,10 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, adjusted_mode->crtc_hblank_start) % 8 == 2) return false; - return true; + return intel_hdmi_deep_color_possible(crtc_state, bpc, + crtc_state->has_hdmi_sink, + crtc_state->output_format == + INTEL_OUTPUT_FORMAT_YCBCR420); } static int @@ -2459,6 +2475,23 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, } } +static bool intel_hdmi_has_audio(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + + if (!crtc_state->has_hdmi_sink) + return false; + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + return intel_hdmi->has_audio; + else + return intel_conn_state->force_audio == HDMI_AUDIO_ON; +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2468,8 +2501,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct drm_connector *connector = conn_state->connector; struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; - struct intel_digital_connector_state *intel_conn_state = - to_intel_digital_connector_state(conn_state); int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -2495,13 +2526,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv)) pipe_config->has_pch_encoder = true; - if (pipe_config->has_hdmi_sink) { - if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - pipe_config->has_audio = intel_hdmi->has_audio; - else - pipe_config->has_audio = - intel_conn_state->force_audio == HDMI_AUDIO_ON; - } + pipe_config->has_audio = + intel_hdmi_has_audio(encoder, pipe_config, conn_state); ret = intel_hdmi_compute_clock(encoder, pipe_config); if (ret) @@ -2667,6 +2693,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) 
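[Editor's note] The split above moves the sink-side deep color test (pipe bpp budget, HDMI sink present, EDID deep-color caps) into intel_hdmi_deep_color_possible() so it can be shared, while platform gating (GMCH, gen, cloning, the ICL workaround) stays in the static wrapper. A compact model of the sink-side rules, with the EDID capabilities reduced to plain fields (values and field names here are illustrative, not the drm_display_info layout):

#include <stdbool.h>
#include <stdio.h>

struct sink_caps {
	bool has_hdmi_sink;
	bool y420_dc_30, y420_dc_36; /* YCbCr 4:2:0 deep color bits from EDID */
	int max_rgb_bpc;             /* plain HDMI deep color from EDID */
};

static bool deep_color_possible(int pipe_bpp, int bpc, bool ycbcr420,
				const struct sink_caps *sink)
{
	if (pipe_bpp < bpc * 3)   /* pipe must carry at least bpc per channel */
		return false;
	if (!sink->has_hdmi_sink) /* deep color is an HDMI sink feature */
		return false;
	if (ycbcr420) {           /* 4:2:0 deep color has its own EDID flags */
		if (bpc == 12)
			return sink->y420_dc_36;
		if (bpc == 10)
			return sink->y420_dc_30;
		return true;
	}
	return sink->max_rgb_bpc >= bpc;
}

int main(void)
{
	struct sink_caps sink = { true, true, false, 12 };

	printf("10bpc 4:2:0 possible: %d\n", deep_color_possible(36, 10, true, &sink));
	printf("12bpc 4:2:0 possible: %d\n", deep_color_possible(36, 12, true, &sink));
	return 0;
}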
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (!INTEL_DISPLAY_ENABLED(dev_priv)) + return connector_status_disconnected; + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); if (INTEL_GEN(dev_priv) >= 11 && @@ -2746,8 +2775,9 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, vlv_phy_pre_encoder_enable(encoder, pipe_config); /* HDMI 1.0V-2dB */ - vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, - 0x2b247878); + vlv_set_phy_signal_level(encoder, pipe_config, + 0x2b245f5f, 0x00002000, + 0x5578b83a, 0x2b247878); dig_port->set_infoframes(encoder, pipe_config->has_infoframe, @@ -2824,7 +2854,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state, /* FIXME: Program the support xxx V-dB */ /* Use 800mV-0dB */ - chv_set_phy_signal_level(encoder, 128, 102, false); + chv_set_phy_signal_level(encoder, pipe_config, 128, 102, false); dig_port->set_infoframes(encoder, pipe_config->has_infoframe, @@ -3110,6 +3140,11 @@ static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) return GMBUS_PIN_1_BXT + phy; } +static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +{ + return intel_port_to_phy(dev_priv, port) + 1; +} + static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { @@ -3147,7 +3182,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) return ddc_pin; } - if (IS_ROCKETLAKE(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + ddc_pin = dg1_port_to_ddc_pin(dev_priv, port); + else if (IS_ROCKETLAKE(dev_priv)) ddc_pin = rkl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_MCC(dev_priv)) ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); @@ -3185,7 +3222,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port) dig_port->set_infoframes = g4x_set_infoframes; dig_port->infoframes_enabled = g4x_infoframes_enabled; } else if (HAS_DDI(dev_priv)) { - if (dig_port->lspcon.active) { + if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) { dig_port->write_infoframe = lspcon_write_infoframe; dig_port->read_infoframe = lspcon_read_infoframe; dig_port->set_infoframes = lspcon_set_infoframes; @@ -3250,7 +3287,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) connector->ycbcr_420_allowed = true; - intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); intel_connector->polled = DRM_CONNECTOR_POLL_HPD; if (HAS_DDI(dev_priv)) @@ -3264,7 +3300,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, intel_hdmi->attached_connector = intel_connector; if (is_hdcp_supported(dev_priv, port)) { - int ret = intel_hdcp_init(intel_connector, + int ret = intel_hdcp_init(intel_connector, port, &intel_hdmi_hdcp_shim); if (ret) drm_dbg_kms(&dev_priv->drm, @@ -3335,6 +3371,8 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; + mutex_init(&dig_port->hdcp_mutex); + drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); @@ -3382,6 +3420,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; + intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); /* * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems * to work on real hardware. 
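[Editor's note] dg1_port_to_ddc_pin() above exploits DG1's regular layout: the GMBUS pin is simply the PHY index plus one, whereas RKL offsets into the BXT pin range. A toy version of both mappings (the enum and pin values are made up for illustration; the real ones live in the driver headers):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };
enum phy  { PHY_A, PHY_B, PHY_C, PHY_D };

#define GMBUS_PIN_1_BXT 1 /* illustrative value only */

/* On these toy platforms port N maps straight to PHY N. */
static enum phy port_to_phy(enum port port) { return (enum phy)port; }

static int dg1_port_to_ddc_pin(enum port port)
{
	return port_to_phy(port) + 1;               /* DG1: pin = phy index + 1 */
}

static int rkl_port_to_ddc_pin(enum port port)
{
	return GMBUS_PIN_1_BXT + port_to_phy(port); /* RKL: offset into BXT pins */
}

int main(void)
{
	for (enum port p = PORT_A; p <= PORT_D; p++)
		printf("port %d -> dg1 pin %d, rkl pin %d\n",
		       p, dg1_port_to_ddc_pin(p), rkl_port_to_ddc_pin(p));
	return 0;
}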
And since g4x can send infoframes to diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 5b348dcab77a..15eb0ccde76e 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -48,5 +48,7 @@ void intel_read_infoframe(struct intel_encoder *encoder, union hdmi_infoframe *frame); bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); +bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc, + bool has_hdmi_sink, bool ycbcr420_output); #endif /* __INTEL_HDMI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 3f1d7b804a66..f46a1b7190b8 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -81,33 +81,12 @@ * * It is only valid and used by digital port encoder. * - * Return pin that is associatade with @port and HDP_NONE if no pin is - * hard associated with that @port. + * Return pin that is associatade with @port. */ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, enum port port) { - enum phy phy = intel_port_to_phy(dev_priv, port); - - /* - * RKL + TGP PCH is a special case; we effectively choose the hpd_pin - * based on the DDI rather than the PHY (i.e., the last two outputs - * shold be HPD_PORT_{D,E} rather than {C,D}. Note that this differs - * from the behavior of both TGL+TGP and RKL+CMP. - */ - if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv)) - return HPD_PORT_A + port - PORT_A; - - switch (phy) { - case PHY_F: - return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F; - case PHY_A ... PHY_E: - case PHY_G ... PHY_I: - return HPD_PORT_A + phy - PHY_A; - default: - MISSING_CASE(phy); - return HPD_NONE; - } + return HPD_PORT_A + port - PORT_A; } #define HPD_STORM_DETECT_PERIOD 1000 @@ -234,6 +213,12 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) } } +static void intel_hpd_irq_setup(struct drm_i915_private *i915) +{ + if (i915->display_irqs_enabled && i915->display.hpd_irq_setup) + i915->display.hpd_irq_setup(i915); +} + static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -269,8 +254,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) dev_priv->hotplug.stats[pin].state = HPD_ENABLED; } - if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); + intel_hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); @@ -503,7 +487,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * only the one of them (DP) will have ->hpd_pulse(). */ for_each_intel_encoder(&dev_priv->drm, encoder) { - bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); enum port port = encoder->port; bool long_hpd; @@ -511,7 +494,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (!(BIT(pin) & pin_mask)) continue; - if (!has_hpd_pulse) + if (!intel_encoder_has_hpd_pulse(encoder)) continue; long_hpd = long_mask & BIT(pin); @@ -578,8 +561,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * Disable any IRQs that storms were detected on. Polling enablement * happens later in our hotplug work. 
*/ - if (storm_detected && dev_priv->display_irqs_enabled) - dev_priv->display.hpd_irq_setup(dev_priv); + if (storm_detected) + intel_hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); /* @@ -606,7 +589,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * This is a separate step from interrupt enabling to simplify the locking rules * in the driver load and resume code. * - * Also see: intel_hpd_poll_init(), which enables connector polling + * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable(). */ void intel_hpd_init(struct drm_i915_private *dev_priv) { @@ -617,19 +600,13 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) dev_priv->hotplug.stats[i].state = HPD_ENABLED; } - WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); - schedule_work(&dev_priv->hotplug.poll_init_work); - /* * Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked checks happy. */ - if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) { - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - } + spin_lock_irq(&dev_priv->irq_lock); + intel_hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); } static void i915_hpd_poll_init_work(struct work_struct *work) @@ -676,12 +653,12 @@ static void i915_hpd_poll_init_work(struct work_struct *work) } /** - * intel_hpd_poll_init - enables/disables polling for connectors with hpd + * intel_hpd_poll_enable - enable polling for connectors with hpd * @dev_priv: i915 device instance * - * This function enables polling for all connectors, regardless of whether or - * not they support hotplug detection. Under certain conditions HPD may not be - * functional. On most Intel GPUs, this happens when we enter runtime suspend. + * This function enables polling for all connectors which support HPD. + * Under certain conditions HPD may not be functional. On most Intel GPUs, + * this happens when we enter runtime suspend. * On Valleyview and Cherryview systems, this also happens when we shut off all * of the powerwells. * @@ -689,9 +666,9 @@ static void i915_hpd_poll_init_work(struct work_struct *work) * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate * worker. * - * Also see: intel_hpd_init(), which restores hpd handling. + * Also see: intel_hpd_init() and intel_hpd_poll_disable(). */ -void intel_hpd_poll_init(struct drm_i915_private *dev_priv) +void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) { WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); @@ -704,6 +681,31 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv) schedule_work(&dev_priv->hotplug.poll_init_work); } +/** + * intel_hpd_poll_disable - disable polling for connectors with hpd + * @dev_priv: i915 device instance + * + * This function disables polling for all connectors which support HPD. + * Under certain conditions HPD may not be functional. On most Intel GPUs, + * this happens when we enter runtime suspend. + * On Valleyview and Cherryview systems, this also happens when we shut off all + * of the powerwells. + * + * Since this function can get called in contexts where we're already holding + * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate + * worker. + * + * Also used during driver init to initialize connector->polled + * appropriately for all connectors. + * + * Also see: intel_hpd_init() and intel_hpd_poll_enable(). 
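[Editor's note] With hpd_pin now assigned explicitly during encoder setup, intel_hpd_pin_default() above collapses to plain enum arithmetic, and the repeated display_irqs_enabled/hpd_irq_setup checks fold into one intel_hpd_irq_setup() helper. A sketch of why the arithmetic is safe: consecutive enumerators make a port's offset from PORT_A equal its pin's offset from HPD_PORT_A (the enum values below are assumptions, not the i915 definitions):

#include <assert.h>
#include <stdio.h>

enum port    { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };
enum hpd_pin { HPD_NONE, HPD_PORT_A, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_PORT_E };

static enum hpd_pin hpd_pin_default(enum port port)
{
	/* Both enums are consecutive, so one offset maps port to pin. */
	return HPD_PORT_A + port - PORT_A;
}

int main(void)
{
	assert(hpd_pin_default(PORT_C) == HPD_PORT_C);
	for (enum port p = PORT_A; p <= PORT_E; p++)
		printf("port %d -> hpd pin %d\n", p, hpd_pin_default(p));
	return 0;
}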
+ */ +void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) +{ + WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); + schedule_work(&dev_priv->hotplug.poll_init_work); +} + void intel_hpd_init_work(struct drm_i915_private *dev_priv) { INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index a704d7c94d16..b87e95d606e6 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -14,7 +14,8 @@ struct intel_digital_port; struct intel_encoder; enum port; -void intel_hpd_poll_init(struct drm_i915_private *dev_priv); +void intel_hpd_poll_enable(struct drm_i915_private *dev_priv); +void intel_hpd_poll_disable(struct drm_i915_private *dev_priv); enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, struct intel_connector *connector); void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index dc1b35559afd..e37d45e531df 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -184,21 +184,6 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) return true; } -void lspcon_ycbcr420_config(struct drm_connector *connector, - struct intel_crtc_state *crtc_state) -{ - const struct drm_display_info *info = &connector->display_info; - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - - if (drm_mode_is_420_only(info, adjusted_mode) && - connector->ycbcr_420_allowed) { - crtc_state->port_clock /= 2; - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; - crtc_state->lspcon_downsampling = true; - } -} - static bool lspcon_probe(struct intel_lspcon *lspcon) { int retry; @@ -492,14 +477,19 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, return; } - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { - if (crtc_state->lspcon_downsampling) - frame.avi.colorspace = HDMI_COLORSPACE_YUV420; - else - frame.avi.colorspace = HDMI_COLORSPACE_YUV444; - } else { + /* + * Currently there is no interface defined to + * check user preference between RGB/YCBCR444 + * or YCBCR420. So the only possible case for + * YCBCR444 usage is driving YCBCR420 output + * with LSPCON, when pipe is configured for + * YCBCR444 output and LSPCON takes care of + * downsampling it. 
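[Editor's note] intel_hpd_poll_enable()/intel_hpd_poll_disable() above are now a symmetric pair: publish the new flag value, then kick the shared worker that re-reads it. A minimal userspace model of that publish-then-schedule pattern, using C11 atomics and a thread in place of the workqueue (all names hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool poll_enabled;

/* Stand-in for i915_hpd_poll_init_work(): reads the flag, acts on it. */
static void *poll_init_work(void *arg)
{
	(void)arg;
	bool enabled = atomic_load(&poll_enabled);

	printf("worker: connector polling now %s\n", enabled ? "on" : "off");
	return NULL;
}

static void hpd_poll_set(bool enable)
{
	pthread_t worker;

	/* Publish the new state first (the kernel uses WRITE_ONCE here)... */
	atomic_store(&poll_enabled, enable);
	/* ...then schedule the worker, which re-reads the latest value. */
	pthread_create(&worker, NULL, poll_init_work, NULL);
	pthread_join(worker, NULL);
}

int main(void)
{
	hpd_poll_set(true);  /* intel_hpd_poll_enable()  */
	hpd_poll_set(false); /* intel_hpd_poll_disable() */
	return 0;
}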
+ */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) + frame.avi.colorspace = HDMI_COLORSPACE_YUV420; + else frame.avi.colorspace = HDMI_COLORSPACE_RGB; - } drm_hdmi_avi_infoframe_quant_range(&frame.avi, conn_state->connector, @@ -525,44 +515,17 @@ u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, return 0; } -void lspcon_resume(struct intel_lspcon *lspcon) -{ - enum drm_lspcon_mode expected_mode; - - if (lspcon_wake_native_aux_ch(lspcon)) { - expected_mode = DRM_LSPCON_MODE_PCON; - lspcon_resume_in_pcon_wa(lspcon); - } else { - expected_mode = DRM_LSPCON_MODE_LS; - } - - if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON) - return; - - if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) - DRM_ERROR("LSPCON resume failed\n"); - else - DRM_DEBUG_KMS("LSPCON resume success\n"); -} - void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) { lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON); } -bool lspcon_init(struct intel_digital_port *dig_port) +static bool lspcon_init(struct intel_digital_port *dig_port) { struct intel_dp *dp = &dig_port->dp; struct intel_lspcon *lspcon = &dig_port->lspcon; - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_connector *connector = &dp->attached_connector->base; - if (!HAS_LSPCON(dev_priv)) { - DRM_ERROR("LSPCON is not supported on this platform\n"); - return false; - } - lspcon->active = false; lspcon->mode = DRM_LSPCON_MODE_INVALID; @@ -586,3 +549,37 @@ bool lspcon_init(struct intel_digital_port *dig_port) DRM_DEBUG_KMS("Success: LSPCON init\n"); return true; } + +void lspcon_resume(struct intel_digital_port *dig_port) +{ + struct intel_lspcon *lspcon = &dig_port->lspcon; + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum drm_lspcon_mode expected_mode; + + if (!intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) + return; + + if (!lspcon->active) { + if (!lspcon_init(dig_port)) { + DRM_ERROR("LSPCON init failed on port %c\n", + port_name(dig_port->base.port)); + return; + } + } + + if (lspcon_wake_native_aux_ch(lspcon)) { + expected_mode = DRM_LSPCON_MODE_PCON; + lspcon_resume_in_pcon_wa(lspcon); + } else { + expected_mode = DRM_LSPCON_MODE_LS; + } + + if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON) + return; + + if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) + DRM_ERROR("LSPCON resume failed\n"); + else + DRM_DEBUG_KMS("LSPCON resume success\n"); +} diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h index 1cffe8a42a08..b03dcb7076d8 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.h +++ b/drivers/gpu/drm/i915/display/intel_lspcon.h @@ -15,8 +15,7 @@ struct intel_digital_port; struct intel_encoder; struct intel_lspcon; -bool lspcon_init(struct intel_digital_port *dig_port); -void lspcon_resume(struct intel_lspcon *lspcon); +void lspcon_resume(struct intel_digital_port *dig_port); void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); void lspcon_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -32,7 +31,5 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state); u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); -void lspcon_ycbcr420_config(struct drm_connector *connector, - struct intel_crtc_state *crtc_state); #endif /* __INTEL_LSPCON_H__ */ diff 
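[Editor's note] lspcon_resume() above now owns the whole bring-up sequence: skip non-LSPCON ports per VBT, run lspcon_init() lazily on first resume, then settle the chip into PCON mode. A control-flow sketch with stubbed hardware accessors (every body below is a placeholder for the real AUX traffic):

#include <stdbool.h>
#include <stdio.h>

enum lspcon_mode { MODE_INVALID, MODE_LS, MODE_PCON };

struct lspcon { bool active; enum lspcon_mode mode; };

static bool bios_says_lspcon_present(void) { return true; }
static bool lspcon_init(struct lspcon *l)  { l->active = true; return true; }
static bool wake_native_aux_ch(void)       { return true; }
static enum lspcon_mode wait_mode(enum lspcon_mode m) { return m; }
static bool change_mode_to_pcon(struct lspcon *l) { l->mode = MODE_PCON; return true; }

static void lspcon_resume(struct lspcon *l)
{
	enum lspcon_mode expected;

	if (!bios_says_lspcon_present())
		return;                 /* not an LSPCON port: nothing to do */

	if (!l->active && !lspcon_init(l)) {
		printf("LSPCON init failed\n");
		return;                 /* first resume initializes on demand */
	}

	/* Waking the AUX channel tells us which mode to expect next. */
	expected = wake_native_aux_ch() ? MODE_PCON : MODE_LS;

	if (wait_mode(expected) == MODE_PCON)
		return;                 /* already in the mode we want */

	printf("LSPCON resume %s\n",
	       change_mode_to_pcon(l) ? "success" : "failed");
}

int main(void)
{
	struct lspcon l = { false, MODE_INVALID };

	lspcon_resume(&l);
	return 0;
}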
--git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 1888611244db..c6c7c0b9989b 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -371,6 +371,15 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state, intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state); } +static void intel_lvds_shutdown(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_CYCLE_DELAY_ACTIVE, 5000)) + drm_err(&dev_priv->drm, + "timed out waiting for panel power cycle delay\n"); +} + static enum drm_mode_status intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) @@ -456,12 +465,6 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, return 0; } -static enum drm_connector_status -intel_lvds_detect(struct drm_connector *connector, bool force) -{ - return connector_status_connected; -} - /* * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ @@ -490,7 +493,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { - .detect = intel_lvds_detect, + .detect = intel_panel_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, @@ -903,6 +906,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; intel_encoder->update_pipe = intel_panel_update_backlight; + intel_encoder->shutdown = intel_lvds_shutdown; intel_connector->get_hw_state = intel_connector_get_hw_state; intel_connector_attach_encoder(intel_connector, intel_encoder); diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index de995362f428..4f77cf849171 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -1007,12 +1007,8 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) int ret; ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "Failed to get panel details from OpRegion (%d)\n", - ret); + if (ret) return ret; - } ret = (panel_details >> 8) & 0xff; if (ret > 0x10) { diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 4072d7062efd..9f23bac0d792 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -40,8 +40,6 @@ #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" -#define CRC_PMIC_PWM_PERIOD_NS 21333 - void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) @@ -594,10 +592,10 @@ static u32 bxt_get_backlight(struct intel_connector *connector) static u32 pwm_get_backlight(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; - int duty_ns; + struct pwm_state state; - duty_ns = pwm_get_duty_cycle(panel->backlight.pwm); - return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS); + pwm_get_state(panel->backlight.pwm, &state); + return pwm_get_relative_duty_cycle(&state, 100); } static void lpt_set_backlight(const 
struct drm_connector_state *conn_state, u32 level) @@ -671,9 +669,9 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; - int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100); - pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS); + pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } static void @@ -842,10 +840,8 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; - /* Disable the backlight */ - intel_panel_actually_set_backlight(old_conn_state, 0); - usleep_range(2000, 3000); - pwm_disable(panel->backlight.pwm); + panel->backlight.pwm_state.enabled = false; + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -1177,9 +1173,12 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state, { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; + int level = panel->backlight.level; - pwm_enable(panel->backlight.pwm); - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + level = intel_panel_compute_brightness(connector, level); + pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); + panel->backlight.pwm_state.enabled = true; + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -1543,18 +1542,9 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); } -static u32 get_backlight_max_vbt(struct intel_connector *connector) +static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; - u32 pwm; - - if (!panel->backlight.hz_to_pwm) { - drm_dbg_kms(&dev_priv->drm, - "backlight frequency conversion not supported\n"); - return 0; - } if (pwm_freq_hz) { drm_dbg_kms(&dev_priv->drm, @@ -1567,6 +1557,22 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) pwm_freq_hz); } + return pwm_freq_hz; +} + +static u32 get_backlight_max_vbt(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv); + u32 pwm; + + if (!panel->backlight.hz_to_pwm) { + drm_dbg_kms(&dev_priv->drm, + "backlight frequency conversion not supported\n"); + return 0; + } + pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz); if (!pwm) { drm_dbg_kms(&dev_priv->drm, @@ -1891,8 +1897,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_panel *panel = &connector->panel; const char *desc; - u32 level, ns; - int retval; + u32 level; /* Get the right PWM chip for DSI backlight according to VBT */ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { 
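[Editor's note] The move to the atomic PWM API above replaces the fixed CRC_PMIC_PWM_PERIOD_NS arithmetic with relative duty cycles computed against whatever period the pwm_state carries. The two helpers are just scaled ratios; a self-contained re-derivation of the math (not the linux/pwm.h implementations):

#include <stdint.h>
#include <stdio.h>

struct pwm_state { uint64_t period_ns; uint64_t duty_ns; int enabled; };

/* level = duty / period on a 0..scale range, rounded to nearest */
static unsigned int get_relative_duty(const struct pwm_state *s, unsigned int scale)
{
	return (unsigned int)((s->duty_ns * scale + s->period_ns / 2) / s->period_ns);
}

/* duty = level / scale of the period */
static void set_relative_duty(struct pwm_state *s, unsigned int level, unsigned int scale)
{
	s->duty_ns = s->period_ns * level / scale;
}

int main(void)
{
	/* 21333 ns was the old fixed CRC PMIC period (~46.9 kHz). */
	struct pwm_state s = { .period_ns = 21333, .duty_ns = 0, .enabled = 1 };

	set_relative_duty(&s, 75, 100); /* request 75% backlight */
	printf("duty = %llu ns -> level = %u%%\n",
	       (unsigned long long)s.duty_ns, get_relative_duty(&s, 100));
	return 0;
}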
@@ -1910,30 +1915,28 @@ static int pwm_setup_backlight(struct intel_connector *connector, return -ENODEV; } - /* - * FIXME: pwm_apply_args() should be removed when switching to - * the atomic PWM API. - */ - pwm_apply_args(panel->backlight.pwm); - - panel->backlight.min = 0; /* 0% */ panel->backlight.max = 100; /* 100% */ - level = intel_panel_compute_brightness(connector, 100); - ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100); + panel->backlight.min = get_backlight_min_vbt(connector); - retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS); - if (retval < 0) { - drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n"); - pwm_put(panel->backlight.pwm); - panel->backlight.pwm = NULL; - return retval; - } + if (pwm_is_enabled(panel->backlight.pwm)) { + /* PWM is already enabled, use existing settings */ + pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state); + + level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state, + 100); + level = intel_panel_compute_brightness(connector, level); + panel->backlight.level = clamp(level, panel->backlight.min, + panel->backlight.max); + panel->backlight.enabled = true; - level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100, - CRC_PMIC_PWM_PERIOD_NS); - panel->backlight.level = - intel_panel_compute_brightness(connector, level); - panel->backlight.enabled = panel->backlight.level != 0; + drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", + NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, + get_vbt_pwm_freq(dev_priv), level); + } else { + /* Set period from VBT frequency, leave other settings at 0. */ + panel->backlight.pwm_state.period = + NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv); + } drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n", desc); @@ -2092,6 +2095,17 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) } } +enum drm_connector_status +intel_panel_detect(struct drm_connector *connector, bool force) +{ + struct drm_i915_private *i915 = to_i915(connector->dev); + + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + + return connector_status_connected; +} + int intel_panel_init(struct intel_panel *panel, struct drm_display_mode *fixed_mode, struct drm_display_mode *downclock_mode) diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 968b95281cb4..5b813fe90557 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -23,6 +23,8 @@ int intel_panel_init(struct intel_panel *panel, struct drm_display_mode *fixed_mode, struct drm_display_mode *downclock_mode); void intel_panel_fini(struct intel_panel *panel); +enum drm_connector_status +intel_panel_detect(struct drm_connector *connector, bool force); void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 2b004ee9619c..1576c3722d0b 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -91,19 +91,14 @@ static bool psr_global_enabled(struct drm_i915_private *i915) } } -static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *crtc_state) +static bool psr2_global_enabled(struct drm_i915_private *dev_priv) { - /* Cannot enable 
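[Editor's note] pwm_setup_backlight() above either inherits a firmware-enabled PWM's live state or derives the period from the VBT frequency as NSEC_PER_SEC / freq_hz. A worked example of that arithmetic plus the level clamp (the 200 Hz frequency and 5% floor are illustrative):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	unsigned long vbt_freq_hz = 200; /* say the VBT asks for 200 Hz */
	unsigned long period_ns = NSEC_PER_SEC / vbt_freq_hz;

	/* An inherited duty level gets clamped into the driver's range,
	 * with a VBT-provided minimum (here 5%) as the floor. */
	unsigned long level = clamp_ul(103, 5, 100);

	printf("period = %lu ns (%lu Hz), level = %lu%%\n",
	       period_ns, NSEC_PER_SEC / period_ns, level);
	return 0;
}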
DSC and PSR2 simultaneously */ - drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable && - crtc_state->has_psr2); - switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; default: - return crtc_state->has_psr2; + return true; } } @@ -555,7 +550,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (dev_priv->psr.psr2_sel_fetch_enabled) { /* WA 1408330847 */ - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, @@ -729,6 +724,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (!psr2_global_enabled(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n"); + return false; + } + /* * DSC and PSR2 cannot be enabled simultaneously. If a requested * resolution requires DSC to be enabled, priority is given to DSC @@ -817,8 +817,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, if (intel_dp != dev_priv->psr.dp) return; - if (!psr_global_enabled(dev_priv)) + if (!psr_global_enabled(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); return; + } + /* * HSW spec explicitly says PSR is tied to port A. * BDW+ platforms have a instance of PSR registers per transcoder but @@ -942,7 +945,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); } - if (HAS_PSR_HW_TRACKING(dev_priv)) + if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, dev_priv->psr.psr2_sel_fetch_enabled ? IGNORE_PSR2_HW_TRACKING : 0); @@ -959,7 +962,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled); - dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); + dev_priv->psr.psr2_enabled = crtc_state->has_psr2; dev_priv->psr.busy_frontbuffer_bits = 0; dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; @@ -1029,15 +1032,7 @@ void intel_psr_enable(struct intel_dp *intel_dp, drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp); mutex_lock(&dev_priv->psr.lock); - - if (!psr_global_enabled(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); - goto unlock; - } - intel_psr_enable_locked(dev_priv, crtc_state, conn_state); - -unlock: mutex_unlock(&dev_priv->psr.lock); } @@ -1109,7 +1104,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) /* WA 1408330847 */ if (dev_priv->psr.psr2_sel_fetch_enabled && - (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || + (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) || IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); @@ -1152,7 +1147,21 @@ void intel_psr_disable(struct intel_dp *intel_dp, static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 9) + if (IS_TIGERLAKE(dev_priv)) + /* + * Writes to CURSURFLIVE in TGL are causing IOMMU errors and + * visual glitches that are often reproduced when executing + * CPU intensive workloads while a eDP 4K panel is attached. 
+ * + * Manually exiting PSR causes the frontbuffer to be updated + * without glitches and the IOMMU errors are also gone but + * this comes at the cost of less time with PSR active. + * + * So using this workaround until this issue is root caused + * and a better fix is found. + */ + intel_psr_exit(dev_priv); + else if (INTEL_GEN(dev_priv) >= 9) /* * Display WA #0884: skl+ * This documented WA for bxt can be safely applied @@ -1171,6 +1180,38 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) intel_psr_exit(dev_priv); } +void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + u32 val; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + val = plane_state ? plane_state->ctl : 0; + val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE; + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val); + if (!val || plane->id == PLANE_CURSOR) + return; + + val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1; + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); + + val = plane_state->color_plane[color_plane].y << 16; + val |= plane_state->color_plane[color_plane].x; + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), + val); + + /* Sizes are 0 based */ + val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16; + val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); +} + void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1185,16 +1226,91 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st crtc_state->psr2_man_track_ctl); } -void intel_psr2_sel_fetch_update(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, + struct drm_rect *clip, bool full_update) +{ + u32 val = PSR2_MAN_TRK_CTL_ENABLE; + + if (full_update) { + val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; + goto exit; + } + + if (clip->y1 == -1) + goto exit; + + val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; + val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); + val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1); +exit: + crtc_state->psr2_man_track_ctl = val; +} + +static void clip_area_update(struct drm_rect *overlap_damage_area, + struct drm_rect *damage_area) +{ + if (overlap_damage_area->y1 == -1) { + overlap_damage_area->y1 = damage_area->y1; + overlap_damage_area->y2 = damage_area->y2; + return; + } + + if (damage_area->y1 < overlap_damage_area->y1) + overlap_damage_area->y1 = damage_area->y1; + + if (damage_area->y2 > overlap_damage_area->y2) + overlap_damage_area->y2 = damage_area->y2; +} + +int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct intel_plane_state *new_plane_state, *old_plane_state; + struct drm_rect pipe_clip = { .y1 = -1 }; + struct intel_plane *plane; + bool full_update = false; + int i, ret; if (!crtc_state->enable_psr2_sel_fetch) - return; + return 0; + + ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); + 
if (ret) + return ret; + + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) { + struct drm_rect temp; + + if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) + continue; + + /* + * TODO: Not clear how to handle planes with negative position, + * also planes are not updated if they have a negative X + * position so for now doing a full update in this cases + */ + if (new_plane_state->uapi.dst.y1 < 0 || + new_plane_state->uapi.dst.x1 < 0) { + full_update = true; + break; + } + + if (!new_plane_state->uapi.visible) + continue; - crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE | - PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; + /* + * For now doing a selective fetch in the whole plane area, + * optimizations will come in the future. + */ + temp.y1 = new_plane_state->uapi.dst.y1; + temp.y2 = new_plane_state->uapi.dst.y2; + clip_area_update(&pipe_clip, &temp); + } + + psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update); + return 0; } /** @@ -1222,8 +1338,8 @@ void intel_psr_update(struct intel_dp *intel_dp, mutex_lock(&dev_priv->psr.lock); - enable = crtc_state->has_psr && psr_global_enabled(dev_priv); - psr2_enable = intel_psr2_enabled(dev_priv, crtc_state); + enable = crtc_state->has_psr; + psr2_enable = crtc_state->has_psr2; if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ @@ -1320,11 +1436,12 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) { + struct drm_connector_list_iter conn_iter; struct drm_device *dev = &dev_priv->drm; struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; - struct intel_crtc *crtc; - int err; + struct drm_connector *conn; + int err = 0; state = drm_atomic_state_alloc(dev); if (!state) @@ -1334,25 +1451,38 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) state->acquire_ctx = &ctx; retry: - for_each_intel_crtc(dev, crtc) { - struct intel_crtc_state *crtc_state = - intel_atomic_get_crtc_state(state, crtc); - if (IS_ERR(crtc_state)) { - err = PTR_ERR(crtc_state); - goto error; + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(conn, &conn_iter) { + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + + if (conn->connector_type != DRM_MODE_CONNECTOR_eDP) + continue; + + conn_state = drm_atomic_get_connector_state(state, conn); + if (IS_ERR(conn_state)) { + err = PTR_ERR(conn_state); + break; } - if (crtc_state->hw.active && crtc_state->has_psr) { - /* Mark mode as changed to trigger a pipe->update() */ - crtc_state->uapi.mode_changed = true; + if (!conn_state->crtc) + continue; + + crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc); + if (IS_ERR(crtc_state)) { + err = PTR_ERR(crtc_state); break; } + + /* Mark mode as changed to trigger a pipe->update() */ + crtc_state->mode_changed = true; } + drm_connector_list_iter_end(&conn_iter); - err = drm_atomic_commit(state); + if (err == 0) + err = drm_atomic_commit(state); -error: if (err == -EDEADLK) { drm_atomic_state_clear(state); err = drm_modeset_backoff(&ctx); @@ -1754,7 +1884,7 @@ void intel_psr_atomic_check(struct drm_connector *connector, return; intel_connector = to_intel_connector(connector); - dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector)); + dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder)); if (dev_priv->psr.dp != &dig_port->dp) return; diff 
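[Editor's note] The selective-fetch path above reduces all damaged planes to a single vertical span (clip_area_update() keeps a running min/max of y1/y2, with y1 == -1 meaning "empty"), and psr2_man_trk_ctl_calc() converts that span into the 4-scanline-granular, 1-based region addresses the manual tracking register wants. A compact model of both steps (register bit packing elided; only the address math is shown):

#include <stdio.h>

struct rect { int y1, y2; };

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Grow the running damage span; y1 == -1 marks an empty accumulator. */
static void clip_area_update(struct rect *acc, const struct rect *dmg)
{
	if (acc->y1 == -1) {
		*acc = *dmg;
		return;
	}
	if (dmg->y1 < acc->y1)
		acc->y1 = dmg->y1;
	if (dmg->y2 > acc->y2)
		acc->y2 = dmg->y2;
}

int main(void)
{
	struct rect pipe_clip = { .y1 = -1, .y2 = -1 };
	struct rect planes[] = { { 64, 200 }, { 128, 540 } };

	for (unsigned int i = 0; i < sizeof(planes) / sizeof(planes[0]); i++)
		clip_area_update(&pipe_clip, &planes[i]);

	/* SU region addresses are in 4-scanline blocks and 1-based. */
	int start = pipe_clip.y1 / 4 + 1;
	int end   = DIV_ROUND_UP(pipe_clip.y2, 4) + 1;

	printf("span %d..%d -> SU region %d..%d\n",
	       pipe_clip.y1, pipe_clip.y2, start, end);
	return 0;
}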
--git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 6a83c8e682e6..3eca9dcec3c0 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -15,6 +15,8 @@ struct intel_crtc_state; struct intel_dp; struct intel_crtc; struct intel_atomic_state; +struct intel_plane_state; +struct intel_plane; #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) void intel_psr_init_dpcd(struct intel_dp *intel_dp); @@ -45,8 +47,12 @@ void intel_psr_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp); -void intel_psr2_sel_fetch_update(struct intel_atomic_state *state, - struct intel_crtc *crtc); +int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); +void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 5e9fb349c829..4eaa4aa86ecd 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2084,14 +2084,18 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo, static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { - u16 response; + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; + u16 response; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, &response, 2)) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 6b72223981be..b6deeb338477 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -47,6 +47,7 @@ #include "intel_frontbuffer.h" #include "intel_pm.h" #include "intel_psr.h" +#include "intel_dsi.h" #include "intel_sprite.h" int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, @@ -93,6 +94,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) DEFINE_WAIT(wait); u32 psr_status; + if (new_crtc_state->uapi.async_flip) + return; + vblank_start = adjusted_mode->crtc_vblank_start; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) vblank_start = DIV_ROUND_UP(vblank_start, 2); @@ -200,8 +204,19 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) ktime_t end_vbl_time = ktime_get(); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + if (new_crtc_state->uapi.async_flip) + return; + trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end); + /* + * Incase of mipi dsi command mode, we need to set frame update + * request for every commit. 
+ */ + if (INTEL_GEN(dev_priv) >= 11 && + intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI)) + icl_dsi_frame_update(new_crtc_state); + /* We're still in the vblank-evade critical section, this can't race. * Would be slightly nice to just grab the vblank count and arm the * event outside of the critical section - the spinlock might spin for a @@ -429,6 +444,7 @@ skl_program_scaler(struct intel_plane *plane, u16 y_hphase, uv_rgb_hphase; u16 y_vphase, uv_rgb_vphase; int hscale, vscale; + u32 ps_ctrl; hscale = drm_rect_calc_hscale(&plane_state->uapi.src, &plane_state->uapi.dst, @@ -455,8 +471,13 @@ skl_program_scaler(struct intel_plane *plane, uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); } - intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), - PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode); + ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0); + ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode; + + skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0, + plane_state->hw.scaling_filter); + + intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id), PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id), @@ -604,6 +625,29 @@ icl_program_input_csc(struct intel_plane *plane, } static void +skl_plane_async_flip(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + unsigned long irqflags; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + u32 surf_addr = plane_state->color_plane[0].offset; + u32 plane_ctl = plane_state->ctl; + + plane_ctl |= skl_plane_ctl_crtc(crtc_state); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), + intel_plane_ggtt_offset(plane_state) + surf_addr); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void skl_program_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, @@ -617,8 +661,6 @@ skl_program_plane(struct intel_plane *plane, u32 stride = skl_plane_stride(plane_state, color_plane); const struct drm_framebuffer *fb = plane_state->hw.fb; int aux_plane = intel_main_to_aux_plane(fb, color_plane); - u32 aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr; - u32 aux_stride = skl_plane_stride(plane_state, aux_plane); int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 x = plane_state->color_plane[color_plane].x; @@ -626,7 +668,7 @@ skl_program_plane(struct intel_plane *plane, u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; u8 alpha = plane_state->hw.alpha >> 8; - u32 plane_color_ctl = 0; + u32 plane_color_ctl = 0, aux_dist = 0; unsigned long irqflags; u32 keymsk, keymax; u32 plane_ctl = plane_state->ctl; @@ -653,6 +695,13 @@ skl_program_plane(struct intel_plane *plane, crtc_y = 0; } + if (aux_plane) { + aux_dist = plane_state->color_plane[aux_plane].offset - surf_addr; + + if (INTEL_GEN(dev_priv) < 12) + aux_dist |= skl_plane_stride(plane_state, aux_plane); + } + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); @@ -661,8 
+710,6 @@ skl_program_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); - if (INTEL_GEN(dev_priv) < 12) - aux_dist |= aux_stride; intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); if (icl_is_hdr_plane(dev_priv, plane_id)) @@ -690,6 +737,9 @@ skl_program_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x); + if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing @@ -1626,8 +1676,7 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, hscale = drm_rect_calc_hscale(&plane_state->uapi.src, &plane_state->uapi.dst, 0, INT_MAX); - if (hscale < 0x10000) - return pixel_rate; + hscale = max(hscale, 0x10000u); /* Decimation steps at 2x,4x,8x,16x */ decimate = ilog2(hscale >> 16); @@ -1640,8 +1689,8 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, limit -= decimate; /* -10% for RGB */ - if (fb->format->cpp[0] >= 4) - limit--; /* -10% for RGB */ + if (!fb->format->is_yuv) + limit--; /* * We should also do -10% if sprite scaling is enabled @@ -2843,9 +2892,9 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, enum plane_id plane_id) { - /* Wa_14010477008:tgl[a0..c0],rkl[all] */ - if (IS_ROCKETLAKE(dev_priv) || - IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) + /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ + if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) || + IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) return false; return plane_id < PLANE_SPRITE4; @@ -3090,6 +3139,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->get_hw_state = skl_plane_get_hw_state; plane->check_plane = skl_plane_check; plane->min_cdclk = skl_plane_min_cdclk; + plane->async_flip = skl_plane_async_flip; if (INTEL_GEN(dev_priv) >= 11) formats = icl_get_plane_formats(dev_priv, pipe, @@ -3161,6 +3211,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) >= 12) drm_plane_enable_fb_damage_clips(&plane->base); + if (INTEL_GEN(dev_priv) >= 10) + drm_plane_create_scaling_filter_property(&plane->base, + BIT(DRM_SCALING_FILTER_DEFAULT) | + BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 8f67aef18b2d..4346bc1a747a 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -228,9 +228,9 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, return; /* If live status mismatches the VBT flag, trust the live status. 
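[Editor's note] g4x_sprite_min_cdclk() above works in 16.16 fixed point: clamping hscale to 0x10000 means "treat upscales as 1:1", and ilog2(hscale >> 16) counts the 2x decimation steps a downscale needs. A worked model of that arithmetic (ilog2 here is a plain bit scan standing in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Integer log2: index of the highest set bit (kernel ilog2() equivalent). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* 16.16 fixed point: 0x10000 is 1.0. A 5x downscale is 5 << 16. */
	uint32_t hscale = 5 << 16;

	if (hscale < 0x10000u)
		hscale = 0x10000u; /* upscales cost no extra decimation */

	unsigned int decimate = ilog2_u32(hscale >> 16);

	printf("hscale %.2f -> %u decimation step(s) (2x,4x,8x,16x)\n",
	       hscale / 65536.0, decimate);
	return 0;
}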
*/ - drm_err(&i915->drm, - "Port %s: live status %08x mismatches the legacy port flag, fix flag\n", - dig_port->tc_port_name, live_status_mask); + drm_dbg_kms(&i915->drm, + "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n", + dig_port->tc_port_name, live_status_mask, valid_hpd_mask); dig_port->tc_legacy_port = !dig_port->tc_legacy_port; } @@ -652,7 +652,7 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) enum port port = dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(i915, port); - if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE)) + if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE)) return; snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name), diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 777032d9697b..7a7b99b015a5 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1706,6 +1706,9 @@ intel_tv_detect(struct drm_connector *connector, drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; + if (force) { struct intel_load_detect_pipe tmp; int ret; diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 6faabd4f6d49..49b4b5fca941 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -293,8 +293,12 @@ struct bdb_general_features { #define DVO_PORT_HDMIE 12 /* 193 */ #define DVO_PORT_DPF 13 /* N/A */ #define DVO_PORT_HDMIF 14 /* N/A */ -#define DVO_PORT_DPG 15 -#define DVO_PORT_HDMIG 16 +#define DVO_PORT_DPG 15 /* 217 */ +#define DVO_PORT_HDMIG 16 /* 217 */ +#define DVO_PORT_DPH 17 /* 217 */ +#define DVO_PORT_HDMIH 18 /* 217 */ +#define DVO_PORT_DPI 19 /* 217 */ +#define DVO_PORT_HDMII 20 /* 217 */ #define DVO_PORT_MIPIA 21 /* 171 */ #define DVO_PORT_MIPIB 22 /* 171 */ #define DVO_PORT_MIPIC 23 /* 171 */ @@ -330,6 +334,8 @@ enum vbt_gmbus_ddi { #define DP_AUX_E 0x50 #define DP_AUX_F 0x60 #define DP_AUX_G 0x70 +#define DP_AUX_H 0x80 +#define DP_AUX_I 0x90 #define VBT_DP_MAX_LINK_RATE_HBR3 0 #define VBT_DP_MAX_LINK_RATE_HBR2 1 @@ -776,7 +782,7 @@ struct lfp_backlight_data_entry { u8 active_low_pwm:1; u8 obsolete1:5; u16 pwm_freq_hz; - u8 min_brightness; + u8 min_brightness; /* Obsolete from 234+ */ u8 obsolete2; u8 obsolete3; } __packed; @@ -786,11 +792,19 @@ struct lfp_backlight_control_method { u8 controller:4; } __packed; +struct lfp_brightness_level { + u16 level; + u16 reserved; +} __packed; + struct bdb_lfp_backlight_data { u8 entry_size; struct lfp_backlight_data_entry data[16]; - u8 level[16]; + u8 level[16]; /* Obsolete from 234+ */ struct lfp_backlight_control_method backlight_control[16]; + struct lfp_brightness_level brightness_level[16]; /* 234+ */ + struct lfp_brightness_level brightness_min_level[16]; /* 234+ */ + u8 brightness_precision_bits[16]; /* 236+ */ } __packed; /* @@ -821,6 +835,7 @@ struct bdb_lfp_power { u16 lace_enabled_status; struct agressiveness_profile_entry aggressivenes[16]; u16 hobl; /* 232+ */ + u16 vrr_feature_enabled; /* 233+ */ } __packed; /* diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 052e0b31a2da..d52f9c177908 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -985,6 +985,13 @@ static void intel_dsi_post_disable(struct 
intel_atomic_state *state, intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay); } +static void intel_dsi_shutdown(struct intel_encoder *encoder) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + + intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay); +} + static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { @@ -1585,6 +1592,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs }; static const struct drm_connector_funcs intel_dsi_connector_funcs = { + .detect = intel_panel_detect, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, @@ -1842,6 +1850,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_encoder->get_hw_state = intel_dsi_get_hw_state; intel_encoder->get_config = intel_dsi_get_config; intel_encoder->update_pipe = intel_panel_update_backlight; + intel_encoder->shutdown = intel_dsi_shutdown; intel_connector->get_hw_state = intel_connector_get_hw_state; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index d0a514301575..4070b00c3690 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -483,7 +483,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) { drm_err(&dev_priv->drm, - "Cant get a suitable ratio from DSI PLL ratios\n"); + "Can't get a suitable ratio from DSI PLL ratios\n"); return -ECHRNG; } else drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n"); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index cf5ecbde9e06..4fd38101bb56 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -390,24 +390,6 @@ __context_engines_static(const struct i915_gem_context *ctx) return rcu_dereference_protected(ctx->engines, true); } -static bool __reset_engine(struct intel_engine_cs *engine) -{ - struct intel_gt *gt = engine->gt; - bool success = false; - - if (!intel_has_reset_engine(gt)) - return false; - - if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags)) { - success = intel_engine_reset(engine, NULL) == 0; - clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags); - } - - return success; -} - static void __reset_context(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { @@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel_engine_cs *engine) * kill the banned context, we fallback to doing a local reset * instead. */ - if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && - !intel_engine_pulse(engine)) - return true; - - /* If we are unable to send a pulse, try resetting this engine. 
*/ - return __reset_engine(engine); + return intel_engine_pulse(engine) == 0; } static bool @@ -460,8 +437,8 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active) spin_lock(&locked->active.lock); } - if (!i915_request_completed(rq)) { - if (i915_request_is_active(rq) && rq->fence.error != -EIO) + if (i915_request_is_active(rq)) { + if (!i915_request_completed(rq)) *active = locked; ret = true; } @@ -479,13 +456,26 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) if (!ce->timeline) return NULL; + /* + * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference + * to the request to prevent it being transferred to a new timeline + * (and onto a new timeline->requests list). + */ rcu_read_lock(); - list_for_each_entry_rcu(rq, &ce->timeline->requests, link) { - if (i915_request_is_active(rq) && i915_request_completed(rq)) - continue; + list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { + bool found; + + /* timeline is already completed upto this point? */ + if (!i915_request_get_rcu(rq)) + break; /* Check with the backend if the request is inflight */ - if (__active_engine(rq, &engine)) + found = true; + if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)) + found = __active_engine(rq, &engine); + + i915_request_put(rq); + if (found) break; } rcu_read_unlock(); @@ -493,7 +483,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) return engine; } -static void kill_engines(struct i915_gem_engines *engines) +static void kill_engines(struct i915_gem_engines *engines, bool ban) { struct i915_gem_engines_iter it; struct intel_context *ce; @@ -508,7 +498,7 @@ static void kill_engines(struct i915_gem_engines *engines) for_each_gem_engine(ce, engines, it) { struct intel_engine_cs *engine; - if (intel_context_set_banned(ce)) + if (ban && intel_context_set_banned(ce)) continue; /* @@ -521,7 +511,7 @@ static void kill_engines(struct i915_gem_engines *engines) engine = active_engine(ce); /* First attempt to gracefully cancel the context */ - if (engine && !__cancel_engine(engine)) + if (engine && !__cancel_engine(engine) && ban) /* * If we are unable to send a preemptive pulse to bump * the context from the GPU, we have to resort to a full @@ -531,8 +521,10 @@ static void kill_engines(struct i915_gem_engines *engines) } } -static void kill_stale_engines(struct i915_gem_context *ctx) +static void kill_context(struct i915_gem_context *ctx) { + bool ban = (!i915_gem_context_is_persistent(ctx) || + !ctx->i915->params.enable_hangcheck); struct i915_gem_engines *pos, *next; spin_lock_irq(&ctx->stale.lock); @@ -545,7 +537,7 @@ static void kill_stale_engines(struct i915_gem_context *ctx) spin_unlock_irq(&ctx->stale.lock); - kill_engines(pos); + kill_engines(pos, ban); spin_lock_irq(&ctx->stale.lock); GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); @@ -557,11 +549,6 @@ static void kill_stale_engines(struct i915_gem_context *ctx) spin_unlock_irq(&ctx->stale.lock); } -static void kill_context(struct i915_gem_context *ctx) -{ - kill_stale_engines(ctx); -} - static void engines_idle_release(struct i915_gem_context *ctx, struct i915_gem_engines *engines) { @@ -596,7 +583,7 @@ static void engines_idle_release(struct i915_gem_context *ctx, kill: if (list_empty(&engines->link)) /* raced, already closed */ - kill_engines(engines); + kill_engines(engines, true); i915_sw_fence_commit(&engines->fence); } @@ -654,9 +641,7 @@ static void context_close(struct i915_gem_context *ctx) * case we opt to forcibly kill off all 
remaining requests on * context close. */ - if (!i915_gem_context_is_persistent(ctx) || - !ctx->i915->params.enable_hangcheck) - kill_context(ctx); + kill_context(ctx); i915_gem_context_put(ctx); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 27fddc22a7c6..0dd477e56573 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -48,12 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme src = sg_next(src); } - if (!dma_map_sg_attrs(attachment->dev, - st->sgl, st->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { - ret = -ENOMEM; + ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC); + if (ret) goto err_free_sg; - } return st; @@ -73,23 +70,28 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, { struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); - dma_unmap_sg_attrs(attachment->dev, - sg->sgl, sg->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sg); kfree(sg); i915_gem_object_unpin_pages(obj); } -static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) +static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + void *vaddr; - return i915_gem_object_pin_map(obj, I915_MAP_WB); + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + dma_buf_map_set_vaddr(map, vaddr); + + return 0; } -static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5509946f1a1d..1904e6e5ea64 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -287,8 +287,8 @@ struct i915_execbuffer { u64 invalid_flags; /** Set of execobj.flags that are invalid */ u32 context_flags; /** Set of execobj.flags to insert from the ctx */ + u64 batch_len; /** Length of batch within object */ u32 batch_start_offset; /** Location within object of batch */ - u32 batch_len; /** Length of batch within object */ u32 batch_flags; /** Flags composed for emit_bb_start() */ struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */ @@ -871,6 +871,10 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) if (eb->batch_len == 0) eb->batch_len = eb->batch->vma->size - eb->batch_start_offset; + if (unlikely(eb->batch_len == 0)) { /* impossible! 
*/ + drm_dbg(&i915->drm, "Invalid batch length\n"); + return -EINVAL; + } return 0; @@ -2267,8 +2271,8 @@ struct eb_parse_work { struct i915_vma *batch; struct i915_vma *shadow; struct i915_vma *trampoline; - unsigned int batch_offset; - unsigned int batch_length; + unsigned long batch_offset; + unsigned long batch_length; }; static int __eb_parse(struct dma_fence_work *work) @@ -2338,6 +2342,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, struct eb_parse_work *pw; int err; + GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset)); + GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length)); + pw = kzalloc(sizeof(*pw), GFP_KERNEL); if (!pw) return -ENOMEM; @@ -2421,7 +2428,7 @@ static int eb_parse(struct i915_execbuffer *eb) struct drm_i915_private *i915 = eb->i915; struct intel_gt_buffer_pool_node *pool = eb->batch_pool; struct i915_vma *shadow, *trampoline, *batch; - unsigned int len; + unsigned long len; int err; if (!eb_use_cmdparser(eb)) { @@ -2446,6 +2453,8 @@ static int eb_parse(struct i915_execbuffer *eb) } else { len += I915_CMD_PARSER_TRAMPOLINE_SIZE; } + if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */ + return -EINVAL; if (!pool) { pool = intel_gt_get_buffer_pool(eb->engine->gt, len); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index c8421fd9d2dc..3389ac972d16 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -39,9 +39,18 @@ static struct i915_global_object { struct kmem_cache *slab_objects; } global; +static const struct drm_gem_object_funcs i915_gem_object_funcs; + struct drm_i915_gem_object *i915_gem_object_alloc(void) { - return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL); + struct drm_i915_gem_object *obj; + + obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL); + if (!obj) + return NULL; + obj->base.funcs = &i915_gem_object_funcs; + + return obj; } void i915_gem_object_free(struct drm_i915_gem_object *obj) @@ -101,7 +110,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); } -void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) +static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) { struct drm_i915_gem_object *obj = to_intel_bo(gem); struct drm_i915_file_private *fpriv = file->driver_priv; @@ -264,7 +273,7 @@ static void __i915_gem_free_work(struct work_struct *work) i915_gem_flush_free_objects(i915); } -void i915_gem_free_object(struct drm_gem_object *gem_obj) +static void i915_gem_free_object(struct drm_gem_object *gem_obj) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_i915_private *i915 = to_i915(obj->base.dev); @@ -403,6 +412,12 @@ int __init i915_global_objects_init(void) return 0; } +static const struct drm_gem_object_funcs i915_gem_object_funcs = { + .free = i915_gem_free_object, + .close = i915_gem_close_object, + .export = i915_gem_prime_export, +}; + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/huge_gem_object.c" #include "selftests/huge_pages.c" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index d46db8d8f38e..eaf3d4147be0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -38,9 +38,6 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, int i915_gem_object_attach_phys(struct 
drm_i915_gem_object *obj, int align); -void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); -void i915_gem_free_object(struct drm_gem_object *obj); - void i915_gem_flush_free_objects(struct drm_i915_private *i915); struct sg_table * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index d93eb36160c9..aee7ad3cc3c6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -364,7 +364,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src, vma[1] = i915_vma_instance(dst, vm, NULL); if (IS_ERR(vma[1])) - return PTR_ERR(vma); + return PTR_ERR(vma[1]); i915_gem_ww_ctx_init(&ww, true); intel_engine_pm_get(ce->engine); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index e8a083743e09..f60ca6dc911f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -162,8 +162,6 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr) { if (is_vmalloc_addr(ptr)) vunmap(ptr); - else - kunmap(kmap_to_page(ptr)); } struct sg_table * @@ -234,50 +232,40 @@ unlock: return err; } -static inline pte_t iomap_pte(resource_size_t base, - dma_addr_t offset, - pgprot_t prot) -{ - return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot)); -} - /* The 'mapping' part of i915_gem_object_pin_map() below */ -static void *i915_gem_object_map(struct drm_i915_gem_object *obj, - enum i915_map_type type) +static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, + enum i915_map_type type) { - unsigned long n_pte = obj->base.size >> PAGE_SHIFT; - struct sg_table *sgt = obj->mm.pages; - pte_t *stack[32], **mem; - struct vm_struct *area; + unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i; + struct page *stack[32], **pages = stack, *page; + struct sgt_iter iter; pgprot_t pgprot; - - if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC) - return NULL; - - /* A single page can always be kmapped */ - if (n_pte == 1 && type == I915_MAP_WB) - return kmap(sg_page(sgt->sgl)); - - mem = stack; - if (n_pte > ARRAY_SIZE(stack)) { - /* Too big for stack -- allocate temporary array instead */ - mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL); - if (!mem) - return NULL; - } - - area = alloc_vm_area(obj->base.size, mem); - if (!area) { - if (mem != stack) - kvfree(mem); - return NULL; - } + void *vaddr; switch (type) { default: MISSING_CASE(type); fallthrough; /* to use PAGE_KERNEL anyway */ case I915_MAP_WB: + /* + * On 32b, highmem using a finite set of indirect PTE (i.e. + * vmap) to provide virtual mappings of the high pages. + * As these are finite, map_new_virtual() must wait for some + * other kmap() to finish when it runs out. If we map a large + * number of objects, there is no method for it to tell us + * to release the mappings, and we deadlock. + * + * However, if we make an explicit vmap of the page, that + * uses a larger vmalloc arena, and also has the ability + * to tell us to release unwanted mappings. Most importantly, + * it will fail and propagate an error instead of waiting + * forever. + * + * So if the page is beyond the 32b boundary, make an explicit + * vmap. 
+ */ + if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl))) + return page_address(sg_page(obj->mm.pages->sgl)); pgprot = PAGE_KERNEL; break; case I915_MAP_WC: @@ -285,30 +273,50 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj, break; } - if (i915_gem_object_has_struct_page(obj)) { - struct sgt_iter iter; - struct page *page; - pte_t **ptes = mem; + if (n_pages > ARRAY_SIZE(stack)) { + /* Too big for stack -- allocate temporary array instead */ + pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return NULL; + } - for_each_sgt_page(page, iter, sgt) - **ptes++ = mk_pte(page, pgprot); - } else { - resource_size_t iomap; - struct sgt_iter iter; - pte_t **ptes = mem; - dma_addr_t addr; + i = 0; + for_each_sgt_page(page, iter, obj->mm.pages) + pages[i++] = page; + vaddr = vmap(pages, n_pages, 0, pgprot); + if (pages != stack) + kvfree(pages); + return vaddr; +} - iomap = obj->mm.region->iomap.base; - iomap -= obj->mm.region->region.start; +static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, + enum i915_map_type type) +{ + resource_size_t iomap = obj->mm.region->iomap.base - + obj->mm.region->region.start; + unsigned long n_pfn = obj->base.size >> PAGE_SHIFT; + unsigned long stack[32], *pfns = stack, i; + struct sgt_iter iter; + dma_addr_t addr; + void *vaddr; + + if (type != I915_MAP_WC) + return NULL; - for_each_sgt_daddr(addr, iter, sgt) - **ptes++ = iomap_pte(iomap, addr, pgprot); + if (n_pfn > ARRAY_SIZE(stack)) { + /* Too big for stack -- allocate temporary array instead */ + pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL); + if (!pfns) + return NULL; } - if (mem != stack) - kvfree(mem); - - return area->addr; + i = 0; + for_each_sgt_daddr(addr, iter, obj->mm.pages) + pfns[i++] = (iomap + addr) >> PAGE_SHIFT; + vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO)); + if (pfns != stack) + kvfree(pfns); + return vaddr; } /* get, pin, and map the pages of the object into kernel space */ @@ -360,7 +368,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, } if (!ptr) { - ptr = i915_gem_object_map(obj, type); + if (GEM_WARN_ON(type == I915_MAP_WC && + !static_cpu_has(X86_FEATURE_PAT))) + ptr = NULL; + else if (i915_gem_object_has_struct_page(obj)) + ptr = i915_gem_object_map_page(obj, type); + else + ptr = i915_gem_object_map_pfn(obj, type); if (!ptr) { err = -ENOMEM; goto err_unpin; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 38113d3c0138..75e8b71c18b9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -258,8 +258,8 @@ shmem_writeback(struct drm_i915_gem_object *obj) for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) { struct page *page; - page = find_lock_entry(mapping, i); - if (!page || xa_is_value(page)) + page = find_lock_page(mapping, i); + if (!page) continue; if (!page_mapped(page) && clear_page_dirty_for_io(page)) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 0be5e8683337..84b2707d8b17 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -53,8 +53,10 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *i915, struct drm_mm_node *node, u64 size, unsigned alignment) { - return i915_gem_stolen_insert_node_in_range(i915, node, size, - alignment, 0, U64_MAX); + return i915_gem_stolen_insert_node_in_range(i915, node, + size, alignment, + 
I915_GEM_STOLEN_BIAS, + U64_MAX); } void i915_gem_stolen_remove_node(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index e15c0adad8af..61e028063f9f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -30,4 +30,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv resource_size_t stolen_offset, resource_size_t size); +#define I915_GEM_STOLEN_BIAS SZ_128K + #endif /* __I915_GEM_STOLEN_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 12b30075134a..f2eaed6aca3d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -403,6 +403,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; unsigned int sg_page_sizes; + struct scatterlist *sg; int ret; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -410,13 +411,12 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, return ERR_PTR(-ENOMEM); alloc_table: - ret = __sg_alloc_table_from_pages(st, pvec, num_pages, - 0, num_pages << PAGE_SHIFT, - max_segment, - GFP_KERNEL); - if (ret) { + sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0, + num_pages << PAGE_SHIFT, max_segment, + NULL, 0, GFP_KERNEL); + if (IS_ERR(sg)) { kfree(st); - return ERR_PTR(ret); + return ERR_CAST(sg); } ret = i915_gem_gtt_prepare_pages(obj, st); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 5daf4a2be422..1f35e71429b4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1617,7 +1617,7 @@ int i915_gem_huge_page_mock_selftests(void) out_put: i915_vm_put(&ppgtt->vm); out_unlock: - drm_dev_put(&dev_priv->drm); + mock_destroy_device(dev_priv); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 99becb86abd3..d3f87dc4eda3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1997,7 +1997,7 @@ int i915_gem_context_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index 2a52b92586b9..b6d43880b0c1 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -82,6 +82,7 @@ static int igt_dmabuf_import(void *arg) struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; void *obj_map, *dma_map; + struct dma_buf_map map; u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff }; int err, i; @@ -110,7 +111,8 @@ static int igt_dmabuf_import(void *arg) goto out_obj; } - dma_map = dma_buf_vmap(dmabuf); + err = dma_buf_vmap(dmabuf, &map); + dma_map = err ? 
NULL : map.vaddr; if (!dma_map) { pr_err("dma_buf_vmap failed\n"); err = -ENOMEM; @@ -150,7 +152,7 @@ static int igt_dmabuf_import(void *arg) err = 0; out_dma_map: - dma_buf_vunmap(dmabuf, dma_map); + dma_buf_vunmap(dmabuf, &map); out_obj: i915_gem_object_put(obj); out_dmabuf: @@ -163,6 +165,7 @@ static int igt_dmabuf_import_ownership(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; + struct dma_buf_map map; void *ptr; int err; @@ -170,7 +173,8 @@ static int igt_dmabuf_import_ownership(void *arg) if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); - ptr = dma_buf_vmap(dmabuf); + err = dma_buf_vmap(dmabuf, &map); + ptr = err ? NULL : map.vaddr; if (!ptr) { pr_err("dma_buf_vmap failed\n"); err = -ENOMEM; @@ -178,7 +182,7 @@ static int igt_dmabuf_import_ownership(void *arg) } memset(ptr, 0xc5, PAGE_SIZE); - dma_buf_vunmap(dmabuf, ptr); + dma_buf_vunmap(dmabuf, &map); obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); if (IS_ERR(obj)) { @@ -212,6 +216,7 @@ static int igt_dmabuf_export_vmap(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; + struct dma_buf_map map; void *ptr; int err; @@ -228,7 +233,8 @@ static int igt_dmabuf_export_vmap(void *arg) } i915_gem_object_put(obj); - ptr = dma_buf_vmap(dmabuf); + err = dma_buf_vmap(dmabuf, &map); + ptr = err ? NULL : map.vaddr; if (!ptr) { pr_err("dma_buf_vmap failed\n"); err = -ENOMEM; @@ -244,7 +250,7 @@ static int igt_dmabuf_export_vmap(void *arg) memset(ptr, 0xc5, dmabuf->size); err = 0; - dma_buf_vunmap(dmabuf, ptr); + dma_buf_vunmap(dmabuf, &map); out: dma_buf_put(dmabuf); return err; @@ -272,7 +278,7 @@ int i915_gem_dmabuf_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c index faa5b6d91795..bf853c40ec65 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c @@ -85,7 +85,7 @@ int i915_gem_object_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c index a94243dc4c5c..8cee68c6a6dc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c @@ -73,6 +73,6 @@ int i915_gem_phys_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index debaf7b18ab5..2855d11c7a51 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -28,10 +28,9 @@ static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, sg = sg_next(sg); } - if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { - err = -ENOMEM; + err = dma_map_sgtable(attachment->dev, st, dir, 0); + if (err) goto err_st; - } return st; @@ -46,7 +45,7 @@ static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *st, enum dma_data_direction dir) { - dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir); + dma_unmap_sgtable(attachment->dev, st, dir, 0); sg_free_table(st); kfree(st); } @@ -62,18 
+61,24 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf) kfree(mock); } -static void *mock_dmabuf_vmap(struct dma_buf *dma_buf) +static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); + void *vaddr; + + vaddr = vm_map_ram(mock->pages, mock->npages, 0); + if (!vaddr) + return -ENOMEM; + dma_buf_map_set_vaddr(map, vaddr); - return vm_map_ram(mock->pages, mock->npages, 0); + return 0; } -static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); - vm_unmap_ram(vaddr, mock->npages); + vm_unmap_ram(map->vaddr, mock->npages); } static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index fd0d24d28763..c30adc05fa98 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -239,18 +239,24 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) I915_CACHE_NONE, PTE_READ_ONLY); vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); - if (IS_ERR(vm->scratch[1])) - return PTR_ERR(vm->scratch[1]); + if (IS_ERR(vm->scratch[1])) { + ret = PTR_ERR(vm->scratch[1]); + goto err_scratch0; + } ret = pin_pt_dma(vm, vm->scratch[1]); - if (ret) { - i915_gem_object_put(vm->scratch[1]); - return ret; - } + if (ret) + goto err_scratch1; fill32_px(vm->scratch[1], vm->scratch[0]->encode); return 0; + +err_scratch1: + i915_gem_object_put(vm->scratch[1]); +err_scratch0: + i915_gem_object_put(vm->scratch[0]); + return ret; } static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index eb64f474a78c..38c7069b7749 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -604,7 +604,8 @@ static int gen8_init_scratch(struct i915_address_space *vm) return 0; free_scratch: - free_scratch(vm); + while (i--) + i915_gem_object_put(vm->scratch[i]); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index d301dda1b261..92a3f25c4006 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -472,6 +472,7 @@ retry: err = i915_gem_ww_ctx_backoff(&ww); if (!err) goto retry; + rq = ERR_PTR(err); } else { rq = ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 08e2c000dcc3..7c3a1012e702 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -337,4 +337,13 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) return intel_engine_has_preemption(engine); } +static inline bool +intel_engine_has_heartbeat(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + return false; + + return READ_ONCE(engine->props.heartbeat_interval_ms); +} + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 8ffdf676c0a0..5067d0524d4b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -177,36 +177,82 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine) INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat); } 
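
Review note on the heartbeat rework in the hunk below: pulse submission is lifted out of intel_engine_pulse() into a common __intel_engine_pulse() helper so that intel_engine_set_heartbeat() can reuse it. The helper asserts its locking requirements rather than taking them, so every caller must already hold the kernel context's timeline mutex and an engine PM wakeref. intel_engine_set_heartbeat() now takes both itself, and where the engine supports preemption it sends a confirmation pulse against the current workload, rolling back to the saved interval if that pulse fails; it also refuses (-ENODEV) to disable the heartbeat (delay == 0) on an engine without preempt-reset, presumably because persistent hogs could then no longer be evicted. A minimal illustrative sketch of the caller contract, mirroring (not replacing) the call sites in this hunk:

	/* Illustrative only: the calling convention both call sites follow. */
	struct intel_context *ce = engine->kernel_context;
	int err;

	intel_engine_pm_get(engine);		/* keep the engine awake */
	err = mutex_lock_interruptible(&ce->timeline->mutex);
	if (!err) {
		/* submits a barrier-priority (I915_PRIORITY_BARRIER) request */
		err = __intel_engine_pulse(engine);
		mutex_unlock(&ce->timeline->mutex);
	}
	intel_engine_pm_put(engine);
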
+static int __intel_engine_pulse(struct intel_engine_cs *engine) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + + lockdep_assert_held(&ce->timeline->mutex); + GEM_BUG_ON(!intel_engine_has_preemption(engine)); + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); + idle_pulse(engine, rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); + + return 0; +} + +static unsigned long set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay) +{ + unsigned long old; + + old = xchg(&engine->props.heartbeat_interval_ms, delay); + if (delay) + intel_engine_unpark_heartbeat(engine); + else + intel_engine_park_heartbeat(engine); + + return old; +} + int intel_engine_set_heartbeat(struct intel_engine_cs *engine, unsigned long delay) { - int err; + struct intel_context *ce = engine->kernel_context; + int err = 0; - /* Send one last pulse before to cleanup persistent hogs */ - if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) { - err = intel_engine_pulse(engine); - if (err) - return err; - } + if (!delay && !intel_engine_has_preempt_reset(engine)) + return -ENODEV; + + intel_engine_pm_get(engine); + + err = mutex_lock_interruptible(&ce->timeline->mutex); + if (err) + goto out_rpm; - WRITE_ONCE(engine->props.heartbeat_interval_ms, delay); + if (delay != engine->props.heartbeat_interval_ms) { + unsigned long saved = set_heartbeat(engine, delay); - if (intel_engine_pm_get_if_awake(engine)) { - if (delay) - intel_engine_unpark_heartbeat(engine); - else - intel_engine_park_heartbeat(engine); - intel_engine_pm_put(engine); + /* recheck current execution */ + if (intel_engine_has_preemption(engine)) { + err = __intel_engine_pulse(engine); + if (err) + set_heartbeat(engine, saved); + } } - return 0; + mutex_unlock(&ce->timeline->mutex); + +out_rpm: + intel_engine_pm_put(engine); + return err; } int intel_engine_pulse(struct intel_engine_cs *engine) { - struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; struct intel_context *ce = engine->kernel_context; - struct i915_request *rq; int err; if (!intel_engine_has_preemption(engine)) @@ -215,30 +261,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine) if (!intel_engine_pm_get_if_awake(engine)) return 0; - if (mutex_lock_interruptible(&ce->timeline->mutex)) { - err = -EINTR; - goto out_rpm; - } - - intel_context_enter(ce); - rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); - intel_context_exit(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_unlock; + err = -EINTR; + if (!mutex_lock_interruptible(&ce->timeline->mutex)) { + err = __intel_engine_pulse(engine); + mutex_unlock(&ce->timeline->mutex); } - __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); - idle_pulse(engine, rq); - - __i915_request_commit(rq); - __i915_request_queue(rq, &attr); - GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); - err = 0; - -out_unlock: - mutex_unlock(&ce->timeline->mutex); -out_rpm: intel_engine_pm_put(engine); return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index c400aaa2287b..ee6312601c56 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ 
b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -278,7 +278,7 @@ struct intel_engine_execlists { * * Note these register may be either mmio or HWSP shadow. */ - u32 *csb_status; + u64 *csb_status; /** * @csb_size: context status buffer FIFO size diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 81c05f551b9c..188a5f70177d 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -835,7 +835,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) u16 snb_gmch_ctl; /* TODO: We're not aware of mappable constraints on gen8 yet */ - if (!IS_DGFX(i915)) { + if (!HAS_LMEM(i915)) { ggtt->gmadr = pci_resource(pdev, 2); ggtt->mappable_end = resource_size(&ggtt->gmadr); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index 4b7671ac5dca..104cb30e8c13 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -134,6 +134,7 @@ static void pool_retire(struct i915_active *ref) /* Return this object to the shrinker pool */ i915_gem_object_make_purgeable(node->obj); + GEM_BUG_ON(node->age); spin_lock_irqsave(&pool->lock, flags); list_add_rcu(&node->link, list); WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */ @@ -155,6 +156,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz) if (!node) return ERR_PTR(-ENOMEM); + node->age = 0; node->pool = pool; i915_active_init(&node->active, pool_active, pool_retire); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 0412a44f25f2..a32aabce7901 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1140,9 +1140,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) /* Check in case we rollback so far we wrap [size/2] */ if (intel_ring_direction(rq->ring, - intel_ring_wrap(rq->ring, - rq->tail), - rq->ring->tail) > 0) + rq->tail, + rq->ring->tail + 8) > 0) rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE; active = rq; @@ -2464,7 +2463,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists) } static inline void -invalidate_csb_entries(const u32 *first, const u32 *last) +invalidate_csb_entries(const u64 *first, const u64 *last) { clflush((void *)first); clflush((void *)last); @@ -2496,14 +2495,25 @@ invalidate_csb_entries(const u32 *first, const u32 *last) * bits 47-57: sw context id of the lrc the GT switched away from * bits 58-63: sw counter of the lrc the GT switched away from */ -static inline bool -gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) -{ - u32 lower_dw = csb[0]; - u32 upper_dw = csb[1]; - bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw); - bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw); - bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; +static inline bool gen12_csb_parse(const u64 *csb) +{ + bool ctx_away_valid; + bool new_queue; + u64 entry; + + /* HSD#22011248461 */ + entry = READ_ONCE(*csb); + if (unlikely(entry == -1)) { + preempt_disable(); + if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 50)) + GEM_WARN_ON("50us CSB timeout"); + preempt_enable(); + } + WRITE_ONCE(*(u64 *)csb, -1); + + ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(entry)); + new_queue = + lower_32_bits(entry) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; /* * The context switch detail is not guaranteed to be 5 when a preemption @@ -2513,7 +2523,7 @@ gen12_csb_parse(const struct 
intel_engine_execlists *execlists, const u32 *csb) * would require some extra handling, but we don't support that. */ if (!ctx_away_valid || new_queue) { - GEM_BUG_ON(!ctx_to_valid); + GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(entry))); return true; } @@ -2522,12 +2532,11 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) * context switch on an unsuccessful wait instruction since we always * use polling mode. */ - GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw)); + GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(entry))); return false; } -static inline bool -gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) +static inline bool gen8_csb_parse(const u64 *csb) { return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); } @@ -2535,7 +2544,7 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - const u32 * const buf = execlists->csb_status; + const u64 * const buf = execlists->csb_status; const u8 num_entries = execlists->csb_size; u8 head, tail; @@ -2616,12 +2625,14 @@ static void process_csb(struct intel_engine_cs *engine) */ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", - head, buf[2 * head + 0], buf[2 * head + 1]); + head, + upper_32_bits(buf[head]), + lower_32_bits(buf[head])); if (INTEL_GEN(engine->i915) >= 12) - promote = gen12_csb_parse(execlists, buf + 2 * head); + promote = gen12_csb_parse(buf + head); else - promote = gen8_csb_parse(execlists, buf + 2 * head); + promote = gen8_csb_parse(buf + head); if (promote) { struct i915_request * const *old = execlists->active; @@ -2649,6 +2660,9 @@ static void process_csb(struct intel_engine_cs *engine) smp_wmb(); /* complete the seqlock */ WRITE_ONCE(execlists->active, execlists->inflight); + /* XXX Magic delay for tgl */ + ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); + WRITE_ONCE(execlists->pending[0], NULL); } else { if (GEM_WARN_ON(!*execlists->active)) { @@ -4005,6 +4019,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) WRITE_ONCE(*execlists->csb_write, reset_value); wmb(); /* Make sure this is visible to HW (paranoia?) */ + /* Check that the GPU does indeed update the CSB entries! */ + memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64)); invalidate_csb_entries(&execlists->csb_status[0], &execlists->csb_status[reset_value]); @@ -5157,7 +5173,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) } execlists->csb_status = - &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; + (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; execlists->csb_write = &engine->status_page.addr[intel_hws_csb_write_index(i915)]; diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 632e08a4592b..39179a3eee98 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -109,7 +109,7 @@ struct drm_i915_mocs_table { * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for * PTE and will be initialized to an invalid value. * - * The last two entries are reserved by the hardware. For ICL+ they + * The last few entries are reserved by the hardware. For ICL+ they * should be initialized according to bspec and never used, for older * platforms they should never be written to. 
* @@ -234,11 +234,17 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { L3_1_UC) static const struct drm_i915_mocs_entry tgl_mocs_table[] = { - /* Base - Error (Reserved for Non-Use) */ - MOCS_ENTRY(0, 0x0, 0x0), - /* Base - Reserved */ - MOCS_ENTRY(1, 0x0, 0x0), - + /* + * NOTE: + * Reserved and unspecified MOCS indices have been set to (L3 + LCC). + * These reserved entries should never be used, they may be changed + * to low performant variants with better coherency in the future if + * more entries are needed. We are programming index I915_MOCS_PTE(1) + * only, __init_mocs_table() take care to program unused index with + * this entry. + */ + MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_3_WB), GEN11_MOCS_ENTRIES, /* Implicitly enable L1 - HDC:L1 + L3 + LLC */ @@ -280,6 +286,39 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = { GEN11_MOCS_ENTRIES }; +static const struct drm_i915_mocs_entry dg1_mocs_table[] = { + /* Error */ + MOCS_ENTRY(0, 0, L3_0_DIRECT), + + /* UC */ + MOCS_ENTRY(1, 0, L3_1_UC), + + /* Reserved */ + MOCS_ENTRY(2, 0, L3_0_DIRECT), + MOCS_ENTRY(3, 0, L3_0_DIRECT), + MOCS_ENTRY(4, 0, L3_0_DIRECT), + + /* WB - L3 */ + MOCS_ENTRY(5, 0, L3_3_WB), + /* WB - L3 50% */ + MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB), + /* WB - L3 25% */ + MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB), + /* WB - L3 12.5% */ + MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB), + + /* HDC:L1 + L3 */ + MOCS_ENTRY(48, 0, L3_3_WB), + /* HDC:L1 */ + MOCS_ENTRY(49, 0, L3_1_UC), + + /* HW Reserved */ + MOCS_ENTRY(60, 0, L3_1_UC), + MOCS_ENTRY(61, 0, L3_1_UC), + MOCS_ENTRY(62, 0, L3_1_UC), + MOCS_ENTRY(63, 0, L3_1_UC), +}; + enum { HAS_GLOBAL_MOCS = BIT(0), HAS_ENGINE_MOCS = BIT(1), @@ -306,7 +345,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, { unsigned int flags; - if (INTEL_GEN(i915) >= 12) { + if (IS_DG1(i915)) { + table->size = ARRAY_SIZE(dg1_mocs_table); + table->table = dg1_mocs_table; + table->n_entries = GEN11_NUM_MOCS_ENTRIES; + } else if (INTEL_GEN(i915) >= 12) { table->size = ARRAY_SIZE(tgl_mocs_table); table->table = tgl_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index e6a00eea0631..466ec671b379 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -390,6 +390,16 @@ static void gen5_rps_update(struct intel_rps *rps) spin_unlock_irq(&mchdev_lock); } +static unsigned int gen5_invert_freq(struct intel_rps *rps, + unsigned int val) +{ + /* Invert the frequency bin into an ips delay */ + val = rps->max_freq - val; + val = rps->min_freq + val; + + return val; +} + static bool gen5_rps_set(struct intel_rps *rps, u8 val) { struct intel_uncore *uncore = rps_to_uncore(rps); @@ -404,8 +414,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val) } /* Invert the frequency bin into an ips delay */ - val = rps->max_freq - val; - val = rps->min_freq + val; + val = gen5_invert_freq(rps, val); rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | @@ -500,6 +509,7 @@ static unsigned int init_emon(struct intel_uncore *uncore) static bool gen5_rps_enable(struct intel_rps *rps) { + struct drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); u8 fstart, vstart; u32 rgvmodectl; @@ -557,6 +567,10 @@ static bool gen5_rps_enable(struct intel_rps *rps) rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); rps->ips.last_time2 = ktime_get_raw_ns(); + 
spin_lock(&i915->irq_lock); + ilk_enable_display_irq(i915, DE_PCU_EVENT); + spin_unlock(&i915->irq_lock); + spin_unlock_irq(&mchdev_lock); rps->ips.corr = init_emon(uncore); @@ -566,11 +580,16 @@ static bool gen5_rps_enable(struct intel_rps *rps) static void gen5_rps_disable(struct intel_rps *rps) { + struct drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); u16 rgvswctl; spin_lock_irq(&mchdev_lock); + spin_lock(&i915->irq_lock); + ilk_disable_display_irq(i915, DE_PCU_EVENT); + spin_unlock(&i915->irq_lock); + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); /* Ack interrupts, disable EFC interrupt */ @@ -578,11 +597,6 @@ static void gen5_rps_disable(struct intel_rps *rps) intel_uncore_read(uncore, MEMINTREN) & ~MEMINT_EVAL_CHG_EN); intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); - intel_uncore_write(uncore, DEIER, - intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); - intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); - intel_uncore_write(uncore, DEIMR, - intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ gen5_rps_set(rps, rps->idle_freq); @@ -1272,8 +1286,9 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips) { struct intel_rps *rps = container_of(ips, typeof(*rps), ips); struct intel_uncore *uncore = rps_to_uncore(rps); - unsigned long t, corr, state1, corr2, state2; + unsigned int t, state1, state2; u32 pxvid, ext_v; + u64 corr, corr2; lockdep_assert_held(&mchdev_lock); @@ -1294,11 +1309,10 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips) else /* < 50 */ corr = t * 301 + 1004; - corr = corr * 150142 * state1 / 10000 - 78642; - corr /= 100000; - corr2 = corr * ips->corr; + corr = div_u64(corr * 150142 * state1, 10000) - 78642; + corr2 = div_u64(corr, 100000) * ips->corr; - state2 = corr2 * state1 / 10000; + state2 = div_u64(corr2 * state1, 10000); state2 /= 100; /* convert to mW */ __gen5_ips_update(ips); @@ -1432,8 +1446,10 @@ int intel_gpu_freq(struct intel_rps *rps, int val) return chv_gpu_freq(rps, val); else if (IS_VALLEYVIEW(i915)) return byt_gpu_freq(rps, val); - else + else if (INTEL_GEN(i915) >= 6) return val * GT_FREQUENCY_MULTIPLIER; + else + return val; } int intel_freq_opcode(struct intel_rps *rps, int val) @@ -1447,8 +1463,10 @@ int intel_freq_opcode(struct intel_rps *rps, int val) return chv_freq_opcode(rps, val); else if (IS_VALLEYVIEW(i915)) return byt_freq_opcode(rps, val); - else + else if (INTEL_GEN(i915) >= 6) return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); + else + return val; } static void vlv_init_gpll_ref_freq(struct intel_rps *rps) @@ -1864,8 +1882,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; - else + else if (INTEL_GEN(i915) >= 6) cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; + else + cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >> + MEMSTAT_PSTATE_SHIFT); return cagf; } @@ -1873,14 +1894,17 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) static u32 read_cagf(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); u32 freq; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { vlv_punit_get(i915); freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); vlv_punit_put(i915); + } else if (INTEL_GEN(i915) >= 6) { + freq = intel_uncore_read(uncore, GEN6_RPSTAT1); } else { - freq = 
intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1); + freq = intel_uncore_read(uncore, MEMSTAT_ILK); } return intel_rps_get_cagf(rps, freq); diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index f1c039e1b5ad..8a72e0fe34ca 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -169,7 +169,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt) u8 eu_en; u8 s_en; - if (IS_ELKHARTLAKE(gt->i915)) + if (IS_JSL_EHL(gt->i915)) intel_sseu_set_info(sseu, 1, 4, 8); else intel_sseu_set_info(sseu, 1, 8, 8); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index a3f72b75c61e..fed9503a7c4e 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -70,6 +70,19 @@ const struct i915_rev_steppings kbl_revids[] = { [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 }, }; +const struct i915_rev_steppings tgl_uy_revids[] = { + [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 }, + [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 }, + [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 }, + [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 }, +}; + +/* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */ +const struct i915_rev_steppings tgl_revids[] = { + [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 }, + [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 }, +}; + static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) { wal->name = name; @@ -659,6 +672,20 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, 0); } +static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + gen12_ctx_workarounds_init(engine, wal); + + /* Wa_1409044764 */ + WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, + DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN); + + /* Wa_22010493298 */ + WA_SET_BIT_MASKED(HIZ_CHICKEN, + DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE); +} + static void __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, struct i915_wa_list *wal, @@ -671,7 +698,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, wa_init_start(wal, name, engine->name); - if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) + if (IS_DG1(i915)) + dg1_ctx_workarounds_init(engine, wal); + else if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) tgl_ctx_workarounds_init(engine, wal); else if (IS_GEN(i915, 12)) gen12_ctx_workarounds_init(engine, wal); @@ -1199,7 +1228,7 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) /* Wa_1607087056:icl,ehl,jsl */ if (IS_ICELAKE(i915) || - IS_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) { + IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0)) { wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1219,22 +1248,48 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) gen12_gt_workarounds_init(i915, wal); /* Wa_1409420604:tgl */ - if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS); /* Wa_1607087056:tgl also know as BUG:1409180338 */ - if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) 
wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); } static void +dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen12_gt_workarounds_init(i915, wal); + + /* Wa_1607087056:dg1 */ + if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0)) + wa_write_or(wal, + SLICE_UNIT_LEVEL_CLKGATE, + L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); + + /* Wa_1409420604:dg1 */ + if (IS_DG1(i915)) + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE2, + CPSSUNIT_CLKGATE_DIS); + + /* Wa_1408615072:dg1 */ + /* Empirical testing shows this register is unaffected by engine reset. */ + if (IS_DG1(i915)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + VSUNIT_CLKGATE_DIS_TGL); +} + +static void gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) { - if (IS_TIGERLAKE(i915)) + if (IS_DG1(i915)) + dg1_gt_workarounds_init(i915, wal); + else if (IS_TIGERLAKE(i915)) tgl_gt_workarounds_init(i915, wal); else if (IS_GEN(i915, 12)) gen12_gt_workarounds_init(i915, wal); @@ -1599,6 +1654,20 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) } } +static void dg1_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + tgl_whitelist_build(engine); + + /* GEN:BUG:1409280441:dg1 */ + if (IS_DG1_REVID(engine->i915, DG1_REVID_A0, DG1_REVID_A0) && + (engine->class == RENDER_CLASS || + engine->class == COPY_ENGINE_CLASS)) + whitelist_reg_ext(w, RING_ID(engine->mmio_base), + RING_FORCE_TO_NONPRIV_ACCESS_RD); +} + void intel_engine_init_whitelist(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; @@ -1606,7 +1675,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) wa_init_start(w, "whitelist", engine->name); - if (IS_GEN(i915, 12)) + if (IS_DG1(i915)) + dg1_whitelist_build(engine); + else if (IS_GEN(i915, 12)) tgl_whitelist_build(engine); else if (IS_GEN(i915, 11)) icl_whitelist_build(engine); @@ -1660,15 +1731,18 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { + if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) || + IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { /* - * Wa_1607138336:tgl - * Wa_1607063988:tgl + * Wa_1607138336:tgl[a0],dg1[a0] + * Wa_1607063988:tgl[a0],dg1[a0] */ wa_write_or(wal, GEN9_CTX_PREEMPT_REG, GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); + } + if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { /* * Wa_1606679103:tgl * (see also Wa_1606682166:icl) @@ -1682,49 +1756,63 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) VSUNIT_CLKGATE_DIS_TGL); } - if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { - /* Wa_1606931601:tgl,rkl */ + if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { + /* Wa_1606931601:tgl,rkl,dg1 */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ); - /* Wa_1409804808:tgl,rkl */ + /* + * Wa_1407928979:tgl A* + * Wa_18011464164:tgl[B0+],dg1[B0+] + * Wa_22010931296:tgl[B0+],dg1[B0+] + * Wa_14010919138:rkl, dg1 + */ + wa_write_or(wal, GEN7_FF_THREAD_MODE, + GEN12_FF_TESSELATION_DOP_GATE_DISABLE); + } + + if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) || + IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { + /* Wa_1409804808:tgl,rkl,dg1[a0] */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_PUSH_CONST_DEREF_HOLD_DIS); /* * Wa_1409085225:tgl - * Wa_14010229206:tgl,rkl + * Wa_14010229206:tgl,rkl,dg1[a0] */ wa_masked_en(wal, 
GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); /* - * Wa_1407928979:tgl A* - * Wa_18011464164:tgl B0+ - * Wa_22010931296:tgl B0+ - * Wa_14010919138:rkl - */ - wa_write_or(wal, GEN7_FF_THREAD_MODE, - GEN12_FF_TESSELATION_DOP_GATE_DISABLE); - - /* * Wa_1607030317:tgl * Wa_1607186500:tgl - * Wa_1607297627:tgl,rkl there are multiple entries for this - * WA in the BSpec; some indicate this is an A0-only WA, - * others indicate it applies to all steppings. + * Wa_1607297627:tgl,rkl,dg1[a0] + * + * On TGL and RKL there are multiple entries for this WA in the + * BSpec; some indicate this is an A0-only WA, others indicate + * it applies to all steppings so we trust the "all steppings." + * For DG1 this only applies to A0. */ wa_masked_en(wal, GEN6_RC_SLEEP_PSMI_CONTROL, GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | GEN8_RC_SEMA_IDLE_MSG_DISABLE); - } - if (IS_TIGERLAKE(i915)) { - /* Wa_1606700617:tgl */ + /* + * Wa_1606700617:tgl + * Wa_22010271021:tgl,rkl + */ wa_masked_en(wal, GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE); } + if (IS_GEN(i915, 12)) { + /* Wa_1406941453:gen12 */ + wa_masked_en(wal, + GEN10_SAMPLER_MODE, + ENABLE_SMALLPL); + } + if (IS_GEN(i915, 11)) { /* This is not an Wa. Enable for better image quality */ wa_masked_en(wal, @@ -1818,7 +1906,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN12_FF_TESSELATION_DOP_GATE_DISABLE); /* Wa_22010271021:ehl */ - if (IS_ELKHARTLAKE(i915)) + if (IS_JSL_EHL(i915)) wa_masked_en(wal, GEN9_CS_DEBUG_MODE1, FF_DOP_CLOCK_GATE_DISABLE); @@ -2010,10 +2098,12 @@ err_obj: return ERR_PTR(err); } -static const struct { +struct mcr_range { u32 start; u32 end; -} mcr_ranges_gen8[] = { +}; + +static const struct mcr_range mcr_ranges_gen8[] = { { .start = 0x5500, .end = 0x55ff }, { .start = 0x7000, .end = 0x7fff }, { .start = 0x9400, .end = 0x97ff }, @@ -2022,11 +2112,25 @@ static const struct { {}, }; +static const struct mcr_range mcr_ranges_gen12[] = { + { .start = 0x8150, .end = 0x815f }, + { .start = 0x9520, .end = 0x955f }, + { .start = 0xb100, .end = 0xb3ff }, + { .start = 0xde80, .end = 0xe8ff }, + { .start = 0x24a00, .end = 0x24a7f }, + {}, +}; + static bool mcr_range(struct drm_i915_private *i915, u32 offset) { + const struct mcr_range *mcr_ranges; int i; - if (INTEL_GEN(i915) < 8) + if (INTEL_GEN(i915) >= 12) + mcr_ranges = mcr_ranges_gen12; + else if (INTEL_GEN(i915) >= 8) + mcr_ranges = mcr_ranges_gen8; + else return false; /* @@ -2034,9 +2138,9 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) * which only controls CPU initiated MMIO. Routing does not * work for CS access so we cannot verify them on this path. 
*/ - for (i = 0; mcr_ranges_gen8[i].start; i++) - if (offset >= mcr_ranges_gen8[i].start && - offset <= mcr_ranges_gen8[i].end) + for (i = 0; mcr_ranges[i].start; i++) + if (offset >= mcr_ranges[i].start && + offset <= mcr_ranges[i].end) return true; return false; diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 35406ecdf0b2..ef5aeebbeeb0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -3,9 +3,203 @@ * Copyright © 2018 Intel Corporation */ +#include <linux/crc32.h> + +#include "gem/i915_gem_stolen.h" + +#include "i915_memcpy.h" #include "i915_selftest.h" #include "selftests/igt_reset.h" #include "selftests/igt_atomic.h" +#include "selftests/igt_spinner.h" + +static int +__igt_reset_stolen(struct intel_gt *gt, + intel_engine_mask_t mask, + const char *msg) +{ + struct i915_ggtt *ggtt = >->i915->ggtt; + const struct resource *dsm = >->i915->dsm; + resource_size_t num_pages, page; + struct intel_engine_cs *engine; + intel_wakeref_t wakeref; + enum intel_engine_id id; + struct igt_spinner spin; + long max, count; + void *tmp; + u32 *crc; + int err; + + if (!drm_mm_node_allocated(&ggtt->error_capture)) + return 0; + + num_pages = resource_size(dsm) >> PAGE_SHIFT; + if (!num_pages) + return 0; + + crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL); + if (!crc) + return -ENOMEM; + + tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto err_crc; + } + + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + err = igt_spinner_init(&spin, gt); + if (err) + goto err_lock; + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + struct i915_request *rq; + + if (!(mask & engine->mask)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto err_spin; + } + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_spin; + } + i915_request_add(rq); + } + + for (page = 0; page < num_pages; page++) { + dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT); + void __iomem *s; + void *in; + + ggtt->vm.insert_page(&ggtt->vm, dma, + ggtt->error_capture.start, + I915_CACHE_NONE, 0); + mb(); + + s = io_mapping_map_wc(&ggtt->iomap, + ggtt->error_capture.start, + PAGE_SIZE); + + if (!__drm_mm_interval_first(>->i915->mm.stolen, + page << PAGE_SHIFT, + ((page + 1) << PAGE_SHIFT) - 1)) + memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32)); + + in = s; + if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE)) + in = tmp; + crc[page] = crc32_le(0, in, PAGE_SIZE); + + io_mapping_unmap(s); + } + mb(); + ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); + + if (mask == ALL_ENGINES) { + intel_gt_reset(gt, mask, NULL); + } else { + for_each_engine(engine, gt, id) { + if (mask & engine->mask) + intel_engine_reset(engine, NULL); + } + } + + max = -1; + count = 0; + for (page = 0; page < num_pages; page++) { + dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT); + void __iomem *s; + void *in; + u32 x; + + ggtt->vm.insert_page(&ggtt->vm, dma, + ggtt->error_capture.start, + I915_CACHE_NONE, 0); + mb(); + + s = io_mapping_map_wc(&ggtt->iomap, + ggtt->error_capture.start, + PAGE_SIZE); + + in = s; + if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE)) + in = tmp; + x = crc32_le(0, in, PAGE_SIZE); + + if (x != crc[page] && + 
!__drm_mm_interval_first(>->i915->mm.stolen, + page << PAGE_SHIFT, + ((page + 1) << PAGE_SHIFT) - 1)) { + pr_debug("unused stolen page %pa modified by GPU reset\n", + &page); + if (count++ == 0) + igt_hexdump(in, PAGE_SIZE); + max = page; + } + + io_mapping_unmap(s); + } + mb(); + ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); + + if (count > 0) { + pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n", + msg, count, max); + } + if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) { + pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n", + msg, I915_GEM_STOLEN_BIAS); + err = -EINVAL; + } + +err_spin: + igt_spinner_fini(&spin); + +err_lock: + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + igt_global_reset_unlock(gt); + + kfree(tmp); +err_crc: + kfree(crc); + return err; +} + +static int igt_reset_device_stolen(void *arg) +{ + return __igt_reset_stolen(arg, ALL_ENGINES, "device"); +} + +static int igt_reset_engines_stolen(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + if (!intel_has_reset_engine(gt)) + return 0; + + for_each_engine(engine, gt, id) { + err = __igt_reset_stolen(gt, engine->mask, engine->name); + if (err) + return err; + } + + return 0; +} static int igt_global_reset(void *arg) { @@ -164,6 +358,8 @@ int intel_reset_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_global_reset), /* attempt to recover GPU first */ + SUBTEST(igt_reset_device_stolen), + SUBTEST(igt_reset_engines_stolen), SUBTEST(igt_wedged_reset), SUBTEST(igt_atomic_reset), SUBTEST(igt_atomic_engine_reset), diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 96d164a3841d..19c2cb166e7c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -158,7 +158,7 @@ out: __mock_hwsp_record(&state, na, NULL); kfree(state.history); err_put: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index 43c7acbdc79d..f011ea42487e 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -49,80 +49,40 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj) return file; } -static size_t shmem_npte(struct file *file) -{ - return file->f_mapping->host->i_size >> PAGE_SHIFT; -} - -static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte) -{ - unsigned long pfn; - - vunmap(ptr); - - for (pfn = 0; pfn < n_pte; pfn++) { - struct page *page; - - page = shmem_read_mapping_page_gfp(file->f_mapping, pfn, - GFP_KERNEL); - if (!WARN_ON(IS_ERR(page))) { - put_page(page); - put_page(page); - } - } -} - void *shmem_pin_map(struct file *file) { - const size_t n_pte = shmem_npte(file); - pte_t *stack[32], **ptes, **mem; - struct vm_struct *area; - unsigned long pfn; - - mem = stack; - if (n_pte > ARRAY_SIZE(stack)) { - mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL); - if (!mem) - return NULL; - } + struct page **pages; + size_t n_pages, i; + void *vaddr; - area = alloc_vm_area(n_pte << PAGE_SHIFT, mem); - if (!area) { - if (mem != stack) - kvfree(mem); + n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT; + pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) return NULL; - } - ptes = mem; - for (pfn = 0; pfn < n_pte; pfn++) 
{ - struct page *page; - - page = shmem_read_mapping_page_gfp(file->f_mapping, pfn, - GFP_KERNEL); - if (IS_ERR(page)) + for (i = 0; i < n_pages; i++) { + pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i, + GFP_KERNEL); + if (IS_ERR(pages[i])) goto err_page; - - **ptes++ = mk_pte(page, PAGE_KERNEL); } - if (mem != stack) - kvfree(mem); - + vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL); + if (!vaddr) + goto err_page; mapping_set_unevictable(file->f_mapping); - return area->addr; - + return vaddr; err_page: - if (mem != stack) - kvfree(mem); - - __shmem_unpin_map(file, area->addr, pfn); + while (--i >= 0) + put_page(pages[i]); + kvfree(pages); return NULL; } void shmem_unpin_map(struct file *file, void *ptr) { mapping_clear_unevictable(file->f_mapping); - __shmem_unpin_map(file, ptr, shmem_npte(file)); + vfree(ptr); } static int __shmem_rw(struct file *file, loff_t off, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 942c7c187adb..e4aaa5f29796 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -312,18 +312,18 @@ void intel_guc_write_params(struct intel_guc *guc) int i; /* - * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and + * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and * they are power context saved so it's ok to release forcewake * when we are done here and take it again at xfer time. */ - intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER); + intel_uncore_forcewake_get(uncore, FORCEWAKE_GT); intel_uncore_write(uncore, SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]); - intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER); + intel_uncore_forcewake_put(uncore, FORCEWAKE_GT); } int intel_guc_init(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 80e8b6c3bc8c..037bcaf3c8b5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \ + fw_def(JASPERLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index d0a599b51bfe..16b582cb97ed 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -936,7 +936,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, return -EFAULT; } - if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { + if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { gvt_vgpu_err("%s access to non-render register (%x)\n", cmd, offset); return -EBADRQC; @@ -976,7 +976,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, * inhibit context will restore with correct values */ if (IS_GEN(s->engine->i915, 9) && - intel_gvt_mmio_is_in_ctx(gvt, offset) && + intel_gvt_mmio_is_sr_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { intel_gvt_hypervisor_read_gpa(s->vgpu, s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); @@ 
-992,8 +992,6 @@ static int cmd_reg_handler(struct parser_exec_state *s, } } - /* TODO: Update the global mask if this MMIO is a masked-MMIO */ - intel_gvt_mmio_set_cmd_accessed(gvt, offset); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 7ba16ddfe75f..5b5c71a0b4af 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -173,23 +173,24 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) int pipe; if (IS_BROXTON(dev_priv)) { - vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | - BXT_DE_PORT_HP_DDIB | - BXT_DE_PORT_HP_DDIC); + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~(GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | + GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | + GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= - BXT_DE_PORT_HP_DDIA; + GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= - BXT_DE_PORT_HP_DDIB; + GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= - BXT_DE_PORT_HP_DDIC; + GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); } return; @@ -327,7 +328,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { if (IS_BROADWELL(dev_priv)) vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= - GEN8_PORT_DP_A_HOTPLUG; + GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); else vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index ff7f2515a6fe..9831361f181e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -256,11 +256,11 @@ struct intel_gvt_mmio { /* This reg has been accessed by a VM */ #define F_ACCESSED (1 << 4) /* This reg has been accessed through GPU commands */ -#define F_CMD_ACCESSED (1 << 5) -/* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) -/* This reg is saved/restored in context */ -#define F_IN_CTX (1 << 7) +/* This reg is in GVT's mmio save-restor list and in hardware + * logical context image + */ +#define F_SR_IN_CTX (1 << 7) struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; @@ -597,39 +597,42 @@ static inline void intel_gvt_mmio_set_accessed( } /** - * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command + * intel_gvt_mmio_is_cmd_accessible - if a MMIO could be accessed by command * @gvt: a GVT device * @offset: register offset * + * Returns: + * True if an MMIO is able to be accessed by GPU commands */ -static inline bool intel_gvt_mmio_is_cmd_access( +static inline bool intel_gvt_mmio_is_cmd_accessible( struct intel_gvt *gvt, unsigned int offset) { return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS; } /** - * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned + * intel_gvt_mmio_set_cmd_accessible - + * mark a MMIO could be accessible by command * @gvt: a GVT device * @offset: register offset * */ -static inline bool intel_gvt_mmio_is_unalign( +static inline void intel_gvt_mmio_set_cmd_accessible( struct intel_gvt *gvt, unsigned int offset) { - return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN; + gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS; } /** - * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command + * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned * @gvt: a GVT 
device * @offset: register offset * */ -static inline void intel_gvt_mmio_set_cmd_accessed( +static inline bool intel_gvt_mmio_is_unalign( struct intel_gvt *gvt, unsigned int offset) { - gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED; + return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN; } /** @@ -648,30 +651,33 @@ static inline bool intel_gvt_mmio_has_mode_mask( } /** - * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask + * intel_gvt_mmio_is_sr_in_ctx - + * check if an MMIO has F_SR_IN_CTX mask * @gvt: a GVT device * @offset: register offset * * Returns: - * True if a MMIO has a in-context mask, false if it isn't. + * True if an MMIO has an F_SR_IN_CTX mask, false if it isn't. * */ -static inline bool intel_gvt_mmio_is_in_ctx( +static inline bool intel_gvt_mmio_is_sr_in_ctx( struct intel_gvt *gvt, unsigned int offset) { - return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX; + return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX; } /** - * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context + * intel_gvt_mmio_set_sr_in_ctx - + * mask an MMIO in GVT's mmio save-restore list and also + * in hardware logical context image * @gvt: a GVT device * @offset: register offset * */ -static inline void intel_gvt_mmio_set_in_ctx( +static inline void intel_gvt_mmio_set_sr_in_ctx( struct intel_gvt *gvt, unsigned int offset) { - gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX; + gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX; } void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 05f3bc98d242..c7cf15fe9ef6 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -290,8 +290,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; break; - case FORCEWAKE_BLITTER_GEN9_REG: - ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG; + case FORCEWAKE_GT_GEN9_REG: + ack_reg_offset = FORCEWAKE_ACK_GT_GEN9_REG; break; case FORCEWAKE_MEDIA_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG; @@ -1892,7 +1892,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; - MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, + MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); @@ -1900,7 +1900,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); MMIO_D(SDEISR, D_ALL); - MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); + MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL); + MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL, gamw_echo_dev_rw_ia_write); @@ -1927,11 +1928,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); MMIO_D(GEN7_CXT_SIZE, D_ALL); - MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL); - MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); + MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL); + MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL); + MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL); + MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL); + 
MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); /* RING MODE */ #define RING_REG(base) _MMIO((base) + 0x29c) @@ -2208,9 +2209,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(PF_VSCALE(PIPE_C), D_ALL); MMIO_D(PF_HSCALE(PIPE_C), D_ALL); - MMIO_D(WM0_PIPEA_ILK, D_ALL); - MMIO_D(WM0_PIPEB_ILK, D_ALL); - MMIO_D(WM0_PIPEC_IVB, D_ALL); + MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL); + MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL); + MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL); MMIO_D(WM1_LP_ILK, D_ALL); MMIO_D(WM2_LP_ILK, D_ALL); MMIO_D(WM3_LP_ILK, D_ALL); @@ -2686,7 +2687,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL); + MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL); MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL); @@ -2771,7 +2772,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, intel_vgpu_reg_master_irq_handler); - MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, + MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0, mmio_read_from_hw, NULL); #define RING_REG(base) _MMIO((base) + 0xd0) @@ -2785,7 +2786,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) #undef RING_REG #define RING_REG(base) _MMIO((base) + 0x234) - MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, + MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); #undef RING_REG @@ -2820,7 +2821,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG - MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); + MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write); MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); @@ -2900,8 +2901,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL); - MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); - MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL); + MMIO_DH(FORCEWAKE_GT_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); + MMIO_DH(FORCEWAKE_ACK_GT_GEN9, D_SKL_PLUS, NULL, NULL); MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); @@ -2921,7 +2922,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL); + MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); MMIO_D(DC_STATE_EN, D_SKL_PLUS); MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS); @@ -3137,7 +3138,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL); + MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); return 0; @@ -3357,7 +3358,10 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) gvt->mmio.mmio_attribute = NULL; } -/* Special MMIO 
blocks. */ +/* Special MMIO blocks. registers in MMIO block ranges should not be command + * accessible (should have no F_CMD_ACCESS flag). + * otherwise, need to update cmd_reg_handler in cmd_parser.c + */ static struct gvt_mmio_block mmio_blocks[] = { {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 291993615af9..b6811f6a230d 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -251,6 +251,9 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) /* set the bit 0:2(Core C-State ) to C0 */ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; + /* uc reset hw expect GS_MIA_IN_RESET */ + vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; + if (IS_BROXTON(vgpu->gvt->gt->i915)) { vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 86a60bdf0818..afe574d6b3b5 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -595,7 +595,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) i915_mmio_reg_valid(mmio->reg); mmio++) { if (mmio->in_context) { gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++; - intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); + intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg); } } } diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index b88e033cbed4..b58860dee970 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -101,8 +101,8 @@ #define FORCEWAKE_RENDER_GEN9_REG 0xa278 #define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84 -#define FORCEWAKE_BLITTER_GEN9_REG 0xa188 -#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044 +#define FORCEWAKE_GT_GEN9_REG 0xa188 +#define FORCEWAKE_ACK_GT_GEN9_REG 0x130044 #define FORCEWAKE_MEDIA_GEN9_REG 0xa270 #define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88 #define FORCEWAKE_ACK_HSW_REG 0x130044 diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 8fa9b31a2484..f6d7e33c7099 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -368,6 +368,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_creation_params *param) { + struct drm_i915_private *dev_priv = gvt->gt->i915; struct intel_vgpu *vgpu; int ret; @@ -436,7 +437,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_sched_policy; - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); + if (IS_BROADWELL(dev_priv)) + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); + else + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index b0a6522be3d1..10a865f3dc09 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -83,7 +83,7 @@ static void *active_debug_hint(void *addr) return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; } -static struct debug_obj_descr active_debug_desc = { +static const struct debug_obj_descr active_debug_desc = { .name = "i915_active", .debug_hint = active_debug_hint, }; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 5ac4a999f05a..e88970256e8e 100644 
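The i915_cmd_parser.c hunks that follow widen batch_offset/batch_length from u32 to unsigned long, and the matching switch from min_t(int, ...) to min() avoids forcing the now-long length through int. A standalone sketch of the truncation being avoided (hypothetical values, PAGE_SIZE assumed 4096; illustration only, not patch code):

#include <stdio.h>

int main(void)
{
	unsigned long length = 3UL << 30;   /* 3 GiB of batch remaining */
	unsigned long x = 100;              /* offset within the page */

	/* min_t(int, length, PAGE_SIZE - x): both sides are cast to int */
	int a = (int)length, b = (int)(4096 - x);
	int bad = a < b ? a : b;            /* length sign-flips to -1073741824 */

	/* min(length, PAGE_SIZE - x): compared as unsigned long */
	unsigned long good = length < 4096 - x ? length : 4096 - x;

	printf("min_t(int,...) = %d, min() = %lu\n", bad, good);
	return 0;
}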
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1136,7 +1136,7 @@ find_reg(const struct intel_engine_cs *engine, u32 addr) /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, struct drm_i915_gem_object *src_obj, - u32 offset, u32 length) + unsigned long offset, unsigned long length) { bool needs_clflush; void *dst, *src; @@ -1166,8 +1166,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, } } if (IS_ERR(src)) { + unsigned long x, n; void *ptr; - int x, n; /* * We can avoid clflushing partial cachelines before the write @@ -1184,7 +1184,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, ptr = dst; x = offset_in_page(offset); for (n = offset >> PAGE_SHIFT; length; n++) { - int len = min_t(int, length, PAGE_SIZE - x); + int len = min(length, PAGE_SIZE - x); src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); if (needs_clflush) @@ -1414,8 +1414,8 @@ static bool shadow_needs_clflush(struct drm_i915_gem_object *obj) */ int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, - u32 batch_offset, - u32 batch_length, + unsigned long batch_offset, + unsigned long batch_length, struct i915_vma *shadow, bool trampoline) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 784219962193..200f6b86f864 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -326,6 +326,7 @@ static void print_context_stats(struct seq_file *m, } i915_gem_context_unlock_engines(ctx); + mutex_lock(&ctx->mutex); if (!IS_ERR_OR_NULL(ctx->file_priv)) { struct file_stats stats = { .vm = rcu_access_pointer(ctx->vm), @@ -346,6 +347,7 @@ static void print_context_stats(struct seq_file *m, print_file_stats(m, name, stats); } + mutex_unlock(&ctx->mutex); spin_lock(&i915->gem.contexts.lock); list_safe_reset_next(ctx, cn, link); @@ -784,7 +786,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) struct intel_uncore *uncore = &dev_priv->uncore; struct intel_rps *rps = &dev_priv->gt.rps; intel_wakeref_t wakeref; - int ret = 0; wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); @@ -1007,7 +1008,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - return ret; + return 0; } static int i915_ring_freq_table(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 00292a849c34..d6e25212d5c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -58,7 +58,6 @@ #include "display/intel_hotplug.h" #include "display/intel_overlay.h" #include "display/intel_pipe_crc.h" -#include "display/intel_psr.h" #include "display/intel_sprite.h" #include "display/intel_vga.h" @@ -85,6 +84,7 @@ #include "intel_gvt.h" #include "intel_memory_region.h" #include "intel_pm.h" +#include "intel_sideband.h" #include "vlv_suspend.h" static struct drm_driver driver; @@ -216,125 +216,6 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv) release_resource(&dev_priv->mch_res); } -/* part #1: call before irq install */ -static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915) -{ - int ret; - - if (i915_inject_probe_failure(i915)) - return -ENODEV; - - if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { - ret = 
drm_vblank_init(&i915->drm, - INTEL_NUM_PIPES(i915)); - if (ret) - return ret; - } - - intel_bios_init(i915); - - ret = intel_vga_register(i915); - if (ret) - goto cleanup_bios; - - intel_power_domains_init_hw(i915, false); - - intel_csr_ucode_init(i915); - - ret = intel_modeset_init_noirq(i915); - if (ret) - goto cleanup_vga_client_pw_domain_csr; - - return 0; - -cleanup_vga_client_pw_domain_csr: - intel_csr_ucode_fini(i915); - intel_power_domains_driver_remove(i915); - intel_vga_unregister(i915); -cleanup_bios: - intel_bios_driver_remove(i915); - return ret; -} - -/* part #2: call after irq install */ -static int i915_driver_modeset_probe(struct drm_i915_private *i915) -{ - int ret; - - /* Important: The output setup functions called by modeset_init need - * working irqs for e.g. gmbus and dp aux transfers. */ - ret = intel_modeset_init(i915); - if (ret) - goto out; - - ret = i915_gem_init(i915); - if (ret) - goto cleanup_modeset; - - intel_overlay_setup(i915); - - if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915)) - return 0; - - ret = intel_fbdev_init(&i915->drm); - if (ret) - goto cleanup_gem; - - /* Only enable hotplug handling once the fbdev is fully set up. */ - intel_hpd_init(i915); - - intel_init_ipc(i915); - - intel_psr_set_force_mode_changed(i915->psr.dp); - - return 0; - -cleanup_gem: - i915_gem_suspend(i915); - i915_gem_driver_remove(i915); - i915_gem_driver_release(i915); -cleanup_modeset: - /* FIXME */ - intel_modeset_driver_remove(i915); - intel_irq_uninstall(i915); - intel_modeset_driver_remove_noirq(i915); -out: - return ret; -} - -/* part #1: call before irq uninstall */ -static void i915_driver_modeset_remove(struct drm_i915_private *i915) -{ - intel_modeset_driver_remove(i915); -} - -/* part #2: call after irq uninstall */ -static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915) -{ - intel_csr_ucode_fini(i915); - - intel_power_domains_driver_remove(i915); - - intel_vga_unregister(i915); - - intel_bios_driver_remove(i915); -} - -static void intel_init_dpio(struct drm_i915_private *dev_priv) -{ - /* - * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), - * CHV x1 PHY (DP/HDMI D) - * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) - */ - if (IS_CHERRYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; - DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; - } else if (IS_VALLEYVIEW(dev_priv)) { - DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; - } -} - static int i915_workqueues_init(struct drm_i915_private *dev_priv) { /* @@ -463,7 +344,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_detect_pch(dev_priv); intel_pm_setup(dev_priv); - intel_init_dpio(dev_priv); ret = intel_power_domains_init(dev_priv); if (ret < 0) goto err_gem; @@ -737,6 +617,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) */ intel_dram_detect(dev_priv); + intel_pcode_init(dev_priv); + intel_bw_init_hw(dev_priv); return 0; @@ -789,7 +671,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) /* Reveal our presence to userspace */ if (drm_dev_register(dev, 0) == 0) { i915_debugfs_register(dev_priv); - intel_display_debugfs_register(dev_priv); + if (HAS_DISPLAY(dev_priv)) + intel_display_debugfs_register(dev_priv); i915_setup_sysfs(dev_priv); /* Depends on sysfs having been initialized */ @@ -798,7 +681,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "Failed to register driver for userspace access!\n"); - if (HAS_DISPLAY(dev_priv) 
&& INTEL_DISPLAY_ENABLED(dev_priv)) { + if (HAS_DISPLAY(dev_priv)) { /* Must be done after probing outputs */ intel_opregion_register(dev_priv); acpi_video_register(); @@ -821,7 +704,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * We need to coordinate the hotplugs with the asynchronous fbdev * configuration, for which we use the fbdev->async_cookie. */ - if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) + if (HAS_DISPLAY(dev_priv)) drm_kms_helper_poll_init(dev); intel_power_domains_enable(dev_priv); @@ -961,9 +844,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i915->params.fake_lmem_start) { mkwrite_device_info(i915)->memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN; - mkwrite_device_info(i915)->is_dgfx = true; GEM_BUG_ON(!HAS_LMEM(i915)); - GEM_BUG_ON(!IS_DGFX(i915)); } } #endif @@ -988,7 +869,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_cleanup_mmio; - ret = i915_driver_modeset_probe_noirq(i915); + ret = intel_modeset_init_noirq(i915); if (ret < 0) goto out_cleanup_hw; @@ -996,10 +877,18 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_cleanup_modeset; - ret = i915_driver_modeset_probe(i915); - if (ret < 0) + ret = intel_modeset_init_nogem(i915); + if (ret) goto out_cleanup_irq; + ret = i915_gem_init(i915); + if (ret) + goto out_cleanup_modeset2; + + ret = intel_modeset_init(i915); + if (ret) + goto out_cleanup_gem; + i915_driver_register(i915); enable_rpm_wakeref_asserts(&i915->runtime_pm); @@ -1010,10 +899,20 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +out_cleanup_gem: + i915_gem_suspend(i915); + i915_gem_driver_remove(i915); + i915_gem_driver_release(i915); +out_cleanup_modeset2: + /* FIXME clean up the error path */ + intel_modeset_driver_remove(i915); + intel_irq_uninstall(i915); + intel_modeset_driver_remove_noirq(i915); + goto out_cleanup_modeset; out_cleanup_irq: intel_irq_uninstall(i915); out_cleanup_modeset: - i915_driver_modeset_remove_noirq(i915); + intel_modeset_driver_remove_nogem(i915); out_cleanup_hw: i915_driver_hw_remove(i915); intel_memory_regions_driver_release(i915); @@ -1045,7 +944,7 @@ void i915_driver_remove(struct drm_i915_private *i915) intel_gvt_driver_remove(i915); - i915_driver_modeset_remove(i915); + intel_modeset_driver_remove(i915); intel_irq_uninstall(i915); @@ -1054,7 +953,7 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_reset_error_state(i915); i915_gem_driver_remove(i915); - i915_driver_modeset_remove_noirq(i915); + intel_modeset_driver_remove_nogem(i915); i915_driver_hw_remove(i915); @@ -1139,6 +1038,35 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv) drm_modeset_unlock_all(dev); } +static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct intel_encoder *encoder; + + drm_modeset_lock_all(dev); + for_each_intel_encoder(dev, encoder) + if (encoder->shutdown) + encoder->shutdown(encoder); + drm_modeset_unlock_all(dev); +} + +void i915_driver_shutdown(struct drm_i915_private *i915) +{ + i915_gem_suspend(i915); + + drm_kms_helper_poll_disable(&i915->drm); + + drm_atomic_helper_shutdown(&i915->drm); + + intel_dp_mst_suspend(i915); + + intel_runtime_pm_disable_interrupts(i915); + intel_hpd_cancel_work(i915); + + intel_suspend_encoders(i915); + intel_shutdown_encoders(i915); +} + static bool suspend_to_idle(struct 
drm_i915_private *dev_priv) { #if IS_ENABLED(CONFIG_ACPI_SLEEP) @@ -1192,7 +1120,7 @@ static int i915_drm_suspend(struct drm_device *dev) i915_ggtt_suspend(&dev_priv->ggtt); - i915_save_state(dev_priv); + i915_save_display(dev_priv); opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; intel_opregion_suspend(dev_priv, opregion_target_state); @@ -1305,7 +1233,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_csr_ucode_resume(dev_priv); - i915_restore_state(dev_priv); + i915_restore_display(dev_priv); intel_pps_unlock_regs_wa(dev_priv); intel_init_pch_refclk(dev_priv); @@ -1328,26 +1256,15 @@ static int i915_drm_resume(struct drm_device *dev) intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); + intel_hpd_init(dev_priv); - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - + /* MST sideband requires HPD interrupts enabled */ intel_dp_mst_resume(dev_priv); - intel_display_resume(dev); + intel_hpd_poll_disable(dev_priv); drm_kms_helper_poll_enable(dev); - /* - * ... but also need to make sure that hotplug processing - * doesn't cause havoc. Like in the driver load code we don't - * bother with the tiny race here where we might lose hotplug - * notifications. - * */ - intel_hpd_init(dev_priv); - intel_opregion_resume(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); @@ -1659,7 +1576,7 @@ static int intel_runtime_suspend(struct device *kdev) assert_forcewakes_inactive(&dev_priv->uncore); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) - intel_hpd_poll_init(dev_priv); + intel_hpd_poll_enable(dev_priv); drm_dbg_kms(&dev_priv->drm, "Device suspended\n"); return 0; @@ -1704,8 +1621,10 @@ static int intel_runtime_resume(struct device *kdev) * power well, so hpd is reinitialized from there. For * everyone else do it here. */ - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { intel_hpd_init(dev_priv); + intel_hpd_poll_disable(dev_priv); + } intel_enable_ipc(dev_priv); @@ -1853,12 +1772,8 @@ static struct drm_driver driver = { .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, - .gem_close_object = i915_gem_close_object, - .gem_free_object_unlocked = i915_gem_free_object, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = i915_gem_prime_export, .gem_prime_import = i915_gem_prime_import, .dumb_create = i915_gem_dumb_create, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ab17084af0ff..d7765b31fbef 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -33,6 +33,8 @@ #include <uapi/drm/i915_drm.h> #include <uapi/drm/drm_fourcc.h> +#include <asm/hypervisor.h> + #include <linux/io-mapping.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> @@ -108,18 +110,11 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20200824" -#define DRIVER_TIMESTAMP 1598293597 +#define DRIVER_DATE "20201103" +#define DRIVER_TIMESTAMP 1604406085 struct drm_i915_gem_object; -/* - * The code assumes that the hpd_pins below have consecutive values and - * starting with HPD_PORT_A, the HPD pin associated with any port can be - * retrieved by adding the corresponding port (or phy) enum value to - * HPD_PORT_A in most cases. 
For example: - * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A - */ enum hpd_pin { HPD_NONE = 0, HPD_TV = HPD_NONE, /* TV is known to be unreliable */ @@ -131,10 +126,12 @@ enum hpd_pin { HPD_PORT_C, HPD_PORT_D, HPD_PORT_E, - HPD_PORT_F, - HPD_PORT_G, - HPD_PORT_H, - HPD_PORT_I, + HPD_PORT_TC1, + HPD_PORT_TC2, + HPD_PORT_TC3, + HPD_PORT_TC4, + HPD_PORT_TC5, + HPD_PORT_TC6, HPD_NUM_PINS }; @@ -537,13 +534,9 @@ struct intel_gmbus { struct i915_suspend_saved_registers { u32 saveDSPARB; - u32 saveFBC_CONTROL; - u32 saveCACHE_MODE_0; - u32 saveMI_ARB_STATE; u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF3[3]; - u32 savePCH_PORT_HOTPLUG; u16 saveGCDGMBUS; }; @@ -1020,8 +1013,6 @@ struct drm_i915_private { */ u8 active_pipes; - int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; - struct i915_wa_list gt_wa_list; struct i915_frontbuffer_tracking fb_tracking; @@ -1428,7 +1419,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE) #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) -#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) +#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \ + IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1) @@ -1569,15 +1561,44 @@ extern const struct i915_rev_steppings kbl_revids[]; #define EHL_REVID_A0 0x0 -#define IS_EHL_REVID(p, since, until) \ - (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until)) +#define IS_JSL_EHL_REVID(p, since, until) \ + (IS_JSL_EHL(p) && IS_REVID(p, since, until)) -#define TGL_REVID_A0 0x0 -#define TGL_REVID_B0 0x1 -#define TGL_REVID_C0 0x2 +enum { + TGL_REVID_A0, + TGL_REVID_B0, + TGL_REVID_B1, + TGL_REVID_C0, + TGL_REVID_D0, +}; -#define IS_TGL_REVID(p, since, until) \ - (IS_TIGERLAKE(p) && IS_REVID(p, since, until)) +extern const struct i915_rev_steppings tgl_uy_revids[]; +extern const struct i915_rev_steppings tgl_revids[]; + +static inline const struct i915_rev_steppings * +tgl_revids_get(struct drm_i915_private *dev_priv) +{ + if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) + return tgl_uy_revids; + else + return tgl_revids; +} + +#define IS_TGL_DISP_REVID(p, since, until) \ + (IS_TIGERLAKE(p) && \ + tgl_revids_get(p)->disp_stepping >= (since) && \ + tgl_revids_get(p)->disp_stepping <= (until)) + +#define IS_TGL_UY_GT_REVID(p, since, until) \ + ((IS_TGL_U(p) || IS_TGL_Y(p)) && \ + tgl_uy_revids->gt_stepping >= (since) && \ + tgl_uy_revids->gt_stepping <= (until)) + +#define IS_TGL_GT_REVID(p, since, until) \ + (IS_TIGERLAKE(p) && \ + !(IS_TGL_U(p) || IS_TGL_Y(p)) && \ + tgl_revids->gt_stepping >= (since) && \ + tgl_revids->gt_stepping <= (until)) #define RKL_REVID_A0 0x0 #define RKL_REVID_B0 0x1 @@ -1742,7 +1763,9 @@ static inline bool intel_vtd_active(void) if (intel_iommu_gfx_mapped) return true; #endif - return false; + + /* Running as a guest, we assume the host is enforcing VT'd */ + return !hypervisor_is_type(X86_HYPER_NATIVE); } static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) @@ -1761,6 +1784,7 @@ extern const struct dev_pm_ops i915_pm_ops; int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); void i915_driver_remove(struct drm_i915_private *i915); +void i915_driver_shutdown(struct drm_i915_private *i915); 
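/*
 * Usage sketch for the split GT/display stepping helpers added above;
 * needs_example_wa() is hypothetical, while the IS_TGL_*_REVID macros
 * and TGL_REVID_* values are the ones defined in this header.
 */
static inline bool needs_example_wa(struct drm_i915_private *i915)
{
	/*
	 * GT workarounds key off the GT stepping, display workarounds off
	 * the display stepping; TGL U/Y parts use their own revid table.
	 */
	return IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_B0) ||
	       IS_TGL_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0);
}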
int i915_resume_switcheroo(struct drm_i915_private *i915); int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); @@ -1931,8 +1955,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); int intel_engine_cmd_parser(struct intel_engine_cs *engine, struct i915_vma *batch, - u32 batch_offset, - u32 batch_length, + unsigned long batch_offset, + unsigned long batch_length, struct i915_vma *shadow, bool trampoline); #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 3e6cbb0d1150..cf6e47adfde6 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -311,6 +311,8 @@ static int compress_page(struct i915_vma_compress *c, if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) return -EIO; + + cond_resched(); } while (zstream->avail_in); /* Fallback to uncompressed if we increase size? */ @@ -397,6 +399,7 @@ static int compress_page(struct i915_vma_compress *c, if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; + cond_resched(); return 0; } @@ -1309,7 +1312,7 @@ capture_vma(struct intel_engine_capture_vma *next, } strcpy(c->name, name); - c->vma = i915_vma_get(vma); + c->vma = vma; /* reference held while active */ c->next = next; return c; @@ -1399,7 +1402,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, compress)); i915_active_release(&vma->active); - i915_vma_put(vma); capture = this->next; kfree(this); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index f113fe44572b..e0eb32bd9607 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -61,6 +61,8 @@ */ typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); +typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915, + enum hpd_pin pin); static const u32 hpd_ilk[HPD_NUM_PINS] = { [HPD_PORT_A] = DE_DP_A_HOTPLUG, @@ -71,7 +73,7 @@ static const u32 hpd_ivb[HPD_NUM_PINS] = { }; static const u32 hpd_bdw[HPD_NUM_PINS] = { - [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, + [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), }; static const u32 hpd_ibx[HPD_NUM_PINS] = { @@ -126,46 +128,37 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = { }; static const u32 hpd_bxt[HPD_NUM_PINS] = { - [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, - [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, - [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC, + [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), + [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B), + [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C), }; static const u32 hpd_gen11[HPD_NUM_PINS] = { - [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, - [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, - [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, - [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, -}; - -static const u32 hpd_gen12[HPD_NUM_PINS] = { - [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, - [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, - [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, - [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, - [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG, - [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG, + [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1), + [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | 
GEN11_TBT_HOTPLUG(HPD_PORT_TC2), + [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3), + [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4), + [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5), + [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6), }; static const u32 hpd_icp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), - [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), - [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1), - [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2), - [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3), - [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4), + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), + [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), + [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), + [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2), + [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3), + [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), + [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5), + [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6), }; -static const u32 hpd_tgp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), - [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), - [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C), - [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1), - [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2), - [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3), - [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4), - [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5), - [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6), +static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), + [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), + [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D), }; static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) @@ -181,9 +174,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) return; } - if (INTEL_GEN(dev_priv) >= 12) - hpd->hpd = hpd_gen12; - else if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 11) hpd->hpd = hpd_gen11; else if (IS_GEN9_LP(dev_priv)) hpd->hpd = hpd_bxt; @@ -194,12 +185,14 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) else hpd->hpd = hpd_ilk; - if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)) + if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) && + (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))) return; - if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv)) - hpd->pch_hpd = hpd_tgp; - else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv)) + if (HAS_PCH_DG1(dev_priv)) + hpd->pch_hpd = hpd_sde_dg1; + else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) || + HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv)) hpd->pch_hpd = hpd_icp; else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) hpd->pch_hpd = hpd_spt; @@ -356,17 +349,14 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) - return; - new_val = dev_priv->irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->irq_mask) { + if (new_val != dev_priv->irq_mask && + !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { dev_priv->irq_mask = new_val; I915_WRITE(DEIMR, dev_priv->irq_mask); 
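		/* Posting read flushes the DEIMR update to the hardware. */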
POSTING_READ(DEIMR); @@ -701,8 +691,12 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc) u32 g4x_get_vblank_counter(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; enum pipe pipe = to_intel_crtc(crtc)->pipe; + if (!vblank->max_vblank_count) + return 0; + return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } @@ -1049,34 +1043,18 @@ out: static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { - case HPD_PORT_C: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); - case HPD_PORT_D: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); - case HPD_PORT_E: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); - case HPD_PORT_F: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); - default: - return false; - } -} - -static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_D: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); - case HPD_PORT_E: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); - case HPD_PORT_F: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); - case HPD_PORT_G: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); - case HPD_PORT_H: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5); - case HPD_PORT_I: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6); + case HPD_PORT_TC1: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1); + case HPD_PORT_TC2: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2); + case HPD_PORT_TC3: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3); + case HPD_PORT_TC4: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4); + case HPD_PORT_TC5: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5); + case HPD_PORT_TC6: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6); default: return false; } @@ -1100,47 +1078,33 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A); + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A); case HPD_PORT_B: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B); + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B); case HPD_PORT_C: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C); - default: - return false; - } -} - -static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_C: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C); case HPD_PORT_D: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); - case HPD_PORT_E: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); - case HPD_PORT_F: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D); default: return false; } } -static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { - case HPD_PORT_D: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); - case HPD_PORT_E: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); - case HPD_PORT_F: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); - case HPD_PORT_G: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); - case HPD_PORT_H: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5); - case HPD_PORT_I: - return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6); + case HPD_PORT_TC1: + return val & 
ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1); + case HPD_PORT_TC2: + return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2); + case HPD_PORT_TC3: + return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3); + case HPD_PORT_TC4: + return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4); + case HPD_PORT_TC5: + return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5); + case HPD_PORT_TC6: + return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6); default: return false; } @@ -1243,6 +1207,43 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, } +static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, + const u32 hpd[HPD_NUM_PINS]) +{ + struct intel_encoder *encoder; + u32 enabled_irqs = 0; + + for_each_intel_encoder(&dev_priv->drm, encoder) + if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) + enabled_irqs |= hpd[encoder->hpd_pin]; + + return enabled_irqs; +} + +static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, + const u32 hpd[HPD_NUM_PINS]) +{ + struct intel_encoder *encoder; + u32 hotplug_irqs = 0; + + for_each_intel_encoder(&dev_priv->drm, encoder) + hotplug_irqs |= hpd[encoder->hpd_pin]; + + return hotplug_irqs; +} + +static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, + hotplug_enables_func hotplug_enables) +{ + struct intel_encoder *encoder; + u32 hotplug = 0; + + for_each_intel_encoder(&i915->drm, encoder) + hotplug |= hotplug_enables(i915, encoder->hpd_pin); + + return hotplug; +} + static void gmbus_irq_handler(struct drm_i915_private *dev_priv) { wake_up_all(&dev_priv->gmbus_wait_queue); @@ -1296,6 +1297,23 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, u32 crc4) {} #endif +static void flip_done_handler(struct drm_i915_private *i915, + enum pipe pipe) +{ + struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe); + struct drm_crtc_state *crtc_state = crtc->base.state; + struct drm_pending_vblank_event *e = crtc_state->event; + struct drm_device *dev = &i915->drm; + unsigned long irqflags; + + spin_lock_irqsave(&dev->event_lock, irqflags); + + crtc_state->event = NULL; + + drm_crtc_send_vblank_event(&crtc->base, e); + + spin_unlock_irqrestore(&dev->event_lock, irqflags); +} static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -1891,30 +1909,9 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - u32 ddi_hotplug_trigger, tc_hotplug_trigger; + u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; + u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; u32 pin_mask = 0, long_mask = 0; - bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val); - - if (HAS_PCH_TGP(dev_priv)) { - ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; - tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; - tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect; - } else if (HAS_PCH_JSP(dev_priv)) { - ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; - tc_hotplug_trigger = 0; - } else if (HAS_PCH_MCC(dev_priv)) { - ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; - tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1); - tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; - } else { - drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv), - "Unrecognized PCH type 0x%x\n", - INTEL_PCH_TYPE(dev_priv)); - - ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; - tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; - tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; - } 
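	/*
	 * The hotplug tables are now indexed by hpd_pin rather than by
	 * port, so a single icp_tc_port_hotplug_long_detect() covers ICP,
	 * MCC, JSP and TGP alike and the per-PCH selection above is gone.
	 */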
if (ddi_hotplug_trigger) { u32 dig_hotplug_reg; @@ -1937,7 +1934,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, dev_priv->hotplug.pch_hpd, - tc_port_hotplug_long_detect); + icp_tc_port_hotplug_long_detect); } if (pin_mask) @@ -2185,12 +2182,6 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) u32 pin_mask = 0, long_mask = 0; u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; - long_pulse_detect_func long_pulse_detect; - - if (INTEL_GEN(dev_priv) >= 12) - long_pulse_detect = gen12_port_hotplug_long_detect; - else - long_pulse_detect = gen11_port_hotplug_long_detect; if (trigger_tc) { u32 dig_hotplug_reg; @@ -2201,7 +2192,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, dev_priv->hotplug.hpd, - long_pulse_detect); + gen11_port_hotplug_long_detect); } if (trigger_tbt) { @@ -2213,7 +2204,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, dev_priv->hotplug.hpd, - long_pulse_detect); + gen11_port_hotplug_long_detect); } if (pin_mask) @@ -2298,6 +2289,63 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); } +static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, + u32 te_trigger) +{ + enum pipe pipe = INVALID_PIPE; + enum transcoder dsi_trans; + enum port port; + u32 val, tmp; + + /* + * Incase of dual link, TE comes from DSI_1 + * this is to check if dual link is enabled + */ + val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); + val &= PORT_SYNC_MODE_ENABLE; + + /* + * if dual link is enabled, then read DSI_0 + * transcoder registers + */ + port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? + PORT_A : PORT_B; + dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1; + + /* Check if DSI configured in command mode */ + val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)); + val = val & OP_MODE_MASK; + + if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { + drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n"); + return; + } + + /* Get PIPE for handling VBLANK event */ + val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + switch (val & TRANS_DDI_EDP_INPUT_MASK) { + case TRANS_DDI_EDP_INPUT_A_ON: + pipe = PIPE_A; + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + pipe = PIPE_B; + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + pipe = PIPE_C; + break; + default: + drm_err(&dev_priv->drm, "Invalid PIPE\n"); + return; + } + + intel_handle_vblank(dev_priv, pipe); + + /* clear TE in dsi IIR */ + port = (te_trigger & DSI1_TE) ? 
PORT_B : PORT_A; + tmp = I915_READ(DSI_INTR_IDENT_REG(port)); + I915_WRITE(DSI_INTR_IDENT_REG(port), tmp); +} + static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { @@ -2332,7 +2380,6 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (master_ctl & GEN8_DE_PORT_IRQ) { iir = I915_READ(GEN8_DE_PORT_IIR); if (iir) { - u32 tmp_mask; bool found = false; I915_WRITE(GEN8_DE_PORT_IIR, iir); @@ -2344,15 +2391,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) } if (IS_GEN9_LP(dev_priv)) { - tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; - if (tmp_mask) { - bxt_hpd_irq_handler(dev_priv, tmp_mask); + u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; + + if (hotplug_trigger) { + bxt_hpd_irq_handler(dev_priv, hotplug_trigger); found = true; } } else if (IS_BROADWELL(dev_priv)) { - tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; - if (tmp_mask) { - ilk_hpd_irq_handler(dev_priv, tmp_mask); + u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; + + if (hotplug_trigger) { + ilk_hpd_irq_handler(dev_priv, hotplug_trigger); found = true; } } @@ -2362,6 +2411,15 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) found = true; } + if (INTEL_GEN(dev_priv) >= 11) { + u32 te_trigger = iir & (DSI0_TE | DSI1_TE); + + if (te_trigger) { + gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); + found = true; + } + } + if (!found) drm_err(&dev_priv->drm, "Unexpected DE Port interrupt\n"); @@ -2390,6 +2448,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (iir & GEN8_PIPE_VBLANK) intel_handle_vblank(dev_priv, pipe); + if (iir & GEN9_PIPE_PLANE1_FLIP_DONE) + flip_done_handler(dev_priv, pipe); + if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); @@ -2692,12 +2753,47 @@ int ilk_enable_vblank(struct drm_crtc *crtc) return 0; } +static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, + bool enable) +{ + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + enum port port; + u32 tmp; + + if (!(intel_crtc->mode_flags & + (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) + return false; + + /* for dual link cases we consider TE from slave */ + if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) + port = PORT_B; + else + port = PORT_A; + + tmp = I915_READ(DSI_INTR_MASK_REG(port)); + if (enable) + tmp &= ~DSI_TE_EVENT; + else + tmp |= DSI_TE_EVENT; + + I915_WRITE(DSI_INTR_MASK_REG(port), tmp); + + tmp = I915_READ(DSI_INTR_IDENT_REG(port)); + I915_WRITE(DSI_INTR_IDENT_REG(port), tmp); + + return true; +} + int bdw_enable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; unsigned long irqflags; + if (gen11_dsi_configure_te(intel_crtc, true)) + return 0; + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -2711,6 +2807,19 @@ int bdw_enable_vblank(struct drm_crtc *crtc) return 0; } +void skl_enable_flip_done(struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&i915->irq_lock, irqflags); + + bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE); + + spin_unlock_irqrestore(&i915->irq_lock, irqflags); +} + /* Called from drm generic 
code, passed 'crtc' which * we use as a pipe index */ @@ -2763,14 +2872,31 @@ void ilk_disable_vblank(struct drm_crtc *crtc) void bdw_disable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); - enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; unsigned long irqflags; + if (gen11_dsi_configure_te(intel_crtc, false)) + return; + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } +void skl_disable_flip_done(struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + unsigned long irqflags; + + spin_lock_irqsave(&i915->irq_lock, irqflags); + + bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE); + + spin_unlock_irqrestore(&i915->irq_lock, irqflags); +} + static void ibx_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -2784,24 +2910,6 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) I915_WRITE(SERR_INT, 0xffffffff); } -/* - * SDEIER is also touched by the interrupt handler to work around missed PCH - * interrupts. Hence we can't update it after the interrupt handler is enabled - - * instead we unconditionally enable all PCH interrupt sources here, but then - * only unmask them as needed with SDEIMR. - * - * This function needs to be called before interrupts are enabled. - */ -static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_NOP(dev_priv)) - return; - - drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0); - I915_WRITE(SDEIER, 0xffffffff); - POSTING_READ(SDEIER); -} - static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -2858,6 +2966,8 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; GEN3_IRQ_RESET(uncore, DE); + dev_priv->irq_mask = ~0u; + if (IS_GEN(dev_priv, 7)) intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); @@ -2981,6 +3091,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; enum pipe pipe; + if (INTEL_GEN(dev_priv) >= 9) + extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE; + spin_lock_irq(&dev_priv->irq_lock); if (!intel_irqs_enabled(dev_priv)) { @@ -3035,17 +3148,29 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, - const u32 hpd[HPD_NUM_PINS]) +static u32 ibx_hotplug_enables(struct drm_i915_private *i915, + enum hpd_pin pin) { - struct intel_encoder *encoder; - u32 enabled_irqs = 0; - - for_each_intel_encoder(&dev_priv->drm, encoder) - if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) - enabled_irqs |= hpd[encoder->hpd_pin]; - - return enabled_irqs; + switch (pin) { + case HPD_PORT_A: + /* + * When CPU and PCH are on the same package, port A + * HPD must be enabled in both north and south. + */ + return HAS_PCH_LPT_LP(i915) ? 
+ PORTA_HOTPLUG_ENABLE : 0; + case HPD_PORT_B: + return PORTB_HOTPLUG_ENABLE | + PORTB_PULSE_DURATION_2ms; + case HPD_PORT_C: + return PORTC_HOTPLUG_ENABLE | + PORTC_PULSE_DURATION_2ms; + case HPD_PORT_D: + return PORTD_HOTPLUG_ENABLE | + PORTD_PULSE_DURATION_2ms; + default: + return 0; + } } static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -3058,18 +3183,14 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) * The pulse duration bits are reserved on LPT+. */ hotplug = I915_READ(PCH_PORT_HOTPLUG); - hotplug &= ~(PORTB_PULSE_DURATION_MASK | + hotplug &= ~(PORTA_HOTPLUG_ENABLE | + PORTB_HOTPLUG_ENABLE | + PORTC_HOTPLUG_ENABLE | + PORTD_HOTPLUG_ENABLE | + PORTB_PULSE_DURATION_MASK | PORTC_PULSE_DURATION_MASK | PORTD_PULSE_DURATION_MASK); - hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; - hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; - hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; - /* - * When CPU and PCH are on the same package, port A - * HPD must be enabled in both north and south. - */ - if (HAS_PCH_LPT_LP(dev_priv)) - hotplug |= PORTA_HOTPLUG_ENABLE; + hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables); I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } @@ -3077,91 +3198,145 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - if (HAS_PCH_IBX(dev_priv)) - hotplug_irqs = SDE_HOTPLUG_MASK; - else - hotplug_irqs = SDE_HOTPLUG_MASK_CPT; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); ibx_hpd_detection_setup(dev_priv); } -static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, - u32 ddi_hotplug_enable_mask, - u32 tc_hotplug_enable_mask) +static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, + enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_A: + case HPD_PORT_B: + case HPD_PORT_C: + case HPD_PORT_D: + return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); + default: + return 0; + } +} + +static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, + enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_TC1: + case HPD_PORT_TC2: + case HPD_PORT_TC3: + case HPD_PORT_TC4: + case HPD_PORT_TC5: + case HPD_PORT_TC6: + return ICP_TC_HPD_ENABLE(pin); + default: + return 0; + } +} + +static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; hotplug = I915_READ(SHOTPLUG_CTL_DDI); - hotplug |= ddi_hotplug_enable_mask; + hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | + SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | + SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | + SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D)); + hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables); I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); +} - if (tc_hotplug_enable_mask) { - hotplug = I915_READ(SHOTPLUG_CTL_TC); - hotplug |= tc_hotplug_enable_mask; - I915_WRITE(SHOTPLUG_CTL_TC, hotplug); - } +static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug; + + hotplug = I915_READ(SHOTPLUG_CTL_TC); + hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | + ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | + ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | + ICP_TC_HPD_ENABLE(HPD_PORT_TC4) | + ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | + ICP_TC_HPD_ENABLE(HPD_PORT_TC6)); + hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables); + I915_WRITE(SHOTPLUG_CTL_TC, 
hotplug); } -static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, - u32 sde_ddi_mask, u32 sde_tc_mask, - u32 ddi_enable_mask, u32 tc_enable_mask) +static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - hotplug_irqs = sde_ddi_mask | sde_tc_mask; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask); + icp_ddi_hpd_detection_setup(dev_priv); + icp_tc_hpd_detection_setup(dev_priv); } -/* - * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the - * equivalent of SDE. - */ -static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) +static u32 gen11_hotplug_enables(struct drm_i915_private *i915, + enum hpd_pin pin) { - icp_hpd_irq_setup(dev_priv, - SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1), - ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1)); + switch (pin) { + case HPD_PORT_TC1: + case HPD_PORT_TC2: + case HPD_PORT_TC3: + case HPD_PORT_TC4: + case HPD_PORT_TC5: + case HPD_PORT_TC6: + return GEN11_HOTPLUG_CTL_ENABLE(pin); + default: + return 0; + } } -/* - * JSP behaves exactly the same as MCC above except that port C is mapped to - * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's - * masks & tables rather than ICP's masks & tables. - */ -static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) { - icp_hpd_irq_setup(dev_priv, - SDE_DDI_MASK_TGP, 0, - TGP_DDI_HPD_ENABLE_MASK, 0); + u32 val; + + val = I915_READ(SOUTH_CHICKEN1); + val |= (INVERT_DDIA_HPD | + INVERT_DDIB_HPD | + INVERT_DDIC_HPD | + INVERT_DDID_HPD); + I915_WRITE(SOUTH_CHICKEN1, val); + + icp_hpd_irq_setup(dev_priv); } -static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); - hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); + hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); + hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); +} + +static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug; hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); - hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | - GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); + hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | + GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); + hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); } @@ -3171,7 +3346,7 @@ static void gen11_hpd_irq_setup(struct 
drm_i915_private *dev_priv) u32 val; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); val = I915_READ(GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; @@ -3179,14 +3354,39 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_DE_HPD_IMR, val); POSTING_READ(GEN11_DE_HPD_IMR); - gen11_hpd_detection_setup(dev_priv); + gen11_tc_hpd_detection_setup(dev_priv); + gen11_tbt_hpd_detection_setup(dev_priv); + + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + icp_hpd_irq_setup(dev_priv); +} + +static u32 spt_hotplug_enables(struct drm_i915_private *i915, + enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_A: + return PORTA_HOTPLUG_ENABLE; + case HPD_PORT_B: + return PORTB_HOTPLUG_ENABLE; + case HPD_PORT_C: + return PORTC_HOTPLUG_ENABLE; + case HPD_PORT_D: + return PORTD_HOTPLUG_ENABLE; + default: + return 0; + } +} - if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) - icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP, - TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK); - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP, - ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK); +static u32 spt_hotplug2_enables(struct drm_i915_private *i915, + enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_E: + return PORTE_HOTPLUG_ENABLE; + default: + return 0; + } } static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -3203,14 +3403,16 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) /* Enable digital hotplug on the PCH */ hotplug = I915_READ(PCH_PORT_HOTPLUG); - hotplug |= PORTA_HOTPLUG_ENABLE | - PORTB_HOTPLUG_ENABLE | - PORTC_HOTPLUG_ENABLE | - PORTD_HOTPLUG_ENABLE; + hotplug &= ~(PORTA_HOTPLUG_ENABLE | + PORTB_HOTPLUG_ENABLE | + PORTC_HOTPLUG_ENABLE | + PORTD_HOTPLUG_ENABLE); + hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables); I915_WRITE(PCH_PORT_HOTPLUG, hotplug); hotplug = I915_READ(PCH_PORT_HOTPLUG2); - hotplug |= PORTE_HOTPLUG_ENABLE; + hotplug &= ~PORTE_HOTPLUG_ENABLE; + hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables); I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); } @@ -3221,14 +3423,26 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - hotplug_irqs = SDE_HOTPLUG_MASK_SPT; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); spt_hpd_detection_setup(dev_priv); } +static u32 ilk_hotplug_enables(struct drm_i915_private *i915, + enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_A: + return DIGITAL_PORTA_HOTPLUG_ENABLE | + DIGITAL_PORTA_PULSE_DURATION_2ms; + default: + return 0; + } +} + static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; @@ -3239,9 +3453,9 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) * The pulse duration bits are reserved on HSW+. 
*/ hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); - hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; - hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | - DIGITAL_PORTA_PULSE_DURATION_2ms; + hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE | + DIGITAL_PORTA_PULSE_DURATION_MASK); + hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables); I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); } @@ -3249,63 +3463,58 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - if (INTEL_GEN(dev_priv) >= 8) { - hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + if (INTEL_GEN(dev_priv) >= 8) bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - } else if (INTEL_GEN(dev_priv) >= 7) { - hotplug_irqs = DE_DP_A_HOTPLUG_IVB; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - + else ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); - } else { - hotplug_irqs = DE_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - - ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); - } ilk_hpd_detection_setup(dev_priv); ibx_hpd_irq_setup(dev_priv); } -static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, - u32 enabled_irqs) +static u32 bxt_hotplug_enables(struct drm_i915_private *i915, + enum hpd_pin pin) { u32 hotplug; - hotplug = I915_READ(PCH_PORT_HOTPLUG); - hotplug |= PORTA_HOTPLUG_ENABLE | - PORTB_HOTPLUG_ENABLE | - PORTC_HOTPLUG_ENABLE; - - drm_dbg_kms(&dev_priv->drm, - "Invert bit setting: hp_ctl:%x hp_port:%x\n", - hotplug, enabled_irqs); - hotplug &= ~BXT_DDI_HPD_INVERT_MASK; - - /* - * For BXT invert bit has to be set based on AOB design - * for HPD detection logic, update it based on VBT fields. 
- */ - if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && - intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) - hotplug |= BXT_DDIA_HPD_INVERT; - if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && - intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) - hotplug |= BXT_DDIB_HPD_INVERT; - if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && - intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) - hotplug |= BXT_DDIC_HPD_INVERT; - - I915_WRITE(PCH_PORT_HOTPLUG, hotplug); + switch (pin) { + case HPD_PORT_A: + hotplug = PORTA_HOTPLUG_ENABLE; + if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) + hotplug |= BXT_DDIA_HPD_INVERT; + return hotplug; + case HPD_PORT_B: + hotplug = PORTB_HOTPLUG_ENABLE; + if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) + hotplug |= BXT_DDIB_HPD_INVERT; + return hotplug; + case HPD_PORT_C: + hotplug = PORTC_HOTPLUG_ENABLE; + if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) + hotplug |= BXT_DDIC_HPD_INVERT; + return hotplug; + default: + return 0; + } } static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) { - __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); + u32 hotplug; + + hotplug = I915_READ(PCH_PORT_HOTPLUG); + hotplug &= ~(PORTA_HOTPLUG_ENABLE | + PORTB_HOTPLUG_ENABLE | + PORTC_HOTPLUG_ENABLE | + BXT_DDIA_HPD_INVERT | + BXT_DDIB_HPD_INVERT | + BXT_DDIC_HPD_INVERT); + hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables); + I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3313,15 +3522,27 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - __bxt_hpd_detection_setup(dev_priv, enabled_irqs); + bxt_hpd_detection_setup(dev_priv); } +/* + * SDEIER is also touched by the interrupt handler to work around missed PCH + * interrupts. Hence we can't update it after the interrupt handler is enabled - + * instead we unconditionally enable all PCH interrupt sources here, but then + * only unmask them as needed with SDEIMR. + * + * Note that we currently do this after installing the interrupt handler, + * but before we enable the master interrupt. That should be sufficient + * to avoid races with the irq handler, assuming we have MSI. Shared legacy + * interrupts could still race. 
+ */ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; u32 mask; if (HAS_PCH_NOP(dev_priv)) @@ -3334,14 +3555,7 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) else mask = SDE_GMBUS_CPT; - gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); - I915_WRITE(SDEIMR, ~mask); - - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || - HAS_PCH_LPT(dev_priv)) - ibx_hpd_detection_setup(dev_priv); - else - spt_hpd_detection_setup(dev_priv); + GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); } static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) @@ -3359,7 +3573,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | DE_POISON); - extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | + extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | DE_DP_A_HOTPLUG); } @@ -3369,29 +3583,17 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) display_mask |= DE_EDP_PSR_INT_HSW; } - dev_priv->irq_mask = ~display_mask; + if (IS_IRONLAKE_M(dev_priv)) + extra_mask |= DE_PCU_EVENT; - ibx_irq_pre_postinstall(dev_priv); + dev_priv->irq_mask = ~display_mask; - GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, - display_mask | extra_mask); + ibx_irq_postinstall(dev_priv); gen5_gt_irq_postinstall(&dev_priv->gt); - ilk_hpd_detection_setup(dev_priv); - - ibx_irq_postinstall(dev_priv); - - if (IS_IRONLAKE_M(dev_priv)) { - /* Enable PCU event interrupts - * - * spinlocking not required here for correctness since interrupt - * setup is guaranteed to run in single-threaded context. But we - * need it to make the assert_spin_locked happy. 
*/ - spin_lock_irq(&dev_priv->irq_lock); - ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); - spin_unlock_irq(&dev_priv->irq_lock); - } + GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, + display_mask | extra_mask); } void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) @@ -3456,14 +3658,24 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (IS_GEN9_LP(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS; + if (INTEL_GEN(dev_priv) >= 11) { + enum port port; + + if (intel_bios_is_dsi_present(dev_priv, &port)) + de_port_masked |= DSI0_TE | DSI1_TE; + } + de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; + if (INTEL_GEN(dev_priv) >= 9) + de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE; + de_port_enables = de_port_masked; if (IS_GEN9_LP(dev_priv)) de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; else if (IS_BROADWELL(dev_priv)) - de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; + de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; if (INTEL_GEN(dev_priv) >= 12) { enum transcoder trans; @@ -3501,50 +3713,26 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); - gen11_hpd_detection_setup(dev_priv); - } else if (IS_GEN9_LP(dev_priv)) { - bxt_hpd_detection_setup(dev_priv); - } else if (IS_BROADWELL(dev_priv)) { - ilk_hpd_detection_setup(dev_priv); } } static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) { if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_pre_postinstall(dev_priv); + ibx_irq_postinstall(dev_priv); gen8_gt_irq_postinstall(&dev_priv->gt); gen8_de_irq_postinstall(dev_priv); - if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_postinstall(dev_priv); - gen8_master_intr_enable(dev_priv->uncore.regs); } static void icp_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; u32 mask = SDE_GMBUS_ICP; - drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0); - I915_WRITE(SDEIER, 0xffffffff); - POSTING_READ(SDEIER); - - gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); - I915_WRITE(SDEIMR, ~mask); - - if (HAS_PCH_TGP(dev_priv)) - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, - TGP_TC_HPD_ENABLE_MASK); - else if (HAS_PCH_JSP(dev_priv)) - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); - else if (HAS_PCH_MCC(dev_priv)) - icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, - ICP_TC_HPD_ENABLE(PORT_TC1)); - else - icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, - ICP_TC_HPD_ENABLE_MASK); + GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); } static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) @@ -3591,6 +3779,7 @@ static void i8xx_irq_reset(struct drm_i915_private *dev_priv) i9xx_pipestat_irq_reset(dev_priv); GEN2_IRQ_RESET(uncore); + dev_priv->irq_mask = ~0u; } static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) @@ -3760,6 +3949,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv) i9xx_pipestat_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN2_); + dev_priv->irq_mask = ~0u; } static void i915_irq_postinstall(struct drm_i915_private *dev_priv) @@ -3866,6 +4056,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) i9xx_pipestat_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN2_); + dev_priv->irq_mask = ~0u; } static void i965_irq_postinstall(struct drm_i915_private *dev_priv) @@ -4050,10 +4241,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (I915_HAS_HOTPLUG(dev_priv)) dev_priv->display.hpd_irq_setup = 
i915_hpd_irq_setup; } else { - if (HAS_PCH_JSP(dev_priv)) - dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup; - else if (HAS_PCH_MCC(dev_priv)) - dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup; + if (HAS_PCH_DG1(dev_priv)) + dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup; else if (INTEL_GEN(dev_priv) >= 11) dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; else if (IS_GEN9_LP(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 25f25cd95818..2efe609519ca 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -118,6 +118,9 @@ void i965_disable_vblank(struct drm_crtc *crtc); void ilk_disable_vblank(struct drm_crtc *crtc); void bdw_disable_vblank(struct drm_crtc *crtc); +void skl_enable_flip_done(struct intel_crtc *crtc); +void skl_disable_flip_done(struct intel_crtc *crtc); + void gen2_irq_reset(struct intel_uncore *uncore); void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, i915_reg_t iir, i915_reg_t ier); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 366ddfc8df6b..1fe390727d80 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -389,6 +389,7 @@ static const struct intel_device_info ilk_m_info = { GEN5_FEATURES, PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, + .has_rps = true, .display.has_fbc = 1, }; @@ -846,6 +847,14 @@ static const struct intel_device_info ehl_info = { .ppgtt_size = 36, }; +static const struct intel_device_info jsl_info = { + GEN11_FEATURES, + PLATFORM(INTEL_JASPERLAKE), + .require_force_probe = 1, + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .ppgtt_size = 36, +}; + #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ @@ -900,6 +909,8 @@ static const struct intel_device_info rkl_info = { GEN12_FEATURES, \ .memory_regions = REGION_SMEM | REGION_LMEM, \ .has_master_unit_irq = 1, \ + .has_llc = 0, \ + .has_snoop = 1, \ .is_dgfx = 1 static const struct intel_device_info dg1_info __maybe_unused = { @@ -910,6 +921,8 @@ static const struct intel_device_info dg1_info __maybe_unused = { .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), + /* Wa_16011227922 */ + .ppgtt_size = 47, }; #undef GEN @@ -985,6 +998,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_CNL_IDS(&cnl_info), INTEL_ICL_11_IDS(&icl_info), INTEL_EHL_IDS(&ehl_info), + INTEL_JSL_IDS(&jsl_info), INTEL_TGL_12_IDS(&tgl_info), INTEL_RKL_IDS(&rkl_info), {0, 0, 0} @@ -1090,11 +1104,19 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } +static void i915_pci_shutdown(struct pci_dev *pdev) +{ + struct drm_i915_private *i915 = pci_get_drvdata(pdev); + + i915_driver_shutdown(i915); +} + static struct pci_driver i915_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = i915_pci_probe, .remove = i915_pci_remove, + .shutdown = i915_pci_shutdown, .driver.pm = &i915_pm_ops, }; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ac691927e29d..bb0656875697 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -242,7 +242,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) -#define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c)) +#define _MMIO_PLL3(pll, ...) 
_MMIO(_PICK(pll, __VA_ARGS__)) + /* * Device info offset array based helpers for groups of registers with unevenly @@ -1382,7 +1383,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define DPIO_CMNRST (1 << 0) #define DPIO_PHY(pipe) ((pipe) >> 1) -#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy]) /* * Per pipe/PLL DPIO regs @@ -2528,6 +2528,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_PSMI_CTL(base) _MMIO((base) + 0x50) #define RING_MAX_IDLE(base) _MMIO((base) + 0x54) #define RING_HWS_PGA(base) _MMIO((base) + 0x80) +#define RING_ID(base) _MMIO((base) + 0x8c) #define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080) #define RING_RESET_CTL(base) _MMIO((base) + 0xd0) #define RESET_CTL_CAT_ERROR REG_BIT(2) @@ -4147,6 +4148,7 @@ enum { #define GEN9_CLKGATE_DIS_3 _MMIO(0x46538) #define TGL_VRH_GATING_DIS REG_BIT(31) +#define DPT_GATING_DIS REG_BIT(22) #define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C) #define BXT_GMBUS_GATING_DIS (1 << 14) @@ -4619,6 +4621,110 @@ enum { #define PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(2) #define PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(1) +/* Icelake DSC Rate Control Range Parameter Registers */ +#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) +#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) +#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) +#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) +#define RC_BPG_OFFSET_SHIFT 10 +#define RC_MAX_QP_SHIFT 5 +#define RC_MIN_QP_SHIFT 0 + +#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) +#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) +#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) +#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + 
_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) +#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4) +#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) +#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) +#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) +#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) +#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) + /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) @@ -6328,15 +6434,16 @@ enum { _MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))) /* define the Watermark register on Ironlake */ -#define WM0_PIPEA_ILK _MMIO(0x45100) +#define _WM0_PIPEA_ILK 0x45100 +#define _WM0_PIPEB_ILK 0x45104 +#define _WM0_PIPEC_IVB 0x45200 +#define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \ + _WM0_PIPEB_ILK, _WM0_PIPEC_IVB) #define WM0_PIPE_PLANE_MASK 
(0xffff << 16) #define WM0_PIPE_PLANE_SHIFT 16 #define WM0_PIPE_SPRITE_MASK (0xff << 8) #define WM0_PIPE_SPRITE_SHIFT 8 #define WM0_PIPE_CURSOR_MASK (0xff) - -#define WM0_PIPEB_ILK _MMIO(0x45104) -#define WM0_PIPEC_IVB _MMIO(0x45200) #define WM1_LP_ILK _MMIO(0x45108) #define WM1_LP_SR_EN (1 << 31) #define WM1_LP_LATENCY_SHIFT 24 @@ -6924,6 +7031,7 @@ enum { #define PLANE_CTL_TILED_X (1 << 10) #define PLANE_CTL_TILED_Y (4 << 10) #define PLANE_CTL_TILED_YF (5 << 10) +#define PLANE_CTL_ASYNC_FLIP (1 << 9) #define PLANE_CTL_FLIP_HORIZONTAL (1 << 8) #define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */ #define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */ @@ -7380,6 +7488,7 @@ enum { #define PS_PLANE_SEL(plane) (((plane) + 1) << 25) #define PS_FILTER_MASK (3 << 23) #define PS_FILTER_MEDIUM (0 << 23) +#define PS_FILTER_PROGRAMMED (1 << 23) #define PS_FILTER_EDGE_ENHANCE (2 << 23) #define PS_FILTER_BILINEAR (3 << 23) #define PS_VERT3TAP (1 << 21) @@ -7394,6 +7503,10 @@ enum { #define PS_VADAPT_MODE_MOST_ADAPT (3 << 5) #define PS_PLANE_Y_SEL_MASK (7 << 5) #define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5) +#define PS_Y_VERT_FILTER_SELECT(set) ((set) << 4) +#define PS_Y_HORZ_FILTER_SELECT(set) ((set) << 3) +#define PS_UV_VERT_FILTER_SELECT(set) ((set) << 2) +#define PS_UV_HORZ_FILTER_SELECT(set) ((set) << 1) #define _PS_PWR_GATE_1A 0x68160 #define _PS_PWR_GATE_2A 0x68260 @@ -7456,6 +7569,17 @@ enum { #define _PS_ECC_STAT_2B 0x68AD0 #define _PS_ECC_STAT_1C 0x691D0 +#define _PS_COEF_SET0_INDEX_1A 0x68198 +#define _PS_COEF_SET0_INDEX_2A 0x68298 +#define _PS_COEF_SET0_INDEX_1B 0x68998 +#define _PS_COEF_SET0_INDEX_2B 0x68A98 +#define PS_COEE_INDEX_AUTO_INC (1 << 10) + +#define _PS_COEF_SET0_DATA_1A 0x6819C +#define _PS_COEF_SET0_DATA_2A 0x6829C +#define _PS_COEF_SET0_DATA_1B 0x6899C +#define _PS_COEF_SET0_DATA_2B 0x68A9C + #define _ID(id, a, b) _PICK_EVEN(id, a, b) #define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ @@ -7484,7 +7608,13 @@ enum { #define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \ _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B)) +#define CNL_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \ + _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \ + _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8) +#define CNL_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \ + _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \ + _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8) /* legacy palette */ #define _LGC_PALETTE_A 0x4a000 #define _LGC_PALETTE_B 0x4a800 @@ -7533,6 +7663,7 @@ enum { #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038) #define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084) #define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088) +#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154) #define DMC_DEBUG3 _MMIO(0x101090) @@ -7682,6 +7813,9 @@ enum { (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \ GEN11_PIPE_PLANE5_FAULT) +#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A) +#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1) + #define GEN8_DE_PORT_ISR _MMIO(0x44440) #define GEN8_DE_PORT_IMR _MMIO(0x44444) #define GEN8_DE_PORT_IIR _MMIO(0x44448) @@ -7695,13 +7829,11 @@ enum { #define GEN9_AUX_CHANNEL_B (1 << 25) #define DSI1_TE (1 << 24) #define DSI0_TE (1 << 23) -#define BXT_DE_PORT_HP_DDIC (1 << 5) -#define BXT_DE_PORT_HP_DDIB (1 << 4) -#define BXT_DE_PORT_HP_DDIA (1 << 3) -#define BXT_DE_PORT_HOTPLUG_MASK (BXT_DE_PORT_HP_DDIA | \ - BXT_DE_PORT_HP_DDIB | \ - 
BXT_DE_PORT_HP_DDIC) -#define GEN8_PORT_DP_A_HOTPLUG (1 << 3) +#define GEN8_DE_PORT_HOTPLUG(hpd_pin) REG_BIT(3 + _HPD_PIN_DDI(hpd_pin)) +#define BXT_DE_PORT_HOTPLUG_MASK (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \ + GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \ + GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)) +#define BDW_DE_PORT_HOTPLUG_MASK GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) #define BXT_DE_PORT_GMBUS (1 << 1) #define GEN8_AUX_CHANNEL_A (1 << 0) #define TGL_DE_PORT_AUX_USBC6 (1 << 13) @@ -7760,39 +7892,27 @@ enum { #define GEN11_DE_HPD_IMR _MMIO(0x44474) #define GEN11_DE_HPD_IIR _MMIO(0x44478) #define GEN11_DE_HPD_IER _MMIO(0x4447c) -#define GEN12_TC6_HOTPLUG (1 << 21) -#define GEN12_TC5_HOTPLUG (1 << 20) -#define GEN11_TC4_HOTPLUG (1 << 19) -#define GEN11_TC3_HOTPLUG (1 << 18) -#define GEN11_TC2_HOTPLUG (1 << 17) -#define GEN11_TC1_HOTPLUG (1 << 16) -#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16)) -#define GEN11_DE_TC_HOTPLUG_MASK (GEN12_TC6_HOTPLUG | \ - GEN12_TC5_HOTPLUG | \ - GEN11_TC4_HOTPLUG | \ - GEN11_TC3_HOTPLUG | \ - GEN11_TC2_HOTPLUG | \ - GEN11_TC1_HOTPLUG) -#define GEN12_TBT6_HOTPLUG (1 << 5) -#define GEN12_TBT5_HOTPLUG (1 << 4) -#define GEN11_TBT4_HOTPLUG (1 << 3) -#define GEN11_TBT3_HOTPLUG (1 << 2) -#define GEN11_TBT2_HOTPLUG (1 << 1) -#define GEN11_TBT1_HOTPLUG (1 << 0) -#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port)) -#define GEN11_DE_TBT_HOTPLUG_MASK (GEN12_TBT6_HOTPLUG | \ - GEN12_TBT5_HOTPLUG | \ - GEN11_TBT4_HOTPLUG | \ - GEN11_TBT3_HOTPLUG | \ - GEN11_TBT2_HOTPLUG | \ - GEN11_TBT1_HOTPLUG) +#define GEN11_TC_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin)) +#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \ + GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \ + GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \ + GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \ + GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \ + GEN11_TC_HOTPLUG(HPD_PORT_TC1)) +#define GEN11_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin)) +#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \ + GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \ + GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \ + GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \ + GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \ + GEN11_TBT_HOTPLUG(HPD_PORT_TC1)) #define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030) #define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038) -#define GEN11_HOTPLUG_CTL_ENABLE(tc_port) (8 << (tc_port) * 4) -#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port) (2 << (tc_port) * 4) -#define GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) -#define GEN11_HOTPLUG_CTL_NO_DETECT(tc_port) (0 << (tc_port) * 4) +#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4)) +#define GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4)) +#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4)) +#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4)) #define GEN11_GT_INTR_DW0 _MMIO(0x190018) #define GEN11_CSME (31) @@ -7878,6 +7998,7 @@ enum { # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) #define CHICKEN_PAR1_1 _MMIO(0x42080) +#define KBL_ARB_FILL_SPARE_22 REG_BIT(22) #define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16) #define SKL_DE_COMPRESSED_HASH_MODE (1 << 15) #define DPA_MASK_VBLANK_SRD (1 << 15) @@ -7890,6 +8011,8 @@ enum { #define CHICKEN_MISC_2 _MMIO(0x42084) #define CNL_COMP_PWR_DOWN (1 << 23) +#define KBL_ARB_FILL_SPARE_14 REG_BIT(14) +#define KBL_ARB_FILL_SPARE_13 REG_BIT(13) #define GLK_CL2_PWR_DOWN (1 << 12) #define GLK_CL1_PWR_DOWN (1 << 11) #define GLK_CL0_PWR_DOWN (1 << 10) @@ -7932,11 +8055,15 @@ enum { #define DISP_ARB_CTL2 
_MMIO(0x45004) #define DISP_DATA_PARTITION_5_6 (1 << 6) #define DISP_IPC_ENABLE (1 << 3) -#define _DBUF_CTL_S1 0x45008 -#define _DBUF_CTL_S2 0x44FE8 -#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2)) -#define DBUF_POWER_REQUEST (1 << 31) -#define DBUF_POWER_STATE (1 << 30) + +#define _DBUF_CTL_S1 0x45008 +#define _DBUF_CTL_S2 0x44FE8 +#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2)) +#define DBUF_POWER_REQUEST REG_BIT(31) +#define DBUF_POWER_STATE REG_BIT(30) +#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19) +#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x) + #define GEN7_MSG_CTL _MMIO(0x45010) #define WAIT_FOR_PCH_RESET_ACK (1 << 1) #define WAIT_FOR_PCH_FLR_ACK (1 << 0) @@ -8027,13 +8154,15 @@ enum { #define GEN8_L3CNTLREG _MMIO(0x7034) #define GEN8_ERRDETBCTRL (1 << 9) -#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) - #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11) - #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9) +#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) + #define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) + #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) + #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) #define HIZ_CHICKEN _MMIO(0x7018) -# define CHV_HZ_8X8_MODE_IN_1X (1 << 15) -# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1 << 3) +# define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15) +# define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14) +# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3) #define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) #define DISABLE_PIXEL_MASK_CAMMING (1 << 14) @@ -8221,23 +8350,18 @@ enum { /* south display engine interrupt: ICP/TGP */ #define SDE_GMBUS_ICP (1 << 23) -#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24)) -#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16)) -#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \ - SDE_DDI_HOTPLUG_ICP(PORT_A)) -#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC3) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC2) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC1)) -#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \ - SDE_DDI_HOTPLUG_ICP(PORT_B) | \ - SDE_DDI_HOTPLUG_ICP(PORT_A)) -#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC5) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC4) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC3) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC2) | \ - SDE_TC_HOTPLUG_ICP(PORT_TC1)) +#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin)) +#define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin)) +#define SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \ + SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \ + SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \ + SDE_DDI_HOTPLUG_ICP(HPD_PORT_A)) +#define SDE_TC_HOTPLUG_MASK_ICP (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \ + SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \ + SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \ + SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \ + SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \ + SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1)) #define SDEISR _MMIO(0xc4000) #define SDEIMR _MMIO(0xc4004) @@ -8305,139 +8429,21 @@ enum { */ #define SHOTPLUG_CTL_DDI _MMIO(0xc4030) -#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port))) -#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port))) -#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port))) -#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port))) -#define 
SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port))) -#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4)) +#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4)) +#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4)) +#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4)) +#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin) (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4)) +#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4)) #define SHOTPLUG_CTL_TC _MMIO(0xc4034) -#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4) +#define ICP_TC_HPD_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4)) +#define ICP_TC_HPD_LONG_DETECT(hpd_pin) (2 << (_HPD_PIN_TC(hpd_pin) * 4)) +#define ICP_TC_HPD_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4)) #define SHPD_FILTER_CNT _MMIO(0xc4038) #define SHPD_FILTER_CNT_500_ADJ 0x001D9 -/* Icelake DSC Rate Control Range Parameter Registers */ -#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) -#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) -#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) -#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) -#define RC_BPG_OFFSET_SHIFT 10 -#define RC_MAX_QP_SHIFT 5 -#define RC_MIN_QP_SHIFT 0 - -#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) -#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) -#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) -#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) -#define 
ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) - -#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) -#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4) -#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) -#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) - -#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) -#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) -#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) -#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) - -#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) -#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) - -#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \ - SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A)) -#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \ - ICP_TC_HPD_ENABLE(PORT_TC3) | \ - ICP_TC_HPD_ENABLE(PORT_TC2) | \ - ICP_TC_HPD_ENABLE(PORT_TC1)) -#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \ - SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \ - SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A)) -#define 
TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \ - ICP_TC_HPD_ENABLE(PORT_TC5) | \ - ICP_TC_HPD_ENABLE_MASK) - #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) @@ -8697,6 +8703,10 @@ enum { #define SOUTH_CHICKEN1 _MMIO(0xc2000) #define FDIA_PHASE_SYNC_SHIFT_OVR 19 #define FDIA_PHASE_SYNC_SHIFT_EN 18 +#define INVERT_DDID_HPD (1 << 18) +#define INVERT_DDIC_HPD (1 << 17) +#define INVERT_DDIB_HPD (1 << 16) +#define INVERT_DDIA_HPD (1 << 15) #define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) #define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) #define FDI_BC_BIFURCATION_SELECT (1 << 12) @@ -8967,12 +8977,12 @@ enum { #define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4) #define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4) #define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278) -#define FORCEWAKE_BLITTER_GEN9 _MMIO(0xa188) +#define FORCEWAKE_GT_GEN9 _MMIO(0xa188) #define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88) #define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4) #define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4) #define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84) -#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044) +#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044) #define FORCEWAKE_KERNEL BIT(0) #define FORCEWAKE_USER BIT(1) #define FORCEWAKE_KERNEL_FALLBACK BIT(15) @@ -9236,6 +9246,9 @@ enum { #define GEN9_SAGV_DISABLE 0x0 #define GEN9_SAGV_IS_DISABLED 0x1 #define GEN9_SAGV_ENABLE 0x3 +#define DG1_PCODE_STATUS 0x7E +#define DG1_UNCORE_GET_INIT_STATUS 0x0 +#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1 #define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23 #define GEN6_PCODE_DATA _MMIO(0x138128) #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 @@ -9315,6 +9328,7 @@ enum { #define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) #define GEN10_SAMPLER_MODE _MMIO(0xE18C) +#define ENABLE_SMALLPL REG_BIT(15) #define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5) /* IVYBRIDGE DPF */ @@ -10269,6 +10283,7 @@ enum skl_power_gate { #define DPLL_CFGCR2_PDIV_2 (1 << 2) #define DPLL_CFGCR2_PDIV_3 (2 << 2) #define DPLL_CFGCR2_PDIV_7 (4 << 2) +#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2) #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) #define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) @@ -10288,9 +10303,9 @@ enum skl_power_gate { #define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) #define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24)) #define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10) -#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \ +#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < TC_PORT_4 ? 
\ (tc_port) + 12 : \ - (tc_port) - PORT_TC4 + 21)) + (tc_port) - TC_PORT_4 + 21)) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) @@ -10319,6 +10334,10 @@ enum skl_power_gate { #define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \ _MG_PLL2_ENABLE) +/* DG1 PLL */ +#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ + _MG_PLL1_ENABLE, _MG_PLL2_ENABLE) + #define _MG_REFCLKIN_CTL_PORT1 0x16892C #define _MG_REFCLKIN_CTL_PORT2 0x16992C #define _MG_REFCLKIN_CTL_PORT3 0x16A92C @@ -10535,6 +10554,20 @@ enum skl_power_gate { #define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \ _TGL_DPLL1_CFGCR1) +#define _DG1_DPLL2_CFGCR0 0x16C284 +#define _DG1_DPLL3_CFGCR0 0x16C28C +#define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ + _TGL_DPLL1_CFGCR0, \ + _DG1_DPLL2_CFGCR0, \ + _DG1_DPLL3_CFGCR0) + +#define _DG1_DPLL2_CFGCR1 0x16C288 +#define _DG1_DPLL3_CFGCR1 0x16C290 +#define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ + _TGL_DPLL1_CFGCR1, \ + _DG1_DPLL2_CFGCR1, \ + _DG1_DPLL3_CFGCR1) + #define _DKL_PHY1_BASE 0x168000 #define _DKL_PHY2_BASE 0x169000 #define _DKL_PHY3_BASE 0x16A000 @@ -11015,14 +11048,17 @@ enum skl_power_gate { #define _CGM_PIPE_A_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6790C) #define _CGM_PIPE_A_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x67910) #define _CGM_PIPE_A_DEGAMMA (VLV_DISPLAY_BASE + 0x66000) +#define CGM_PIPE_DEGAMMA_RED_MASK REG_GENMASK(13, 0) +#define CGM_PIPE_DEGAMMA_GREEN_MASK REG_GENMASK(29, 16) +#define CGM_PIPE_DEGAMMA_BLUE_MASK REG_GENMASK(13, 0) #define _CGM_PIPE_A_GAMMA (VLV_DISPLAY_BASE + 0x67000) +#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0) +#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16) +#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0) #define _CGM_PIPE_A_MODE (VLV_DISPLAY_BASE + 0x67A00) #define CGM_PIPE_MODE_GAMMA (1 << 2) #define CGM_PIPE_MODE_CSC (1 << 1) #define CGM_PIPE_MODE_DEGAMMA (1 << 0) -#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0) -#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16) -#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0) #define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900) #define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 11e272422fb7..0e813819b041 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -542,8 +542,13 @@ bool __i915_request_submit(struct i915_request *request) if (i915_request_completed(request)) goto xfer; + if (unlikely(intel_context_is_closed(request->context) && + !intel_engine_has_heartbeat(engine))) + intel_context_set_banned(request->context); + if (unlikely(intel_context_is_banned(request->context))) i915_request_set_error_once(request, -EIO); + if (unlikely(fatal_error(request->fence.error))) __i915_request_skip(request); @@ -593,16 +598,8 @@ xfer: __notify_execute_cb_irq(request); /* We may be recursing from the signal callback of another i915 fence */ - if (!i915_request_signaled(request)) { - spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &request->fence.flags) && - !i915_request_enable_breadcrumb(request)) - intel_engine_signal_breadcrumbs(engine); - - spin_unlock(&request->lock); - } + if 
(test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) + i915_request_enable_breadcrumb(request); return result; } diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index b7b59328cb76..883dd8d09d6b 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -112,7 +112,7 @@ static inline unsigned int i915_sg_segment_size(void) unsigned int size = swiotlb_max_segment(); if (size == 0) - return SCATTERLIST_MAX_SEGMENT; + size = UINT_MAX; size = rounddown(size, PAGE_SIZE); /* swiotlb_max_segment_size can return 1 byte when it means one page. */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index ed2be3489f8e..db2111fc809e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -32,51 +32,10 @@ #include "i915_reg.h" #include "i915_suspend.h" -static void i915_save_display(struct drm_i915_private *dev_priv) +static void intel_save_swf(struct drm_i915_private *dev_priv) { - /* Display arbitration control */ - if (INTEL_GEN(dev_priv) <= 4) - dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); - - /* save FBC interval */ - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv)) - dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); -} - -static void i915_restore_display(struct drm_i915_private *dev_priv) -{ - /* Display arbitration */ - if (INTEL_GEN(dev_priv) <= 4) - I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); - - /* only restore FBC info on the platform that supports FBC*/ - intel_fbc_global_disable(dev_priv); - - /* restore FBC interval */ - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv)) - I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); - - intel_vga_redisable(dev_priv); -} - -int i915_save_state(struct drm_i915_private *dev_priv) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; int i; - i915_save_display(dev_priv); - - if (IS_GEN(dev_priv, 4)) - pci_read_config_word(pdev, GCDGMBUS, - &dev_priv->regfile.saveGCDGMBUS); - - /* Cache mode state */ - if (INTEL_GEN(dev_priv) < 7) - dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); - - /* Memory Arbitration state */ - dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); - /* Scratch space */ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { @@ -96,28 +55,12 @@ int i915_save_state(struct drm_i915_private *dev_priv) for (i = 0; i < 3; i++) dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); } - - return 0; } -int i915_restore_state(struct drm_i915_private *dev_priv) +static void intel_restore_swf(struct drm_i915_private *dev_priv) { - struct pci_dev *pdev = dev_priv->drm.pdev; int i; - if (IS_GEN(dev_priv, 4)) - pci_write_config_word(pdev, GCDGMBUS, - dev_priv->regfile.saveGCDGMBUS); - i915_restore_display(dev_priv); - - /* Cache mode state */ - if (INTEL_GEN(dev_priv) < 7) - I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | - 0xffff0000); - - /* Memory arbitration state */ - I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); - /* Scratch space */ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { @@ -137,8 +80,41 @@ int i915_restore_state(struct drm_i915_private *dev_priv) for (i = 0; i < 3; i++) I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); } +} - intel_gmbus_reset(dev_priv); +void i915_save_display(struct drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + + /* Display 
arbitration control */ + if (INTEL_GEN(dev_priv) <= 4) + dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); - return 0; + if (IS_GEN(dev_priv, 4)) + pci_read_config_word(pdev, GCDGMBUS, + &dev_priv->regfile.saveGCDGMBUS); + + intel_save_swf(dev_priv); +} + +void i915_restore_display(struct drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + + intel_restore_swf(dev_priv); + + if (IS_GEN(dev_priv, 4)) + pci_write_config_word(pdev, GCDGMBUS, + dev_priv->regfile.saveGCDGMBUS); + + /* Display arbitration */ + if (INTEL_GEN(dev_priv) <= 4) + I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); + + /* only restore FBC info on platforms that support FBC */ + intel_fbc_global_disable(dev_priv); + + intel_vga_redisable(dev_priv); + + intel_gmbus_reset(dev_priv); }
diff --git a/drivers/gpu/drm/i915/i915_suspend.h b/drivers/gpu/drm/i915/i915_suspend.h index 3a36fb4ecc05..e5a611ee3d15 100644 --- a/drivers/gpu/drm/i915/i915_suspend.h +++ b/drivers/gpu/drm/i915/i915_suspend.h @@ -8,7 +8,7 @@ struct drm_i915_private; -int i915_save_state(struct drm_i915_private *i915); -int i915_restore_state(struct drm_i915_private *i915); +void i915_save_display(struct drm_i915_private *i915); +void i915_restore_display(struct drm_i915_private *i915); #endif /* __I915_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 4cd2038cbe35..038d4c6884c5 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -34,7 +34,7 @@ static void *i915_sw_fence_debug_hint(void *addr) #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS -static struct debug_obj_descr i915_sw_fence_debug_descr = { +static const struct debug_obj_descr i915_sw_fence_debug_descr = { .name = "i915_sw_fence", .debug_hint = i915_sw_fence_debug_hint, };
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 495d28f6d160..ffb5287e055a 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -892,9 +892,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, /* Allocate enough page directories to use PTE */ if (vma->vm->allocate_va_range) { - i915_vm_alloc_pt_stash(vma->vm, - &work->stash, - vma->size); + err = i915_vm_alloc_pt_stash(vma->vm, + &work->stash, + vma->size); + if (err) + goto err_fence; err = i915_vm_pin_pt_stash(vma->vm, &work->stash);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index e2aa5bc3a6e0..e67cec8fa2aa 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -62,6 +62,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(CANNONLAKE), PLATFORM_NAME(ICELAKE), PLATFORM_NAME(ELKHARTLAKE), + PLATFORM_NAME(JASPERLAKE), PLATFORM_NAME(TIGERLAKE), PLATFORM_NAME(ROCKETLAKE), PLATFORM_NAME(DG1), @@ -516,6 +517,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) S32_MAX), USEC_PER_SEC)); } + + if (!HAS_DISPLAY(dev_priv)) { + dev_priv->drm.driver_features &= ~(DRIVER_MODESET | + DRIVER_ATOMIC); + memset(&info->display, 0, sizeof(info->display)); + memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites)); + memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers)); + } } void intel_driver_caps_print(const struct intel_driver_caps *caps, diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 6a3d607218aa..d92fa041c700 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++
b/drivers/gpu/drm/i915/intel_device_info.h @@ -79,6 +79,7 @@ enum intel_platform { /* gen11 */ INTEL_ICELAKE, INTEL_ELKHARTLAKE, + INTEL_JASPERLAKE, /* gen12 */ INTEL_TIGERLAKE, INTEL_ROCKETLAKE, diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 8aa12cad93ce..4754296a250e 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -7,7 +7,8 @@ #include "intel_dram.h" struct dram_dimm_info { - u8 size, width, ranks; + u16 size; + u8 width, ranks; }; struct dram_channel_info { @@ -41,10 +42,10 @@ static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) return dimm->ranks * 64 / (dimm->width ?: 1); } -/* Returns total GB for the whole DIMM */ +/* Returns total Gb for the whole DIMM */ static int skl_get_dimm_size(u16 val) { - return val & SKL_DRAM_SIZE_MASK; + return (val & SKL_DRAM_SIZE_MASK) * 8; } static int skl_get_dimm_width(u16 val) @@ -74,10 +75,10 @@ static int skl_get_dimm_ranks(u16 val) return val + 1; } -/* Returns total GB for the whole DIMM */ +/* Returns total Gb for the whole DIMM */ static int cnl_get_dimm_size(u16 val) { - return (val & CNL_DRAM_SIZE_MASK) / 2; + return (val & CNL_DRAM_SIZE_MASK) * 8 / 2; } static int cnl_get_dimm_width(u16 val) @@ -110,8 +111,8 @@ static int cnl_get_dimm_ranks(u16 val) static bool skl_is_16gb_dimm(const struct dram_dimm_info *dimm) { - /* Convert total GB to Gb per DRAM device */ - return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; + /* Convert total Gb to Gb per DRAM device */ + return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; } static void @@ -130,7 +131,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915, } drm_dbg_kms(&i915->drm, - "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", + "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", channel, dimm_name, dimm->size, dimm->width, dimm->ranks, yesno(skl_is_16gb_dimm(dimm))); } @@ -354,9 +355,9 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val) /* * Size in register is Gb per DRAM device. Convert to total - * GB to match the way we report this for non-LP platforms. + * Gb to match the way we report this for non-LP platforms. 
*/ - dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; + dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm); } static int bxt_get_dram_info(struct drm_i915_private *i915) @@ -404,7 +405,7 @@ static int bxt_get_dram_info(struct drm_i915_private *i915) dram_info->type != type); drm_dbg_kms(&i915->drm, - "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", + "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n", i - BXT_D_CR_DRP0_DUNIT_START, dimm.size, dimm.width, dimm.ranks, intel_dram_type_str(type)); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 6b5e9d88646d..180e1078ef7c 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -87,7 +87,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, min_order = ilog2(size) - ilog2(mem->mm.chunk_size); } - if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size) + if (size > mem->mm.size) return -E2BIG; n_pages = size >> ilog2(mem->mm.chunk_size); diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 6c97192e9ca8..f31c0dabd0cc 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -115,7 +115,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n"); - drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv)); return PCH_MCC; case INTEL_PCH_TGP_DEVICE_ID_TYPE: case INTEL_PCH_TGP2_DEVICE_ID_TYPE: @@ -126,7 +126,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) case INTEL_PCH_JSP_DEVICE_ID_TYPE: case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); - drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv)); return PCH_JSP; default: return PCH_NONE; @@ -157,7 +157,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) id = INTEL_PCH_TGP_DEVICE_ID_TYPE; - else if (IS_ELKHARTLAKE(dev_priv)) + else if (IS_JSL_EHL(dev_priv)) id = INTEL_PCH_MCC_DEVICE_ID_TYPE; else if (IS_ICELAKE(dev_priv)) id = INTEL_PCH_ICP_DEVICE_ID_TYPE; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b4bd19266b8c..f54375b11964 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3573,11 +3573,11 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, _ilk_disable_lp_wm(dev_priv, dirty); if (dirty & WM_DIRTY_PIPE(PIPE_A)) - I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); + I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); if (dirty & WM_DIRTY_PIPE(PIPE_B)) - I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); + I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); if (dirty & WM_DIRTY_PIPE(PIPE_C)) - I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); + I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); if (dirty & WM_DIRTY_DDB) { if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { @@ -3706,7 +3706,7 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) * - All planes can enable watermarks for latencies >= SAGV engine block time * - We're not using an interlaced display configuration */ -int +static int intel_enable_sagv(struct drm_i915_private *dev_priv) { int ret; @@ -3740,7 
+3740,7 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) return 0; } -int +static int intel_disable_sagv(struct drm_i915_private *dev_priv) { int ret; @@ -6287,13 +6287,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; enum pipe pipe = crtc->pipe; - static const i915_reg_t wm0_pipe_reg[] = { - [PIPE_A] = WM0_PIPEA_ILK, - [PIPE_B] = WM0_PIPEB_ILK, - [PIPE_C] = WM0_PIPEC_IVB, - }; - hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); + hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe)); memset(active, 0, sizeof(*active)); @@ -7116,27 +7111,28 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) 0, CNL_DELAY_PMRSP); } -static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) +static void gen12_init_clock_gating(struct drm_i915_private *i915) { - u32 vd_pg_enable = 0; unsigned int i; + /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ + for (i = 0; i < I915_MAX_VCS; i++) + if (HAS_ENGINE(&i915->gt, _VCS(i))) + intel_uncore_rmw(&i915->uncore, POWERGATE_ENABLE, 0, + VDN_HCP_POWERGATE_ENABLE(i) | + VDN_MFX_POWERGATE_ENABLE(i)); +} + +static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) +{ + gen12_init_clock_gating(dev_priv); + /* Wa_1409120013:tgl */ I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); - /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ - for (i = 0; i < I915_MAX_VCS; i++) { - if (HAS_ENGINE(&dev_priv->gt, _VCS(i))) - vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) | - VDN_MFX_POWERGATE_ENABLE(i); - } - - I915_WRITE(POWERGATE_ENABLE, - I915_READ(POWERGATE_ENABLE) | vd_pg_enable); - /* Wa_1409825376:tgl (pre-prod)*/ - if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) + if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1)) I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | TGL_VRH_GATING_DIS); @@ -7145,6 +7141,16 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) 0, DFR_DISABLE); } +static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) +{ + gen12_init_clock_gating(dev_priv); + + /* Wa_1409836686:dg1[a0] */ + if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0)) + I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | + DPT_GATING_DIS); +} + static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) { if (!HAS_PCH_CNP(dev_priv)) @@ -7197,6 +7203,10 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) cnp_init_clock_gating(dev_priv); gen9_init_clock_gating(dev_priv); + /* WAC6entrylatency:cfl */ + I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | + FBC_LLC_FULLY_OPEN); + /* * WaFbcTurnOffFbcWatermark:cfl * Display WA #0562: cfl @@ -7216,6 +7226,10 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) { gen9_init_clock_gating(dev_priv); + /* WAC6entrylatency:kbl */ + I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | + FBC_LLC_FULLY_OPEN); + /* WaDisableSDEUnitClockGating:kbl */ if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | @@ -7590,7 +7604,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_GEN(dev_priv, 12)) + if (IS_DG1(dev_priv)) + dev_priv->display.init_clock_gating = dg1_init_clock_gating; + else if (IS_GEN(dev_priv, 12)) dev_priv->display.init_clock_gating = 
tgl_init_clock_gating; else if (IS_GEN(dev_priv, 11)) dev_priv->display.init_clock_gating = icl_init_clock_gating; diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index a2473594c2db..eab83e251dd5 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -49,8 +49,6 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state); -int intel_enable_sagv(struct drm_i915_private *dev_priv); -int intel_disable_sagv(struct drm_i915_private *dev_priv); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); void intel_sagv_post_plane_update(struct intel_atomic_state *state); bool skl_wm_level_equals(const struct skl_wm_level *l1, diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 916ccd1c0e96..02ebf5a04a9b 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -231,9 +231,21 @@ void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val) SB_CRWRDA_NP, reg, &val); } +static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy) +{ + /* + * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D) + * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C) + */ + if (IS_CHERRYVIEW(i915)) + return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO; + else + return IOSF_PORT_DPIO; +} + u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) { - int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)]; + u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); u32 val = 0; vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val); @@ -252,7 +264,7 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) void vlv_dpio_write(struct drm_i915_private *i915, enum pipe pipe, int reg, u32 val) { - int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)]; + u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val); } @@ -543,3 +555,18 @@ out: return ret ? ret : status; #undef COND } + +void intel_pcode_init(struct drm_i915_private *i915) +{ + int ret; + + if (!IS_DGFX(i915)) + return; + + ret = skl_pcode_request(i915, DG1_PCODE_STATUS, + DG1_UNCORE_GET_INIT_STATUS, + DG1_UNCORE_INIT_STATUS_COMPLETE, + DG1_UNCORE_INIT_STATUS_COMPLETE, 50); + if (ret) + drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n"); +} diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h index 7fb95745a444..094c7b19c5d4 100644 --- a/drivers/gpu/drm/i915/intel_sideband.h +++ b/drivers/gpu/drm/i915/intel_sideband.h @@ -138,4 +138,6 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, u32 reply_mask, u32 reply, int timeout_base_ms); +void intel_pcode_init(struct drm_i915_private *i915); + #endif /* _INTEL_SIDEBAND_H */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8d5a933e6af6..1c14a07eba7d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1051,37 +1051,37 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = { /* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
*/ static const struct intel_forcewake_range __gen9_fw_ranges[] = { - GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT), GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT), GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT), GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA), GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT), GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT), GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA), - GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT), GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT), GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA), - GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT), GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT), GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA), - GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT), GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA), - GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT), GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA), - GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT), GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT), GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), }; @@ -1089,33 +1089,33 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = { static const struct intel_forcewake_range __gen11_fw_ranges[] = { GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT), GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT), GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT), GEN_FW_RANGE(0x8800, 0x8bff, 0), GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT), GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), GEN_FW_RANGE(0x9560, 0x95ff, 
0), - GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT), GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT), GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT), GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT), GEN_FW_RANGE(0x24000, 0x2407f, 0), - GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT), GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT), GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT), GEN_FW_RANGE(0x40000, 0x1bffff, 0), GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0), @@ -1124,44 +1124,113 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = { GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0) }; -/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ +/* + * *Must* be sorted by offset ranges! See intel_fw_table_check(). + * + * Note that the spec lists several reserved/unused ranges that don't + * actually contain any registers. In the table below we'll combine those + * reserved ranges with either the preceding or following range to keep the + * table small and lookups fast. + */ static const struct intel_forcewake_range __gen12_fw_ranges[] = { - GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ + GEN_FW_RANGE(0x0, 0x1fff, 0), /* + 0x0 - 0xaff: reserved + 0xb00 - 0x1fff: always on */ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT), + GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT), GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /* + 0x4000 - 0x48ff: gt + 0x4900 - 0x51ff: reserved */ + GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /* + 0x5200 - 0x53ff: render + 0x5400 - 0x54ff: reserved + 0x5500 - 0x7fff: render */ + GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8160, 0x81ff, 0), /* + 0x8160 - 0x817f: reserved + 0x8180 - 0x81ff: always on */ + GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), - GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER), - 
GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /* + 0x8500 - 0x87ff: gt + 0x8800 - 0x8fff: reserved + 0x9000 - 0x947f: gt + 0x9480 - 0x94cf: reserved */ + GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x9560, 0x97ff, 0), /* + 0x9560 - 0x95ff: always on + 0x9600 - 0x97ff: reserved */ + GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT), + GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /* + 0xb400 - 0xbf7f: gt + 0xb480 - 0xbfff: reserved + 0xc000 - 0xcfff: gt */ + GEN_FW_RANGE(0xd000, 0xd7ff, 0), + GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT), + GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /* + 0xdc00 - 0xddff: render + 0xde00 - 0xde7f: reserved + 0xde80 - 0xe8ff: render + 0xe900 - 0xefff: reserved */ + GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /* + 0xf000 - 0xffff: gt + 0x10000 - 0x147ff: reserved */ + GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /* + 0x14800 - 0x14fff: render + 0x15000 - 0x16dff: reserved + 0x16e00 - 0x1bfff: render + 0x1c000 - 0x1ffff: reserved */ + GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0), + GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2), + GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), + GEN_FW_RANGE(0x24000, 0x2417f, 0), /* + 0x24000 - 0x2407f: always on + 0x24080 - 0x2417f: reserved */ + GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* + 0x24180 - 0x241ff: gt + 0x24200 - 0x249ff: reserved */ + GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* + 0x24a00 - 0x24a7f: render + 0x24a80 - 0x251ff: reserved */ + GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /* + 0x25200 - 0x252ff: gt + 0x25300 - 0x255ff: reserved */ + GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0), + GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /* + 0x25680 - 0x256ff: VD2 + 0x25700 - 0x259ff: reserved */ + GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0), + GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /* + 0x25a80 - 0x25aff: VD2 + 0x25b00 - 0x2ffff: reserved */ + GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), GEN_FW_RANGE(0x40000, 0x1bffff, 0), - GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), - GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), - GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), - GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), - GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), - GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) + GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* + 0x1c0000 - 0x1c2bff: VD0 + 0x1c2c00 - 0x1c2cff: reserved + 0x1c2d00 - 0x1c2dff: VD0 + 0x1c2e00 - 0x1c3eff: reserved + 0x1c3f00 - 0x1c3fff: VD0 */ + GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0), + GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* + 0x1c8000 - 0x1ca0ff: VE0 + 0x1ca100 - 0x1cbeff: reserved + 0x1cbf00 - 0x1cbfff: VE0 */ + GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /* + 0x1cc000 - 0x1ccfff: VD0 + 0x1cd000 - 0x1cffff: reserved */ + GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* + 0x1d0000 - 0x1d2bff: VD2 + 0x1d2c00 - 0x1d2cff: reserved + 0x1d2d00 - 0x1d2dff: VD2 + 0x1d2e00 - 0x1d3eff: reserved + 0x1d3f00 - 0x1d3fff: VD2 */ }; static 
void @@ -1209,6 +1278,18 @@ unclaimed_reg_debug(struct intel_uncore *uncore, spin_unlock(&uncore->debug->lock); } +#define __vgpu_read(x) \ +static u##x \ +vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ + u##x val = __raw_uncore_read##x(uncore, reg); \ + trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ + return val; \ +} +__vgpu_read(8) +__vgpu_read(16) +__vgpu_read(32) +__vgpu_read(64) + #define GEN2_READ_HEADER(x) \ u##x val = 0; \ assert_rpm_wakelock_held(uncore->rpm); @@ -1414,6 +1495,16 @@ __gen_reg_write_funcs(gen8); #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER +#define __vgpu_write(x) \ +static void \ +vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ + trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ + __raw_uncore_write##x(uncore, reg, val); \ +} +__vgpu_write(8) +__vgpu_write(16) +__vgpu_write(32) + #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ do { \ (uncore)->funcs.mmio_writeb = x##_write8; \ @@ -1469,7 +1560,7 @@ static int __fw_domain_init(struct intel_uncore *uncore, d->id = domain_id; BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); - BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); + BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT)); BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0)); BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1)); @@ -1538,9 +1629,9 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); - fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, - FORCEWAKE_BLITTER_GEN9, - FORCEWAKE_ACK_BLITTER_GEN9); + fw_domain_init(uncore, FW_DOMAIN_ID_GT, + FORCEWAKE_GT_GEN9, + FORCEWAKE_ACK_GT_GEN9); for (i = 0; i < I915_MAX_VCS; i++) { if (!__HAS_ENGINE(emask, _VCS(i))) @@ -1564,9 +1655,9 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); - fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, - FORCEWAKE_BLITTER_GEN9, - FORCEWAKE_ACK_BLITTER_GEN9); + fw_domain_init(uncore, FW_DOMAIN_ID_GT, + FORCEWAKE_GT_GEN9, + FORCEWAKE_ACK_GT_GEN9); fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { @@ -1701,11 +1792,15 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) * clobbering the GTT which we want ioremap_wc instead. Fortunately, * the register BAR remains the same size for all the earlier * generations up to Ironlake. + * For dgfx chips register range is expanded to 4MB. 
*/ if (INTEL_GEN(i915) < 5) mmio_size = 512 * 1024; + else if (IS_DGFX(i915)) + mmio_size = 4 * 1024 * 1024; else mmio_size = 2 * 1024 * 1024; + uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); if (uncore->regs == NULL) { drm_err(&i915->drm, "failed to map registers\n"); @@ -1735,7 +1830,10 @@ static void uncore_raw_init(struct intel_uncore *uncore) { GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); - if (IS_GEN(uncore->i915, 5)) { + if (intel_vgpu_active(uncore->i915)) { + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu); + } else if (IS_GEN(uncore->i915, 5)) { ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); } else { @@ -1993,13 +2091,14 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore, unsigned int slow_timeout_ms, u32 *out_value) { - u32 reg_value; + u32 reg_value = 0; #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) int ret; /* Catch any overuse of this function */ might_sleep_if(slow_timeout_ms); GEM_BUG_ON(fast_timeout_us > 20000); + GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms); ret = -ETIMEDOUT; if (fast_timeout_us && fast_timeout_us <= 20000) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index c4b22d9d0b45..bd2467284295 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -46,7 +46,7 @@ struct intel_uncore_mmio_debug { enum forcewake_domain_id { FW_DOMAIN_ID_RENDER = 0, - FW_DOMAIN_ID_BLITTER, + FW_DOMAIN_ID_GT, /* also includes blitter engine */ FW_DOMAIN_ID_MEDIA, FW_DOMAIN_ID_MEDIA_VDBOX0, FW_DOMAIN_ID_MEDIA_VDBOX1, @@ -60,7 +60,7 @@ enum forcewake_domain_id { enum forcewake_domains { FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER), - FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER), + FORCEWAKE_GT = BIT(FW_DOMAIN_ID_GT), FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA), FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0), FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1), diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 028baae9631f..f88473d396f4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -536,7 +536,7 @@ int i915_gem_evict_mock_selftests(void) with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = i915_subtests(tests, &i915->gt); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index af8205a2bd8f..c53a222e3dec 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1727,7 +1727,7 @@ int i915_gem_gtt_mock_selftests(void) mock_fini_ggtt(ggtt); kfree(ggtt); out_put: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 3092ca763789..64bbb8288249 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -527,7 +527,7 @@ int i915_request_mock_selftests(void) with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = i915_subtests(tests, i915); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 88c5e9acb84c..1b6125e4c1ac 100644 --- 
a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -841,7 +841,7 @@ int i915_vma_mock_selftests(void) mock_fini_ggtt(ggtt); kfree(ggtt); out_put: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; }
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 93a38a323584..0aeba8e3af28 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -261,6 +261,82 @@ err_close_objects: return err; } +static int igt_mock_splintered_region(void *arg) +{ + struct intel_memory_region *mem = arg; + struct drm_i915_private *i915 = mem->i915; + struct drm_i915_gem_object *obj; + unsigned int expected_order; + LIST_HEAD(objects); + u64 size; + int err = 0; + + /* + * Sanity check we can still allocate everything even if the + * mm.max_order != mm.size, i.e. our starting address space size is not a + * power-of-two. + */ + + size = (SZ_4G - 1) & PAGE_MASK; + mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + if (mem->mm.size != size) { + pr_err("%s size mismatch(%llu != %llu)\n", + __func__, mem->mm.size, size); + err = -EINVAL; + goto out_put; + } + + expected_order = get_order(rounddown_pow_of_two(size)); + if (mem->mm.max_order != expected_order) { + pr_err("%s order mismatch(%u != %u)\n", + __func__, mem->mm.max_order, expected_order); + err = -EINVAL; + goto out_put; + } + + obj = igt_object_create(mem, &objects, size, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_close; + } + + close_objects(mem, &objects); + + /* + * While we should be able to allocate everything without any flag + * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are + * actually limited to the largest power-of-two for the region size, i.e. + * max_order, due to the inner workings of the buddy allocator. So make + * sure that does indeed hold true.
+ */ + + obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS); + if (!IS_ERR(obj)) { + pr_err("%s too large contiguous allocation was not rejected\n", + __func__); + err = -EINVAL; + goto out_close; + } + + obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size), + I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) { + pr_err("%s largest possible contiguous allocation failed\n", + __func__); + err = PTR_ERR(obj); + goto out_close; + } + +out_close: + close_objects(mem, &objects); +out_put: + intel_memory_region_put(mem); + return err; +} + static int igt_gpu_write_dw(struct intel_context *ce, struct i915_vma *vma, u32 dword, @@ -771,6 +847,7 @@ int intel_memory_region_mock_selftests(void) static const struct i915_subtest tests[] = { SUBTEST(igt_mock_fill), SUBTEST(igt_mock_contiguous), + SUBTEST(igt_mock_splintered_region), }; struct intel_memory_region *mem; struct drm_i915_private *i915; @@ -791,7 +868,7 @@ int intel_memory_region_mock_selftests(void) intel_memory_region_put(mem); out_unref: - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return err; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index f127e633f7ca..9220c9d1a4b7 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -79,17 +79,12 @@ static void mock_device_release(struct drm_device *dev) out: i915_params_free(&i915->params); - put_device(&i915->drm.pdev->dev); - i915->drm.pdev = NULL; } static struct drm_driver mock_driver = { .name = "mock", .driver_features = DRIVER_GEM, .release = mock_device_release, - - .gem_close_object = i915_gem_close_object, - .gem_free_object_unlocked = i915_gem_free_object, }; static void release_dev(struct device *dev) @@ -118,22 +113,15 @@ static struct dev_pm_domain pm_domain = { struct drm_i915_private *mock_gem_device(void) { - struct drm_i915_private *i915; - struct pci_dev *pdev; #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - struct dev_iommu iommu; + static struct dev_iommu fake_iommu = { .priv = (void *)-1 }; #endif - int err; + struct drm_i915_private *i915; + struct pci_dev *pdev; pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); if (!pdev) return NULL; - i915 = kzalloc(sizeof(*i915), GFP_KERNEL); - if (!i915) { - kfree(pdev); - return NULL; - } - device_initialize(&pdev->dev); pdev->class = PCI_BASE_CLASS_DISPLAY << 16; pdev->dev.release = release_dev; @@ -141,13 +129,26 @@ struct drm_i915_private *mock_gem_device(void) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */ - memset(&iommu, 0, sizeof(iommu)); - iommu.priv = (void *)-1; - pdev->dev.iommu = &iommu; + /* HACK to disable iommu for the fake device; force identity mapping */ + pdev->dev.iommu = &fake_iommu; #endif + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + put_device(&pdev->dev); + return NULL; + } + + i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver, + struct drm_i915_private, drm); + if (IS_ERR(i915)) { + pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915)); + devres_release_group(&pdev->dev, NULL); + put_device(&pdev->dev); + + return NULL; + } pci_set_drvdata(pdev, i915); + i915->drm.pdev = pdev; dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); @@ -155,16 +156,6 @@ struct drm_i915_private *mock_gem_device(void) if 
(pm_runtime_enabled(&pdev->dev)) WARN_ON(pm_runtime_get_sync(&pdev->dev)); - err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev); - if (err) { - pr_err("Failed to initialise mock GEM device: err=%d\n", err); - put_device(&pdev->dev); - kfree(i915); - - return NULL; - } - i915->drm.pdev = pdev; - drmm_add_final_kfree(&i915->drm, i915); i915_params_copy(&i915->params, &i915_modparams); @@ -224,7 +215,15 @@ err_drv: intel_gt_driver_late_release(&i915->gt); intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); - drm_dev_put(&i915->drm); + mock_destroy_device(i915); return NULL; } + +void mock_destroy_device(struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + + devres_release_group(dev, NULL); + put_device(dev); +}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h index b5dc4e394555..953cfe4fab34 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.h +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h @@ -7,4 +7,6 @@ struct drm_i915_private; struct drm_i915_private *mock_gem_device(void); void mock_device_flush(struct drm_i915_private *i915); +void mock_destroy_device(struct drm_i915_private *i915); + #endif /* !__MOCK_GEM_DEVICE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 09660f5a0a4c..979d96f27c43 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -24,7 +24,7 @@ mock_object_create(struct intel_memory_region *mem, struct drm_i915_private *i915 = mem->i915; struct drm_i915_gem_object *obj; - if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size) + if (size > mem->mm.size) return ERR_PTR(-E2BIG); obj = i915_gem_object_alloc();
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 207bf7409dfb..6231048aa5aa 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -39,3 +39,5 @@ config DRM_IMX_HDMI depends on DRM_IMX help Choose this if you want to use HDMI on i.MX6. + +source "drivers/gpu/drm/imx/dcss/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile index 21cdcc2faabc..b644deffe948 100644 --- a/drivers/gpu/drm/imx/Makefile +++ b/drivers/gpu/drm/imx/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o +obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig new file mode 100644 index 000000000000..2b17a964ff05 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -0,0 +1,9 @@ +config DRM_IMX_DCSS + tristate "i.MX8MQ DCSS" + select IMX_IRQSTEER + select DRM_KMS_CMA_HELPER + select VIDEOMODE_HELPERS + depends on DRM && ARCH_MXC && ARM64 + help + Choose this if you have an NXP i.MX8MQ based system and want to use the + Display Controller Subsystem. This option enables DCSS support.
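Note: the new DCSS sources that follow program their registers through dcss_readl()/dcss_writel()/dcss_set()/dcss_clr() accessors from dcss-dev.h, a header that is not part of this excerpt. Presumably these are thin wrappers over the generic MMIO accessors, along these lines (a minimal sketch, not the verbatim header):

#include <linux/io.h>
#include <linux/types.h>

/* Plain 32-bit MMIO read/write. */
static inline u32 dcss_readl(void __iomem *ofs)
{
	return readl(ofs);
}

static inline void dcss_writel(u32 val, void __iomem *ofs)
{
	writel(val, ofs);
}

/* Read-modify-write helpers: set or clear the given bits. */
static inline void dcss_set(u32 val, void __iomem *ofs)
{
	writel(readl(ofs) | val, ofs);
}

static inline void dcss_clr(u32 val, void __iomem *ofs)
{
	writel(readl(ofs) & ~val, ofs);
}

For example, dcss_blkctl_cfg() below uses dcss_set() to deassert the four reset bits in DCSS_BLKCTL_RESET_CTRL without disturbing the rest of the register.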
diff --git a/drivers/gpu/drm/imx/dcss/Makefile b/drivers/gpu/drm/imx/dcss/Makefile new file mode 100644 index 000000000000..8c7c8da42792 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/Makefile @@ -0,0 +1,6 @@ +imx-dcss-objs := dcss-drv.o dcss-dev.o dcss-blkctl.o dcss-ctxld.o dcss-dtg.o \ + dcss-ss.o dcss-dpr.o dcss-scaler.o dcss-kms.o dcss-crtc.o \ + dcss-plane.o + +obj-$(CONFIG_DRM_IMX_DCSS) += imx-dcss.o + diff --git a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c new file mode 100644 index 000000000000..c9b54bb2692d --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. + */ + +#include <linux/device.h> +#include <linux/of.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_BLKCTL_RESET_CTRL 0x00 +#define B_CLK_RESETN BIT(0) +#define APB_CLK_RESETN BIT(1) +#define P_CLK_RESETN BIT(2) +#define RTR_CLK_RESETN BIT(4) +#define DCSS_BLKCTL_CONTROL0 0x10 +#define HDMI_MIPI_CLK_SEL BIT(0) +#define DISPMIX_REFCLK_SEL_POS 4 +#define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4) +#define DISPMIX_PIXCLK_SEL BIT(8) +#define HDMI_SRC_SECURE_EN BIT(16) + +struct dcss_blkctl { + struct dcss_dev *dcss; + void __iomem *base_reg; +}; + +void dcss_blkctl_cfg(struct dcss_blkctl *blkctl) +{ + if (blkctl->dcss->hdmi_output) + dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0); + else + dcss_writel(DISPMIX_PIXCLK_SEL, + blkctl->base_reg + DCSS_BLKCTL_CONTROL0); + + dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN, + blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL); +} + +int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base) +{ + struct dcss_blkctl *blkctl; + + blkctl = kzalloc(sizeof(*blkctl), GFP_KERNEL); + if (!blkctl) + return -ENOMEM; + + blkctl->base_reg = ioremap(blkctl_base, SZ_4K); + if (!blkctl->base_reg) { + dev_err(dcss->dev, "unable to remap BLK CTRL base\n"); + kfree(blkctl); + return -ENOMEM; + } + + dcss->blkctl = blkctl; + blkctl->dcss = dcss; + + dcss_blkctl_cfg(blkctl); + + return 0; +} + +void dcss_blkctl_exit(struct dcss_blkctl *blkctl) +{ + if (blkctl->base_reg) + iounmap(blkctl->base_reg); + + kfree(blkctl); +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c new file mode 100644 index 000000000000..31267c00782f --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
+ */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_vblank.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +static int dcss_enable_vblank(struct drm_crtc *crtc) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = crtc->dev->dev_private; + + dcss_dtg_vblank_irq_enable(dcss->dtg, true); + + dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true); + + enable_irq(dcss_crtc->irq); + + return 0; +} + +static void dcss_disable_vblank(struct drm_crtc *crtc) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + + disable_irq_nosync(dcss_crtc->irq); + + dcss_dtg_vblank_irq_enable(dcss->dtg, false); + + if (dcss_crtc->disable_ctxld_kick_irq) + dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false); +} + +static const struct drm_crtc_funcs dcss_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = dcss_enable_vblank, + .disable_vblank = dcss_disable_vblank, +}; + +static void dcss_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + drm_crtc_vblank_on(crtc); +} + +static void dcss_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc)); + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); + + if (dcss_dtg_is_enabled(dcss->dtg)) + dcss_ctxld_enable(dcss->ctxld); +} + +static void dcss_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode; + struct videomode vm; + + drm_display_mode_to_videomode(mode, &vm); + + pm_runtime_get_sync(dcss->dev); + + vm.pixelclock = mode->crtc_clock * 1000; + + dcss_ss_subsam_set(dcss->ss); + dcss_dtg_css_set(dcss->dtg); + + if (!drm_mode_equal(mode, old_mode) || !old_crtc_state->active) { + dcss_dtg_sync_set(dcss->dtg, &vm); + dcss_ss_sync_set(dcss->ss, &vm, + mode->flags & DRM_MODE_FLAG_PHSYNC, + mode->flags & DRM_MODE_FLAG_PVSYNC); + } + + dcss_enable_dtg_and_ss(dcss); + + dcss_ctxld_enable(dcss->ctxld); + + /* Allow CTXLD kick interrupt to be disabled when VBLANK is disabled. 
*/ + dcss_crtc->disable_ctxld_kick_irq = true; +} + +static void dcss_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); + struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc, + base); + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode; + + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); + + dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true); + + reinit_completion(&dcss->disable_completion); + + dcss_disable_dtg_and_ss(dcss); + + dcss_ctxld_enable(dcss->ctxld); + + if (!drm_mode_equal(mode, old_mode) || !crtc->state->active) + if (!wait_for_completion_timeout(&dcss->disable_completion, + msecs_to_jiffies(100))) + dev_err(dcss->dev, "Shutting off DTG timed out.\n"); + + /* + * Do not shut off CTXLD kick interrupt when shutting VBLANK off. It + * will be needed to commit the last changes, before going to suspend. + */ + dcss_crtc->disable_ctxld_kick_irq = false; + + drm_crtc_vblank_off(crtc); + + pm_runtime_mark_last_busy(dcss->dev); + pm_runtime_put_autosuspend(dcss->dev); +} + +static const struct drm_crtc_helper_funcs dcss_helper_funcs = { + .atomic_begin = dcss_crtc_atomic_begin, + .atomic_flush = dcss_crtc_atomic_flush, + .atomic_enable = dcss_crtc_atomic_enable, + .atomic_disable = dcss_crtc_atomic_disable, +}; + +static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id) +{ + struct dcss_crtc *dcss_crtc = dev_id; + struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private; + + if (!dcss_dtg_vblank_irq_valid(dcss->dtg)) + return IRQ_NONE; + + if (dcss_ctxld_is_flushed(dcss->ctxld)) + drm_crtc_handle_vblank(&dcss_crtc->base); + + dcss_dtg_vblank_irq_clear(dcss->dtg); + + return IRQ_HANDLED; +} + +int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm) +{ + struct dcss_dev *dcss = drm->dev_private; + struct platform_device *pdev = to_platform_device(dcss->dev); + int ret; + + crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base), + DRM_PLANE_TYPE_PRIMARY, 0); + if (IS_ERR(crtc->plane[0])) + return PTR_ERR(crtc->plane[0]); + + crtc->base.port = dcss->of_port; + + drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs); + ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base, + NULL, &dcss_crtc_funcs, NULL); + if (ret) { + dev_err(dcss->dev, "failed to init crtc\n"); + return ret; + } + + crtc->irq = platform_get_irq_byname(pdev, "vblank"); + if (crtc->irq < 0) + return crtc->irq; + + ret = request_irq(crtc->irq, dcss_crtc_irq_handler, + 0, "dcss_drm", crtc); + if (ret) { + dev_err(dcss->dev, "irq request failed with %d.\n", ret); + return ret; + } + + disable_irq(crtc->irq); + + return 0; +} + +void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm) +{ + free_irq(crtc->irq, crtc); +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c new file mode 100644 index 000000000000..3a84cb3209c4 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
*/ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_CTXLD_CONTROL_STATUS 0x0 +#define CTXLD_ENABLE BIT(0) +#define ARB_SEL BIT(1) +#define RD_ERR_EN BIT(2) +#define DB_COMP_EN BIT(3) +#define SB_HP_COMP_EN BIT(4) +#define SB_LP_COMP_EN BIT(5) +#define DB_PEND_SB_REC_EN BIT(6) +#define SB_PEND_DISP_ACTIVE_EN BIT(7) +#define AHB_ERR_EN BIT(8) +#define RD_ERR BIT(16) +#define DB_COMP BIT(17) +#define SB_HP_COMP BIT(18) +#define SB_LP_COMP BIT(19) +#define DB_PEND_SB_REC BIT(20) +#define SB_PEND_DISP_ACTIVE BIT(21) +#define AHB_ERR BIT(22) +#define DCSS_CTXLD_DB_BASE_ADDR 0x10 +#define DCSS_CTXLD_DB_COUNT 0x14 +#define DCSS_CTXLD_SB_BASE_ADDR 0x18 +#define DCSS_CTXLD_SB_COUNT 0x1C +#define SB_HP_COUNT_POS 0 +#define SB_HP_COUNT_MASK 0xffff +#define SB_LP_COUNT_POS 16 +#define SB_LP_COUNT_MASK 0xffff0000 +#define DCSS_AHB_ERR_ADDR 0x20 + +#define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP) +#define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR) + +/* The following sizes are in context loader entries, 8 bytes each. */ +#define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */ +#define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */ +#define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */ +#define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \ + CTXLD_SB_HP_CTX_ENTRIES) + +/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */ +static u16 dcss_ctxld_ctx_size[3] = { + CTXLD_DB_CTX_ENTRIES, + CTXLD_SB_HP_CTX_ENTRIES, + CTXLD_SB_LP_CTX_ENTRIES +}; + +/* this represents an entry in the context loader map */ +struct dcss_ctxld_item { + u32 val; + u32 ofs; +}; + +#define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item) + +struct dcss_ctxld { + struct device *dev; + void __iomem *ctxld_reg; + int irq; + bool irq_en; + + struct dcss_ctxld_item *db[2]; + struct dcss_ctxld_item *sb_hp[2]; + struct dcss_ctxld_item *sb_lp[2]; + + dma_addr_t db_paddr[2]; + dma_addr_t sb_paddr[2]; + + u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */ + u8 current_ctx; + + bool in_use; + bool armed; + + spinlock_t lock; /* protects concurrent access to private data */ +}; + +static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data) +{ + struct dcss_ctxld *ctxld = data; + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev); + u32 irq_status; + + irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS); + + if (irq_status & CTXLD_IRQ_COMPLETION && + !(irq_status & CTXLD_ENABLE) && ctxld->in_use) { + ctxld->in_use = false; + + if (dcss && dcss->disable_callback) + dcss->disable_callback(dcss); + } else if (irq_status & CTXLD_IRQ_ERROR) { + /* + * Except for throwing an error message and clearing the status + * register, there's not much we can do here. 
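+ * The sizes of the contexts that were being loaded are dumped below + * to make debugging easier.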
+ */ + dev_err(ctxld->dev, "ctxld: error encountered: %08x\n", + irq_status); + dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n", + ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB], + ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP], + ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]); + } + + dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION), + ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS); + + return IRQ_HANDLED; +} + +static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld, + struct platform_device *pdev) +{ + int ret; + + ctxld->irq = platform_get_irq_byname(pdev, "ctxld"); + if (ctxld->irq < 0) + return ctxld->irq; + + ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler, + 0, "dcss_ctxld", ctxld); + if (ret) { + dev_err(ctxld->dev, "ctxld: irq request failed.\n"); + return ret; + } + + ctxld->irq_en = true; + + return 0; +} + +static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld) +{ + dcss_writel(RD_ERR_EN | SB_HP_COMP_EN | + DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR, + ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS); +} + +static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld) +{ + struct dcss_ctxld_item *ctx; + int i; + + for (i = 0; i < 2; i++) { + if (ctxld->db[i]) { + dma_free_coherent(ctxld->dev, + CTXLD_DB_CTX_ENTRIES * sizeof(*ctx), + ctxld->db[i], ctxld->db_paddr[i]); + ctxld->db[i] = NULL; + ctxld->db_paddr[i] = 0; + } + + if (ctxld->sb_hp[i]) { + dma_free_coherent(ctxld->dev, + CTXLD_SB_CTX_ENTRIES * sizeof(*ctx), + ctxld->sb_hp[i], ctxld->sb_paddr[i]); + ctxld->sb_hp[i] = NULL; + ctxld->sb_paddr[i] = 0; + } + } +} + +static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld) +{ + struct dcss_ctxld_item *ctx; + int i; + + for (i = 0; i < 2; i++) { + ctx = dma_alloc_coherent(ctxld->dev, + CTXLD_DB_CTX_ENTRIES * sizeof(*ctx), + &ctxld->db_paddr[i], GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctxld->db[i] = ctx; + + ctx = dma_alloc_coherent(ctxld->dev, + CTXLD_SB_CTX_ENTRIES * sizeof(*ctx), + &ctxld->sb_paddr[i], GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctxld->sb_hp[i] = ctx; + ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES; + } + + return 0; +} + +int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base) +{ + struct dcss_ctxld *ctxld; + int ret; + + ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL); + if (!ctxld) + return -ENOMEM; + + dcss->ctxld = ctxld; + ctxld->dev = dcss->dev; + + spin_lock_init(&ctxld->lock); + + ret = dcss_ctxld_alloc_ctx(ctxld); + if (ret) { + dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n"); + goto err; + } + + ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K); + if (!ctxld->ctxld_reg) { + dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n"); + ret = -ENOMEM; + goto err; + } + + ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev)); + if (ret) + goto err_irq; + + dcss_ctxld_hw_cfg(ctxld); + + return 0; + +err_irq: + iounmap(ctxld->ctxld_reg); + +err: + dcss_ctxld_free_ctx(ctxld); + kfree(ctxld); + + return ret; +} + +void dcss_ctxld_exit(struct dcss_ctxld *ctxld) +{ + free_irq(ctxld->irq, ctxld); + + if (ctxld->ctxld_reg) + iounmap(ctxld->ctxld_reg); + + dcss_ctxld_free_ctx(ctxld); + kfree(ctxld); +} + +static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld) +{ + int curr_ctx = ctxld->current_ctx; + u32 db_base, sb_base, sb_count; + u32 sb_hp_cnt, sb_lp_cnt, db_cnt; + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev); + + if (!dcss) + return 0; + + dcss_dpr_write_sysctrl(dcss->dpr); + + dcss_scaler_write_sclctrl(dcss->scaler); + + sb_hp_cnt = 
ctxld->ctx_size[curr_ctx][CTX_SB_HP]; + sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP]; + db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB]; + + /* make sure SB_LP context area comes after SB_HP */ + if (sb_lp_cnt && + ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) { + struct dcss_ctxld_item *sb_lp_adjusted; + + sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt; + + memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx], + sb_lp_cnt * CTX_ITEM_SIZE); + } + + db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0; + + dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR); + dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT); + + if (sb_hp_cnt) + sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) | + ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK); + else + sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK; + + sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0; + + dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR); + dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT); + + /* enable the context loader */ + dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS); + + ctxld->in_use = true; + + /* + * Toggle the current context to the alternate one so that any updates + * in the modules' settings take place there. + */ + ctxld->current_ctx ^= 1; + + ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0; + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0; + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0; + + return 0; +} + +int dcss_ctxld_enable(struct dcss_ctxld *ctxld) +{ + spin_lock_irq(&ctxld->lock); + ctxld->armed = true; + spin_unlock_irq(&ctxld->lock); + + return 0; +} + +void dcss_ctxld_kick(struct dcss_ctxld *ctxld) +{ + unsigned long flags; + + spin_lock_irqsave(&ctxld->lock, flags); + if (ctxld->armed && !ctxld->in_use) { + ctxld->armed = false; + dcss_ctxld_enable_locked(ctxld); + } + spin_unlock_irqrestore(&ctxld->lock, flags); +} + +void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val, + u32 reg_ofs) +{ + int curr_ctx = ctxld->current_ctx; + struct dcss_ctxld_item *ctx[] = { + [CTX_DB] = ctxld->db[curr_ctx], + [CTX_SB_HP] = ctxld->sb_hp[curr_ctx], + [CTX_SB_LP] = ctxld->sb_lp[curr_ctx] + }; + int item_idx = ctxld->ctx_size[curr_ctx][ctx_id]; + + if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) { + WARN_ON(1); + return; + } + + ctx[ctx_id][item_idx].val = val; + ctx[ctx_id][item_idx].ofs = reg_ofs; + ctxld->ctx_size[curr_ctx][ctx_id] += 1; +} + +void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id, + u32 val, u32 reg_ofs) +{ + spin_lock_irq(&ctxld->lock); + dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs); + spin_unlock_irq(&ctxld->lock); +} + +bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld) +{ + return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 && + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 && + ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0; +} + +int dcss_ctxld_resume(struct dcss_ctxld *ctxld) +{ + dcss_ctxld_hw_cfg(ctxld); + + if (!ctxld->irq_en) { + enable_irq(ctxld->irq); + ctxld->irq_en = true; + } + + return 0; +} + +int dcss_ctxld_suspend(struct dcss_ctxld *ctxld) +{ + int ret = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(500); + + if (!dcss_ctxld_is_flushed(ctxld)) { + dcss_ctxld_kick(ctxld); + + while (!time_after(jiffies, timeout) && ctxld->in_use) + msleep(20); + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + } + + spin_lock_irq(&ctxld->lock); + + if (ctxld->irq_en) { + disable_irq_nosync(ctxld->irq); + 
ctxld->irq_en = false; + } + + /* reset context region and sizes */ + ctxld->current_ctx = 0; + ctxld->ctx_size[0][CTX_DB] = 0; + ctxld->ctx_size[0][CTX_SB_HP] = 0; + ctxld->ctx_size[0][CTX_SB_LP] = 0; + + spin_unlock_irq(&ctxld->lock); + + return ret; +} + +void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld) +{ + lockdep_assert_held(&ctxld->lock); +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c new file mode 100644 index 000000000000..c849533ca83e --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. + */ + +#include <linux/clk.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_modeset_helper.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +static void dcss_clocks_enable(struct dcss_dev *dcss) +{ + clk_prepare_enable(dcss->axi_clk); + clk_prepare_enable(dcss->apb_clk); + clk_prepare_enable(dcss->rtrm_clk); + clk_prepare_enable(dcss->dtrc_clk); + clk_prepare_enable(dcss->pix_clk); +} + +static void dcss_clocks_disable(struct dcss_dev *dcss) +{ + clk_disable_unprepare(dcss->pix_clk); + clk_disable_unprepare(dcss->dtrc_clk); + clk_disable_unprepare(dcss->rtrm_clk); + clk_disable_unprepare(dcss->apb_clk); + clk_disable_unprepare(dcss->axi_clk); +} + +static void dcss_disable_dtg_and_ss_cb(void *data) +{ + struct dcss_dev *dcss = data; + + dcss->disable_callback = NULL; + + dcss_ss_shutoff(dcss->ss); + dcss_dtg_shutoff(dcss->dtg); + + complete(&dcss->disable_completion); +} + +void dcss_disable_dtg_and_ss(struct dcss_dev *dcss) +{ + dcss->disable_callback = dcss_disable_dtg_and_ss_cb; +} + +void dcss_enable_dtg_and_ss(struct dcss_dev *dcss) +{ + if (dcss->disable_callback) + dcss->disable_callback = NULL; + + dcss_dtg_enable(dcss->dtg); + dcss_ss_enable(dcss->ss); +} + +static int dcss_submodules_init(struct dcss_dev *dcss) +{ + int ret = 0; + u32 base_addr = dcss->start_addr; + const struct dcss_type_data *devtype = dcss->devtype; + + dcss_clocks_enable(dcss); + + ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs); + if (ret) + return ret; + + ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs); + if (ret) + goto ctxld_err; + + ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs); + if (ret) + goto dtg_err; + + ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs); + if (ret) + goto ss_err; + + ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs); + if (ret) + goto dpr_err; + + ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs); + if (ret) + goto scaler_err; + + dcss_clocks_disable(dcss); + + return 0; + +scaler_err: + dcss_dpr_exit(dcss->dpr); + +dpr_err: + dcss_ss_exit(dcss->ss); + +ss_err: + dcss_dtg_exit(dcss->dtg); + +dtg_err: + dcss_ctxld_exit(dcss->ctxld); + +ctxld_err: + dcss_blkctl_exit(dcss->blkctl); + + dcss_clocks_disable(dcss); + + return ret; +} + +static void dcss_submodules_stop(struct dcss_dev *dcss) +{ + dcss_clocks_enable(dcss); + dcss_scaler_exit(dcss->scaler); + dcss_dpr_exit(dcss->dpr); + dcss_ss_exit(dcss->ss); + dcss_dtg_exit(dcss->dtg); + dcss_ctxld_exit(dcss->ctxld); + dcss_blkctl_exit(dcss->blkctl); + dcss_clocks_disable(dcss); +} + +static int dcss_clks_init(struct dcss_dev *dcss) +{ + int i; + struct { + const char *id; + struct clk **clk; + } clks[] = { + {"apb", &dcss->apb_clk}, + {"axi", &dcss->axi_clk}, + {"pix", &dcss->pix_clk}, + {"rtrm", 
&dcss->rtrm_clk}, + {"dtrc", &dcss->dtrc_clk}, + }; + + for (i = 0; i < ARRAY_SIZE(clks); i++) { + *clks[i].clk = devm_clk_get(dcss->dev, clks[i].id); + if (IS_ERR(*clks[i].clk)) { + dev_err(dcss->dev, "failed to get %s clock\n", + clks[i].id); + return PTR_ERR(*clks[i].clk); + } + } + + return 0; +} + +static void dcss_clks_release(struct dcss_dev *dcss) +{ + devm_clk_put(dcss->dev, dcss->dtrc_clk); + devm_clk_put(dcss->dev, dcss->rtrm_clk); + devm_clk_put(dcss->dev, dcss->pix_clk); + devm_clk_put(dcss->dev, dcss->axi_clk); + devm_clk_put(dcss->dev, dcss->apb_clk); +} + +struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output) +{ + struct platform_device *pdev = to_platform_device(dev); + int ret; + struct resource *res; + struct dcss_dev *dcss; + const struct dcss_type_data *devtype; + + devtype = of_device_get_match_data(dev); + if (!devtype) { + dev_err(dev, "no device match found\n"); + return ERR_PTR(-ENODEV); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "cannot get memory resource\n"); + return ERR_PTR(-EINVAL); + } + + dcss = kzalloc(sizeof(*dcss), GFP_KERNEL); + if (!dcss) + return ERR_PTR(-ENOMEM); + + dcss->dev = dev; + dcss->devtype = devtype; + dcss->hdmi_output = hdmi_output; + + ret = dcss_clks_init(dcss); + if (ret) { + dev_err(dev, "clocks initialization failed\n"); + goto err; + } + + dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0); + if (!dcss->of_port) { + dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name); + ret = -ENODEV; + goto clks_err; + } + + dcss->start_addr = res->start; + + ret = dcss_submodules_init(dcss); + if (ret) { + dev_err(dev, "submodules initialization failed\n"); + goto clks_err; + } + + init_completion(&dcss->disable_completion); + + pm_runtime_set_autosuspend_delay(dev, 100); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_suspended(dev); + pm_runtime_allow(dev); + pm_runtime_enable(dev); + + return dcss; + +clks_err: + dcss_clks_release(dcss); + +err: + kfree(dcss); + + return ERR_PTR(ret); +} + +void dcss_dev_destroy(struct dcss_dev *dcss) +{ + if (!pm_runtime_suspended(dcss->dev)) { + dcss_ctxld_suspend(dcss->ctxld); + dcss_clocks_disable(dcss); + } + + pm_runtime_disable(dcss->dev); + + dcss_submodules_stop(dcss); + + dcss_clks_release(dcss); + + kfree(dcss); +} + +#ifdef CONFIG_PM_SLEEP +int dcss_dev_suspend(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + struct drm_device *ddev = dcss_drv_dev_to_drm(dev); + struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base); + int ret; + + drm_bridge_connector_disable_hpd(kms->connector); + + drm_mode_config_helper_suspend(ddev); + + if (pm_runtime_suspended(dev)) + return 0; + + ret = dcss_ctxld_suspend(dcss->ctxld); + if (ret) + return ret; + + dcss_clocks_disable(dcss); + + return 0; +} + +int dcss_dev_resume(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + struct drm_device *ddev = dcss_drv_dev_to_drm(dev); + struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base); + + if (pm_runtime_suspended(dev)) { + drm_mode_config_helper_resume(ddev); + return 0; + } + + dcss_clocks_enable(dcss); + + dcss_blkctl_cfg(dcss->blkctl); + + dcss_ctxld_resume(dcss->ctxld); + + drm_mode_config_helper_resume(ddev); + + drm_bridge_connector_enable_hpd(kms->connector); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +int dcss_dev_runtime_suspend(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + int 
ret; + + ret = dcss_ctxld_suspend(dcss->ctxld); + if (ret) + return ret; + + dcss_clocks_disable(dcss); + + return 0; +} + +int dcss_dev_runtime_resume(struct device *dev) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev); + + dcss_clocks_enable(dcss); + + dcss_blkctl_cfg(dcss->blkctl); + + dcss_ctxld_resume(dcss->ctxld); + + return 0; +} +#endif /* CONFIG_PM */ diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h new file mode 100644 index 000000000000..c642ae17837f --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 NXP. + */ + +#ifndef __DCSS_PRV_H__ +#define __DCSS_PRV_H__ + +#include <drm/drm_fourcc.h> +#include <linux/io.h> +#include <video/videomode.h> + +#define SET 0x04 +#define CLR 0x08 +#define TGL 0x0C + +#define dcss_writel(v, c) writel((v), (c)) +#define dcss_readl(c) readl(c) +#define dcss_set(v, c) writel((v), (c) + SET) +#define dcss_clr(v, c) writel((v), (c) + CLR) +#define dcss_toggle(v, c) writel((v), (c) + TGL) + +static inline void dcss_update(u32 v, u32 m, void __iomem *c) +{ + writel((readl(c) & ~(m)) | (v), (c)); +} + +#define DCSS_DBG_REG(reg) {.name = #reg, .ofs = reg} + +enum { + DCSS_IMX8MQ = 0, +}; + +struct dcss_type_data { + const char *name; + u32 blkctl_ofs; + u32 ctxld_ofs; + u32 rdsrc_ofs; + u32 wrscl_ofs; + u32 dtg_ofs; + u32 scaler_ofs; + u32 ss_ofs; + u32 dpr_ofs; + u32 dtrc_ofs; + u32 dec400d_ofs; + u32 hdr10_ofs; +}; + +struct dcss_debug_reg { + char *name; + u32 ofs; +}; + +enum dcss_ctxld_ctx_type { + CTX_DB, + CTX_SB_HP, /* high-priority */ + CTX_SB_LP, /* low-priority */ +}; + +struct dcss_dev { + struct device *dev; + const struct dcss_type_data *devtype; + struct device_node *of_port; + + u32 start_addr; + + struct dcss_blkctl *blkctl; + struct dcss_ctxld *ctxld; + struct dcss_dpr *dpr; + struct dcss_dtg *dtg; + struct dcss_ss *ss; + struct dcss_hdr10 *hdr10; + struct dcss_scaler *scaler; + struct dcss_dtrc *dtrc; + struct dcss_dec400d *dec400d; + struct dcss_wrscl *wrscl; + struct dcss_rdsrc *rdsrc; + + struct clk *apb_clk; + struct clk *axi_clk; + struct clk *pix_clk; + struct clk *rtrm_clk; + struct clk *dtrc_clk; + struct clk *pll_src_clk; + struct clk *pll_phy_ref_clk; + + bool hdmi_output; + + void (*disable_callback)(void *data); + struct completion disable_completion; +}; + +struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev); +struct drm_device *dcss_drv_dev_to_drm(struct device *dev); +struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output); +void dcss_dev_destroy(struct dcss_dev *dcss); +int dcss_dev_runtime_suspend(struct device *dev); +int dcss_dev_runtime_resume(struct device *dev); +int dcss_dev_suspend(struct device *dev); +int dcss_dev_resume(struct device *dev); +void dcss_enable_dtg_and_ss(struct dcss_dev *dcss); +void dcss_disable_dtg_and_ss(struct dcss_dev *dcss); + +/* BLKCTL */ +int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base); +void dcss_blkctl_cfg(struct dcss_blkctl *blkctl); +void dcss_blkctl_exit(struct dcss_blkctl *blkctl); + +/* CTXLD */ +int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base); +void dcss_ctxld_exit(struct dcss_ctxld *ctxld); +void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id, + u32 val, u32 reg_ofs); +int dcss_ctxld_resume(struct dcss_ctxld *dcss_ctxld); +int dcss_ctxld_suspend(struct dcss_ctxld *dcss_ctxld); +void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val, + u32 reg_ofs); +void 
dcss_ctxld_kick(struct dcss_ctxld *ctxld); +bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld); +int dcss_ctxld_enable(struct dcss_ctxld *ctxld); +void dcss_ctxld_register_completion(struct dcss_ctxld *ctxld, + struct completion *dis_completion); +void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld); + +/* DPR */ +int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base); +void dcss_dpr_exit(struct dcss_dpr *dpr); +void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr); +void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres); +void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr, + u32 chroma_base_addr, u16 pitch); +void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en); +void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num, + const struct drm_format_info *format, u64 modifier); +void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation); + +/* DTG */ +int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base); +void dcss_dtg_exit(struct dcss_dtg *dtg); +bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg); +void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en); +void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg); +void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm); +void dcss_dtg_css_set(struct dcss_dtg *dtg); +void dcss_dtg_enable(struct dcss_dtg *dtg); +void dcss_dtg_shutoff(struct dcss_dtg *dtg); +bool dcss_dtg_is_enabled(struct dcss_dtg *dtg); +void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en); +bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha); +void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num, + const struct drm_format_info *format, int alpha); +void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num, + int px, int py, int pw, int ph); +void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en); + +/* SUBSAM */ +int dcss_ss_init(struct dcss_dev *dcss, unsigned long subsam_base); +void dcss_ss_exit(struct dcss_ss *ss); +void dcss_ss_enable(struct dcss_ss *ss); +void dcss_ss_shutoff(struct dcss_ss *ss); +void dcss_ss_subsam_set(struct dcss_ss *ss); +void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, + bool phsync, bool pvsync); + +/* SCALER */ +int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base); +void dcss_scaler_exit(struct dcss_scaler *scl); +void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, + const struct drm_format_info *format, + int src_xres, int src_yres, int dst_xres, int dst_yres, + u32 vrefresh_hz); +void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en); +int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num, + int *min, int *max); +void dcss_scaler_write_sclctrl(struct dcss_scaler *scl); + +#endif /* __DCSS_PRV_H__ */ diff --git a/drivers/gpu/drm/imx/dcss/dcss-dpr.c b/drivers/gpu/drm/imx/dcss/dcss-dpr.c new file mode 100644 index 000000000000..df9dab949bf2 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dpr.c @@ -0,0 +1,562 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
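+ * + * DPR is the DCSS pre-fetch/resolve engine: one instance per channel + * fetches the plane buffer from memory, de-tiles linear, GPU-tiled and + * VPU-tiled layouts, applies flip/rotation and hands the pixels on to + * the scaler.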
+ */ + +#include <linux/device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_DPR_SYSTEM_CTRL0 0x000 +#define RUN_EN BIT(0) +#define SOFT_RESET BIT(1) +#define REPEAT_EN BIT(2) +#define SHADOW_LOAD_EN BIT(3) +#define SW_SHADOW_LOAD_SEL BIT(4) +#define BCMD2AXI_MSTR_ID_CTRL BIT(16) +#define DCSS_DPR_IRQ_MASK 0x020 +#define DCSS_DPR_IRQ_MASK_STATUS 0x030 +#define DCSS_DPR_IRQ_NONMASK_STATUS 0x040 +#define IRQ_DPR_CTRL_DONE BIT(0) +#define IRQ_DPR_RUN BIT(1) +#define IRQ_DPR_SHADOW_LOADED BIT(2) +#define IRQ_AXI_READ_ERR BIT(3) +#define DPR2RTR_YRGB_FIFO_OVFL BIT(4) +#define DPR2RTR_UV_FIFO_OVFL BIT(5) +#define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6) +#define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7) +#define DCSS_DPR_MODE_CTRL0 0x050 +#define RTR_3BUF_EN BIT(0) +#define RTR_4LINE_BUF_EN BIT(1) +#define TILE_TYPE_POS 2 +#define TILE_TYPE_MASK GENMASK(4, 2) +#define YUV_EN BIT(6) +#define COMP_2PLANE_EN BIT(7) +#define PIX_SIZE_POS 8 +#define PIX_SIZE_MASK GENMASK(9, 8) +#define PIX_LUMA_UV_SWAP BIT(10) +#define PIX_UV_SWAP BIT(11) +#define B_COMP_SEL_POS 12 +#define B_COMP_SEL_MASK GENMASK(13, 12) +#define G_COMP_SEL_POS 14 +#define G_COMP_SEL_MASK GENMASK(15, 14) +#define R_COMP_SEL_POS 16 +#define R_COMP_SEL_MASK GENMASK(17, 16) +#define A_COMP_SEL_POS 18 +#define A_COMP_SEL_MASK GENMASK(19, 18) +#define DCSS_DPR_FRAME_CTRL0 0x070 +#define HFLIP_EN BIT(0) +#define VFLIP_EN BIT(1) +#define ROT_ENC_POS 2 +#define ROT_ENC_MASK GENMASK(3, 2) +#define ROT_FLIP_ORDER_EN BIT(4) +#define PITCH_POS 16 +#define PITCH_MASK GENMASK(31, 16) +#define DCSS_DPR_FRAME_1P_CTRL0 0x090 +#define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0 +#define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0 +#define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0 +#define DCSS_DPR_FRAME_2P_CTRL0 0x0E0 +#define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0 +#define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100 +#define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110 +#define DCSS_DPR_STATUS_CTRL0 0x130 +#define STATUS_MUX_SEL_MASK GENMASK(2, 0) +#define STATUS_SRC_SEL_POS 16 +#define STATUS_SRC_SEL_MASK GENMASK(18, 16) +#define DCSS_DPR_STATUS_CTRL1 0x140 +#define DCSS_DPR_RTRAM_CTRL0 0x200 +#define NUM_ROWS_ACTIVE BIT(0) +#define THRES_HIGH_POS 1 +#define THRES_HIGH_MASK GENMASK(3, 1) +#define THRES_LOW_POS 4 +#define THRES_LOW_MASK GENMASK(6, 4) +#define ABORT_SEL BIT(7) + +enum dcss_tile_type { + TILE_LINEAR = 0, + TILE_GPU_STANDARD, + TILE_GPU_SUPER, + TILE_VPU_YUV420, + TILE_VPU_VP9, +}; + +enum dcss_pix_size { + PIX_SIZE_8, + PIX_SIZE_16, + PIX_SIZE_32, +}; + +struct dcss_dpr_ch { + struct dcss_dpr *dpr; + void __iomem *base_reg; + u32 base_ofs; + + struct drm_format_info format; + enum dcss_pix_size pix_size; + enum dcss_tile_type tile; + bool rtram_4line_en; + bool rtram_3buf_en; + + u32 frame_ctrl; + u32 mode_ctrl; + u32 sys_ctrl; + u32 rtram_ctrl; + + bool sys_ctrl_chgd; + + int ch_num; + int irq; +}; + +struct dcss_dpr { + struct device *dev; + struct dcss_ctxld *ctxld; + u32 ctx_id; + + struct dcss_dpr_ch ch[3]; +}; + +static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs) +{ + struct dcss_dpr *dpr = ch->dpr; + + dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs); +} + +static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base) +{ + struct dcss_dpr_ch *ch; + int i; + + for (i = 0; i < 3; i++) { + ch = &dpr->ch[i]; + + ch->base_ofs = dpr_base + i * 0x1000; + + ch->base_reg = ioremap(ch->base_ofs, SZ_4K); + if (!ch->base_reg) { + dev_err(dpr->dev, "dpr: unable to remap ch %d base\n", + i); + return -ENOMEM; + } + + 
ch->dpr = dpr; + ch->ch_num = i; + + dcss_writel(0xff, ch->base_reg + DCSS_DPR_IRQ_MASK); + } + + return 0; +} + +int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base) +{ + struct dcss_dpr *dpr; + + dpr = kzalloc(sizeof(*dpr), GFP_KERNEL); + if (!dpr) + return -ENOMEM; + + dcss->dpr = dpr; + dpr->dev = dcss->dev; + dpr->ctxld = dcss->ctxld; + dpr->ctx_id = CTX_SB_HP; + + if (dcss_dpr_ch_init_all(dpr, dpr_base)) { + int i; + + for (i = 0; i < 3; i++) { + if (dpr->ch[i].base_reg) + iounmap(dpr->ch[i].base_reg); + } + + kfree(dpr); + + return -ENOMEM; + } + + return 0; +} + +void dcss_dpr_exit(struct dcss_dpr *dpr) +{ + int ch_no; + + /* stop DPR on all channels */ + for (ch_no = 0; ch_no < 3; ch_no++) { + struct dcss_dpr_ch *ch = &dpr->ch[ch_no]; + + dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0); + + if (ch->base_reg) + iounmap(ch->base_reg); + } + + kfree(dpr); +} + +static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide, + u32 pix_format) +{ + u8 pix_in_64byte_map[3][5] = { + /* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */ + { 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */ + { 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */ + { 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */ + }; + u32 offset; + u32 div_64byte_mod, pix_in_64byte; + + pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile]; + + div_64byte_mod = pix_wide % pix_in_64byte; + offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod); + + return pix_wide + offset; +} + +static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high, + u32 pix_format) +{ + u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8; + u32 offset, pix_y_mod; + + pix_y_mod = pix_high % num_rows_buf; + offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0; + + return pix_high + offset; +} + +void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + u32 pix_format = ch->format.format; + u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR; + int plane, max_planes = 1; + u32 pix_x_wide, pix_y_high; + + if (pix_format == DRM_FORMAT_NV12 || + pix_format == DRM_FORMAT_NV21) + max_planes = 2; + + for (plane = 0; plane < max_planes; plane++) { + yres = plane == 1 ? 
yres >> 1 : yres; + + pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format); + pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format); + + dcss_dpr_write(ch, pix_x_wide, + DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap); + dcss_dpr_write(ch, pix_y_high, + DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap); + + dcss_dpr_write(ch, 2, DCSS_DPR_FRAME_1P_CTRL0 + plane * gap); + } +} + +void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr, + u32 chroma_base_addr, u16 pitch) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + + dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR); + + dcss_dpr_write(ch, chroma_base_addr, DCSS_DPR_FRAME_2P_BASE_ADDR); + + ch->frame_ctrl &= ~PITCH_MASK; + ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK); +} + +static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel, + int g_sel, int b_sel) +{ + u32 sel; + + sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) | + ((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) | + ((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) | + ((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK); + + ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK | + G_COMP_SEL_MASK | B_COMP_SEL_MASK); + ch->mode_ctrl |= sel; +} + +static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch, + const struct drm_format_info *format) +{ + u32 val; + + switch (format->format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + val = PIX_SIZE_8; + break; + + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + val = PIX_SIZE_16; + break; + + default: + val = PIX_SIZE_32; + break; + } + + ch->pix_size = val; + + ch->mode_ctrl &= ~PIX_SIZE_MASK; + ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK); +} + +static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap) +{ + ch->mode_ctrl &= ~PIX_UV_SWAP; + ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0); +} + +static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap) +{ + ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP; + ch->mode_ctrl |= (swap ? PIX_LUMA_UV_SWAP : 0); +} + +static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en) +{ + ch->mode_ctrl &= ~COMP_2PLANE_EN; + ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0); +} + +static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en) +{ + ch->mode_ctrl &= ~YUV_EN; + ch->mode_ctrl |= (en ? YUV_EN : 0); +} + +void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + u32 sys_ctrl; + + sys_ctrl = (en ? 
REPEAT_EN | RUN_EN : 0); + + if (en) { + dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0); + dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0); + dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0); + } + + if (ch->sys_ctrl != sys_ctrl) + ch->sys_ctrl_chgd = true; + + ch->sys_ctrl = sys_ctrl; +} + +struct rgb_comp_sel { + u32 drm_format; + int a_sel; + int r_sel; + int g_sel; + int b_sel; +}; + +static struct rgb_comp_sel comp_sel_map[] = { + {DRM_FORMAT_ARGB8888, 3, 2, 1, 0}, + {DRM_FORMAT_XRGB8888, 3, 2, 1, 0}, + {DRM_FORMAT_ABGR8888, 3, 0, 1, 2}, + {DRM_FORMAT_XBGR8888, 3, 0, 1, 2}, + {DRM_FORMAT_RGBA8888, 0, 3, 2, 1}, + {DRM_FORMAT_RGBX8888, 0, 3, 2, 1}, + {DRM_FORMAT_BGRA8888, 0, 1, 2, 3}, + {DRM_FORMAT_BGRX8888, 0, 1, 2, 3}, +}; + +static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel, + int *b_sel) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) { + if (comp_sel_map[i].drm_format == pix_fmt) { + *a_sel = comp_sel_map[i].a_sel; + *r_sel = comp_sel_map[i].r_sel; + *g_sel = comp_sel_map[i].g_sel; + *b_sel = comp_sel_map[i].b_sel; + + return 0; + } + } + + return -1; +} + +static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format) +{ + u32 val, mask; + + switch (pix_format) { + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV12: + ch->rtram_3buf_en = true; + ch->rtram_4line_en = false; + break; + + default: + ch->rtram_3buf_en = true; + ch->rtram_4line_en = true; + break; + } + + val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0); + val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0); + mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN; + + ch->mode_ctrl &= ~mask; + ch->mode_ctrl |= (val & mask); + + val = (ch->rtram_4line_en ? 0 : NUM_ROWS_ACTIVE); + val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK; + val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK; + mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE; + + ch->rtram_ctrl &= ~mask; + ch->rtram_ctrl |= (val & mask); +} + +static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch, + const struct drm_format_info *format) +{ + int a_sel, r_sel, g_sel, b_sel; + bool uv_swap, y_uv_swap; + + switch (format->format) { + case DRM_FORMAT_YVYU: + uv_swap = true; + y_uv_swap = true; + break; + + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV21: + uv_swap = true; + y_uv_swap = false; + break; + + case DRM_FORMAT_YUYV: + uv_swap = false; + y_uv_swap = true; + break; + + default: + uv_swap = false; + y_uv_swap = false; + break; + } + + dcss_dpr_uv_swap(ch, uv_swap); + + dcss_dpr_y_uv_swap(ch, y_uv_swap); + + if (!format->is_yuv) { + if (!to_comp_sel(format->format, &a_sel, &r_sel, + &g_sel, &b_sel)) { + dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel); + } else { + dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0); + } + } else { + dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0); + } +} + +static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier) +{ + switch (ch->ch_num) { + case 0: + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + ch->tile = TILE_LINEAR; + break; + case DRM_FORMAT_MOD_VIVANTE_TILED: + ch->tile = TILE_GPU_STANDARD; + break; + case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED: + ch->tile = TILE_GPU_SUPER; + break; + default: + WARN_ON(1); + break; + } + break; + case 1: + case 2: + ch->tile = TILE_LINEAR; + break; + default: + WARN_ON(1); + return; + } + + ch->mode_ctrl &= ~TILE_TYPE_MASK; + ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK); +} + +void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num, + const struct drm_format_info *format, u64 modifier) +{ + struct dcss_dpr_ch *ch = 
&dpr->ch[ch_num]; + + ch->format = *format; + + dcss_dpr_yuv_en(ch, format->is_yuv); + + dcss_dpr_pix_size_set(ch, format); + + dcss_dpr_setup_components(ch, format); + + dcss_dpr_2plane_en(ch, format->num_planes == 2); + + dcss_dpr_rtram_set(ch, format->format); + + dcss_dpr_tile_set(ch, modifier); +} + +/* This function will be called from interrupt context. */ +void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr) +{ + int chnum; + + dcss_ctxld_assert_locked(dpr->ctxld); + + for (chnum = 0; chnum < 3; chnum++) { + struct dcss_dpr_ch *ch = &dpr->ch[chnum]; + + if (ch->sys_ctrl_chgd) { + dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id, + ch->sys_ctrl, + ch->base_ofs + + DCSS_DPR_SYSTEM_CTRL0); + ch->sys_ctrl_chgd = false; + } + } +} + +void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation) +{ + struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; + + ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK); + + ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0; + ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0; + + if (rotation & DRM_MODE_ROTATE_90) + ch->frame_ctrl |= 1 << ROT_ENC_POS; + else if (rotation & DRM_MODE_ROTATE_180) + ch->frame_ctrl |= 2 << ROT_ENC_POS; + else if (rotation & DRM_MODE_ROTATE_270) + ch->frame_ctrl |= 3 << ROT_ENC_POS; +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c new file mode 100644 index 000000000000..8dc2f85c514b --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <drm/drm_of.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +struct dcss_drv { + struct dcss_dev *dcss; + struct dcss_kms_dev *kms; +}; + +struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev) +{ + struct dcss_drv *mdrv = dev_get_drvdata(dev); + + return mdrv ? mdrv->dcss : NULL; +} + +struct drm_device *dcss_drv_dev_to_drm(struct device *dev) +{ + struct dcss_drv *mdrv = dev_get_drvdata(dev); + + return mdrv ? 
&mdrv->kms->base : NULL; +} + +static int dcss_drv_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *remote; + struct dcss_drv *mdrv; + int err = 0; + bool hdmi_output = true; + + if (!dev->of_node) + return -ENODEV; + + remote = of_graph_get_remote_node(dev->of_node, 0, 0); + if (!remote) + return -ENODEV; + + hdmi_output = !of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi"); + + of_node_put(remote); + + mdrv = kzalloc(sizeof(*mdrv), GFP_KERNEL); + if (!mdrv) + return -ENOMEM; + + mdrv->dcss = dcss_dev_create(dev, hdmi_output); + if (IS_ERR(mdrv->dcss)) { + err = PTR_ERR(mdrv->dcss); + goto err; + } + + dev_set_drvdata(dev, mdrv); + + mdrv->kms = dcss_kms_attach(mdrv->dcss); + if (IS_ERR(mdrv->kms)) { + err = PTR_ERR(mdrv->kms); + goto dcss_shutoff; + } + + return 0; + +dcss_shutoff: + dcss_dev_destroy(mdrv->dcss); + + dev_set_drvdata(dev, NULL); + +err: + kfree(mdrv); + return err; +} + +static int dcss_drv_platform_remove(struct platform_device *pdev) +{ + struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev); + + if (!mdrv) + return 0; + + dcss_kms_detach(mdrv->kms); + dcss_dev_destroy(mdrv->dcss); + + dev_set_drvdata(&pdev->dev, NULL); + + kfree(mdrv); + + return 0; +} + +static struct dcss_type_data dcss_types[] = { + [DCSS_IMX8MQ] = { + .name = "DCSS_IMX8MQ", + .blkctl_ofs = 0x2F000, + .ctxld_ofs = 0x23000, + .dtg_ofs = 0x20000, + .scaler_ofs = 0x1C000, + .ss_ofs = 0x1B000, + .dpr_ofs = 0x18000, + }, +}; + +static const struct of_device_id dcss_of_match[] = { + { .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], }, + {}, +}; + +MODULE_DEVICE_TABLE(of, dcss_of_match); + +static const struct dev_pm_ops dcss_dev_pm = { + SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume) + SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend, + dcss_dev_runtime_resume, NULL) +}; + +static struct platform_driver dcss_platform_driver = { + .probe = dcss_drv_platform_probe, + .remove = dcss_drv_platform_remove, + .driver = { + .name = "imx-dcss", + .of_match_table = dcss_of_match, + .pm = &dcss_dev_pm, + }, +}; + +module_platform_driver(dcss_platform_driver); + +MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>"); +MODULE_DESCRIPTION("DCSS driver for i.MX8MQ"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c new file mode 100644 index 000000000000..30de00540f63 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
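+ * + * DTG is the DCSS display timing generator. It produces the raster + * timings, positions and blends the three channels, and raises the line + * interrupts used for vblank handling (LINE1) and for kicking the + * context loader (LINE0).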
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_DTG_TC_CONTROL_STATUS 0x00 +#define CH3_EN BIT(0) +#define CH2_EN BIT(1) +#define CH1_EN BIT(2) +#define OVL_DATA_MODE BIT(3) +#define BLENDER_VIDEO_ALPHA_SEL BIT(7) +#define DTG_START BIT(8) +#define DBY_MODE_EN BIT(9) +#define CH1_ALPHA_SEL BIT(10) +#define CSS_PIX_COMP_SWAP_POS 12 +#define CSS_PIX_COMP_SWAP_MASK GENMASK(14, 12) +#define DEFAULT_FG_ALPHA_POS 24 +#define DEFAULT_FG_ALPHA_MASK GENMASK(31, 24) +#define DCSS_DTG_TC_DTG 0x04 +#define DCSS_DTG_TC_DISP_TOP 0x08 +#define DCSS_DTG_TC_DISP_BOT 0x0C +#define DCSS_DTG_TC_CH1_TOP 0x10 +#define DCSS_DTG_TC_CH1_BOT 0x14 +#define DCSS_DTG_TC_CH2_TOP 0x18 +#define DCSS_DTG_TC_CH2_BOT 0x1C +#define DCSS_DTG_TC_CH3_TOP 0x20 +#define DCSS_DTG_TC_CH3_BOT 0x24 +#define TC_X_POS 0 +#define TC_X_MASK GENMASK(12, 0) +#define TC_Y_POS 16 +#define TC_Y_MASK GENMASK(28, 16) +#define DCSS_DTG_TC_CTXLD 0x28 +#define TC_CTXLD_DB_Y_POS 0 +#define TC_CTXLD_DB_Y_MASK GENMASK(12, 0) +#define TC_CTXLD_SB_Y_POS 16 +#define TC_CTXLD_SB_Y_MASK GENMASK(28, 16) +#define DCSS_DTG_TC_CH1_BKRND 0x2C +#define DCSS_DTG_TC_CH2_BKRND 0x30 +#define BKRND_R_Y_COMP_POS 20 +#define BKRND_R_Y_COMP_MASK GENMASK(29, 20) +#define BKRND_G_U_COMP_POS 10 +#define BKRND_G_U_COMP_MASK GENMASK(19, 10) +#define BKRND_B_V_COMP_POS 0 +#define BKRND_B_V_COMP_MASK GENMASK(9, 0) +#define DCSS_DTG_BLENDER_DBY_RANGEINV 0x38 +#define DCSS_DTG_BLENDER_DBY_RANGEMIN 0x3C +#define DCSS_DTG_BLENDER_DBY_BDP 0x40 +#define DCSS_DTG_BLENDER_BKRND_I 0x44 +#define DCSS_DTG_BLENDER_BKRND_P 0x48 +#define DCSS_DTG_BLENDER_BKRND_T 0x4C +#define DCSS_DTG_LINE0_INT 0x50 +#define DCSS_DTG_LINE1_INT 0x54 +#define DCSS_DTG_BG_ALPHA_DEFAULT 0x58 +#define DCSS_DTG_INT_STATUS 0x5C +#define DCSS_DTG_INT_CONTROL 0x60 +#define DCSS_DTG_TC_CH3_BKRND 0x64 +#define DCSS_DTG_INT_MASK 0x68 +#define LINE0_IRQ BIT(0) +#define LINE1_IRQ BIT(1) +#define LINE2_IRQ BIT(2) +#define LINE3_IRQ BIT(3) +#define DCSS_DTG_LINE2_INT 0x6C +#define DCSS_DTG_LINE3_INT 0x70 +#define DCSS_DTG_DBY_OL 0x74 +#define DCSS_DTG_DBY_BL 0x78 +#define DCSS_DTG_DBY_EL 0x7C + +struct dcss_dtg { + struct device *dev; + struct dcss_ctxld *ctxld; + void __iomem *base_reg; + u32 base_ofs; + + u32 ctx_id; + + bool in_use; + + u32 dis_ulc_x; + u32 dis_ulc_y; + + u32 control_status; + u32 alpha; + u32 alpha_cfg; + + int ctxld_kick_irq; + bool ctxld_kick_irq_en; +}; + +static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs) +{ + if (!dtg->in_use) + dcss_writel(val, dtg->base_reg + ofs); + + dcss_ctxld_write(dtg->ctxld, dtg->ctx_id, + val, dtg->base_ofs + ofs); +} + +static irqreturn_t dcss_dtg_irq_handler(int irq, void *data) +{ + struct dcss_dtg *dtg = data; + u32 status; + + status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS); + + if (!(status & LINE0_IRQ)) + return IRQ_NONE; + + dcss_ctxld_kick(dtg->ctxld); + + dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL); + + return IRQ_HANDLED; +} + +static int dcss_dtg_irq_config(struct dcss_dtg *dtg, + struct platform_device *pdev) +{ + int ret; + + dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick"); + if (dtg->ctxld_kick_irq < 0) + return dtg->ctxld_kick_irq; + + dcss_update(0, LINE0_IRQ | LINE1_IRQ, + dtg->base_reg + DCSS_DTG_INT_MASK); + + ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler, + 0, "dcss_ctxld_kick", dtg); + if (ret) { + 
dev_err(dtg->dev, "dtg: irq request failed.\n"); + return ret; + } + + disable_irq(dtg->ctxld_kick_irq); + + dtg->ctxld_kick_irq_en = false; + + return 0; +} + +int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base) +{ + int ret = 0; + struct dcss_dtg *dtg; + + dtg = kzalloc(sizeof(*dtg), GFP_KERNEL); + if (!dtg) + return -ENOMEM; + + dcss->dtg = dtg; + dtg->dev = dcss->dev; + dtg->ctxld = dcss->ctxld; + + dtg->base_reg = ioremap(dtg_base, SZ_4K); + if (!dtg->base_reg) { + dev_err(dcss->dev, "dtg: unable to remap dtg base\n"); + ret = -ENOMEM; + goto err_ioremap; + } + + dtg->base_ofs = dtg_base; + dtg->ctx_id = CTX_DB; + + dtg->alpha = 255; + + dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL | + ((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK); + + ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev)); + if (ret) + goto err_irq; + + return 0; + +err_irq: + iounmap(dtg->base_reg); + +err_ioremap: + kfree(dtg); + + return ret; +} + +void dcss_dtg_exit(struct dcss_dtg *dtg) +{ + free_irq(dtg->ctxld_kick_irq, dtg); + + if (dtg->base_reg) + iounmap(dtg->base_reg); + + kfree(dtg); +} + +void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm) +{ + struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dtg->dev); + u16 dtg_lrc_x, dtg_lrc_y; + u16 dis_ulc_x, dis_ulc_y; + u16 dis_lrc_x, dis_lrc_y; + u32 sb_ctxld_trig, db_ctxld_trig; + u32 pixclock = vm->pixelclock; + u32 actual_clk; + + dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len + + vm->hactive - 1; + dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len + + vm->vactive - 1; + dis_ulc_x = vm->hsync_len + vm->hback_porch - 1; + dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1; + dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1; + dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch + + vm->vactive - 1; + + clk_disable_unprepare(dcss->pix_clk); + clk_set_rate(dcss->pix_clk, vm->pixelclock); + clk_prepare_enable(dcss->pix_clk); + + actual_clk = clk_get_rate(dcss->pix_clk); + if (pixclock != actual_clk) { + dev_info(dtg->dev, + "Pixel clock set to %u kHz instead of %u kHz.\n", + (actual_clk / 1000), (pixclock / 1000)); + } + + dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x), + DCSS_DTG_TC_DTG); + dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x), + DCSS_DTG_TC_DISP_TOP); + dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x), + DCSS_DTG_TC_DISP_BOT); + + dtg->dis_ulc_x = dis_ulc_x; + dtg->dis_ulc_y = dis_ulc_y; + + sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) & + TC_CTXLD_SB_Y_MASK; + db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) & + TC_CTXLD_DB_Y_MASK; + + dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig, DCSS_DTG_TC_CTXLD); + + /* vblank trigger */ + dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT); + + /* CTXLD trigger */ + dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT); +} + +void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num, + int px, int py, int pw, int ph) +{ + u16 p_ulc_x, p_ulc_y; + u16 p_lrc_x, p_lrc_y; + + p_ulc_x = dtg->dis_ulc_x + px; + p_ulc_y = dtg->dis_ulc_y + py; + p_lrc_x = p_ulc_x + pw; + p_lrc_y = p_ulc_y + ph; + + if (!px && !py && !pw && !ph) { + dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num); + dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num); + } else { + dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x), + DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num); + dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x), + 
DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num); + } +} + +bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha) +{ + if (ch_num) + return false; + + return alpha != dtg->alpha; +} + +void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num, + const struct drm_format_info *format, int alpha) +{ + /* we care about alpha only when channel 0 is concerned */ + if (ch_num) + return; + + /* + * Use global alpha if pixel format does not have alpha channel or the + * user explicitly chose to use global alpha (i.e. alpha is not OPAQUE). + */ + if (!format->has_alpha || alpha != 255) + dtg->alpha_cfg = (alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK; + else /* use per-pixel alpha otherwise */ + dtg->alpha_cfg = CH1_ALPHA_SEL; + + dtg->alpha = alpha; +} + +void dcss_dtg_css_set(struct dcss_dtg *dtg) +{ + dtg->control_status |= + (0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK; +} + +void dcss_dtg_enable(struct dcss_dtg *dtg) +{ + dtg->control_status |= DTG_START; + + dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK); + dtg->control_status |= dtg->alpha_cfg; + + dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS); + + dtg->in_use = true; +} + +void dcss_dtg_shutoff(struct dcss_dtg *dtg) +{ + dtg->control_status &= ~DTG_START; + + dcss_writel(dtg->control_status, + dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS); + + dtg->in_use = false; +} + +bool dcss_dtg_is_enabled(struct dcss_dtg *dtg) +{ + return dtg->in_use; +} + +void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en) +{ + u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN}; + u32 control_status; + + control_status = dtg->control_status & ~ch_en_map[ch_num]; + control_status |= en ? ch_en_map[ch_num] : 0; + + control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK); + control_status |= dtg->alpha_cfg; + + if (dtg->control_status != control_status) + dcss_dtg_write(dtg, control_status, DCSS_DTG_TC_CONTROL_STATUS); + + dtg->control_status = control_status; +} + +void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en) +{ + u32 status; + u32 mask = en ? LINE1_IRQ : 0; + + if (en) { + status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS); + dcss_writel(status & LINE1_IRQ, + dtg->base_reg + DCSS_DTG_INT_CONTROL); + } + + dcss_update(mask, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK); +} + +void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en) +{ + u32 status; + u32 mask = en ? LINE0_IRQ : 0; + + if (en) { + status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS); + + if (!dtg->ctxld_kick_irq_en) { + dcss_writel(status & LINE0_IRQ, + dtg->base_reg + DCSS_DTG_INT_CONTROL); + enable_irq(dtg->ctxld_kick_irq); + dtg->ctxld_kick_irq_en = true; + dcss_update(mask, LINE0_IRQ, + dtg->base_reg + DCSS_DTG_INT_MASK); + } + + return; + } + + if (!dtg->ctxld_kick_irq_en) + return; + + disable_irq_nosync(dtg->ctxld_kick_irq); + dtg->ctxld_kick_irq_en = false; + + dcss_update(mask, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK); +} + +void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg) +{ + dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL); +} + +bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg) +{ + return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ); +} + diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c new file mode 100644 index 000000000000..b72e5cef7e40 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
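+ * + * KMS glue for DCSS: sets up the DRM device with a single CRTC, a + * bridge-based connector and CMA-backed GEM framebuffers.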
+ */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops); + +static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static struct drm_driver dcss_kms_driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, + DRM_GEM_CMA_DRIVER_OPS, + .fops = &dcss_cma_fops, + .name = "imx-dcss", + .desc = "i.MX8MQ Display Subsystem", + .date = "20190917", + .major = 1, + .minor = 0, + .patchlevel = 0, +}; + +static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = { + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, +}; + +static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms) +{ + struct drm_mode_config *config = &kms->base.mode_config; + + drm_mode_config_init(&kms->base); + + config->min_width = 1; + config->min_height = 1; + config->max_width = 4096; + config->max_height = 4096; + config->allow_fb_modifiers = true; + config->normalize_zpos = true; + + config->funcs = &dcss_drm_mode_config_funcs; + config->helper_private = &dcss_mode_config_helpers; +} + +static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int dcss_kms_bridge_connector_init(struct dcss_kms_dev *kms) +{ + struct drm_device *ddev = &kms->base; + struct drm_encoder *encoder = &kms->encoder; + struct drm_crtc *crtc = (struct drm_crtc *)&kms->crtc; + struct drm_panel *panel; + struct drm_bridge *bridge; + int ret; + + ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0, + &panel, &bridge); + if (ret) + return ret; + + if (!bridge) { + dev_err(ddev->dev, "No bridge found %d.\n", ret); + return -ENODEV; + } + + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = drm_encoder_init(&kms->base, encoder, + &dcss_kms_simple_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) { + dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret); + return ret; + } + + ret = drm_bridge_attach(encoder, bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) { + dev_err(ddev->dev, "Unable to attach bridge %pOF\n", + bridge->of_node); + return ret; + } + + kms->connector = drm_bridge_connector_init(ddev, encoder); + if (IS_ERR(kms->connector)) { + dev_err(ddev->dev, "Unable to create bridge connector.\n"); + return PTR_ERR(kms->connector); + } + + drm_connector_attach_encoder(kms->connector, encoder); + + return 0; +} + +struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss) +{ + struct dcss_kms_dev *kms; + struct drm_device *drm; + struct dcss_crtc *crtc; + int ret; + + kms = devm_drm_dev_alloc(dcss->dev, &dcss_kms_driver, + struct dcss_kms_dev, base); + if (IS_ERR(kms)) + return kms; + + drm = &kms->base; + crtc = &kms->crtc; + + drm->dev_private = dcss; + + dcss_kms_mode_config_init(kms); + + ret = drm_vblank_init(drm, 1); + if (ret) + goto cleanup_mode_config; + + drm->irq_enabled = true; + + ret = dcss_kms_bridge_connector_init(kms); + if (ret) + goto cleanup_mode_config; + + ret = dcss_crtc_init(crtc, drm); + if (ret) + goto 
cleanup_mode_config; + + drm_mode_config_reset(drm); + + drm_kms_helper_poll_init(drm); + + drm_bridge_connector_enable_hpd(kms->connector); + + ret = drm_dev_register(drm, 0); + if (ret) + goto cleanup_crtc; + + drm_fbdev_generic_setup(drm, 32); + + return kms; + +cleanup_crtc: + drm_bridge_connector_disable_hpd(kms->connector); + drm_kms_helper_poll_fini(drm); + dcss_crtc_deinit(crtc, drm); + +cleanup_mode_config: + drm_mode_config_cleanup(drm); + drm->dev_private = NULL; + + return ERR_PTR(ret); +} + +void dcss_kms_detach(struct dcss_kms_dev *kms) +{ + struct drm_device *drm = &kms->base; + + drm_dev_unregister(drm); + drm_bridge_connector_disable_hpd(kms->connector); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + drm_crtc_vblank_off(&kms->crtc.base); + drm->irq_enabled = false; + drm_mode_config_cleanup(drm); + dcss_crtc_deinit(&kms->crtc, drm); + drm->dev_private = NULL; +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.h b/drivers/gpu/drm/imx/dcss/dcss-kms.h new file mode 100644 index 000000000000..dfe5dd99eea3 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 NXP. + */ + +#ifndef _DCSS_KMS_H_ +#define _DCSS_KMS_H_ + +#include <drm/drm_encoder.h> + +struct dcss_plane { + struct drm_plane base; + + int ch_num; +}; + +struct dcss_crtc { + struct drm_crtc base; + struct drm_crtc_state *state; + + struct dcss_plane *plane[3]; + + int irq; + + bool disable_ctxld_kick_irq; +}; + +struct dcss_kms_dev { + struct drm_device base; + struct dcss_crtc crtc; + struct drm_encoder encoder; + struct drm_connector *connector; +}; + +struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss); +void dcss_kms_detach(struct dcss_kms_dev *kms); +int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm); +void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm); +struct dcss_plane *dcss_plane_init(struct drm_device *drm, + unsigned int possible_crtcs, + enum drm_plane_type type, + unsigned int zpos); + +#endif diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c new file mode 100644 index 000000000000..e13652e3a115 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
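+ * + * DRM plane implementation for DCSS. Each plane drives one DPR/scaler + * channel; only the primary plane accepts the Vivante tiled modifiers, + * while overlay planes are restricted to linear buffers.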
+ */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_cma_helper.h> + +#include "dcss-dev.h" +#include "dcss-kms.h" + +static const u32 dcss_common_formats[] = { + /* RGB */ + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_RGBX1010102, + DRM_FORMAT_BGRX1010102, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, +}; + +static const u64 dcss_video_format_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + +static const u64 dcss_graphics_format_modifiers[] = { + DRM_FORMAT_MOD_VIVANTE_TILED, + DRM_FORMAT_MOD_VIVANTE_SUPER_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + +static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p) +{ + return container_of(p, struct dcss_plane, base); +} + +static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb) +{ + return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) || + ((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 && + fb->modifier == DRM_FORMAT_MOD_LINEAR); +} + +static void dcss_plane_destroy(struct drm_plane *plane) +{ + struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane, + base); + + drm_plane_cleanup(plane); + kfree(dcss_plane); +} + +static bool dcss_plane_format_mod_supported(struct drm_plane *plane, + u32 format, + u64 modifier) +{ + switch (plane->type) { + case DRM_PLANE_TYPE_PRIMARY: + switch (format) { + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB2101010: + return modifier == DRM_FORMAT_MOD_LINEAR || + modifier == DRM_FORMAT_MOD_VIVANTE_TILED || + modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED; + default: + return modifier == DRM_FORMAT_MOD_LINEAR; + } + break; + case DRM_PLANE_TYPE_OVERLAY: + return modifier == DRM_FORMAT_MOD_LINEAR; + default: + return false; + } +} + +static const struct drm_plane_funcs dcss_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = dcss_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .format_mod_supported = dcss_plane_format_mod_supported, +}; + +static bool dcss_plane_can_rotate(const struct drm_format_info *format, + bool mod_present, u64 modifier, + unsigned int rotation) +{ + bool linear_format = !mod_present || + (mod_present && modifier == DRM_FORMAT_MOD_LINEAR); + u32 supported_rotation = DRM_MODE_ROTATE_0; + + if (!format->is_yuv && linear_format) + supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_MASK; + else if (!format->is_yuv && + modifier == DRM_FORMAT_MOD_VIVANTE_TILED) + supported_rotation = DRM_MODE_ROTATE_MASK | + DRM_MODE_REFLECT_MASK; + else if (format->is_yuv && linear_format && + (format->format == DRM_FORMAT_NV12 || + format->format == DRM_FORMAT_NV21)) + supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_MASK; + + return !!(rotation & supported_rotation); +} + +static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt) +{ + if (src_w < 64 && + (pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21)) + return 
false; + else if (src_w < 32 && + (pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY || + pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU)) + return false; + + return src_w >= 16 && src_h >= 8; +} + +static int dcss_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct dcss_plane *dcss_plane = to_dcss_plane(plane); + struct dcss_dev *dcss = plane->dev->dev_private; + struct drm_framebuffer *fb = state->fb; + bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY; + struct drm_gem_cma_object *cma_obj; + struct drm_crtc_state *crtc_state; + int hdisplay, vdisplay; + int min, max; + int ret; + + if (!fb || !state->crtc) + return 0; + + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + WARN_ON(!cma_obj); + + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + + hdisplay = crtc_state->adjusted_mode.hdisplay; + vdisplay = crtc_state->adjusted_mode.vdisplay; + + if (!dcss_plane_is_source_size_allowed(state->src_w >> 16, + state->src_h >> 16, + fb->format->format)) { + DRM_DEBUG_KMS("Source plane size is not allowed!\n"); + return -EINVAL; + } + + dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num, + &min, &max); + + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + min, max, !is_primary_plane, + false); + if (ret) + return ret; + + if (!state->visible) + return 0; + + if (!dcss_plane_can_rotate(fb->format, + !!(fb->flags & DRM_MODE_FB_MODIFIERS), + fb->modifier, + state->rotation)) { + DRM_DEBUG_KMS("requested rotation is not allowed!\n"); + return -EINVAL; + } + + if ((state->crtc_x < 0 || state->crtc_y < 0 || + state->crtc_x + state->crtc_w > hdisplay || + state->crtc_y + state->crtc_h > vdisplay) && + !dcss_plane_fb_is_linear(fb)) { + DRM_DEBUG_KMS("requested cropping operation is not allowed!\n"); + return -EINVAL; + } + + if ((fb->flags & DRM_MODE_FB_MODIFIERS) && + !plane->funcs->format_mod_supported(plane, + fb->format->format, + fb->modifier)) { + DRM_DEBUG_KMS("Invalid modifier: %llx", fb->modifier); + return -EINVAL; + } + + return 0; +} + +static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane) +{ + struct drm_plane *plane = &dcss_plane->base; + struct drm_plane_state *state = plane->state; + struct dcss_dev *dcss = plane->dev->dev_private; + struct drm_framebuffer *fb = state->fb; + const struct drm_format_info *format = fb->format; + struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + unsigned long p1_ba = 0, p2_ba = 0; + + if (!format->is_yuv || + format->format == DRM_FORMAT_NV12 || + format->format == DRM_FORMAT_NV21) + p1_ba = cma_obj->paddr + fb->offsets[0] + + fb->pitches[0] * (state->src.y1 >> 16) + + format->char_per_block[0] * (state->src.x1 >> 16); + else if (format->format == DRM_FORMAT_UYVY || + format->format == DRM_FORMAT_VYUY || + format->format == DRM_FORMAT_YUYV || + format->format == DRM_FORMAT_YVYU) + p1_ba = cma_obj->paddr + fb->offsets[0] + + fb->pitches[0] * (state->src.y1 >> 16) + + 2 * format->char_per_block[0] * (state->src.x1 >> 17); + + if (format->format == DRM_FORMAT_NV12 || + format->format == DRM_FORMAT_NV21) + p2_ba = cma_obj->paddr + fb->offsets[1] + + (((fb->pitches[1] >> 1) * (state->src.y1 >> 17) + + (state->src.x1 >> 17)) << 1); + + dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba, + fb->pitches[0]); +} + +static bool dcss_plane_needs_setup(struct drm_plane_state *state, + struct drm_plane_state *old_state) +{ + struct drm_framebuffer *fb = state->fb; + struct drm_framebuffer *old_fb = old_state->fb; 
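+ /* any change to geometry, format, modifier or rotation requires a full setup */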
+ + return state->crtc_x != old_state->crtc_x || + state->crtc_y != old_state->crtc_y || + state->crtc_w != old_state->crtc_w || + state->crtc_h != old_state->crtc_h || + state->src_x != old_state->src_x || + state->src_y != old_state->src_y || + state->src_w != old_state->src_w || + state->src_h != old_state->src_h || + fb->format->format != old_fb->format->format || + fb->modifier != old_fb->modifier || + state->rotation != old_state->rotation; +} + +static void dcss_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + struct dcss_plane *dcss_plane = to_dcss_plane(plane); + struct dcss_dev *dcss = plane->dev->dev_private; + struct drm_framebuffer *fb = state->fb; + struct drm_crtc_state *crtc_state; + bool modifiers_present; + u32 src_w, src_h, dst_w, dst_h; + struct drm_rect src, dst; + bool enable = true; + + if (!fb || !state->crtc || !state->visible) + return; + + crtc_state = state->crtc->state; + modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS); + + if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) && + !dcss_plane_needs_setup(state, old_state)) { + dcss_plane_atomic_set_base(dcss_plane); + return; + } + + src = plane->state->src; + dst = plane->state->dst; + + /* + * The width and height after clipping. + */ + src_w = drm_rect_width(&src) >> 16; + src_h = drm_rect_height(&src) >> 16; + dst_w = drm_rect_width(&dst); + dst_h = drm_rect_height(&dst); + + if (plane->type == DRM_PLANE_TYPE_OVERLAY && + modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR) + modifiers_present = false; + + dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format, + modifiers_present ? fb->modifier : + DRM_FORMAT_MOD_LINEAR); + dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h); + dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num, + state->rotation); + + dcss_plane_atomic_set_base(dcss_plane); + + dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num, + state->fb->format, src_w, src_h, + dst_w, dst_h, + drm_mode_vrefresh(&crtc_state->mode)); + + dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, + dst.x1, dst.y1, dst_w, dst_h); + dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num, + fb->format, state->alpha >> 8); + + if (!dcss_plane->ch_num && (state->alpha >> 8) == 0) + enable = false; + + dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable); + dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable); + + if (!enable) + dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, + 0, 0, 0, 0); + + dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable); +} + +static void dcss_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct dcss_plane *dcss_plane = to_dcss_plane(plane); + struct dcss_dev *dcss = plane->dev->dev_private; + + dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false); + dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false); + dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0); + dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false); +} + +static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, + .atomic_check = dcss_plane_atomic_check, + .atomic_update = dcss_plane_atomic_update, + .atomic_disable = dcss_plane_atomic_disable, +}; + +struct dcss_plane *dcss_plane_init(struct drm_device *drm, + unsigned int possible_crtcs, + enum drm_plane_type type, + unsigned int zpos) +{ + struct dcss_plane *dcss_plane; + const u64 
*format_modifiers = dcss_video_format_modifiers; + int ret; + + if (zpos > 2) + return ERR_PTR(-EINVAL); + + dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL); + if (!dcss_plane) { + DRM_ERROR("failed to allocate plane\n"); + return ERR_PTR(-ENOMEM); + } + + if (type == DRM_PLANE_TYPE_PRIMARY) + format_modifiers = dcss_graphics_format_modifiers; + + ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs, + &dcss_plane_funcs, dcss_common_formats, + ARRAY_SIZE(dcss_common_formats), + format_modifiers, type, NULL); + if (ret) { + DRM_ERROR("failed to initialize plane\n"); + kfree(dcss_plane); + return ERR_PTR(ret); + } + + drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs); + + ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos); + if (ret) + return ERR_PTR(ret); + + drm_plane_create_rotation_property(&dcss_plane->base, + DRM_MODE_ROTATE_0, + DRM_MODE_ROTATE_0 | + DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_180 | + DRM_MODE_ROTATE_270 | + DRM_MODE_REFLECT_X | + DRM_MODE_REFLECT_Y); + + dcss_plane->ch_num = zpos; + + return dcss_plane; +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c new file mode 100644 index 000000000000..cd21905de580 --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c @@ -0,0 +1,826 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. + * + * Scaling algorithms were contributed by Dzung Hoang <dzung.hoang@nxp.com> + */ + +#include <linux/device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_SCALER_CTRL 0x00 +#define SCALER_EN BIT(0) +#define REPEAT_EN BIT(4) +#define SCALE2MEM_EN BIT(8) +#define MEM2OFIFO_EN BIT(12) +#define DCSS_SCALER_OFIFO_CTRL 0x04 +#define OFIFO_LOW_THRES_POS 0 +#define OFIFO_LOW_THRES_MASK GENMASK(9, 0) +#define OFIFO_HIGH_THRES_POS 16 +#define OFIFO_HIGH_THRES_MASK GENMASK(25, 16) +#define UNDERRUN_DETECT_CLR BIT(26) +#define LOW_THRES_DETECT_CLR BIT(27) +#define HIGH_THRES_DETECT_CLR BIT(28) +#define UNDERRUN_DETECT_EN BIT(29) +#define LOW_THRES_DETECT_EN BIT(30) +#define HIGH_THRES_DETECT_EN BIT(31) +#define DCSS_SCALER_SDATA_CTRL 0x08 +#define YUV_EN BIT(0) +#define RTRAM_8LINES BIT(1) +#define Y_UV_BYTE_SWAP BIT(4) +#define A2R10G10B10_FORMAT_POS 8 +#define A2R10G10B10_FORMAT_MASK GENMASK(11, 8) +#define DCSS_SCALER_BIT_DEPTH 0x0C +#define LUM_BIT_DEPTH_POS 0 +#define LUM_BIT_DEPTH_MASK GENMASK(1, 0) +#define CHR_BIT_DEPTH_POS 4 +#define CHR_BIT_DEPTH_MASK GENMASK(5, 4) +#define DCSS_SCALER_SRC_FORMAT 0x10 +#define DCSS_SCALER_DST_FORMAT 0x14 +#define FORMAT_MASK GENMASK(1, 0) +#define DCSS_SCALER_SRC_LUM_RES 0x18 +#define DCSS_SCALER_SRC_CHR_RES 0x1C +#define DCSS_SCALER_DST_LUM_RES 0x20 +#define DCSS_SCALER_DST_CHR_RES 0x24 +#define WIDTH_POS 0 +#define WIDTH_MASK GENMASK(11, 0) +#define HEIGHT_POS 16 +#define HEIGHT_MASK GENMASK(27, 16) +#define DCSS_SCALER_V_LUM_START 0x48 +#define V_START_MASK GENMASK(15, 0) +#define DCSS_SCALER_V_LUM_INC 0x4C +#define V_INC_MASK GENMASK(15, 0) +#define DCSS_SCALER_H_LUM_START 0x50 +#define H_START_MASK GENMASK(18, 0) +#define DCSS_SCALER_H_LUM_INC 0x54 +#define H_INC_MASK GENMASK(15, 0) +#define DCSS_SCALER_V_CHR_START 0x58 +#define DCSS_SCALER_V_CHR_INC 0x5C +#define DCSS_SCALER_H_CHR_START 0x60 +#define DCSS_SCALER_H_CHR_INC 0x64 +#define DCSS_SCALER_COEF_VLUM 0x80 +#define DCSS_SCALER_COEF_HLUM 0x140 +#define DCSS_SCALER_COEF_VCHR 0x200 +#define DCSS_SCALER_COEF_HCHR 0x300 + +struct dcss_scaler_ch { + void __iomem *base_reg; + u32 base_ofs; + struct dcss_scaler *scl; 
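+ + /* shadow copies of the control registers, committed to the hardware via the context loader */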
+ + u32 sdata_ctrl; + u32 scaler_ctrl; + + bool scaler_ctrl_chgd; + + u32 c_vstart; + u32 c_hstart; +}; + +struct dcss_scaler { + struct device *dev; + + struct dcss_ctxld *ctxld; + u32 ctx_id; + + struct dcss_scaler_ch ch[3]; +}; + +/* scaler coefficients generator */ +#define PSC_FRAC_BITS 30 +#define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS) +#define PSC_BITS_FOR_PHASE 4 +#define PSC_NUM_PHASES 16 +#define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1) +#define PSC_NUM_TAPS 7 +#define PSC_NUM_TAPS_RGBA 5 +#define PSC_COEFF_PRECISION 10 +#define PSC_PHASE_FRACTION_BITS 13 +#define PSC_PHASE_MASK (PSC_NUM_PHASES - 1) +#define PSC_Q_FRACTION 19 +#define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1)) + +/** + * mult_q() - Performs fixed-point multiplication. + * @A: multiplier + * @B: multiplicand + */ +static int mult_q(int A, int B) +{ + int result; + s64 temp; + + temp = (int64_t)A * (int64_t)B; + temp += PSC_Q_ROUND_OFFSET; + result = (int)(temp >> PSC_Q_FRACTION); + return result; +} + +/** + * div_q() - Performs fixed-point division. + * @A: dividend + * @B: divisor + */ +static int div_q(int A, int B) +{ + int result; + s64 temp; + + temp = (int64_t)A << PSC_Q_FRACTION; + if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0)) + temp += B / 2; + else + temp -= B / 2; + + result = (int)(temp / B); + return result; +} + +/** + * exp_approx_q() - Compute approximation to exp(x) function using Taylor + * series. + * @x: fixed-point argument of exp function + */ +static int exp_approx_q(int x) +{ + int sum = 1 << PSC_Q_FRACTION; + int term = 1 << PSC_Q_FRACTION; + + term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION)); + sum += term; + term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION)); + sum += term; + term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION)); + sum += term; + term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION)); + sum += term; + + return sum; +} + +/** + * dcss_scaler_gaussian_filter() - Generate gaussian prototype filter. + * @fc_q: fixed-point cutoff frequency normalized to range [0, 1] + * @use_5_taps: indicates whether to use 5 taps or 7 taps + * @coef: output filter coefficients + */ +static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, + bool phase0_identity, + int coef[][PSC_NUM_TAPS]) +{ + int sigma_q, g0_q, g1_q, g2_q; + int tap_cnt1, tap_cnt2, tap_idx, phase_cnt; + int mid; + int phase; + int i; + int taps; + + if (use_5_taps) + for (phase = 0; phase < PSC_STORED_PHASES; phase++) { + coef[phase][0] = 0; + coef[phase][PSC_NUM_TAPS - 1] = 0; + } + + /* seed coefficient scanner */ + taps = use_5_taps ? 
PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS; + mid = (PSC_NUM_PHASES * taps) / 2 - 1; + phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2; + tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2; + tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2; + + /* seed gaussian filter generator */ + sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q); + g0_q = 1 << PSC_Q_FRACTION; + g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET, + mult_q(sigma_q, sigma_q))); + g2_q = mult_q(g1_q, g1_q); + coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q; + + for (i = 0; i < mid; i++) { + phase_cnt++; + tap_cnt1--; + tap_cnt2++; + + g0_q = mult_q(g0_q, g1_q); + g1_q = mult_q(g1_q, g2_q); + + if ((phase_cnt & PSC_PHASE_MASK) <= 8) { + tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE; + coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q; + } + if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) { + tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE; + coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q; + } + } + + phase_cnt++; + tap_cnt1--; + coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0; + + /* override phase 0 with identity filter if specified */ + if (phase0_identity) + for (i = 0; i < PSC_NUM_TAPS; i++) + coef[0][i] = i == (PSC_NUM_TAPS >> 1) ? + (1 << PSC_COEFF_PRECISION) : 0; + + /* normalize coef */ + for (phase = 0; phase < PSC_STORED_PHASES; phase++) { + int sum = 0; + s64 ll_temp; + + for (i = 0; i < PSC_NUM_TAPS; i++) + sum += coef[phase][i]; + for (i = 0; i < PSC_NUM_TAPS; i++) { + ll_temp = coef[phase][i]; + ll_temp <<= PSC_COEFF_PRECISION; + ll_temp += sum >> 1; + ll_temp /= sum; + coef[phase][i] = (int)ll_temp; + } + } +} + +/** + * dcss_scaler_filter_design() - Compute filter coefficients using + * Gaussian filter. + * @src_length: length of input + * @dst_length: length of output + * @use_5_taps: 0 for 7 taps per phase, 1 for 5 taps + * @coef: output coefficients + */ +static void dcss_scaler_filter_design(int src_length, int dst_length, + bool use_5_taps, bool phase0_identity, + int coef[][PSC_NUM_TAPS]) +{ + int fc_q; + + /* compute cutoff frequency */ + if (dst_length >= src_length) + fc_q = div_q(1, PSC_NUM_PHASES); + else + fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES); + + /* compute gaussian filter coefficients */ + dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef); +} + +static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs) +{ + struct dcss_scaler *scl = ch->scl; + + dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs); +} + +static int dcss_scaler_ch_init_all(struct dcss_scaler *scl, + unsigned long scaler_base) +{ + struct dcss_scaler_ch *ch; + int i; + + for (i = 0; i < 3; i++) { + ch = &scl->ch[i]; + + ch->base_ofs = scaler_base + i * 0x400; + + ch->base_reg = ioremap(ch->base_ofs, SZ_4K); + if (!ch->base_reg) { + dev_err(scl->dev, "scaler: unable to remap ch base\n"); + return -ENOMEM; + } + + ch->scl = scl; + } + + return 0; +} + +int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base) +{ + struct dcss_scaler *scaler; + + scaler = kzalloc(sizeof(*scaler), GFP_KERNEL); + if (!scaler) + return -ENOMEM; + + dcss->scaler = scaler; + scaler->dev = dcss->dev; + scaler->ctxld = dcss->ctxld; + scaler->ctx_id = CTX_SB_HP; + + if (dcss_scaler_ch_init_all(scaler, scaler_base)) { + int i; + + for (i = 0; i < 3; i++) { + if (scaler->ch[i].base_reg) + iounmap(scaler->ch[i].base_reg); + } + + kfree(scaler); + + return -ENOMEM; + } + + return 0; +} + +void dcss_scaler_exit(struct dcss_scaler *scl) +{ + int ch_no; + + for (ch_no = 0; 
ch_no < 3; ch_no++) { + struct dcss_scaler_ch *ch = &scl->ch[ch_no]; + + dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL); + + if (ch->base_reg) + iounmap(ch->base_reg); + } + + kfree(scl); +} + +void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en) +{ + struct dcss_scaler_ch *ch = &scl->ch[ch_num]; + u32 scaler_ctrl; + + scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0; + + if (en) + dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL); + + if (ch->scaler_ctrl != scaler_ctrl) + ch->scaler_ctrl_chgd = true; + + ch->scaler_ctrl = scaler_ctrl; +} + +static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en) +{ + ch->sdata_ctrl &= ~YUV_EN; + ch->sdata_ctrl |= en ? YUV_EN : 0; +} + +static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en) +{ + ch->sdata_ctrl &= ~RTRAM_8LINES; + ch->sdata_ctrl |= en ? RTRAM_8LINES : 0; +} + +static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth) +{ + u32 val; + + val = depth == 30 ? 2 : 0; + + dcss_scaler_write(ch, + ((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) | + ((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK), + DCSS_SCALER_BIT_DEPTH); +} + +enum buffer_format { + BUF_FMT_YUV420, + BUF_FMT_YUV422, + BUF_FMT_ARGB8888_YUV444, +}; + +enum chroma_location { + PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0, + PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1, + PSC_LOC_HORZ_0_VERT_0 = 2, + PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3, + PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4, + PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5 +}; + +static void dcss_scaler_format_set(struct dcss_scaler_ch *ch, + enum buffer_format src_fmt, + enum buffer_format dst_fmt) +{ + dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT); + dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT); +} + +static void dcss_scaler_res_set(struct dcss_scaler_ch *ch, + int src_xres, int src_yres, + int dst_xres, int dst_yres, + u32 pix_format, enum buffer_format dst_format) +{ + u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres; + u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres; + bool src_is_444 = true; + + lsrc_xres = src_xres; + csrc_xres = src_xres; + lsrc_yres = src_yres; + csrc_yres = src_yres; + ldst_xres = dst_xres; + cdst_xres = dst_xres; + ldst_yres = dst_yres; + cdst_yres = dst_yres; + + if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY || + pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) { + csrc_xres >>= 1; + src_is_444 = false; + } else if (pix_format == DRM_FORMAT_NV12 || + pix_format == DRM_FORMAT_NV21) { + csrc_xres >>= 1; + csrc_yres >>= 1; + src_is_444 = false; + } + + if (dst_format == BUF_FMT_YUV422) + cdst_xres >>= 1; + + /* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */ + if (src_is_444 && dst_format == BUF_FMT_YUV422) { + lsrc_yres--; + csrc_yres--; + } + + dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_SRC_LUM_RES); + dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_SRC_CHR_RES); + dcss_scaler_write(ch, (((ldst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_DST_LUM_RES); + dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | + (((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK), + DCSS_SCALER_DST_CHR_RES); +} + +#define downscale_fp(factor, fp_pos) ((factor) << (fp_pos)) +#define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor)) + +struct 
dcss_scaler_factors { + int downscale; + int upscale; +}; + +static const struct dcss_scaler_factors dcss_scaler_factors[] = { + {3, 8}, {5, 8}, {5, 8}, +}; + +static void dcss_scaler_fractions_set(struct dcss_scaler_ch *ch, + int src_xres, int src_yres, + int dst_xres, int dst_yres, + u32 src_format, u32 dst_format, + enum chroma_location src_chroma_loc) +{ + int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres; + u32 l_vinc, l_hinc, c_vinc, c_hinc; + u32 c_vstart, c_hstart; + + src_c_xres = src_xres; + src_c_yres = src_yres; + dst_c_xres = dst_xres; + dst_c_yres = dst_yres; + + c_vstart = 0; + c_hstart = 0; + + /* adjustments for source chroma location */ + if (src_format == BUF_FMT_YUV420) { + /* vertical input chroma position adjustment */ + switch (src_chroma_loc) { + case PSC_LOC_HORZ_0_VERT_1_OVER_4: + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4: + /* + * move chroma up to first luma line + * (1/4 chroma input line spacing) + */ + c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2)); + break; + case PSC_LOC_HORZ_0_VERT_1_OVER_2: + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2: + /* + * move chroma up to first luma line + * (1/2 chroma input line spacing) + */ + c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1)); + break; + default: + break; + } + /* horizontal input chroma position adjustment */ + switch (src_chroma_loc) { + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4: + case PSC_LOC_HORZ_1_OVER_4_VERT_0: + case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2: + /* move chroma left 1/4 chroma input sample spacing */ + c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2)); + break; + default: + break; + } + } + + /* adjustments to chroma resolution */ + if (src_format == BUF_FMT_YUV420) { + src_c_xres >>= 1; + src_c_yres >>= 1; + } else if (src_format == BUF_FMT_YUV422) { + src_c_xres >>= 1; + } + + if (dst_format == BUF_FMT_YUV422) + dst_c_xres >>= 1; + + l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres; + c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres; + l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres; + c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres; + + /* save chroma start phase */ + ch->c_vstart = c_vstart; + ch->c_hstart = c_hstart; + + dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START); + dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC); + + dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START); + dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC); + + dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START); + dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC); + + dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START); + dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC); +} + +int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num, + int *min, int *max) +{ + *min = upscale_fp(dcss_scaler_factors[ch_num].upscale, 16); + *max = downscale_fp(dcss_scaler_factors[ch_num].downscale, 16); + + return 0; +} + +static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch, + int base_addr, + int coef[][PSC_NUM_TAPS]) +{ + int i, phase; + + for (i = 0; i < PSC_STORED_PHASES; i++) { + dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 | + (coef[i][2] & 0xfff) << 4 | + (coef[i][3] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 | + (coef[i][4] & 0xfff) << 8 | + (coef[i][5] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24), + base_addr + 0x80 + i * sizeof(u32)); + } + + /* reverse both phase and tap orderings */ + for (phase = 
(PSC_NUM_PHASES >> 1) - 1; + i < PSC_NUM_PHASES; i++, phase--) { + dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 | + (coef[phase][4] & 0xfff) << 4 | + (coef[phase][3] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 | + (coef[phase][2] & 0xfff) << 8 | + (coef[phase][1] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24), + base_addr + 0x80 + i * sizeof(u32)); + } +} + +static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch, + int base_addr, + int coef[][PSC_NUM_TAPS]) +{ + int i, phase; + + for (i = 0; i < PSC_STORED_PHASES; i++) { + dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 | + (coef[i][1] & 0xfff) << 4 | + (coef[i][2] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 | + (coef[i][3] & 0xfff) << 8 | + (coef[i][4] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[i][4] & 0x00f) << 24 | + (coef[i][5] & 0xfff) << 12 | + (coef[i][6] & 0xfff)), + base_addr + 0x80 + i * sizeof(u32)); + } + + /* reverse both phase and tap orderings */ + for (phase = (PSC_NUM_PHASES >> 1) - 1; + i < PSC_NUM_PHASES; i++, phase--) { + dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 | + (coef[phase][5] & 0xfff) << 4 | + (coef[phase][4] & 0xf00) >> 8), + base_addr + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 | + (coef[phase][3] & 0xfff) << 8 | + (coef[phase][2] & 0xff0) >> 4), + base_addr + 0x40 + i * sizeof(u32)); + dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 | + (coef[phase][1] & 0xfff) << 12 | + (coef[phase][0] & 0xfff)), + base_addr + 0x80 + i * sizeof(u32)); + } +} + +static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch, + enum buffer_format src_format, + enum buffer_format dst_format, + bool use_5_taps, + int src_xres, int src_yres, int dst_xres, + int dst_yres) +{ + int coef[PSC_STORED_PHASES][PSC_NUM_TAPS]; + bool program_5_taps = use_5_taps || + (dst_format == BUF_FMT_YUV422 && + src_format == BUF_FMT_ARGB8888_YUV444); + + /* horizontal luma */ + dcss_scaler_filter_design(src_xres, dst_xres, false, + src_xres == dst_xres, coef); + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); + + /* vertical luma */ + dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, + src_yres == dst_yres, coef); + + if (program_5_taps) + dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); + else + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); + + /* adjust chroma resolution */ + if (src_format != BUF_FMT_ARGB8888_YUV444) + src_xres >>= 1; + if (src_format == BUF_FMT_YUV420) + src_yres >>= 1; + if (dst_format != BUF_FMT_ARGB8888_YUV444) + dst_xres >>= 1; + if (dst_format == BUF_FMT_YUV420) /* should not happen */ + dst_yres >>= 1; + + /* horizontal chroma */ + dcss_scaler_filter_design(src_xres, dst_xres, false, + (src_xres == dst_xres) && (ch->c_hstart == 0), + coef); + + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef); + + /* vertical chroma */ + dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, + (src_yres == dst_yres) && (ch->c_vstart == 0), + coef); + if (program_5_taps) + dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); + else + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); +} + +static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch, + int src_xres, int src_yres, int dst_xres, + int dst_yres) +{ + int 
coef[PSC_STORED_PHASES][PSC_NUM_TAPS]; + + /* horizontal RGB */ + dcss_scaler_filter_design(src_xres, dst_xres, false, + src_xres == dst_xres, coef); + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); + + /* vertical RGB */ + dcss_scaler_filter_design(src_yres, dst_yres, false, + src_yres == dst_yres, coef); + dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); +} + +static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch, + const struct drm_format_info *format) +{ + u32 a2r10g10b10_format; + + if (format->is_yuv) + return; + + ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK; + + if (format->depth != 30) + return; + + switch (format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_XRGB2101010: + a2r10g10b10_format = 0; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XBGR2101010: + a2r10g10b10_format = 5; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBX1010102: + a2r10g10b10_format = 6; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRX1010102: + a2r10g10b10_format = 11; + break; + + default: + a2r10g10b10_format = 0; + break; + } + + ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS; +} + +void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, + const struct drm_format_info *format, + int src_xres, int src_yres, int dst_xres, int dst_yres, + u32 vrefresh_hz) +{ + struct dcss_scaler_ch *ch = &scl->ch[ch_num]; + unsigned int pixel_depth = 0; + bool rtr_8line_en = false; + bool use_5_taps = false; + enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444; + enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444; + u32 pix_format = format->format; + + if (format->is_yuv) { + dcss_scaler_yuv_enable(ch, true); + + if (pix_format == DRM_FORMAT_NV12 || + pix_format == DRM_FORMAT_NV21) { + rtr_8line_en = true; + src_format = BUF_FMT_YUV420; + } else if (pix_format == DRM_FORMAT_UYVY || + pix_format == DRM_FORMAT_VYUY || + pix_format == DRM_FORMAT_YUYV || + pix_format == DRM_FORMAT_YVYU) { + src_format = BUF_FMT_YUV422; + } + + use_5_taps = !rtr_8line_en; + } else { + dcss_scaler_yuv_enable(ch, false); + + pixel_depth = format->depth; + } + + dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres, + dst_yres, src_format, dst_format, + PSC_LOC_HORZ_0_VERT_1_OVER_4); + + if (format->is_yuv) + dcss_scaler_yuv_coef_set(ch, src_format, dst_format, + use_5_taps, src_xres, src_yres, + dst_xres, dst_yres); + else + dcss_scaler_rgb_coef_set(ch, src_xres, src_yres, + dst_xres, dst_yres); + + dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en); + dcss_scaler_bit_depth_set(ch, pixel_depth); + dcss_scaler_set_rgb10_order(ch, format); + dcss_scaler_format_set(ch, src_format, dst_format); + dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, dst_yres, + pix_format, dst_format); +} + +/* This function will be called from interrupt context. */ +void dcss_scaler_write_sclctrl(struct dcss_scaler *scl) +{ + int chnum; + + dcss_ctxld_assert_locked(scl->ctxld); + + for (chnum = 0; chnum < 3; chnum++) { + struct dcss_scaler_ch *ch = &scl->ch[chnum]; + + if (ch->scaler_ctrl_chgd) { + dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id, + ch->scaler_ctrl, + ch->base_ofs + + DCSS_SCALER_CTRL); + ch->scaler_ctrl_chgd = false; + } + } +} diff --git a/drivers/gpu/drm/imx/dcss/dcss-ss.c b/drivers/gpu/drm/imx/dcss/dcss-ss.c new file mode 100644 index 000000000000..8ddf08da911b --- /dev/null +++ b/drivers/gpu/drm/imx/dcss/dcss-ss.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP. 
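+ * + * DCSS SS (subsampler): programs the display sync/DE timings and the output subsampling coefficients.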
+ */ + +#include <linux/device.h> +#include <linux/slab.h> + +#include "dcss-dev.h" + +#define DCSS_SS_SYS_CTRL 0x00 +#define RUN_EN BIT(0) +#define DCSS_SS_DISPLAY 0x10 +#define LRC_X_POS 0 +#define LRC_X_MASK GENMASK(12, 0) +#define LRC_Y_POS 16 +#define LRC_Y_MASK GENMASK(28, 16) +#define DCSS_SS_HSYNC 0x20 +#define DCSS_SS_VSYNC 0x30 +#define SYNC_START_POS 0 +#define SYNC_START_MASK GENMASK(12, 0) +#define SYNC_END_POS 16 +#define SYNC_END_MASK GENMASK(28, 16) +#define SYNC_POL BIT(31) +#define DCSS_SS_DE_ULC 0x40 +#define ULC_X_POS 0 +#define ULC_X_MASK GENMASK(12, 0) +#define ULC_Y_POS 16 +#define ULC_Y_MASK GENMASK(28, 16) +#define ULC_POL BIT(31) +#define DCSS_SS_DE_LRC 0x50 +#define DCSS_SS_MODE 0x60 +#define PIPE_MODE_POS 0 +#define PIPE_MODE_MASK GENMASK(1, 0) +#define DCSS_SS_COEFF 0x70 +#define HORIZ_A_POS 0 +#define HORIZ_A_MASK GENMASK(3, 0) +#define HORIZ_B_POS 4 +#define HORIZ_B_MASK GENMASK(7, 4) +#define HORIZ_C_POS 8 +#define HORIZ_C_MASK GENMASK(11, 8) +#define HORIZ_H_NORM_POS 12 +#define HORIZ_H_NORM_MASK GENMASK(14, 12) +#define VERT_A_POS 16 +#define VERT_A_MASK GENMASK(19, 16) +#define VERT_B_POS 20 +#define VERT_B_MASK GENMASK(23, 20) +#define VERT_C_POS 24 +#define VERT_C_MASK GENMASK(27, 24) +#define VERT_H_NORM_POS 28 +#define VERT_H_NORM_MASK GENMASK(30, 28) +#define DCSS_SS_CLIP_CB 0x80 +#define DCSS_SS_CLIP_CR 0x90 +#define CLIP_MIN_POS 0 +#define CLIP_MIN_MASK GENMASK(9, 0) +#define CLIP_MAX_POS 0 +#define CLIP_MAX_MASK GENMASK(23, 16) +#define DCSS_SS_INTER_MODE 0xA0 +#define INT_EN BIT(0) +#define VSYNC_SHIFT BIT(1) + +struct dcss_ss { + struct device *dev; + void __iomem *base_reg; + u32 base_ofs; + + struct dcss_ctxld *ctxld; + u32 ctx_id; + + bool in_use; +}; + +static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs) +{ + if (!ss->in_use) + dcss_writel(val, ss->base_reg + ofs); + + dcss_ctxld_write(ss->ctxld, ss->ctx_id, val, + ss->base_ofs + ofs); +} + +int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base) +{ + struct dcss_ss *ss; + + ss = kzalloc(sizeof(*ss), GFP_KERNEL); + if (!ss) + return -ENOMEM; + + dcss->ss = ss; + ss->dev = dcss->dev; + ss->ctxld = dcss->ctxld; + + ss->base_reg = ioremap(ss_base, SZ_4K); + if (!ss->base_reg) { + dev_err(dcss->dev, "ss: unable to remap ss base\n"); + kfree(ss); + return -ENOMEM; + } + + ss->base_ofs = ss_base; + ss->ctx_id = CTX_SB_HP; + + return 0; +} + +void dcss_ss_exit(struct dcss_ss *ss) +{ + /* stop SS */ + dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL); + + if (ss->base_reg) + iounmap(ss->base_reg); + + kfree(ss); +} + +void dcss_ss_subsam_set(struct dcss_ss *ss) +{ + dcss_ss_write(ss, 0x41614161, DCSS_SS_COEFF); + dcss_ss_write(ss, 0, DCSS_SS_MODE); + dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB); + dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR); +} + +void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, + bool phsync, bool pvsync) +{ + u16 lrc_x, lrc_y; + u16 hsync_start, hsync_end; + u16 vsync_start, vsync_end; + u16 de_ulc_x, de_ulc_y; + u16 de_lrc_x, de_lrc_y; + + lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len + + vm->hactive - 1; + lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len + + vm->vactive - 1; + + dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY); + + hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len + + vm->hactive - 1; + hsync_end = vm->hsync_len - 1; + + dcss_ss_write(ss, (phsync ? 
SYNC_POL : 0) | + ((u32)hsync_end << SYNC_END_POS) | hsync_start, + DCSS_SS_HSYNC); + + vsync_start = vm->vfront_porch - 1; + vsync_end = vm->vfront_porch + vm->vsync_len - 1; + + dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) | + ((u32)vsync_end << SYNC_END_POS) | vsync_start, + DCSS_SS_VSYNC); + + de_ulc_x = vm->hsync_len + vm->hback_porch - 1; + de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch; + + dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x, + DCSS_SS_DE_ULC); + + de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1; + de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch + + vm->vactive - 1; + + dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC); +} + +void dcss_ss_enable(struct dcss_ss *ss) +{ + dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL); + ss->in_use = true; +} + +void dcss_ss_shutoff(struct dcss_ss *ss) +{ + dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL); + ss->in_use = false; +} diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index d412fc265395..7ebd99ee3240 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -47,7 +47,7 @@ static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc) } static void ipu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); @@ -79,8 +79,10 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, } static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); @@ -225,24 +227,26 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc, } static int ipu_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); u32 primary_plane_mask = drm_plane_mask(crtc->primary); - if (state->active && (primary_plane_mask & state->plane_mask) == 0) + if (crtc_state->active && (primary_plane_mask & crtc_state->plane_mask) == 0) return -EINVAL; return 0; } static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { drm_crtc_vblank_on(crtc); } static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index a3d1617d7c67..b9c156e13156 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -10,13 +10,16 @@ #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/of_device.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_color_mgmt.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> @@ -40,12 +43,21 @@ struct ingenic_dma_hwdesc { u32 addr; u32 id; 
u32 cmd; -} __packed; +} __aligned(16); + +struct ingenic_dma_hwdescs { + struct ingenic_dma_hwdesc hwdesc_f0; + struct ingenic_dma_hwdesc hwdesc_f1; + struct ingenic_dma_hwdesc hwdesc_pal; + u16 palette[256] __aligned(16); +}; struct jz_soc_info { bool needs_dev_clk; bool has_osd; unsigned int max_width, max_height; + const u32 *formats_f0, *formats_f1; + unsigned int num_formats_f0, num_formats_f1; }; struct ingenic_drm { @@ -63,17 +75,26 @@ struct ingenic_drm { struct clk *lcd_clk, *pix_clk; const struct jz_soc_info *soc_info; - struct ingenic_dma_hwdesc *dma_hwdesc_f0, *dma_hwdesc_f1; - dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1; + struct ingenic_dma_hwdescs *dma_hwdescs; + dma_addr_t dma_hwdescs_phys; bool panel_is_sharp; bool no_vblank; -}; -static const u32 ingenic_drm_primary_formats[] = { - DRM_FORMAT_XRGB1555, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, + /* + * clk_mutex is used to synchronize the pixel clock rate update with + * the VBLANK. When the pixel clock's parent clock needs to be updated, + * clock_nb's notifier function will lock the mutex, then wait until the + * next VBLANK. At that point, the parent clock's rate can be updated, + * and the mutex is then unlocked. If an atomic commit happens in the + * meantime, it will lock on the mutex, effectively waiting until the + * clock update process finishes. Finally, the pixel clock's rate will + * be recomputed when the mutex has been released, in the pending atomic + * commit, or a future one. + */ + struct mutex clk_mutex; + bool update_clk_rate; + struct notifier_block clock_nb; }; static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg) @@ -111,8 +132,31 @@ static inline struct ingenic_drm *drm_crtc_get_priv(struct drm_crtc *crtc) return container_of(crtc, struct ingenic_drm, crtc); } +static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb) +{ + return container_of(nb, struct ingenic_drm, clock_nb); +} + +static int ingenic_drm_update_pixclk(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct ingenic_drm *priv = drm_nb_get_priv(nb); + + switch (action) { + case PRE_RATE_CHANGE: + mutex_lock(&priv->clk_mutex); + priv->update_clk_rate = true; + drm_crtc_wait_one_vblank(&priv->crtc); + return NOTIFY_OK; + default: + mutex_unlock(&priv->clk_mutex); + return NOTIFY_OK; + } +} + static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); @@ -126,7 +170,7 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc, } static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); unsigned int var; @@ -195,22 +239,33 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv, } static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct ingenic_drm *priv = drm_crtc_get_priv(crtc); struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL; - if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) { - f1_state = drm_atomic_get_plane_state(state->state, &priv->f1); + if (crtc_state->gamma_lut && + drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) { + dev_dbg(priv->dev, "Invalid 
palette size\n"); + return -EINVAL; + } + + if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) { + f1_state = drm_atomic_get_plane_state(crtc_state->state, + &priv->f1); if (IS_ERR(f1_state)) return PTR_ERR(f1_state); - f0_state = drm_atomic_get_plane_state(state->state, &priv->f0); + f0_state = drm_atomic_get_plane_state(crtc_state->state, + &priv->f0); if (IS_ERR(f0_state)) return PTR_ERR(f0_state); if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && priv->ipu_plane) { - ipu_state = drm_atomic_get_plane_state(state->state, priv->ipu_plane); + ipu_state = drm_atomic_get_plane_state(crtc_state->state, + priv->ipu_plane); if (IS_ERR(ipu_state)) return PTR_ERR(ipu_state); @@ -248,7 +303,7 @@ ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode } static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *oldstate) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); u32 ctrl = 0; @@ -268,20 +323,27 @@ static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc, } static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *oldstate) + struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); - struct drm_crtc_state *state = crtc->state; - struct drm_pending_vblank_event *event = state->event; + struct drm_crtc_state *crtc_state = crtc->state; + struct drm_pending_vblank_event *event = crtc_state->event; - if (drm_atomic_crtc_needs_modeset(state)) { - ingenic_drm_crtc_update_timings(priv, &state->mode); + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + ingenic_drm_crtc_update_timings(priv, &crtc_state->mode); + priv->update_clk_rate = true; + } - clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000); + if (priv->update_clk_rate) { + mutex_lock(&priv->clk_mutex); + clk_set_rate(priv->pix_clk, + crtc_state->adjusted_mode.clock * 1000); + priv->update_clk_rate = false; + mutex_unlock(&priv->clk_mutex); } if (event) { - state->event = NULL; + crtc_state->event = NULL; spin_lock_irq(&crtc->dev->event_lock); if (drm_crtc_vblank_get(crtc) == 0) @@ -398,24 +460,39 @@ void ingenic_drm_plane_config(struct device *dev, case DRM_FORMAT_RGB565: ctrl |= JZ_LCD_OSDCTRL_BPP_15_16; break; + case DRM_FORMAT_RGB888: + ctrl |= JZ_LCD_OSDCTRL_BPP_24_COMP; + break; case DRM_FORMAT_XRGB8888: ctrl |= JZ_LCD_OSDCTRL_BPP_18_24; break; + case DRM_FORMAT_XRGB2101010: + ctrl |= JZ_LCD_OSDCTRL_BPP_30; + break; } regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL, JZ_LCD_OSDCTRL_BPP_MASK, ctrl); } else { switch (fourcc) { + case DRM_FORMAT_C8: + ctrl |= JZ_LCD_CTRL_BPP_8; + break; case DRM_FORMAT_XRGB1555: ctrl |= JZ_LCD_CTRL_RGB555; fallthrough; case DRM_FORMAT_RGB565: ctrl |= JZ_LCD_CTRL_BPP_15_16; break; + case DRM_FORMAT_RGB888: + ctrl |= JZ_LCD_CTRL_BPP_24_COMP; + break; case DRM_FORMAT_XRGB8888: ctrl |= JZ_LCD_CTRL_BPP_18_24; break; + case DRM_FORMAT_XRGB2101010: + ctrl |= JZ_LCD_CTRL_BPP_30; + break; } regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, @@ -440,32 +517,64 @@ void ingenic_drm_plane_config(struct device *dev, } } +static void ingenic_drm_update_palette(struct ingenic_drm *priv, + const struct drm_color_lut *lut) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(priv->dma_hwdescs->palette); i++) { + u16 color = drm_color_lut_extract(lut[i].red, 5) << 11 + | drm_color_lut_extract(lut[i].green, 6) << 5 + | drm_color_lut_extract(lut[i].blue, 5); + + priv->dma_hwdescs->palette[i] = color; + } +} + static void 
ingenic_drm_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *oldstate) { struct ingenic_drm *priv = drm_device_get_priv(plane->dev); struct drm_plane_state *state = plane->state; + struct drm_crtc_state *crtc_state; struct ingenic_dma_hwdesc *hwdesc; - unsigned int width, height, cpp; + unsigned int width, height, cpp, offset; dma_addr_t addr; + u32 fourcc; if (state && state->fb) { + crtc_state = state->crtc->state; + addr = drm_fb_cma_get_gem_addr(state->fb, state, 0); width = state->src_w >> 16; height = state->src_h >> 16; cpp = state->fb->format->cpp[0]; if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY) - hwdesc = priv->dma_hwdesc_f0; + hwdesc = &priv->dma_hwdescs->hwdesc_f0; else - hwdesc = priv->dma_hwdesc_f1; + hwdesc = &priv->dma_hwdescs->hwdesc_f1; hwdesc->addr = addr; hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4); - if (drm_atomic_crtc_needs_modeset(state->crtc->state)) - ingenic_drm_plane_config(priv->dev, plane, - state->fb->format->format); + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + fourcc = state->fb->format->format; + + ingenic_drm_plane_config(priv->dev, plane, fourcc); + + if (fourcc == DRM_FORMAT_C8) + offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_pal); + else + offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); + + priv->dma_hwdescs->hwdesc_f0.next = priv->dma_hwdescs_phys + offset; + + crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8; + } + + if (crtc_state->color_mgmt_changed) + ingenic_drm_update_palette(priv, crtc_state->gamma_lut->data); } } @@ -686,6 +795,11 @@ static void ingenic_drm_unbind_all(void *d) component_unbind_all(priv->dev, &priv->drm); } +static void __maybe_unused ingenic_drm_release_rmem(void *d) +{ + of_reserved_mem_device_release(d); +} + static int ingenic_drm_bind(struct device *dev, bool has_components) { struct platform_device *pdev = to_platform_device(dev); @@ -699,6 +813,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) void __iomem *base; long parent_rate; unsigned int i, clone_mask = 0; + dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1; int ret, irq; soc_info = of_device_get_match_data(dev); @@ -707,6 +822,19 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return -EINVAL; } + if (IS_ENABLED(CONFIG_OF_RESERVED_MEM)) { + ret = of_reserved_mem_device_init(dev); + + if (ret && ret != -ENODEV) + dev_warn(dev, "Failed to get reserved memory: %d\n", ret); + + if (!ret) { + ret = devm_add_action_or_reset(dev, ingenic_drm_release_rmem, dev); + if (ret) + return ret; + } + } + priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data, struct ingenic_drm, drm); if (IS_ERR(priv)) @@ -760,26 +888,34 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return PTR_ERR(priv->pix_clk); } - priv->dma_hwdesc_f1 = dmam_alloc_coherent(dev, sizeof(*priv->dma_hwdesc_f1), - &priv->dma_hwdesc_phys_f1, - GFP_KERNEL); - if (!priv->dma_hwdesc_f1) + priv->dma_hwdescs = dmam_alloc_coherent(dev, + sizeof(*priv->dma_hwdescs), + &priv->dma_hwdescs_phys, + GFP_KERNEL); + if (!priv->dma_hwdescs) return -ENOMEM; - priv->dma_hwdesc_f1->next = priv->dma_hwdesc_phys_f1; - priv->dma_hwdesc_f1->id = 0xf1; - if (priv->soc_info->has_osd) { - priv->dma_hwdesc_f0 = dmam_alloc_coherent(dev, - sizeof(*priv->dma_hwdesc_f0), - &priv->dma_hwdesc_phys_f0, - GFP_KERNEL); - if (!priv->dma_hwdesc_f0) - return -ENOMEM; + /* Configure DMA hwdesc for foreground0 plane */ + dma_hwdesc_phys_f0 = priv->dma_hwdescs_phys + + offsetof(struct 
ingenic_dma_hwdescs, hwdesc_f0); + priv->dma_hwdescs->hwdesc_f0.next = dma_hwdesc_phys_f0; + priv->dma_hwdescs->hwdesc_f0.id = 0xf0; - priv->dma_hwdesc_f0->next = priv->dma_hwdesc_phys_f0; - priv->dma_hwdesc_f0->id = 0xf0; - } + /* Configure DMA hwdesc for foreground1 plane */ + dma_hwdesc_phys_f1 = priv->dma_hwdescs_phys + + offsetof(struct ingenic_dma_hwdescs, hwdesc_f1); + priv->dma_hwdescs->hwdesc_f1.next = dma_hwdesc_phys_f1; + priv->dma_hwdescs->hwdesc_f1.id = 0xf1; + + /* Configure DMA hwdesc for palette */ + priv->dma_hwdescs->hwdesc_pal.next = priv->dma_hwdescs_phys + + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); + priv->dma_hwdescs->hwdesc_pal.id = 0xc0; + priv->dma_hwdescs->hwdesc_pal.addr = priv->dma_hwdescs_phys + + offsetof(struct ingenic_dma_hwdescs, palette); + priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL + | (sizeof(priv->dma_hwdescs->palette) / 4); if (soc_info->has_osd) priv->ipu_plane = drm_plane_from_index(drm, 0); @@ -788,8 +924,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) ret = drm_universal_plane_init(drm, &priv->f1, 1, &ingenic_drm_primary_plane_funcs, - ingenic_drm_primary_formats, - ARRAY_SIZE(ingenic_drm_primary_formats), + priv->soc_info->formats_f1, + priv->soc_info->num_formats_f1, NULL, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { dev_err(dev, "Failed to register plane: %i\n", ret); @@ -805,14 +941,17 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return ret; } + drm_crtc_enable_color_mgmt(&priv->crtc, 0, false, + ARRAY_SIZE(priv->dma_hwdescs->palette)); + if (soc_info->has_osd) { drm_plane_helper_add(&priv->f0, &ingenic_drm_plane_helper_funcs); ret = drm_universal_plane_init(drm, &priv->f0, 1, &ingenic_drm_primary_plane_funcs, - ingenic_drm_primary_formats, - ARRAY_SIZE(ingenic_drm_primary_formats), + priv->soc_info->formats_f0, + priv->soc_info->num_formats_f0, NULL, DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) { @@ -927,23 +1066,35 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) } /* Set address of our DMA descriptor chain */ - regmap_write(priv->map, JZ_REG_LCD_DA0, priv->dma_hwdesc_phys_f0); - regmap_write(priv->map, JZ_REG_LCD_DA1, priv->dma_hwdesc_phys_f1); + regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_phys_f0); + regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_phys_f1); /* Enable OSD if available */ if (soc_info->has_osd) regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN); + mutex_init(&priv->clk_mutex); + priv->clock_nb.notifier_call = ingenic_drm_update_pixclk; + + parent_clk = clk_get_parent(priv->pix_clk); + ret = clk_notifier_register(parent_clk, &priv->clock_nb); + if (ret) { + dev_err(dev, "Unable to register clock notifier\n"); + goto err_devclk_disable; + } + ret = drm_dev_register(drm, 0); if (ret) { dev_err(dev, "Failed to register DRM driver\n"); - goto err_devclk_disable; + goto err_clk_notifier_unregister; } drm_fbdev_generic_setup(drm, 32); return 0; +err_clk_notifier_unregister: + clk_notifier_unregister(parent_clk, &priv->clock_nb); err_devclk_disable: if (priv->lcd_clk) clk_disable_unprepare(priv->lcd_clk); @@ -965,7 +1116,9 @@ static int compare_of(struct device *dev, void *data) static void ingenic_drm_unbind(struct device *dev) { struct ingenic_drm *priv = dev_get_drvdata(dev); + struct clk *parent_clk = clk_get_parent(priv->pix_clk); + clk_notifier_unregister(parent_clk, &priv->clock_nb); if (priv->lcd_clk) clk_disable_unprepare(priv->lcd_clk); clk_disable_unprepare(priv->pix_clk); @@ -1011,11 +1164,50 @@ static int 
ingenic_drm_remove(struct platform_device *pdev) return 0; } +static const u32 jz4740_formats[] = { + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, +}; + +static const u32 jz4725b_formats_f1[] = { + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, +}; + +static const u32 jz4725b_formats_f0[] = { + DRM_FORMAT_C8, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, +}; + +static const u32 jz4770_formats_f1[] = { + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XRGB2101010, +}; + +static const u32 jz4770_formats_f0[] = { + DRM_FORMAT_C8, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XRGB2101010, +}; + static const struct jz_soc_info jz4740_soc_info = { .needs_dev_clk = true, .has_osd = false, .max_width = 800, .max_height = 600, + .formats_f1 = jz4740_formats, + .num_formats_f1 = ARRAY_SIZE(jz4740_formats), + /* JZ4740 has only one plane */ }; static const struct jz_soc_info jz4725b_soc_info = { @@ -1023,6 +1215,10 @@ static const struct jz_soc_info jz4725b_soc_info = { .has_osd = true, .max_width = 800, .max_height = 600, + .formats_f1 = jz4725b_formats_f1, + .num_formats_f1 = ARRAY_SIZE(jz4725b_formats_f1), + .formats_f0 = jz4725b_formats_f0, + .num_formats_f0 = ARRAY_SIZE(jz4725b_formats_f0), }; static const struct jz_soc_info jz4770_soc_info = { @@ -1030,6 +1226,10 @@ static const struct jz_soc_info jz4770_soc_info = { .has_osd = true, .max_width = 1280, .max_height = 720, + .formats_f1 = jz4770_formats_f1, + .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1), + .formats_f0 = jz4770_formats_f0, + .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0), }; static const struct of_device_id ingenic_drm_of_match[] = { diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h index 43f7d959cff7..9b48ce02803d 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.h +++ b/drivers/gpu/drm/ingenic/ingenic-drm.h @@ -124,6 +124,8 @@ #define JZ_LCD_CTRL_BPP_8 0x3 #define JZ_LCD_CTRL_BPP_15_16 0x4 #define JZ_LCD_CTRL_BPP_18_24 0x5 +#define JZ_LCD_CTRL_BPP_24_COMP 0x6 +#define JZ_LCD_CTRL_BPP_30 0x7 #define JZ_LCD_CTRL_BPP_MASK (JZ_LCD_CTRL_RGB555 | 0x7) #define JZ_LCD_CMD_SOF_IRQ BIT(31) @@ -145,6 +147,7 @@ #define JZ_LCD_OSDCTRL_CHANGE BIT(3) #define JZ_LCD_OSDCTRL_BPP_15_16 0x4 #define JZ_LCD_OSDCTRL_BPP_18_24 0x5 +#define JZ_LCD_OSDCTRL_BPP_24_COMP 0x6 #define JZ_LCD_OSDCTRL_BPP_30 0x7 #define JZ_LCD_OSDCTRL_BPP_MASK (JZ_LCD_OSDCTRL_RGB555 | 0x7) diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 155f2b4b4030..11223fe348df 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) return ret; if (bo->base.sgt) { - dma_unmap_sg(dev, bo->base.sgt->sgl, - bo->base.sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(bo->base.sgt); } else { bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL); @@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) } } - dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL); + ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0); + if (ret) { + sg_free_table(&sgt); + kfree(bo->base.sgt); + bo->base.sgt = NULL; + return ret; + } *bo->base.sgt = sgt; diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c index 5b92fb82674a..2b2739adc7f5 100644 --- 
a/drivers/gpu/drm/lima/lima_vm.c +++ b/drivers/gpu/drm/lima/lima_vm.c @@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) if (err) goto err_out1; - for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) { + for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) { err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), bo_va->node.start + offset); if (err) @@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff) mutex_lock(&vm->lock); base = bo_va->node.start + (pageoff << PAGE_SHIFT); - for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, - bo->base.sgt->nents, pageoff) { + for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) { err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), base + offset); if (err) diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index c592957ed07f..f9b5f450a9cb 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -331,8 +331,8 @@ static int mcde_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { - ret = -EINVAL; + if (irq < 0) { + ret = irq; goto clk_disable; } diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index aa74aac3cbcc..65cd03a4be29 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -24,6 +24,6 @@ config DRM_MEDIATEK_HDMI tristate "DRM HDMI Support for Mediatek SoCs" depends on DRM_MEDIATEK select SND_SOC_HDMI_CODEC if SND_SOC - select GENERIC_PHY + select PHY_MTK_HDMI help DRM/KMS HDMI driver for Mediatek SoCs diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index b7a82ed5788f..77b0fd86063d 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -19,9 +19,6 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o mediatek-drm-hdmi-objs := mtk_cec.o \ mtk_hdmi.o \ - mtk_hdmi_ddc.o \ - mtk_mt2701_hdmi_phy.o \ - mtk_mt8173_hdmi_phy.o \ - mtk_hdmi_phy.o + mtk_hdmi_ddc.o obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index d4f0fb7ad312..cf11c4850b40 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -64,7 +64,8 @@ enum mtk_dpi_out_color_format { struct mtk_dpi { struct mtk_ddp_comp ddp_comp; struct drm_encoder encoder; - struct drm_bridge *bridge; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; void __iomem *regs; struct device *dev; struct clk *engine_clk; @@ -83,9 +84,9 @@ struct mtk_dpi { int refcount; }; -static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e) +static inline struct mtk_dpi *bridge_to_dpi(struct drm_bridge *b) { - return container_of(e, struct mtk_dpi, encoder); + return container_of(b, struct mtk_dpi, bridge); } enum mtk_dpi_polarity { @@ -521,50 +522,53 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, return 0; } -static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder) { - return true; + drm_encoder_cleanup(encoder); } -static void mtk_dpi_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = { + .destroy = mtk_dpi_encoder_destroy, +}; + +static int 
mtk_dpi_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) { - struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder); + struct mtk_dpi *dpi = bridge_to_dpi(bridge); + + return drm_bridge_attach(bridge->encoder, dpi->next_bridge, + &dpi->bridge, flags); +} + +static void mtk_dpi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct mtk_dpi *dpi = bridge_to_dpi(bridge); drm_mode_copy(&dpi->mode, adjusted_mode); } -static void mtk_dpi_encoder_disable(struct drm_encoder *encoder) +static void mtk_dpi_bridge_disable(struct drm_bridge *bridge) { - struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder); + struct mtk_dpi *dpi = bridge_to_dpi(bridge); mtk_dpi_power_off(dpi); } -static void mtk_dpi_encoder_enable(struct drm_encoder *encoder) +static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) { - struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder); + struct mtk_dpi *dpi = bridge_to_dpi(bridge); mtk_dpi_power_on(dpi); mtk_dpi_set_display_mode(dpi, &dpi->mode); } -static int mtk_dpi_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - return 0; -} - -static const struct drm_encoder_helper_funcs mtk_dpi_encoder_helper_funcs = { - .mode_fixup = mtk_dpi_encoder_mode_fixup, - .mode_set = mtk_dpi_encoder_mode_set, - .disable = mtk_dpi_encoder_disable, - .enable = mtk_dpi_encoder_enable, - .atomic_check = mtk_dpi_atomic_check, +static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = { + .attach = mtk_dpi_bridge_attach, + .mode_set = mtk_dpi_bridge_mode_set, + .disable = mtk_dpi_bridge_disable, + .enable = mtk_dpi_bridge_enable, }; static void mtk_dpi_start(struct mtk_ddp_comp *comp) @@ -605,12 +609,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data) dev_err(dev, "Failed to initialize decoder: %d\n", ret); goto err_unregister; } - drm_encoder_helper_add(&dpi->encoder, &mtk_dpi_encoder_helper_funcs); - /* Currently DPI0 is fixed to be driven by OVL1 */ - dpi->encoder.possible_crtcs = BIT(1); + dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp); - ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0); + ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0); if (ret) { dev_err(dev, "Failed to attach bridge: %d\n", ret); goto err_cleanup; @@ -770,11 +772,11 @@ static int mtk_dpi_probe(struct platform_device *pdev) } ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, - NULL, &dpi->bridge); + NULL, &dpi->next_bridge); if (ret) return ret; - dev_info(dev, "Found bridge node: %pOF\n", dpi->bridge->of_node); + dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node); comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI); if (comp_id < 0) { @@ -791,8 +793,15 @@ static int mtk_dpi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dpi); + dpi->bridge.funcs = &mtk_dpi_bridge_funcs; + dpi->bridge.of_node = dev->of_node; + dpi->bridge.type = DRM_MODE_CONNECTOR_DPI; + + drm_bridge_add(&dpi->bridge); + ret = component_add(dev, &mtk_dpi_component_ops); if (ret) { + drm_bridge_remove(&dpi->bridge); dev_err(dev, "Failed to add component: %d\n", ret); return ret; } @@ -802,7 +811,10 @@ static int mtk_dpi_probe(struct platform_device *pdev) static int mtk_dpi_remove(struct platform_device *pdev) { + struct mtk_dpi *dpi = platform_get_drvdata(pdev); + component_del(&pdev->dev, &mtk_dpi_component_ops); + 
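The mtk_dpi_bind() hunk above stops hard-coding possible_crtcs = BIT(1) (the removed "DPI0 is fixed to be driven by OVL1" assumption) and derives the mask from the SoC's DDP path tables via mtk_drm_find_possible_crtc_by_comp(), whose implementation appears in the mtk_drm_ddp_comp.c hunks further down; the drm_bridge_remove() half of mtk_dpi_remove() follows right after this aside. A minimal model of the mapping, assuming one CRTC is created per configured path (illustrative only, not driver code):

#include <linux/bits.h>

/* One display path == one CRTC index == one bit in possible_crtcs:
 * main -> CRTC 0, ext -> CRTC 1, third -> CRTC 2.
 */
static unsigned int model_possible_crtcs(bool in_main, bool in_ext,
					 bool in_third)
{
	if (in_main)
		return BIT(0);
	if (in_ext)
		return BIT(1);
	if (in_third)
		return BIT(2);
	return 0;	/* component not routed to any CRTC */
}
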
drm_bridge_remove(&dpi->bridge); return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 3fc5511330b9..23f5c10b0c67 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -481,7 +481,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) mbox_flush(mtk_crtc->cmdq_client->chan, 2000); cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); - cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); + cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); mtk_crtc_ddp_config(crtc, cmdq_handle); cmdq_pkt_finalize(cmdq_handle); cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); @@ -517,7 +517,7 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, } static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; @@ -542,7 +542,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, } static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; @@ -575,24 +575,24 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, } static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { - struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state); + struct mtk_crtc_state *crtc_state = to_mtk_crtc_state(crtc->state); struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - if (mtk_crtc->event && state->base.event) + if (mtk_crtc->event && crtc_state->base.event) DRM_ERROR("new event while there is still a pending event\n"); - if (state->base.event) { - state->base.event->pipe = drm_crtc_index(crtc); + if (crtc_state->base.event) { + crtc_state->base.event->pipe = drm_crtc_index(crtc); WARN_ON(drm_crtc_vblank_get(crtc) != 0); - mtk_crtc->event = state->base.event; - state->base.event = NULL; + mtk_crtc->event = crtc_state->base.event; + crtc_state->base.event = NULL; } } static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); int i; @@ -831,13 +831,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, drm_crtc_index(&mtk_crtc->base)); mtk_crtc->cmdq_client = NULL; } - ret = of_property_read_u32_index(priv->mutex_node, - "mediatek,gce-events", - drm_crtc_index(&mtk_crtc->base), - &mtk_crtc->cmdq_event); - if (ret) - dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", - drm_crtc_index(&mtk_crtc->base)); + + if (mtk_crtc->cmdq_client) { + ret = of_property_read_u32_index(priv->mutex_node, + "mediatek,gce-events", + drm_crtc_index(&mtk_crtc->base), + &mtk_crtc->cmdq_event); + if (ret) { + dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", + drm_crtc_index(&mtk_crtc->base)); + cmdq_mbox_destroy(mtk_crtc->cmdq_client); + mtk_crtc->cmdq_client = NULL; + } + } #endif return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 57c88de9a329..8eba44be3a8a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ 
b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -13,6 +13,8 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/soc/mediatek/mtk-cmdq.h> +#include <drm/drm_print.h> + #include "mtk_drm_drv.h" #include "mtk_drm_plane.h" #include "mtk_drm_ddp_comp.h" @@ -412,6 +414,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; +static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp, + const enum mtk_ddp_comp_id *path, + unsigned int path_len) +{ + unsigned int i; + + if (path == NULL) + return false; + + for (i = 0U; i < path_len; i++) + if (ddp_comp.id == path[i]) + return true; + + return false; +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type) { @@ -427,6 +445,26 @@ int mtk_ddp_comp_get_id(struct device_node *node, return -EINVAL; } +unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm, + struct mtk_ddp_comp ddp_comp) +{ + struct mtk_drm_private *private = drm->dev_private; + unsigned int ret = 0; + + if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path, private->data->main_len)) + ret = BIT(0); + else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path, + private->data->ext_len)) + ret = BIT(1); + else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path, + private->data->third_len)) + ret = BIT(2); + else + DRM_INFO("Failed to find comp in ddp table\n"); + + return ret; +} + int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id, const struct mtk_ddp_comp_funcs *funcs) @@ -496,6 +534,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, #if IS_REACHABLE(CONFIG_MTK_CMDQ) if (of_address_to_resource(node, 0, &res) != 0) { dev_err(dev, "Missing reg in %s node\n", node->full_name); + put_device(&larb_pdev->dev); return -EINVAL; } comp->regs_pa = res.start; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index debe36395fe7..1d9e00b69462 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -202,6 +202,8 @@ static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp, int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); +unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm, + struct mtk_ddp_comp ddp_comp); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id, const struct mtk_ddp_comp_funcs *funcs); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 040a8f393fe2..7f3398a7c2b0 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -27,7 +27,6 @@ #include "mtk_drm_crtc.h" #include "mtk_drm_ddp.h" -#include "mtk_drm_ddp.h" #include "mtk_drm_ddp_comp.h" #include "mtk_drm_drv.h" #include "mtk_drm_gem.h" @@ -74,6 +73,19 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = { DDP_COMPONENT_DPI0, }; +static const enum mtk_ddp_comp_id mt7623_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_BLS, + DDP_COMPONENT_DPI0, +}; + +static const enum mtk_ddp_comp_id mt7623_mtk_ddp_ext[] = { + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DSI0, +}; + static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = { DDP_COMPONENT_OVL0, 
DDP_COMPONENT_COLOR0, @@ -127,6 +139,14 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .shadow_register = true, }; +static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = { + .main_path = mt7623_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt7623_mtk_ddp_main), + .ext_path = mt7623_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext), + .shadow_register = true, +}; + static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = { .main_path = mt2712_mtk_ddp_main, .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main), @@ -165,7 +185,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) ret = drmm_mode_config_init(drm); if (ret) - return ret; + goto put_mutex_dev; drm->mode_config.min_width = 64; drm->mode_config.min_height = 64; @@ -182,7 +202,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) ret = component_bind_all(drm->dev, drm); if (ret) - return ret; + goto put_mutex_dev; /* * We currently support two fixed data streams, each optional, @@ -229,7 +249,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) } if (!dma_dev->dma_parms) { ret = -ENOMEM; - goto err_component_unbind; + goto put_dma_dev; } ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32)); @@ -256,9 +276,12 @@ static int mtk_drm_kms_init(struct drm_device *drm) err_unset_dma_parms: if (private->dma_parms_allocated) dma_dev->dma_parms = NULL; +put_dma_dev: + put_device(private->dma_dev); err_component_unbind: component_unbind_all(drm->dev, drm); - +put_mutex_dev: + put_device(private->mutex_dev); return ret; } @@ -301,18 +324,13 @@ struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, static struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .gem_free_object_unlocked = mtk_drm_gem_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = mtk_drm_gem_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = mtk_drm_gem_prime_import, - .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf, - .gem_prime_vmap = mtk_drm_gem_prime_vmap, - .gem_prime_vunmap = mtk_drm_gem_prime_vunmap, .fops = &mtk_drm_fops, .name = DRIVER_NAME, @@ -422,6 +440,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { static const struct of_device_id mtk_drm_of_ids[] = { { .compatible = "mediatek,mt2701-mmsys", .data = &mt2701_mmsys_driver_data}, + { .compatible = "mediatek,mt7623-mmsys", + .data = &mt7623_mmsys_driver_data}, { .compatible = "mediatek,mt2712-mmsys", .data = &mt2712_mmsys_driver_data}, { .compatible = "mediatek,mt8173-mmsys", @@ -544,8 +564,13 @@ err_pm: pm_runtime_disable(dev); err_node: of_node_put(private->mutex_node); - for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) + for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) { of_node_put(private->comp_node[i]); + if (private->ddp_comp[i]) { + put_device(private->ddp_comp[i]->larb_dev); + private->ddp_comp[i] = NULL; + } + } return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 6190cc3b7b0d..cdd1a6e61564 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -8,11 +8,20 @@ #include <drm/drm.h> #include <drm/drm_device.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_cma_helper.h> #include <drm/drm_prime.h> #include "mtk_drm_drv.h" #include "mtk_drm_gem.h" +static const struct 
drm_gem_object_funcs mtk_drm_gem_object_funcs = { + .free = mtk_drm_gem_free_object, + .get_sg_table = mtk_gem_prime_get_sg_table, + .vmap = mtk_drm_gem_prime_vmap, + .vunmap = mtk_drm_gem_prime_vunmap, + .vm_ops = &drm_gem_cma_vm_ops, +}; + static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev, unsigned long size) { @@ -25,6 +34,8 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev, if (!mtk_gem_obj) return ERR_PTR(-ENOMEM); + mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs; + ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size); if (ret < 0) { DRM_ERROR("failed to initialize gem object\n"); @@ -212,46 +223,28 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { struct mtk_drm_gem_obj *mtk_gem; - int ret; - struct scatterlist *s; - unsigned int i; - dma_addr_t expected; - mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size); + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { + DRM_ERROR("sg_table is not contiguous"); + return ERR_PTR(-EINVAL); + } + mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size); if (IS_ERR(mtk_gem)) return ERR_CAST(mtk_gem); - expected = sg_dma_address(sg->sgl); - for_each_sg(sg->sgl, s, sg->nents, i) { - if (!sg_dma_len(s)) - break; - - if (sg_dma_address(s) != expected) { - DRM_ERROR("sg_table is not contiguous"); - ret = -EINVAL; - goto err_gem_free; - } - expected = sg_dma_address(s) + sg_dma_len(s); - } - mtk_gem->dma_addr = sg_dma_address(sg->sgl); mtk_gem->sg = sg; return &mtk_gem->base; - -err_gem_free: - kfree(mtk_gem); - return ERR_PTR(ret); } void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); struct sg_table *sgt; - struct sg_page_iter iter; unsigned int npages; - unsigned int i = 0; if (mtk_gem->kvaddr) return mtk_gem->kvaddr; @@ -265,11 +258,8 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) if (!mtk_gem->pages) goto out; - for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { - mtk_gem->pages[i++] = sg_page_iter_page(&iter); - if (i > npages) - break; - } + drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages); + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 16fd99dcdacf..4a188a942c38 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -466,14 +466,13 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10); if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) - horizontal_backporch_byte = - (vm->hback_porch * dsi_tmp_buf_bpp - 10); + horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp; else - horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) * - dsi_tmp_buf_bpp - 10); + horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) * + dsi_tmp_buf_bpp; data_phy_cycles = timing->lpx + timing->da_hs_prepare + - timing->da_hs_zero + timing->da_hs_exit + 3; + timing->da_hs_zero + timing->da_hs_exit; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > @@ -970,11 +969,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi) return ret; } - /* - * Currently display data paths are statically assigned to a crtc 
each. - * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 - */ - dsi->encoder.possible_crtcs = 1; + dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp); ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index f2e9b429960b..97a1ff529a1d 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/of.h> @@ -145,11 +146,16 @@ struct hdmi_audio_param { struct hdmi_codec_params codec_params; }; +struct mtk_hdmi_conf { + bool tz_disabled; +}; + struct mtk_hdmi { struct drm_bridge bridge; struct drm_bridge *next_bridge; struct drm_connector conn; struct device *dev; + const struct mtk_hdmi_conf *conf; struct phy *phy; struct device *cec_dev; struct i2c_adapter *ddc_adpt; @@ -234,7 +240,6 @@ static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black) static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable) { struct arm_smccc_res res; - struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(hdmi->phy); /* * MT8173 HDMI hardware has an output control bit to enable/disable HDMI @@ -242,7 +247,7 @@ static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable) * The ARM trusted firmware provides an API for the HDMI driver to set * this control bit to enable HDMI output in supervisor mode. */ - if (hdmi_phy->conf && hdmi_phy->conf->tz_disabled) + if (hdmi->conf && hdmi->conf->tz_disabled) regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20, 0x80008005, enable ? 
0x80000005 : 0x8000); @@ -1507,25 +1512,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to get system configuration registers: %d\n", ret); - return ret; + goto put_device; } hdmi->sys_regmap = regmap; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdmi->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(hdmi->regs)) - return PTR_ERR(hdmi->regs); + if (IS_ERR(hdmi->regs)) { + ret = PTR_ERR(hdmi->regs); + goto put_device; + } remote = of_graph_get_remote_node(np, 1, 0); - if (!remote) - return -EINVAL; + if (!remote) { + ret = -EINVAL; + goto put_device; + } if (!of_device_is_compatible(remote, "hdmi-connector")) { hdmi->next_bridge = of_drm_find_bridge(remote); if (!hdmi->next_bridge) { dev_err(dev, "Waiting for external bridge\n"); of_node_put(remote); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto put_device; } } @@ -1534,7 +1544,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n", remote); of_node_put(remote); - return -EINVAL; + ret = -EINVAL; + goto put_device; } of_node_put(remote); @@ -1542,10 +1553,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); - return -EINVAL; + ret = -EINVAL; + goto put_device; } return 0; +put_device: + put_device(hdmi->cec_dev); + return ret; } /* @@ -1723,6 +1738,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev) return -ENOMEM; hdmi->dev = dev; + hdmi->conf = of_device_get_match_data(dev); ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev); if (ret) @@ -1803,8 +1819,16 @@ static int mtk_hdmi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, mtk_hdmi_suspend, mtk_hdmi_resume); +static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = { + .tz_disabled = true, +}; + static const struct of_device_id mtk_drm_hdmi_of_ids[] = { - { .compatible = "mediatek,mt8173-hdmi", }, + { .compatible = "mediatek,mt2701-hdmi", + .data = &mtk_hdmi_conf_mt2701, + }, + { .compatible = "mediatek,mt8173-hdmi", + }, {} }; @@ -1819,7 +1843,6 @@ static struct platform_driver mtk_hdmi_driver = { }; static struct platform_driver * const mtk_hdmi_drivers[] = { - &mtk_hdmi_phy_driver, &mtk_hdmi_ddc_driver, &mtk_cec_driver, &mtk_hdmi_driver, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h index bb3653de6bd1..472bf141c92b 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.h +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h @@ -5,7 +5,6 @@ */ #ifndef _MTK_HDMI_CTRL_H #define _MTK_HDMI_CTRL_H -#include "mtk_hdmi_phy.h" struct platform_driver; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c deleted file mode 100644 index 5223498502c4..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c +++ /dev/null @@ -1,210 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018 MediaTek Inc. 
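Here the in-DRM copy of the MediaTek HDMI PHY driver starts to go away (the header comment of the deleted mtk_hdmi_phy.c continues below): the mediatek Kconfig hunk earlier in this series selects PHY_MTK_HDMI instead of GENERIC_PHY, so the same hardware is now driven from drivers/phy and mtk_hdmi reaches it only through the generic PHY framework. A sketch of that consumer side; the "hdmi" lookup name is an assumption drawn from typical DT bindings, not from this patch:

#include <linux/err.h>
#include <linux/phy/phy.h>

/* Acquire and power the TMDS PHY through the generic PHY API
 * instead of calling into a driver-local mtk_hdmi_phy struct.
 */
static int example_enable_hdmi_phy(struct device *dev)
{
	struct phy *phy = devm_phy_get(dev, "hdmi");	/* name assumed */

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	return phy_power_on(phy);	/* paired with phy_power_off() */
}
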
- * Author: Jie Qiu <jie.qiu@mediatek.com> - */ - -#include "mtk_hdmi_phy.h" - -static int mtk_hdmi_phy_power_on(struct phy *phy); -static int mtk_hdmi_phy_power_off(struct phy *phy); - -static const struct phy_ops mtk_hdmi_phy_dev_ops = { - .power_on = mtk_hdmi_phy_power_on, - .power_off = mtk_hdmi_phy_power_off, - .owner = THIS_MODULE, -}; - -void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, - u32 bits) -{ - void __iomem *reg = hdmi_phy->regs + offset; - u32 tmp; - - tmp = readl(reg); - tmp &= ~bits; - writel(tmp, reg); -} - -void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, - u32 bits) -{ - void __iomem *reg = hdmi_phy->regs + offset; - u32 tmp; - - tmp = readl(reg); - tmp |= bits; - writel(tmp, reg); -} - -void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, - u32 val, u32 mask) -{ - void __iomem *reg = hdmi_phy->regs + offset; - u32 tmp; - - tmp = readl(reg); - tmp = (tmp & ~mask) | (val & mask); - writel(tmp, reg); -} - -inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw) -{ - return container_of(hw, struct mtk_hdmi_phy, pll_hw); -} - -static int mtk_hdmi_phy_power_on(struct phy *phy) -{ - struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy); - int ret; - - ret = clk_prepare_enable(hdmi_phy->pll); - if (ret < 0) - return ret; - - hdmi_phy->conf->hdmi_phy_enable_tmds(hdmi_phy); - return 0; -} - -static int mtk_hdmi_phy_power_off(struct phy *phy) -{ - struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy); - - hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy); - clk_disable_unprepare(hdmi_phy->pll); - - return 0; -} - -static const struct phy_ops * -mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy) -{ - if (hdmi_phy && hdmi_phy->conf && - hdmi_phy->conf->hdmi_phy_enable_tmds && - hdmi_phy->conf->hdmi_phy_disable_tmds) - return &mtk_hdmi_phy_dev_ops; - - dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n"); - return NULL; -} - -static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy, - struct clk_init_data *clk_init) -{ - clk_init->flags = hdmi_phy->conf->flags; - clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops; -} - -static int mtk_hdmi_phy_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct mtk_hdmi_phy *hdmi_phy; - struct resource *mem; - struct clk *ref_clk; - const char *ref_clk_name; - struct clk_init_data clk_init = { - .num_parents = 1, - .parent_names = (const char * const *)&ref_clk_name, - }; - - struct phy *phy; - struct phy_provider *phy_provider; - int ret; - - hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL); - if (!hdmi_phy) - return -ENOMEM; - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdmi_phy->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(hdmi_phy->regs)) { - ret = PTR_ERR(hdmi_phy->regs); - dev_err(dev, "Failed to get memory resource: %d\n", ret); - return ret; - } - - ref_clk = devm_clk_get(dev, "pll_ref"); - if (IS_ERR(ref_clk)) { - ret = PTR_ERR(ref_clk); - dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n", - ret); - return ret; - } - ref_clk_name = __clk_get_name(ref_clk); - - ret = of_property_read_string(dev->of_node, "clock-output-names", - &clk_init.name); - if (ret < 0) { - dev_err(dev, "Failed to read clock-output-names: %d\n", ret); - return ret; - } - - hdmi_phy->dev = dev; - hdmi_phy->conf = - (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev); - mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init); - hdmi_phy->pll_hw.init = &clk_init; - hdmi_phy->pll = devm_clk_register(dev, 
&hdmi_phy->pll_hw); - if (IS_ERR(hdmi_phy->pll)) { - ret = PTR_ERR(hdmi_phy->pll); - dev_err(dev, "Failed to register PLL: %d\n", ret); - return ret; - } - - ret = of_property_read_u32(dev->of_node, "mediatek,ibias", - &hdmi_phy->ibias); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret); - return ret; - } - - ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up", - &hdmi_phy->ibias_up); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret); - return ret; - } - - dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n"); - hdmi_phy->drv_imp_clk = 0x30; - hdmi_phy->drv_imp_d2 = 0x30; - hdmi_phy->drv_imp_d1 = 0x30; - hdmi_phy->drv_imp_d0 = 0x30; - - phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy)); - if (IS_ERR(phy)) { - dev_err(dev, "Failed to create HDMI PHY\n"); - return PTR_ERR(phy); - } - phy_set_drvdata(phy, hdmi_phy); - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) { - dev_err(dev, "Failed to register HDMI PHY\n"); - return PTR_ERR(phy_provider); - } - - return of_clk_add_provider(dev->of_node, of_clk_src_simple_get, - hdmi_phy->pll); -} - -static const struct of_device_id mtk_hdmi_phy_match[] = { - { .compatible = "mediatek,mt2701-hdmi-phy", - .data = &mtk_hdmi_phy_2701_conf, - }, - { .compatible = "mediatek,mt8173-hdmi-phy", - .data = &mtk_hdmi_phy_8173_conf, - }, - {}, -}; - -struct platform_driver mtk_hdmi_phy_driver = { - .probe = mtk_hdmi_phy_probe, - .driver = { - .name = "mediatek-hdmi-phy", - .of_match_table = mtk_hdmi_phy_match, - }, -}; - -MODULE_DESCRIPTION("MediaTek HDMI PHY Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h deleted file mode 100644 index 2d8b3182470d..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2018 MediaTek Inc. 
- * Author: Chunhui Dai <chunhui.dai@mediatek.com> - */ - -#ifndef _MTK_HDMI_PHY_H -#define _MTK_HDMI_PHY_H -#include <linux/clk.h> -#include <linux/clk-provider.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/mfd/syscon.h> -#include <linux/module.h> -#include <linux/of_device.h> -#include <linux/phy/phy.h> -#include <linux/platform_device.h> -#include <linux/types.h> - -struct mtk_hdmi_phy; - -struct mtk_hdmi_phy_conf { - bool tz_disabled; - unsigned long flags; - const struct clk_ops *hdmi_phy_clk_ops; - void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy); - void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy); -}; - -struct mtk_hdmi_phy { - void __iomem *regs; - struct device *dev; - struct mtk_hdmi_phy_conf *conf; - struct clk *pll; - struct clk_hw pll_hw; - unsigned long pll_rate; - unsigned char drv_imp_clk; - unsigned char drv_imp_d2; - unsigned char drv_imp_d1; - unsigned char drv_imp_d0; - unsigned int ibias; - unsigned int ibias_up; -}; - -void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, - u32 bits); -void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, - u32 bits); -void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, - u32 val, u32 mask); -struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw); - -extern struct platform_driver mtk_hdmi_phy_driver; -extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf; -extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf; - -#endif /* _MTK_HDMI_PHY_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c deleted file mode 100644 index d3cc4022e988..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c +++ /dev/null @@ -1,249 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018 MediaTek Inc. 
- * Author: Chunhui Dai <chunhui.dai@mediatek.com> - */ - -#include "mtk_hdmi_phy.h" - -#define HDMI_CON0 0x00 -#define RG_HDMITX_DRV_IBIAS 0 -#define RG_HDMITX_DRV_IBIAS_MASK (0x3f << 0) -#define RG_HDMITX_EN_SER 12 -#define RG_HDMITX_EN_SER_MASK (0x0f << 12) -#define RG_HDMITX_EN_SLDO 16 -#define RG_HDMITX_EN_SLDO_MASK (0x0f << 16) -#define RG_HDMITX_EN_PRED 20 -#define RG_HDMITX_EN_PRED_MASK (0x0f << 20) -#define RG_HDMITX_EN_IMP 24 -#define RG_HDMITX_EN_IMP_MASK (0x0f << 24) -#define RG_HDMITX_EN_DRV 28 -#define RG_HDMITX_EN_DRV_MASK (0x0f << 28) - -#define HDMI_CON1 0x04 -#define RG_HDMITX_PRED_IBIAS 18 -#define RG_HDMITX_PRED_IBIAS_MASK (0x0f << 18) -#define RG_HDMITX_PRED_IMP (0x01 << 22) -#define RG_HDMITX_DRV_IMP 26 -#define RG_HDMITX_DRV_IMP_MASK (0x3f << 26) - -#define HDMI_CON2 0x08 -#define RG_HDMITX_EN_TX_CKLDO (0x01 << 0) -#define RG_HDMITX_EN_TX_POSDIV (0x01 << 1) -#define RG_HDMITX_TX_POSDIV 3 -#define RG_HDMITX_TX_POSDIV_MASK (0x03 << 3) -#define RG_HDMITX_EN_MBIAS (0x01 << 6) -#define RG_HDMITX_MBIAS_LPF_EN (0x01 << 7) - -#define HDMI_CON4 0x10 -#define RG_HDMITX_RESERVE_MASK (0xffffffff << 0) - -#define HDMI_CON6 0x18 -#define RG_HTPLL_BR 0 -#define RG_HTPLL_BR_MASK (0x03 << 0) -#define RG_HTPLL_BC 2 -#define RG_HTPLL_BC_MASK (0x03 << 2) -#define RG_HTPLL_BP 4 -#define RG_HTPLL_BP_MASK (0x0f << 4) -#define RG_HTPLL_IR 8 -#define RG_HTPLL_IR_MASK (0x0f << 8) -#define RG_HTPLL_IC 12 -#define RG_HTPLL_IC_MASK (0x0f << 12) -#define RG_HTPLL_POSDIV 16 -#define RG_HTPLL_POSDIV_MASK (0x03 << 16) -#define RG_HTPLL_PREDIV 18 -#define RG_HTPLL_PREDIV_MASK (0x03 << 18) -#define RG_HTPLL_FBKSEL 20 -#define RG_HTPLL_FBKSEL_MASK (0x03 << 20) -#define RG_HTPLL_RLH_EN (0x01 << 22) -#define RG_HTPLL_FBKDIV 24 -#define RG_HTPLL_FBKDIV_MASK (0x7f << 24) -#define RG_HTPLL_EN (0x01 << 31) - -#define HDMI_CON7 0x1c -#define RG_HTPLL_AUTOK_EN (0x01 << 23) -#define RG_HTPLL_DIVEN 28 -#define RG_HTPLL_DIVEN_MASK (0x07 << 28) - -static int mtk_hdmi_pll_prepare(struct clk_hw *hw) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS); - usleep_range(80, 100); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); - usleep_range(80, 100); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); - usleep_range(80, 100); - return 0; -} - -static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - usleep_range(80, 100); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN); - usleep_range(80, 100); 
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN); - usleep_range(80, 100); -} - -static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - return rate; -} - -static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - u32 pos_div; - - if (rate <= 64000000) - pos_div = 3; - else if (rate <= 128000000) - pos_div = 2; - else - pos_div = 1; - - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC), - RG_HTPLL_IC_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR), - RG_HTPLL_IR_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV), - RG_HDMITX_TX_POSDIV_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL), - RG_HTPLL_FBKSEL_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV), - RG_HTPLL_FBKDIV_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN), - RG_HTPLL_DIVEN_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP), - RG_HTPLL_BP_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC), - RG_HTPLL_BC_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR), - RG_HTPLL_BR_MASK); - - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS), - RG_HDMITX_PRED_IBIAS_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP), - RG_HDMITX_DRV_IMP_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS), - RG_HDMITX_DRV_IBIAS_MASK); - return 0; -} - -static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - unsigned long out_rate, val; - - val = (readl(hdmi_phy->regs + HDMI_CON6) - & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV; - switch (val) { - case 0x00: - out_rate = parent_rate; - break; - case 0x01: - out_rate = parent_rate / 2; - break; - default: - out_rate = parent_rate / 4; - break; - } - - val = (readl(hdmi_phy->regs + HDMI_CON6) - & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV; - out_rate *= (val + 1) * 2; - val = (readl(hdmi_phy->regs + HDMI_CON2) - & RG_HDMITX_TX_POSDIV_MASK); - out_rate >>= (val >> RG_HDMITX_TX_POSDIV); - - if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV) - out_rate /= 5; - - return out_rate; -} - -static const struct clk_ops mtk_hdmi_phy_pll_ops = { - .prepare = mtk_hdmi_pll_prepare, - .unprepare = mtk_hdmi_pll_unprepare, - .set_rate = mtk_hdmi_pll_set_rate, - .round_rate = mtk_hdmi_pll_round_rate, - .recalc_rate = mtk_hdmi_pll_recalc_rate, -}; - -static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy) -{ - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, 
RG_HTPLL_POSDIV_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS); - usleep_range(80, 100); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); - usleep_range(80, 100); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); - usleep_range(80, 100); -} - -static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) -{ - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - usleep_range(80, 100); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN); - usleep_range(80, 100); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN); - usleep_range(80, 100); -} - -struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = { - .tz_disabled = true, - .flags = CLK_SET_RATE_GATE, - .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, - .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, - .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, -}; - -MODULE_AUTHOR("Chunhui Dai <chunhui.dai@mediatek.com>"); -MODULE_DESCRIPTION("MediaTek HDMI PHY Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c deleted file mode 100644 index 827b93786fac..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ /dev/null @@ -1,282 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014 MediaTek Inc. 
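Both deleted PHY files define every register field twice, as a mask plus a matching *_SHIFT constant, and combine them by hand at each call site. For comparison, the kernel's <linux/bitfield.h> derives the shift from the mask at compile time, which removes one class of copy-paste error; a sketch with an illustrative field (names are not from this patch):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_PLL_FBKDIV	GENMASK(30, 24)	/* 7-bit feedback divider */

/* Read-modify-write of one field; FIELD_PREP() shifts the value
 * into place, so no separate FBKDIV_SHIFT constant is needed.
 */
static u32 ex_set_fbkdiv(u32 reg, u32 div)
{
	reg &= ~EX_PLL_FBKDIV;
	reg |= FIELD_PREP(EX_PLL_FBKDIV, div);
	return reg;
}
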
- * Author: Jie Qiu <jie.qiu@mediatek.com> - */ - -#include "mtk_hdmi_phy.h" - -#define HDMI_CON0 0x00 -#define RG_HDMITX_PLL_EN BIT(31) -#define RG_HDMITX_PLL_FBKDIV (0x7f << 24) -#define PLL_FBKDIV_SHIFT 24 -#define RG_HDMITX_PLL_FBKSEL (0x3 << 22) -#define PLL_FBKSEL_SHIFT 22 -#define RG_HDMITX_PLL_PREDIV (0x3 << 20) -#define PREDIV_SHIFT 20 -#define RG_HDMITX_PLL_POSDIV (0x3 << 18) -#define POSDIV_SHIFT 18 -#define RG_HDMITX_PLL_RST_DLY (0x3 << 16) -#define RG_HDMITX_PLL_IR (0xf << 12) -#define PLL_IR_SHIFT 12 -#define RG_HDMITX_PLL_IC (0xf << 8) -#define PLL_IC_SHIFT 8 -#define RG_HDMITX_PLL_BP (0xf << 4) -#define PLL_BP_SHIFT 4 -#define RG_HDMITX_PLL_BR (0x3 << 2) -#define PLL_BR_SHIFT 2 -#define RG_HDMITX_PLL_BC (0x3 << 0) -#define PLL_BC_SHIFT 0 -#define HDMI_CON1 0x04 -#define RG_HDMITX_PLL_DIVEN (0x7 << 29) -#define PLL_DIVEN_SHIFT 29 -#define RG_HDMITX_PLL_AUTOK_EN BIT(28) -#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26) -#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24) -#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23) -#define RG_HDMITX_PLL_BAND (0x3f << 16) -#define RG_HDMITX_PLL_REF_SEL BIT(15) -#define RG_HDMITX_PLL_BIAS_EN BIT(14) -#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13) -#define RG_HDMITX_PLL_TXDIV_EN BIT(12) -#define RG_HDMITX_PLL_TXDIV (0x3 << 10) -#define PLL_TXDIV_SHIFT 10 -#define RG_HDMITX_PLL_LVROD_EN BIT(9) -#define RG_HDMITX_PLL_MONVC_EN BIT(8) -#define RG_HDMITX_PLL_MONCK_EN BIT(7) -#define RG_HDMITX_PLL_MONREF_EN BIT(6) -#define RG_HDMITX_PLL_TST_EN BIT(5) -#define RG_HDMITX_PLL_TST_CK_EN BIT(4) -#define RG_HDMITX_PLL_TST_SEL (0xf << 0) -#define HDMI_CON2 0x08 -#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8) -#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1) -#define RG_HDMITX_EN_TX_CKLDO BIT(0) -#define HDMI_CON3 0x0c -#define RG_HDMITX_SER_EN (0xf << 28) -#define RG_HDMITX_PRD_EN (0xf << 24) -#define RG_HDMITX_PRD_IMP_EN (0xf << 20) -#define RG_HDMITX_DRV_EN (0xf << 16) -#define RG_HDMITX_DRV_IMP_EN (0xf << 12) -#define DRV_IMP_EN_SHIFT 12 -#define RG_HDMITX_MHLCK_FORCE BIT(10) -#define RG_HDMITX_MHLCK_PPIX_EN BIT(9) -#define RG_HDMITX_MHLCK_EN BIT(8) -#define RG_HDMITX_SER_DIN_SEL (0xf << 4) -#define RG_HDMITX_SER_5T1_BIST_EN BIT(3) -#define RG_HDMITX_SER_BIST_TOG BIT(2) -#define RG_HDMITX_SER_DIN_TOG BIT(1) -#define RG_HDMITX_SER_CLKDIG_INV BIT(0) -#define HDMI_CON4 0x10 -#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24) -#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16) -#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8) -#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0) -#define PRD_IBIAS_CLK_SHIFT 24 -#define PRD_IBIAS_D2_SHIFT 16 -#define PRD_IBIAS_D1_SHIFT 8 -#define PRD_IBIAS_D0_SHIFT 0 -#define HDMI_CON5 0x14 -#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24) -#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16) -#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8) -#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0) -#define DRV_IBIAS_CLK_SHIFT 24 -#define DRV_IBIAS_D2_SHIFT 16 -#define DRV_IBIAS_D1_SHIFT 8 -#define DRV_IBIAS_D0_SHIFT 0 -#define HDMI_CON6 0x18 -#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24) -#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16) -#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8) -#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0) -#define DRV_IMP_CLK_SHIFT 24 -#define DRV_IMP_D2_SHIFT 16 -#define DRV_IMP_D1_SHIFT 8 -#define DRV_IMP_D0_SHIFT 0 -#define HDMI_CON7 0x1c -#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27) -#define RG_HDMITX_SER_DIN (0x3ff << 16) -#define RG_HDMITX_CHLDC_TST (0xf << 12) -#define RG_HDMITX_CHLCK_TST (0xf << 8) -#define RG_HDMITX_RESERVE (0xff << 0) -#define HDMI_CON8 0x20 -#define RGS_HDMITX_2T1_LEV (0xf << 16) 
-#define RGS_HDMITX_2T1_EDG (0xf << 12) -#define RGS_HDMITX_5T1_LEV (0xf << 8) -#define RGS_HDMITX_5T1_EDG (0xf << 4) -#define RGS_HDMITX_PLUG_TST BIT(0) - -static int mtk_hdmi_pll_prepare(struct clk_hw *hw) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN); - usleep_range(100, 150); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN); - usleep_range(100, 150); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN); - - return 0; -} - -static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN); - usleep_range(100, 150); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN); - usleep_range(100, 150); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN); - usleep_range(100, 150); -} - -static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - hdmi_phy->pll_rate = rate; - if (rate <= 74250000) - *parent_rate = rate; - else - *parent_rate = rate / 2; - - return rate; -} - -static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - unsigned int pre_div; - unsigned int div; - unsigned int pre_ibias; - unsigned int hdmi_ibias; - unsigned int imp_en; - - dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, - rate, parent_rate); - - if (rate <= 27000000) { - pre_div = 0; - div = 3; - } else if (rate <= 74250000) { - pre_div = 1; - div = 2; - } else { - pre_div = 1; - div = 1; - } - - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, - (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, - (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT), - RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, - (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, - (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT), - RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, - (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, - (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) | - (0x1 << PLL_BR_SHIFT), - RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | - RG_HDMITX_PLL_BR); - if (rate < 165000000) { - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, - RG_HDMITX_PRD_IMP_EN); - pre_ibias = 0x3; - imp_en = 0x0; - hdmi_ibias = hdmi_phy->ibias; - } else { - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, - RG_HDMITX_PRD_IMP_EN); - pre_ibias = 0x6; - imp_en = 0xf; - hdmi_ibias = hdmi_phy->ibias_up; - } - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, - (pre_ibias << PRD_IBIAS_CLK_SHIFT) | - (pre_ibias << PRD_IBIAS_D2_SHIFT) | - (pre_ibias << PRD_IBIAS_D1_SHIFT) | - (pre_ibias 
<< PRD_IBIAS_D0_SHIFT), - RG_HDMITX_PRD_IBIAS_CLK | - RG_HDMITX_PRD_IBIAS_D2 | - RG_HDMITX_PRD_IBIAS_D1 | - RG_HDMITX_PRD_IBIAS_D0); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, - (imp_en << DRV_IMP_EN_SHIFT), - RG_HDMITX_DRV_IMP_EN); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, - (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | - (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | - (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) | - (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT), - RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | - RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); - mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, - (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) | - (hdmi_ibias << DRV_IBIAS_D2_SHIFT) | - (hdmi_ibias << DRV_IBIAS_D1_SHIFT) | - (hdmi_ibias << DRV_IBIAS_D0_SHIFT), - RG_HDMITX_DRV_IBIAS_CLK | - RG_HDMITX_DRV_IBIAS_D2 | - RG_HDMITX_DRV_IBIAS_D1 | - RG_HDMITX_DRV_IBIAS_D0); - return 0; -} - -static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - return hdmi_phy->pll_rate; -} - -static const struct clk_ops mtk_hdmi_phy_pll_ops = { - .prepare = mtk_hdmi_pll_prepare, - .unprepare = mtk_hdmi_pll_unprepare, - .set_rate = mtk_hdmi_pll_set_rate, - .round_rate = mtk_hdmi_pll_round_rate, - .recalc_rate = mtk_hdmi_pll_recalc_rate, -}; - -static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy) -{ - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, - RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN | - RG_HDMITX_DRV_EN); - usleep_range(100, 150); -} - -static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) -{ - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, - RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN | - RG_HDMITX_SER_EN); -} - -struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = { - .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, - .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, - .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, - .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, -}; - -MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>"); -MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 2854272dc2d9..d70616da8ce2 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -82,7 +82,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { }; static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct drm_crtc_state *crtc_state = crtc->state; @@ -118,7 +118,7 @@ static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc, } static void meson_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct drm_crtc_state *crtc_state = crtc->state; @@ -146,7 +146,7 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc, } static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct meson_drm *priv = meson_crtc->priv; @@ -171,7 +171,7 @@ static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc, } static void meson_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 
struct meson_drm *priv = meson_crtc->priv; @@ -201,7 +201,7 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, } static void meson_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); unsigned long flags; @@ -217,7 +217,7 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, } static void meson_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct meson_crtc *meson_crtc = to_meson_crtc(crtc); struct meson_drm *priv = meson_crtc->priv; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 6deaa7d01654..e5816b498494 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -6,8 +6,8 @@ config DRM_MSM depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST) depends on OF && COMMON_CLK depends on MMU - depends on INTERCONNECT || !INTERCONNECT depends on QCOM_OCMEM || QCOM_OCMEM=n + select IOMMU_IO_PGTABLE select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER @@ -57,6 +57,15 @@ config DRM_MSM_HDMI_HDCP help Choose this option to enable HDCP state machine +config DRM_MSM_DP + bool "Enable DisplayPort support in MSM DRM driver" + depends on DRM_MSM + default y + help + Compile in support for DP driver in MSM DRM driver. DP external + display support is enabled through this config option. It can + be primary or secondary display on device. + config DRM_MSM_DSI bool "Enable DSI support in MSM DRM driver" depends on DRM_MSM @@ -110,3 +119,11 @@ config DRM_MSM_DSI_10NM_PHY default y help Choose this option if DSI PHY on SDM845 is used on the platform. + +config DRM_MSM_DSI_7NM_PHY + bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)" + depends on DRM_MSM_DSI + default y + help + Choose this option if DSI PHY on SM8150/SM8250 is used on the + platform. 
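The meson hunks above, like the mediatek mtk_drm_crtc.c hunks earlier, convert the CRTC helper callbacks from receiving the old per-callback drm_crtc_state to receiving the whole drm_atomic_state. Under the new prototype a driver that still wants the outgoing state asks for it explicitly; a minimal sketch of a new-style callback (hypothetical driver, not from this patch):

#include <drm/drm_atomic.h>

static void example_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	/* The old argument is still reachable on demand: */
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* crtc->state (equivalently drm_atomic_get_new_crtc_state())
	 * is the state being committed; old_state is what it replaces.
	 */
	(void)old_state;
}
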
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 42f8aae28b31..340682cd0f32 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -2,6 +2,7 @@ ccflags-y := -I $(srctree)/$(src) ccflags-y += -I $(srctree)/$(src)/disp/dpu1 ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi +ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp msm-y := \ adreno/adreno_device.o \ @@ -95,10 +96,23 @@ msm-y := \ msm_gpu_tracepoints.o \ msm_gpummu.o -msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o +msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \ + dp/dp_debug.o msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o +msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ + dp/dp_catalog.o \ + dp/dp_ctrl.o \ + dp/dp_display.o \ + dp/dp_drm.o \ + dp/dp_hpd.o \ + dp/dp_link.o \ + dp/dp_panel.o \ + dp/dp_parser.o \ + dp/dp_power.o \ + dp/dp_audio.o + msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o @@ -119,6 +133,7 @@ msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o +msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o @@ -126,6 +141,7 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o +msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 48fa49f69d6d..7e82c41a85f1 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -10,6 +10,48 @@ extern bool hang_debug; static void a2xx_dump(struct msm_gpu *gpu); static bool a2xx_idle(struct msm_gpu *gpu); +static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (priv->lastctx == submit->queue->ctx) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + OUT_PKT3(ring, CP_INTERRUPT, 1); + OUT_RING(ring, 0x80000000); + + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); +} + static bool a2xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb[0]; @@ -53,7 +95,7 @@ static bool a2xx_me_init(struct msm_gpu *gpu) OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1); - 
gpu->funcs->flush(gpu, ring); + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); return a2xx_idle(gpu); } @@ -421,16 +463,11 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) return aspace; } -/* Register offset defines for A2XX - copy of A3XX */ -static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL), -}; +static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); + return ring->memptrs->rptr; +} static const struct adreno_gpu_funcs funcs = { .base = { @@ -439,8 +476,7 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, .recover = a2xx_recover, - .submit = adreno_submit, - .flush = adreno_flush, + .submit = a2xx_submit, .active_ring = adreno_active_ring, .irq = a2xx_irq, .destroy = a2xx_destroy, @@ -450,6 +486,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a2xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = a2xx_create_address_space, + .get_rptr = a2xx_get_rptr, }, }; @@ -491,8 +528,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) else adreno_gpu->registers = a220_registers; - adreno_gpu->reg_offsets = a2xx_register_offsets; - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index f6471145a7a6..f29c77d9cd42 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -28,6 +28,61 @@ extern bool hang_debug; static void a3xx_dump(struct msm_gpu *gpu); static bool a3xx_idle(struct msm_gpu *gpu); +static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (priv->lastctx == submit->queue->ctx) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* Flush HLSQ lazy updates to make sure there is nothing + * pending for indirect loads after the timestamp has + * passed: + */ + OUT_PKT3(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, HLSQ_FLUSH); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + +#if 0 + /* Dummy set-constant to trigger context rollover 
*/ + OUT_PKT3(ring, CP_SET_CONSTANT, 2); + OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); + OUT_RING(ring, 0x00000000); +#endif + + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); +} + static bool a3xx_me_init(struct msm_gpu *gpu) { struct msm_ringbuffer *ring = gpu->rb[0]; @@ -51,7 +106,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); - gpu->funcs->flush(gpu, ring); + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); return a3xx_idle(gpu); } @@ -423,16 +478,11 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu) return state; } -/* Register offset defines for A3XX */ -static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL), -}; +static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); + return ring->memptrs->rptr; +} static const struct adreno_gpu_funcs funcs = { .base = { @@ -441,8 +491,7 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, .recover = a3xx_recover, - .submit = adreno_submit, - .flush = adreno_flush, + .submit = a3xx_submit, .active_ring = adreno_active_ring, .irq = a3xx_irq, .destroy = a3xx_destroy, @@ -452,6 +501,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a3xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a3xx_get_rptr, }, }; @@ -490,7 +540,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); adreno_gpu->registers = a3xx_registers; - adreno_gpu->reg_offsets = a3xx_register_offsets; ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 954753600625..2b93b33b05e4 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -22,6 +22,54 @@ extern bool hang_debug; static void a4xx_dump(struct msm_gpu *gpu); static bool a4xx_idle(struct msm_gpu *gpu); +static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (priv->lastctx == submit->queue->ctx) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* Flush HLSQ lazy updates to make sure there is nothing + * pending for indirect loads after the timestamp has + * passed: + */ + OUT_PKT3(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, 
HLSQ_FLUSH); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + + adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR); +} + /* * a4xx_enable_hwcg() - Program the clock control registers * @device: The adreno device pointer @@ -129,7 +177,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); - gpu->funcs->flush(gpu, ring); + adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR); return a4xx_idle(gpu); } @@ -515,17 +563,6 @@ static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu) return state; } -/* Register offset defines for A4XX, in order of enum adreno_regs */ -static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR), - REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL), -}; - static void a4xx_dump(struct msm_gpu *gpu) { printk("status: %08x\n", @@ -576,6 +613,12 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) return 0; } +static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR); + return ring->memptrs->rptr; +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -583,8 +626,7 @@ static const struct adreno_gpu_funcs funcs = { .pm_suspend = a4xx_pm_suspend, .pm_resume = a4xx_pm_resume, .recover = a4xx_recover, - .submit = adreno_submit, - .flush = adreno_flush, + .submit = a4xx_submit, .active_ring = adreno_active_ring, .irq = a4xx_irq, .destroy = a4xx_destroy, @@ -594,6 +636,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a4xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a4xx_get_rptr, }, .get_timestamp = a4xx_get_timestamp, }; @@ -631,15 +674,12 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? 
a405_registers : a4xx_registers; - adreno_gpu->reg_offsets = a4xx_register_offsets; /* if needed, allocate gmem: */ - if (adreno_is_a4xx(adreno_gpu)) { - ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, - &a4xx_gpu->ocmem); - if (ret) - goto fail; - } + ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, + &a4xx_gpu->ocmem); + if (ret) + goto fail; if (!gpu->aspace) { /* TODO we think it is possible to configure the GPU to diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index 68eddac7771c..fc2c905b6c9e 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -11,7 +11,7 @@ #include "a5xx_gpu.h" -static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p) +static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -22,11 +22,9 @@ static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x\n", i, gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA)); } - - return 0; } -static int me_print(struct msm_gpu *gpu, struct drm_printer *p) +static void me_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -37,11 +35,9 @@ static int me_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x\n", i, gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA)); } - - return 0; } -static int meq_print(struct msm_gpu *gpu, struct drm_printer *p) +static void meq_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -52,11 +48,9 @@ static int meq_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x\n", i, gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA)); } - - return 0; } -static int roq_print(struct msm_gpu *gpu, struct drm_printer *p) +static void roq_print(struct msm_gpu *gpu, struct drm_printer *p) { int i; @@ -71,8 +65,6 @@ static int roq_print(struct msm_gpu *gpu, struct drm_printer *p) drm_printf(p, " %02x: %08x %08x %08x %08x\n", i, val[0], val[1], val[2], val[3]); } - - return 0; } static int show(struct seq_file *m, void *arg) @@ -81,10 +73,11 @@ static int show(struct seq_file *m, void *arg) struct drm_device *dev = node->minor->dev; struct msm_drm_private *priv = dev->dev_private; struct drm_printer p = drm_seq_file_printer(m); - int (*show)(struct msm_gpu *gpu, struct drm_printer *p) = + void (*show)(struct msm_gpu *gpu, struct drm_printer *p) = node->info_ent->data; - return show(priv->gpu, &p); + show(priv->gpu, &p); + return 0; } #define ENT(n) { .name = #n, .show = show, .data = n ##_print } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 91726da82ed6..d6804a802355 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -18,13 +18,24 @@ static void a5xx_dump(struct msm_gpu *gpu); #define GPU_PAS_ID 13 -static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + bool sync) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); uint32_t wptr; unsigned long flags; + /* + * Most flush operations need to issue a WHERE_AM_I opcode to sync up + * the rptr shadow + */ + if (a5xx_gpu->has_whereami && sync) { + OUT_PKT7(ring, CP_WHERE_AM_I, 2); + OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring))); + OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring))); + } + spin_lock_irqsave(&ring->lock, flags); /* Copy the shadow to the actual register */ @@ -43,8 +54,7 @@ static void a5xx_flush(struct 
msm_gpu *gpu, struct msm_ringbuffer *ring) gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); } -static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; @@ -57,7 +67,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == ctx) + if (priv->lastctx == submit->queue->ctx) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -91,7 +101,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit } } - a5xx_flush(gpu, ring); + a5xx_flush(gpu, ring, true); a5xx_preempt_trigger(gpu); /* we might not necessarily have a cmd from userspace to @@ -103,8 +113,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit msm_gpu_retire(gpu); } -static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); @@ -114,7 +123,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { priv->lastctx = NULL; - a5xx_submit_in_rb(gpu, submit, ctx); + a5xx_submit_in_rb(gpu, submit); return; } @@ -148,7 +157,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == ctx) + if (priv->lastctx == submit->queue->ctx) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -206,7 +215,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* Set bit 0 to trigger an interrupt on preempt complete */ OUT_RING(ring, 0x01); - a5xx_flush(gpu, ring); + /* A WHERE_AM_I packet is not needed after a YIELD */ + a5xx_flush(gpu, ring, false); /* Check to see if we need to start preemption */ a5xx_preempt_trigger(gpu); @@ -365,7 +375,7 @@ static int a5xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000); - gpu->funcs->flush(gpu, ring); + a5xx_flush(gpu, ring, true); return a5xx_idle(gpu, ring) ? 0 : -EINVAL; } @@ -407,11 +417,31 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) OUT_RING(ring, 0x01); OUT_RING(ring, 0x01); - gpu->funcs->flush(gpu, ring); + /* The WHERE_AMI_I packet is not needed after a YIELD is issued */ + a5xx_flush(gpu, ring, false); return a5xx_idle(gpu, ring) ? 0 : -EINVAL; } +static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu, + struct drm_gem_object *obj) +{ + u32 *buf = msm_gem_get_vaddr_active(obj); + + if (IS_ERR(buf)) + return; + + /* + * If the lowest nibble is 0xa that is an indication that this microcode + * has been patched. 
The actual version is in dword [3] but we only care + * about the patchlevel which is the lowest nibble of dword [3] + */ + if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) + a5xx_gpu->has_whereami = true; + + msm_gem_put_vaddr(obj); +} + static int a5xx_ucode_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -447,6 +477,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) } msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw"); + a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo); } gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, @@ -506,6 +537,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) static int a5xx_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); int ret; gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); @@ -714,9 +746,36 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI, gpu->rb[0]->iova); + /* + * If the microcode supports the WHERE_AM_I opcode then we can use that + * in lieu of the RPTR shadow and enable preemption. Otherwise, we + * can't safely use the RPTR shadow or preemption. In either case, the + * RPTR shadow should be disabled in hardware. + */ gpu_write(gpu, REG_A5XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + /* Disable preemption if WHERE_AM_I isn't available */ + if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) { + a5xx_preempt_fini(gpu); + gpu->nr_rings = 1; + } else { + /* Create a privileged buffer for the RPTR shadow */ + if (!a5xx_gpu->shadow_bo) { + a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, + sizeof(u32) * gpu->nr_rings, + MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, + gpu->aspace, &a5xx_gpu->shadow_bo, + &a5xx_gpu->shadow_iova); + + if (IS_ERR(a5xx_gpu->shadow)) + return PTR_ERR(a5xx_gpu->shadow); + } + + gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, + REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0])); + } + a5xx_preempt_hw_init(gpu); /* Disable the interrupts through the initial bringup stage */ @@ -740,7 +799,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1); OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT)); - gpu->funcs->flush(gpu, gpu->rb[0]); + a5xx_flush(gpu, gpu->rb[0], true); if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; } @@ -758,7 +817,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); OUT_RING(gpu->rb[0], 0x00000000); - gpu->funcs->flush(gpu, gpu->rb[0]); + a5xx_flush(gpu, gpu->rb[0], true); if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; } else if (ret == -ENODEV) { @@ -825,6 +884,11 @@ static void a5xx_destroy(struct msm_gpu *gpu) drm_gem_object_put(a5xx_gpu->gpmu_bo); } + if (a5xx_gpu->shadow_bo) { + msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->shadow_bo); + } + adreno_gpu_cleanup(adreno_gpu); kfree(a5xx_gpu); } @@ -1057,17 +1121,6 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) return IRQ_HANDLED; } -static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI, - REG_A5XX_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, 
REG_A5XX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL), -}; - static const u32 a5xx_registers[] = { 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, @@ -1432,6 +1485,17 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } +static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (a5xx_gpu->has_whereami) + return a5xx_gpu->shadow[ring->id]; + + return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR); +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -1440,7 +1504,6 @@ static const struct adreno_gpu_funcs funcs = { .pm_resume = a5xx_pm_resume, .recover = a5xx_recover, .submit = a5xx_submit, - .flush = a5xx_flush, .active_ring = a5xx_active_ring, .irq = a5xx_irq, .destroy = a5xx_destroy, @@ -1454,6 +1517,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_get = a5xx_gpu_state_get, .gpu_state_put = a5xx_gpu_state_put, .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a5xx_get_rptr, }, .get_timestamp = a5xx_get_timestamp, }; @@ -1512,14 +1576,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) gpu = &adreno_gpu->base; adreno_gpu->registers = a5xx_registers; - adreno_gpu->reg_offsets = a5xx_register_offsets; a5xx_gpu->lm_leakage = 0x4E001A; check_speed_bin(&pdev->dev); - /* Restricting nr_rings to 1 to temporarily disable preemption */ - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 1e5b1a15a70f..c7187bcc5e90 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -37,6 +37,13 @@ struct a5xx_gpu { atomic_t preempt_state; struct timer_list preempt_timer; + + struct drm_gem_object *shadow_bo; + uint64_t shadow_iova; + uint32_t *shadow; + + /* True if the microcode supports the WHERE_AM_I opcode */ + bool has_whereami; }; #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) @@ -141,6 +148,9 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, return -ETIMEDOUT; } +#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \ + ((ring)->id * sizeof(uint32_t))) + bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); @@ -150,6 +160,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu); void a5xx_preempt_irq(struct msm_gpu *gpu); void a5xx_preempt_fini(struct msm_gpu *gpu); +void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync); + /* Return true if we are in a preempt state */ static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu) { diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 321a8061fd32..f176a6f3eff6 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -240,7 +240,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1); - gpu->funcs->flush(gpu, ring); + a5xx_flush(gpu, ring, true); if (!a5xx_idle(gpu, ring)) { DRM_ERROR("%s: Unable to load GPMU firmware. 
GPMU will not be active\n", diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 9f3fe177b00e..7e04509c4e1f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -259,8 +259,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr->magic = A5XX_PREEMPT_RECORD_MAGIC; ptr->info = 0; ptr->data = 0; - ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; - ptr->rptr_addr = rbmemptr(ring, rptr); + ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE; + + ptr->rptr_addr = shadowptr(a5xx_gpu, ring); ptr->counter = counters_iova; return 0; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index e1c7bcd1b1eb..491fee410daf 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -11,6 +11,7 @@ #include "a6xx_gpu.h" #include "a6xx_gmu.xml.h" #include "msm_gem.h" +#include "msm_gpu_trace.h" #include "msm_mmu.h" static void a6xx_gmu_fault(struct a6xx_gmu *gmu) @@ -124,6 +125,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) gmu->current_perf_index = perf_index; gmu->freq = gmu->gpu_freqs[perf_index]; + trace_msm_gmu_freq_change(gmu->freq, perf_index); + /* * This can get called from devfreq while the hardware is idle. Don't * bring up the power if it isn't already active diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 66a95e22b7b3..948f3656c20c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -51,9 +51,20 @@ bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); uint32_t wptr; unsigned long flags; + /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */ + if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) { + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + OUT_PKT7(ring, CP_WHERE_AM_I, 2); + OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring))); + OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring))); + } + spin_lock_irqsave(&ring->lock, flags); /* Copy the shadow to the actual register */ @@ -81,8 +92,50 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, OUT_RING(ring, upper_32_bits(iova)); } -static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, + struct msm_ringbuffer *ring, struct msm_file_private *ctx) +{ + phys_addr_t ttbr; + u32 asid; + u64 memptr = rbmemptr(ring, ttbr0); + + if (ctx == a6xx_gpu->cur_ctx) + return; + + if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) + return; + + /* Execute the table update */ + OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4); + OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr))); + + OUT_RING(ring, + CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) | + CP_SMMU_TABLE_UPDATE_1_ASID(asid)); + OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0)); + OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0)); + + /* + * Write the new TTBR0 to the memstore. This is good for debugging. 
+ */ + OUT_PKT7(ring, CP_MEM_WRITE, 4); + OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr))); + OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr))); + OUT_RING(ring, lower_32_bits(ttbr)); + OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr)); + + /* + * And finally, trigger a uche flush to be sure there isn't anything + * lingering in that part of the GPU + */ + + OUT_PKT7(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, 0x31); + + a6xx_gpu->cur_ctx = ctx; +} + +static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; struct msm_drm_private *priv = gpu->dev->dev_private; @@ -91,6 +144,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_ringbuffer *ring = submit->ring; unsigned int i; + a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO, rbmemptr_stats(ring, index, cpcycles_start)); @@ -115,7 +170,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == ctx) + if (priv->lastctx == submit->queue->ctx) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -464,6 +519,30 @@ static int a6xx_cp_init(struct msm_gpu *gpu) return a6xx_idle(gpu, ring) ? 0 : -EINVAL; } +static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, + struct drm_gem_object *obj) +{ + u32 *buf = msm_gem_get_vaddr_active(obj); + + if (IS_ERR(buf)) + return; + + /* + * If the lowest nibble is 0xa that is an indication that this microcode + * has been patched. The actual version is in dword [3] but we only care + * about the patchlevel which is the lowest nibble of dword [3] + * + * Otherwise check that the firmware is greater than or equal to 1.90 + * which was the first version that had this fix built in + */ + if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) + a6xx_gpu->has_whereami = true; + else if ((buf[0] & 0xfff) > 0x190) + a6xx_gpu->has_whereami = true; + + msm_gem_put_vaddr(obj); +} + static int a6xx_ucode_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -484,6 +563,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu) } msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); + a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo); } gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO, @@ -699,12 +779,43 @@ static int a6xx_hw_init(struct msm_gpu *gpu) gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, gpu->rb[0]->iova); - gpu_write(gpu, REG_A6XX_CP_RB_CNTL, - MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + /* Targets that support extended APRIV can use the RPTR shadow from + * hardware but all the other ones need to disable the feature. 
Targets + * that support the WHERE_AM_I opcode can use that instead + */ + if (adreno_gpu->base.hw_apriv) + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); + else + gpu_write(gpu, REG_A6XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* + * Expanded APRIV and targets that support WHERE_AM_I both need a + * privileged buffer to store the RPTR shadow + */ + + if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) { + if (!a6xx_gpu->shadow_bo) { + a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev, + sizeof(u32) * gpu->nr_rings, + MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, + gpu->aspace, &a6xx_gpu->shadow_bo, + &a6xx_gpu->shadow_iova); + + if (IS_ERR(a6xx_gpu->shadow)) + return PTR_ERR(a6xx_gpu->shadow); + } + + gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO, + REG_A6XX_CP_RB_RPTR_ADDR_HI, + shadowptr(a6xx_gpu, gpu->rb[0])); + } /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; + a6xx_gpu->cur_ctx = NULL; + /* Enable the SQE_to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); @@ -911,18 +1022,6 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu) return IRQ_HANDLED; } -static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, - REG_A6XX_CP_RB_RPTR_ADDR_LO), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI, - REG_A6XX_CP_RB_RPTR_ADDR_HI), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR), - REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL), -}; - static int a6xx_pm_resume(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -931,6 +1030,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) gpu->needs_hw_init = true; + trace_msm_gpu_resume(0); + ret = a6xx_gmu_resume(a6xx_gpu); if (ret) return ret; @@ -945,6 +1046,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + trace_msm_gpu_suspend(0); + devfreq_suspend_device(gpu->devfreq.devfreq); return a6xx_gmu_stop(a6xx_gpu); @@ -983,6 +1086,11 @@ static void a6xx_destroy(struct msm_gpu *gpu) drm_gem_object_put(a6xx_gpu->sqe_bo); } + if (a6xx_gpu->shadow_bo) { + msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); + drm_gem_object_put(a6xx_gpu->shadow_bo); + } + a6xx_gmu_remove(a6xx_gpu); adreno_gpu_cleanup(adreno_gpu); @@ -1017,6 +1125,31 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } +static struct msm_gem_address_space * +a6xx_create_private_address_space(struct msm_gpu *gpu) +{ + struct msm_mmu *mmu; + + mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); + + if (IS_ERR(mmu)) + return ERR_CAST(mmu); + + return msm_gem_address_space_create(mmu, + "gpu", 0x100000000ULL, 0x1ffffffffULL); +} + +static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) + return a6xx_gpu->shadow[ring->id]; + + return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -1025,7 +1158,6 @@ static const struct adreno_gpu_funcs funcs = { .pm_resume = a6xx_pm_resume, 
.recover = a6xx_recover, .submit = a6xx_submit, - .flush = a6xx_flush, .active_ring = a6xx_active_ring, .irq = a6xx_irq, .destroy = a6xx_destroy, @@ -1040,6 +1172,8 @@ static const struct adreno_gpu_funcs funcs = { .gpu_state_put = a6xx_gpu_state_put, #endif .create_address_space = adreno_iommu_create_address_space, + .create_private_address_space = a6xx_create_private_address_space, + .get_rptr = a6xx_get_rptr, }, .get_timestamp = a6xx_get_timestamp, }; @@ -1048,6 +1182,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; + const struct adreno_info *info; struct device_node *node; struct a6xx_gpu *a6xx_gpu; struct adreno_gpu *adreno_gpu; @@ -1062,9 +1198,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) gpu = &adreno_gpu->base; adreno_gpu->registers = NULL; - adreno_gpu->reg_offsets = a6xx_register_offsets; - if (adreno_is_a650(adreno_gpu)) + /* + * We need to know the platform type before calling into adreno_gpu_init + * so that the hw_apriv flag can be correctly set. Snoop into the info + * and grab the revision number + */ + info = adreno_info(config->rev); + + if (info && info->revn == 650) adreno_gpu->base.hw_apriv = true; ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 03ba60d5b07f..3eeebf6a754b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -19,8 +19,15 @@ struct a6xx_gpu { uint64_t sqe_iova; struct msm_ringbuffer *cur_ring; + struct msm_file_private *cur_ctx; struct a6xx_gmu gmu; + + struct drm_gem_object *shadow_bo; + uint64_t shadow_iova; + uint32_t *shadow; + + bool has_whereami; }; #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) @@ -50,6 +57,9 @@ static inline bool a6xx_has_gbif(struct adreno_gpu *gpu) return true; } +#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \ + ((_ring)->id * sizeof(uint32_t))) + int a6xx_gmu_resume(struct a6xx_gpu *gpu); int a6xx_gmu_stop(struct a6xx_gpu *gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index b12f5b4a1bea..e9ede19193b0 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu, int i; a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count, - sizeof(a6xx_state->indexed_regs)); + sizeof(*a6xx_state->indexed_regs)); if (!a6xx_state->indexed_regs) return; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 9eeb46bf2a5d..58e03b20e1c7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -282,7 +282,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) int ret; if (pdev) - gpu = platform_get_drvdata(pdev); + gpu = dev_to_gpu(&pdev->dev); if (!gpu) { dev_err_once(dev->dev, "no GPU device was found\n"); @@ -417,15 +417,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(gpu); } - dev_set_drvdata(dev, gpu); - return 0; } static void adreno_unbind(struct device *dev, struct device *master, void *data) { - struct msm_gpu *gpu = dev_get_drvdata(dev); + struct msm_gpu *gpu = dev_to_gpu(dev); pm_runtime_force_suspend(dev); 
gpu->funcs->destroy(gpu); @@ -490,16 +488,14 @@ static const struct of_device_id dt_match[] = { #ifdef CONFIG_PM static int adreno_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct msm_gpu *gpu = platform_get_drvdata(pdev); + struct msm_gpu *gpu = dev_to_gpu(dev); return gpu->funcs->pm_resume(gpu); } static int adreno_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct msm_gpu *gpu = platform_get_drvdata(pdev); + struct msm_gpu *gpu = dev_to_gpu(dev); return gpu->funcs->pm_suspend(gpu); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 862dd35b27d3..458b5b26d3c2 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -189,12 +189,27 @@ struct msm_gem_address_space * adreno_iommu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) { - struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type); - struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu); + struct iommu_domain *iommu; + struct msm_mmu *mmu; struct msm_gem_address_space *aspace; + u64 start, size; - aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, - 0xffffffff - SZ_16M); + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + mmu = msm_iommu_new(&pdev->dev, iommu); + + /* + * Use the aperture start or SZ_16M, whichever is greater. This will + * ensure that we align with the allocated pagetable range while still + * allowing room in the lower 32 bits for GMEM and whatnot + */ + start = max_t(u64, SZ_16M, iommu->geometry.aperture_start); + size = iommu->geometry.aperture_end - start + 1; + + aspace = msm_gem_address_space_create(mmu, "gpu", + start & GENMASK_ULL(48, 0), size); if (IS_ERR(aspace) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); @@ -407,8 +422,9 @@ int adreno_hw_init(struct msm_gpu *gpu) static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, struct msm_ringbuffer *ring) { - return ring->memptrs->rptr = adreno_gpu_read( - adreno_gpu, REG_ADRENO_CP_RB_RPTR); + struct msm_gpu *gpu = &adreno_gpu->base; + + return gpu->funcs->get_rptr(gpu, ring); } struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) @@ -434,81 +450,8 @@ void adreno_recover(struct msm_gpu *gpu) } } -void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct msm_drm_private *priv = gpu->dev->dev_private; - struct msm_ringbuffer *ring = submit->ring; - unsigned i; - - for (i = 0; i < submit->nr_cmds; i++) { - switch (submit->cmd[i].type) { - case MSM_SUBMIT_CMD_IB_TARGET_BUF: - /* ignore IB-targets */ - break; - case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - /* ignore if there has not been a ctx switch: */ - if (priv->lastctx == ctx) - break; - fallthrough; - case MSM_SUBMIT_CMD_BUF: - OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ? 
- CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); - OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, submit->cmd[i].size); - OUT_PKT2(ring); - break; - } - } - - OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); - OUT_RING(ring, submit->seqno); - - if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) { - /* Flush HLSQ lazy updates to make sure there is nothing - * pending for indirect loads after the timestamp has - * passed: - */ - OUT_PKT3(ring, CP_EVENT_WRITE, 1); - OUT_RING(ring, HLSQ_FLUSH); - } - - /* wait for idle before cache flush/interrupt */ - OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); - OUT_RING(ring, 0x00000000); - - if (!adreno_is_a2xx(adreno_gpu)) { - /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ - OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); - OUT_RING(ring, rbmemptr(ring, fence)); - OUT_RING(ring, submit->seqno); - } else { - /* BIT(31) means something else on a2xx */ - OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS); - OUT_RING(ring, rbmemptr(ring, fence)); - OUT_RING(ring, submit->seqno); - OUT_PKT3(ring, CP_INTERRUPT, 1); - OUT_RING(ring, 0x80000000); - } - -#if 0 - if (adreno_is_a3xx(adreno_gpu)) { - /* Dummy set-constant to trigger context rollover */ - OUT_PKT3(ring, CP_SET_CONSTANT, 2); - OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); - OUT_RING(ring, 0x00000000); - } -#endif - - gpu->funcs->flush(gpu, ring); -} - -void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t wptr; /* Copy the shadow to the actual register */ @@ -524,7 +467,7 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* ensure writes to ringbuffer have hit system memory: */ mb(); - adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr); + gpu_write(gpu, reg, wptr); } bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index e55abae365b5..c3775f79525a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -17,29 +17,8 @@ #include "adreno_common.xml.h" #include "adreno_pm4.xml.h" -#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1 -#define REG_SKIP ~0 -#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP - extern bool snapshot_debugbus; -/** - * adreno_regs: List of registers that are used in across all - * 3D devices. 
Each device type has different offset value for the same - * register, so an array of register offsets are declared for every device - * and are indexed by the enumeration values defined in this enum - */ -enum adreno_regs { - REG_ADRENO_CP_RB_BASE, - REG_ADRENO_CP_RB_BASE_HI, - REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, - REG_ADRENO_CP_RB_RPTR, - REG_ADRENO_CP_RB_WPTR, - REG_ADRENO_CP_RB_CNTL, - REG_ADRENO_REGISTER_MAX, -}; - enum { ADRENO_FW_PM4 = 0, ADRENO_FW_SQE = 0, /* a6xx */ @@ -176,11 +155,6 @@ static inline bool adreno_is_a225(struct adreno_gpu *gpu) return gpu->revn == 225; } -static inline bool adreno_is_a3xx(struct adreno_gpu *gpu) -{ - return (gpu->revn >= 300) && (gpu->revn < 400); -} - static inline bool adreno_is_a305(struct adreno_gpu *gpu) { return gpu->revn == 305; @@ -207,11 +181,6 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu) return adreno_is_a330(gpu) && (gpu->rev.patchid > 0); } -static inline bool adreno_is_a4xx(struct adreno_gpu *gpu) -{ - return (gpu->revn >= 400) && (gpu->revn < 500); -} - static inline int adreno_is_a405(struct adreno_gpu *gpu) { return gpu->revn == 405; @@ -269,9 +238,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, const struct firmware *fw, u64 *iova); int adreno_hw_init(struct msm_gpu *gpu); void adreno_recover(struct msm_gpu *gpu); -void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx); -void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg); bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, @@ -365,59 +332,12 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); } -/* - * adreno_reg_check() - Checks the validity of a register enum - * @gpu: Pointer to struct adreno_gpu - * @offset_name: The register enum that is checked - */ -static inline bool adreno_reg_check(struct adreno_gpu *gpu, - enum adreno_regs offset_name) -{ - BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]); - - /* - * REG_SKIP is a special value that tell us that the register in - * question isn't implemented on target but don't trigger a BUG(). 
This - * is used to cleanly implement adreno_gpu_write64() and - * adreno_gpu_read64() in a generic fashion - */ - if (gpu->reg_offsets[offset_name] == REG_SKIP) - return false; - - return true; -} - -static inline u32 adreno_gpu_read(struct adreno_gpu *gpu, - enum adreno_regs offset_name) -{ - u32 reg = gpu->reg_offsets[offset_name]; - u32 val = 0; - if(adreno_reg_check(gpu,offset_name)) - val = gpu_read(&gpu->base, reg - 1); - return val; -} - -static inline void adreno_gpu_write(struct adreno_gpu *gpu, - enum adreno_regs offset_name, u32 data) -{ - u32 reg = gpu->reg_offsets[offset_name]; - if(adreno_reg_check(gpu, offset_name)) - gpu_write(&gpu->base, reg - 1, data); -} - struct msm_gpu *a2xx_gpu_init(struct drm_device *dev); struct msm_gpu *a3xx_gpu_init(struct drm_device *dev); struct msm_gpu *a4xx_gpu_init(struct drm_device *dev); struct msm_gpu *a5xx_gpu_init(struct drm_device *dev); struct msm_gpu *a6xx_gpu_init(struct drm_device *dev); -static inline void adreno_gpu_write64(struct adreno_gpu *gpu, - enum adreno_regs lo, enum adreno_regs hi, u64 data) -{ - adreno_gpu_write(gpu, lo, lower_32_bits(data)); - adreno_gpu_write(gpu, hi, upper_32_bits(data)); -} - static inline uint32_t get_wptr(struct msm_ringbuffer *ring) { return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2); diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 3931eecadaff..59bb8c1ffce6 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -298,6 +298,7 @@ enum adreno_pm4_type3_packets { CP_SET_BIN_DATA5_OFFSET = 46, CP_SET_CTXSWITCH_IB = 85, CP_REG_WRITE = 109, + CP_WHERE_AM_I = 98, }; enum adreno_state_block { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c index f1bc6a1af7a7..84ea09d9692f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c @@ -288,19 +288,6 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) } #ifdef CONFIG_DEBUG_FS -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ -static int __prefix ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __prefix ## _show, inode->i_private); \ -} \ -static const struct file_operations __prefix ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __prefix ## _open, \ - .release = single_release, \ - .read = seq_read, \ - .llseek = seq_lseek, \ -} - static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) { struct dpu_irq *irq_obj = s->private; @@ -328,7 +315,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) return 0; } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, struct dentry *parent) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index b36919d95362..393858ef8a83 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -30,6 +30,74 @@ enum dpu_perf_mode { DPU_PERF_MODE_MAX }; +/** + * @_dpu_core_perf_calc_bw() - to calculate BW per crtc + * @kms - pointer to the dpu_kms + * @crtc - pointer to a crtc + * Return: returns aggregated BW for all planes in crtc. 
+ */ +static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms, + struct drm_crtc *crtc) +{ + struct drm_plane *plane; + struct dpu_plane_state *pstate; + u64 crtc_plane_bw = 0; + u32 bw_factor; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + pstate = to_dpu_plane_state(plane->state); + if (!pstate) + continue; + + crtc_plane_bw += pstate->plane_fetch_bw; + } + + bw_factor = kms->catalog->perf.bw_inefficiency_factor; + if (bw_factor) { + crtc_plane_bw *= bw_factor; + do_div(crtc_plane_bw, 100); + } + + return crtc_plane_bw; +} + +/** + * _dpu_core_perf_calc_clk() - to calculate clock per crtc + * @kms - pointer to the dpu_kms + * @crtc - pointer to a crtc + * @state - pointer to a crtc state + * Return: returns max clk for all planes in crtc. + */ +static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms, + struct drm_crtc *crtc, struct drm_crtc_state *state) +{ + struct drm_plane *plane; + struct dpu_plane_state *pstate; + struct drm_display_mode *mode; + u64 crtc_clk; + u32 clk_factor; + + mode = &state->adjusted_mode; + + crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); + + drm_atomic_crtc_for_each_plane(plane, crtc) { + pstate = to_dpu_plane_state(plane->state); + if (!pstate) + continue; + + crtc_clk = max(pstate->plane_clk, crtc_clk); + } + + clk_factor = kms->catalog->perf.clk_inefficiency_factor; + if (clk_factor) { + crtc_clk *= clk_factor; + do_div(crtc_clk, 100); + } + + return crtc_clk; +} + static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv; @@ -52,12 +120,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms, dpu_cstate = to_dpu_crtc_state(state); memset(perf, 0, sizeof(struct dpu_core_perf_params)); - if (!dpu_cstate->bw_control) { - perf->bw_ctl = kms->catalog->perf.max_bw_high * - 1000ULL; - perf->max_per_pipe_ib = perf->bw_ctl; - perf->core_clk_rate = kms->perf.max_core_clk_rate; - } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) { + if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) { perf->bw_ctl = 0; perf->max_per_pipe_ib = 0; perf->core_clk_rate = 0; @@ -65,6 +128,10 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms, perf->bw_ctl = kms->perf.fix_core_ab_vote; perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote; perf->core_clk_rate = kms->perf.fix_core_clk_rate; + } else { + perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc); + perf->max_per_pipe_ib = kms->catalog->perf.min_dram_ib; + perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state); } DPU_DEBUG( @@ -116,11 +183,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n", tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl, tmp_cstate->bw_control); - /* - * For bw check only use the bw if the - * atomic property has been already set - */ - if (tmp_cstate->bw_control) + bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl; } @@ -132,9 +195,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, DPU_DEBUG("final threshold bw limit = %d\n", threshold); - if (!dpu_cstate->bw_control) { - DPU_DEBUG("bypass bandwidth check\n"); - } else if (!threshold) { + if (!threshold) { DPU_ERROR("no bandwidth limits specified\n"); return -E2BIG; } else if (bw > threshold) { @@ -155,7 +216,11 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, = dpu_crtc_get_client_type(crtc); struct drm_crtc *tmp_crtc; struct dpu_crtc_state *dpu_cstate; - int ret = 0; + int i, ret = 0; + u64 avg_bw; + + if (!kms->num_paths) + return -EINVAL; drm_for_each_crtc(tmp_crtc, crtc->dev) { if (tmp_crtc->enabled && 
@@ -166,10 +231,20 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, perf.max_per_pipe_ib = max(perf.max_per_pipe_ib, dpu_cstate->new_perf.max_per_pipe_ib); - DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id, - dpu_cstate->new_perf.bw_ctl); + perf.bw_ctl += dpu_cstate->new_perf.bw_ctl; + + DPU_DEBUG("crtc=%d bw=%llu paths:%d\n", + tmp_crtc->base.id, + dpu_cstate->new_perf.bw_ctl, kms->num_paths); } } + + avg_bw = perf.bw_ctl; + do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/ + + for (i = 0; i < kms->num_paths; i++) + icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib); + return ret; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index c2729f71e2fa..e55be2922c2f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -11,6 +11,7 @@ #include <linux/ktime.h> #include <linux/bits.h> +#include <drm/drm_atomic.h> #include <drm/drm_crtc.h> #include <drm/drm_flip_work.h> #include <drm/drm_mode.h> @@ -265,11 +266,6 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc) { struct drm_encoder *encoder; - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return INTF_MODE_NONE; - } - /* * TODO: This function is called from dpu debugfs and as part of atomic * check. When called from debugfs, the crtc->mutex must be held to @@ -297,7 +293,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) dpu_crtc->vblank_cb_time = ktime_get(); else dpu_crtc->vblank_cb_count++; - _dpu_crtc_complete_flip(crtc); drm_crtc_handle_vblank(crtc); trace_dpu_crtc_vblank_cb(DRMID(crtc)); } @@ -402,6 +397,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) void dpu_crtc_complete_commit(struct drm_crtc *crtc) { trace_dpu_crtc_complete_commit(DRMID(crtc)); + _dpu_crtc_complete_flip(crtc); } static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, @@ -421,8 +417,6 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); } - - drm_mode_debug_printmodeline(adj_mode); } static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, @@ -457,7 +451,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) struct dpu_crtc_mixer *mixer = cstate->mixers; struct dpu_hw_pcc_cfg cfg; struct dpu_hw_ctl *ctl; - struct dpu_hw_mixer *lm; struct dpu_hw_dspp *dspp; int i; @@ -467,7 +460,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) for (i = 0; i < cstate->num_mixers; i++) { ctl = mixer[i].lm_ctl; - lm = mixer[i].hw_lm; dspp = mixer[i].hw_dspp; if (!dspp || !dspp->ops.setup_pcc) @@ -494,18 +486,10 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) } static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { - struct dpu_crtc *dpu_crtc; - struct dpu_crtc_state *cstate; + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); struct drm_encoder *encoder; - struct drm_device *dev; - unsigned long flags; - - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return; - } if (!crtc->state->enable) { DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n", @@ -515,21 +499,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, DPU_DEBUG("crtc%d\n", crtc->base.id); - dpu_crtc = to_dpu_crtc(crtc); - cstate = to_dpu_crtc_state(crtc->state); - dev = crtc->dev; - _dpu_crtc_setup_lm_bounds(crtc, crtc->state); - if (dpu_crtc->event) { - WARN_ON(dpu_crtc->event); - } else { - spin_lock_irqsave(&dev->event_lock, flags); - dpu_crtc->event 
= crtc->state->event; - crtc->state->event = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - } - /* encoder will trigger pending mask now */ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) dpu_encoder_trigger_kickoff_pending(encoder); @@ -556,7 +527,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, } static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct dpu_crtc *dpu_crtc; struct drm_device *dev; @@ -583,14 +554,11 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, return; } - if (dpu_crtc->event) { - DPU_DEBUG("already received dpu_crtc->event\n"); - } else { - spin_lock_irqsave(&dev->event_lock, flags); - dpu_crtc->event = crtc->state->event; - crtc->state->event = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - } + WARN_ON(dpu_crtc->event); + spin_lock_irqsave(&dev->event_lock, flags); + dpu_crtc->event = crtc->state->event; + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); /* * If no mixers has been allocated in dpu_crtc_atomic_check(), @@ -635,14 +603,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, static void dpu_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct dpu_crtc_state *cstate; - - if (!crtc || !state) { - DPU_ERROR("invalid argument(s)\n"); - return; - } - - cstate = to_dpu_crtc_state(state); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); DPU_DEBUG("crtc%d\n", crtc->base.id); @@ -731,14 +692,8 @@ static void dpu_crtc_reset(struct drm_crtc *crtc) */ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) { - struct dpu_crtc_state *cstate, *old_cstate; + struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state); - if (!crtc || !crtc->state) { - DPU_ERROR("invalid argument(s)\n"); - return NULL; - } - - old_cstate = to_dpu_crtc_state(crtc->state); cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL); if (!cstate) { DPU_ERROR("failed to allocate state\n"); @@ -752,21 +707,16 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) } static void dpu_crtc_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { - struct dpu_crtc *dpu_crtc; - struct dpu_crtc_state *cstate; + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); struct drm_encoder *encoder; unsigned long flags; bool release_bandwidth = false; - if (!crtc || !crtc->state) { - DPU_ERROR("invalid crtc\n"); - return; - } - dpu_crtc = to_dpu_crtc(crtc); - cstate = to_dpu_crtc_state(crtc->state); - DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); /* Disable/save vblank irq handling */ @@ -823,21 +773,15 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, } static void dpu_crtc_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { - struct dpu_crtc *dpu_crtc; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); struct drm_encoder *encoder; bool request_bandwidth = false; - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return; - } - pm_runtime_get_sync(crtc->dev->dev); DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); - dpu_crtc = to_dpu_crtc(crtc); drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { /* in video mode, we hold an extra bandwidth reference @@ 
-871,17 +815,19 @@ struct plane_state { }; static int dpu_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct dpu_crtc *dpu_crtc; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state); struct plane_state *pstates; - struct dpu_crtc_state *cstate; const struct drm_plane_state *pstate; struct drm_plane *plane; struct drm_display_mode *mode; - int cnt = 0, rc = 0, mixer_width, i, z_pos; + int cnt = 0, rc = 0, mixer_width = 0, i, z_pos; struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2]; int multirect_count = 0; @@ -889,40 +835,35 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, int left_zpos_cnt = 0, right_zpos_cnt = 0; struct drm_rect crtc_rect = { 0 }; - if (!crtc) { - DPU_ERROR("invalid crtc\n"); - return -EINVAL; - } - pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL); - dpu_crtc = to_dpu_crtc(crtc); - cstate = to_dpu_crtc_state(state); - - if (!state->enable || !state->active) { + if (!crtc_state->enable || !crtc_state->active) { DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n", - crtc->base.id, state->enable, state->active); + crtc->base.id, crtc_state->enable, + crtc_state->active); goto end; } - mode = &state->adjusted_mode; + mode = &crtc_state->adjusted_mode; DPU_DEBUG("%s: check", dpu_crtc->name); /* force a full mode set if active state changed */ - if (state->active_changed) - state->mode_changed = true; + if (crtc_state->active_changed) + crtc_state->mode_changed = true; memset(pipe_staged, 0, sizeof(pipe_staged)); - mixer_width = mode->hdisplay / cstate->num_mixers; + if (cstate->num_mixers) { + mixer_width = mode->hdisplay / cstate->num_mixers; - _dpu_crtc_setup_lm_bounds(crtc, state); + _dpu_crtc_setup_lm_bounds(crtc, crtc_state); + } crtc_rect.x2 = mode->hdisplay; crtc_rect.y2 = mode->vdisplay; /* get plane state for all drm planes associated with crtc state */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { struct drm_rect dst, clip = crtc_rect; if (IS_ERR_OR_NULL(pstate)) { @@ -1028,7 +969,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); - rc = dpu_core_perf_crtc_check(crtc, state); + rc = dpu_core_perf_crtc_check(crtc, crtc_state); if (rc) { DPU_ERROR("crtc%d failed performance check %d\n", crtc->base.id, rc); @@ -1242,23 +1183,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data) return 0; } -static int _dpu_debugfs_status_open(struct inode *inode, struct file *file) -{ - return single_open(file, _dpu_debugfs_status_show, inode->i_private); -} - -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ -static int __prefix ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __prefix ## _show, inode->i_private); \ -} \ -static const struct file_operations __prefix ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __prefix ## _open, \ - .release = single_release, \ - .read = seq_read, \ - .llseek = seq_lseek, \ -} +DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status); static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) { @@ -1275,25 +1200,18 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) return 0; } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state); 
+DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state); static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); - static const struct file_operations debugfs_status_fops = { - .open = _dpu_debugfs_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - }; - dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name, crtc->dev->primary->debugfs_root); debugfs_create_file("status", 0400, dpu_crtc->debugfs_root, - dpu_crtc, &debugfs_status_fops); + dpu_crtc, &_dpu_debugfs_status_fops); debugfs_create_file("state", 0600, dpu_crtc->debugfs_root, &dpu_crtc->base, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index bd6def436c65..f7f5c258b553 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1001,6 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, trace_dpu_enc_mode_set(DRMID(drm_enc)); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) + msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode); + list_for_each_entry(conn_iter, connector_list, head) if (conn_iter->encoder == drm_enc) conn = conn_iter; @@ -1109,6 +1112,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) return; } + + if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort && + dpu_enc->cur_master->hw_mdptop && + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( + dpu_enc->cur_master->hw_mdptop); + _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info); if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI && @@ -1146,6 +1156,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = NULL; int ret = 0; + struct msm_drm_private *priv; struct drm_display_mode *cur_mode = NULL; if (!drm_enc) { @@ -1156,6 +1167,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) mutex_lock(&dpu_enc->enc_lock); cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; + priv = drm_enc->dev->dev_private; trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, cur_mode->vdisplay); @@ -1176,6 +1188,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) _dpu_encoder_virt_enable_helper(drm_enc); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { + ret = msm_dp_display_enable(priv->dp, + drm_enc); + if (ret) { + DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n", + ret); + goto out; + } + } dpu_enc->enabled = true; out: @@ -1211,6 +1232,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) /* wait for idle */ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { + if (msm_dp_display_pre_disable(priv->dp, drm_enc)) + DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n"); + } + dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); for (i = 0; i < dpu_enc->num_phys_encs; i++) { @@ -1220,6 +1246,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) phys->ops.disable(phys); } + /* after phys waits for frame-done, should be no more frames pending */ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); @@ -1234,6 +1261,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); + if 
(drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { + if (msm_dp_display_disable(priv->dp, drm_enc)) + DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n"); + } + mutex_unlock(&dpu_enc->enc_lock); } @@ -1880,24 +1912,13 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data) return 0; } -static int _dpu_encoder_debugfs_status_open(struct inode *inode, - struct file *file) -{ - return single_open(file, _dpu_encoder_status_show, inode->i_private); -} +DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); int i; - static const struct file_operations debugfs_status_fops = { - .open = _dpu_encoder_debugfs_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - }; - char name[DPU_NAME_SIZE]; if (!drm_enc->dev) { @@ -1913,7 +1934,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) /* don't error check these */ debugfs_create_file("status", 0600, - dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops); + dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops); for (i = 0; i < dpu_enc->num_phys_encs; i++) if (dpu_enc->phys_encs[i]->ops.late_register) @@ -2008,7 +2029,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, { int ret = 0; int i = 0; - enum dpu_intf_type intf_type; + enum dpu_intf_type intf_type = INTF_NONE; struct dpu_enc_phys_init_params phys_params; if (!dpu_enc) { @@ -2030,9 +2051,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, case DRM_MODE_ENCODER_DSI: intf_type = INTF_DSI; break; - default: - DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n"); - return -EINVAL; + case DRM_MODE_ENCODER_TMDS: + intf_type = INTF_DP; + break; } WARN_ON(disp_info->num_of_h_tiles < 1); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index b5a49050d131..805e059b50b7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -100,6 +100,14 @@ static void drm_mode_to_intf_timing_params( * display_v_end -= mode->hsync_start - mode->hdisplay; * } */ + /* for DP/EDP, Shift timings to align it to bottom right */ + if ((phys_enc->hw_intf->cap->type == INTF_DP) || + (phys_enc->hw_intf->cap->type == INTF_EDP)) { + timing->h_back_porch += timing->h_front_porch; + timing->h_front_porch = 0; + timing->v_back_porch += timing->v_front_porch; + timing->v_front_porch = 0; + } } static u32 get_horizontal_total(const struct intf_timing_params *timing) @@ -298,7 +306,6 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) struct dpu_hw_ctl *hw_ctl; unsigned long lock_flags; u32 flush_register = 0; - int new_cnt = -1, old_cnt = -1; hw_ctl = phys_enc->hw_ctl; @@ -308,7 +315,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent, phys_enc); - old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt); + atomic_read(&phys_enc->pending_kickoff_cnt); /* * only decrement the pending flush count if we've actually flushed @@ -320,8 +327,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) flush_register = hw_ctl->ops.get_flush_register(hw_ctl); if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl))) - new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, - -1, 0); + 
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); /* Signal any waiting atomic commit thread */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 97d122eee96d..60b304b72b7c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -684,7 +684,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = { .max_bw_high = 6800000, .min_core_ib = 2400000, .min_llcc_ib = 800000, - .min_dram_ib = 800000, + .min_dram_ib = 1600000, + .min_prefill_lines = 24, .danger_lut_tbl = {0xff, 0xffff, 0x0}, .qos_lut_tbl = { {.nentry = ARRAY_SIZE(sc7180_qos_linear), @@ -701,6 +702,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = { {.rd_enable = 1, .wr_enable = 1}, {.rd_enable = 1, .wr_enable = 0} }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, }; static const struct dpu_perf_cfg sm8150_perf_data = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 1b7a9213a756..3544af1a45c5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -659,6 +659,8 @@ struct dpu_perf_cdp_cfg { * @downscaling_prefill_lines downscaling latency in lines * @amortizable_theshold minimum y position for traffic shaping prefill * @min_prefill_lines minimum pipeline latency in lines + * @clk_inefficiency_factor DPU src clock inefficiency factor + * @bw_inefficiency_factor DPU axi bus bw inefficiency factor * @safe_lut_tbl: LUT tables for safe signals * @danger_lut_tbl: LUT tables for danger signals * @qos_lut_tbl: LUT tables for QoS signals @@ -683,6 +685,8 @@ struct dpu_perf_cfg { u32 downscaling_prefill_lines; u32 amortizable_threshold; u32 min_prefill_lines; + u32 clk_inefficiency_factor; + u32 bw_inefficiency_factor; u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX]; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index c0a4d4e16d82..d93c44f6996d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -85,30 +85,17 @@ static int _dpu_danger_signal_status(struct seq_file *s, return 0; } -#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \ -static int __prefix ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __prefix ## _show, inode->i_private); \ -} \ -static const struct file_operations __prefix ## _fops = { \ - .owner = THIS_MODULE, \ - .open = __prefix ## _open, \ - .release = single_release, \ - .read = seq_read, \ - .llseek = seq_lseek, \ -} - static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v) { return _dpu_danger_signal_status(s, true); } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats); static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v) { return _dpu_danger_signal_status(s, false); } -DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats); +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats); static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, struct dentry *parent) @@ -195,10 +182,15 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) struct dpu_kms *dpu_kms = to_dpu_kms(kms); void *p = dpu_hw_util_get_log_mask_ptr(); struct dentry *entry; + struct drm_device 
*dev; + struct msm_drm_private *priv; if (!p) return -EINVAL; + dev = dpu_kms->dev; + priv = dev->dev_private; + entry = debugfs_create_dir("debug", minor->debugfs_root); debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p); @@ -207,6 +199,9 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) dpu_debugfs_vbif_init(dpu_kms, entry); dpu_debugfs_core_irq_init(dpu_kms, entry); + if (priv->dp) + msm_dp_debugfs_init(priv->dp, minor); + return dpu_core_perf_debugfs_init(dpu_kms, entry); } #endif @@ -290,6 +285,28 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) return 0; } +static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms) +{ + struct icc_path *path0; + struct icc_path *path1; + struct drm_device *dev = dpu_kms->dev; + + path0 = of_icc_get(dev->dev, "mdp0-mem"); + path1 = of_icc_get(dev->dev, "mdp1-mem"); + + if (IS_ERR_OR_NULL(path0)) + return PTR_ERR_OR_ZERO(path0); + + dpu_kms->path[0] = path0; + dpu_kms->num_paths = 1; + + if (!IS_ERR_OR_NULL(path1)) { + dpu_kms->path[1] = path1; + dpu_kms->num_paths++; + } + return 0; +} + static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { return dpu_crtc_vblank(crtc, true); @@ -479,6 +496,33 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev, return rc; } +static int _dpu_kms_initialize_displayport(struct drm_device *dev, + struct msm_drm_private *priv, + struct dpu_kms *dpu_kms) +{ + struct drm_encoder *encoder = NULL; + int rc = 0; + + if (!priv->dp) + return rc; + + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); + if (IS_ERR(encoder)) { + DPU_ERROR("encoder init failed for dp display\n"); + return PTR_ERR(encoder); + } + + rc = msm_dp_modeset_init(priv->dp, dev, encoder); + if (rc) { + DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); + drm_encoder_cleanup(encoder); + return rc; + } + + priv->encoders[priv->num_encoders++] = encoder; + return rc; +} + /** * _dpu_kms_setup_displays - create encoders, bridges and connectors * for underlying displays @@ -491,12 +535,21 @@ static int _dpu_kms_setup_displays(struct drm_device *dev, struct msm_drm_private *priv, struct dpu_kms *dpu_kms) { - /** - * Extend this function to initialize other - * types of displays - */ + int rc = 0; - return _dpu_kms_initialize_dsi(dev, priv, dpu_kms); + rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_dsi failed, rc = %d\n", rc); + return rc; + } + + rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_DP failed, rc = %d\n", rc); + return rc; + } + + return rc; } static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) @@ -681,13 +734,20 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, info.capabilities = cmd_mode ?
MSM_DISPLAY_CAP_CMD_MODE : MSM_DISPLAY_CAP_VID_MODE; - /* TODO: No support for DSI swap */ - for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { - if (priv->dsi[i]) { - info.h_tile_instance[info.num_of_h_tiles] = i; - info.num_of_h_tiles++; + switch (info.intf_type) { + case DRM_MODE_ENCODER_DSI: + /* TODO: No support for DSI swap */ + for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { + if (priv->dsi[i]) { + info.h_tile_instance[info.num_of_h_tiles] = i; + info.num_of_h_tiles++; + } } - } + break; + case DRM_MODE_ENCODER_TMDS: + info.num_of_h_tiles = 1; + break; + }; rc = dpu_encoder_setup(encoder->dev, encoder, &info); if (rc) @@ -709,6 +769,23 @@ static void dpu_irq_preinstall(struct msm_kms *kms) dpu_core_irq_preinstall(dpu_kms); } +static int dpu_irq_postinstall(struct msm_kms *kms) +{ + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + + if (!dpu_kms || !dpu_kms->dev) + return -EINVAL; + + priv = dpu_kms->dev->dev_private; + if (!priv) + return -EINVAL; + + msm_dp_irq_postinstall(priv->dp); + + return 0; +} + static void dpu_irq_uninstall(struct msm_kms *kms) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -719,6 +796,7 @@ static void dpu_irq_uninstall(struct msm_kms *kms) static const struct msm_kms_funcs kms_funcs = { .hw_init = dpu_kms_hw_init, .irq_preinstall = dpu_irq_preinstall, + .irq_postinstall = dpu_irq_postinstall, .irq_uninstall = dpu_irq_uninstall, .irq = dpu_irq, .enable_commit = dpu_kms_enable_commit, @@ -952,6 +1030,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_vbif_init_memtypes(dpu_kms); + if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) + dpu_kms_parse_data_bus_icc_path(dpu_kms); + pm_runtime_put_sync(&dpu_kms->pdev->dev); return 0; @@ -1079,7 +1160,7 @@ static int dpu_dev_remove(struct platform_device *pdev) static int __maybe_unused dpu_runtime_suspend(struct device *dev) { - int rc = -1; + int i, rc = -1; struct platform_device *pdev = to_platform_device(dev); struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); struct dss_module_power *mp = &dpu_kms->mp; @@ -1090,6 +1171,9 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) if (rc) DPU_ERROR("clock disable failed rc:%d\n", rc); + for (i = 0; i < dpu_kms->num_paths; i++) + icc_set_bw(dpu_kms->path[i], 0, 0); + return rc; } @@ -1101,8 +1185,15 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev) struct drm_encoder *encoder; struct drm_device *ddev; struct dss_module_power *mp = &dpu_kms->mp; + int i; ddev = dpu_kms->dev; + + /* Min vote of BW is required before turning on AXI clk */ + for (i = 0; i < dpu_kms->num_paths; i++) + icc_set_bw(dpu_kms->path[i], 0, + dpu_kms->catalog->perf.min_dram_ib); + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { DPU_ERROR("clock enable failed rc:%d\n", rc); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index e140cd633071..1c0e4c0c9ffb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -8,6 +8,8 @@ #ifndef __DPU_KMS_H__ #define __DPU_KMS_H__ +#include <linux/interconnect.h> + #include <drm/drm_drv.h> #include "msm_drv.h" @@ -140,6 +142,8 @@ struct dpu_kms { * when disabled. 
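The icc_path handles stored in the two new dpu_kms fields below are obtained via the of_icc_get() calls above; dpu_runtime_suspend() and dpu_runtime_resume() then vote bandwidth on them with icc_set_bw(), using min_dram_ib from the catalog as the floor. A minimal sketch of that interconnect lifecycle, for a hypothetical consumer (icc bandwidth arguments are in kBps):

#include <linux/interconnect.h>

/* Hypothetical consumer showing the icc lifecycle used above. */
static int example_vote_bus(struct device *dev, u32 min_dram_ib)
{
	struct icc_path *path;
	int ret;

	/* The name must match an "interconnect-names" entry in the DT node. */
	path = of_icc_get(dev, "mdp0-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* avg_bw/peak_bw in kBps; here: no average vote, minimum peak vote. */
	ret = icc_set_bw(path, 0, min_dram_ib);
	if (ret) {
		icc_put(path);
		return ret;
	}

	icc_set_bw(path, 0, 0);	/* drop the vote, e.g. on suspend */
	icc_put(path);		/* release the handle on remove */
	return 0;
}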
*/ atomic_t bandwidth_ref; + struct icc_path *path[2]; + u32 num_paths; }; struct vsync_info { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index 7d3fdbb00e7e..cd4078807db1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -8,7 +8,6 @@ #include <linux/irqdesc.h> #include <linux/irqchip/chained_irq.h> #include "dpu_kms.h" -#include <linux/interconnect.h> #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base) @@ -277,9 +276,11 @@ int dpu_mdss_init(struct drm_device *dev) DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio); - ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss); - if (ret) - return ret; + if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) { + ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss); + if (ret) + return ret; + } mp = &dpu_mdss->mp; ret = msm_dss_parse_clock(pdev, mp); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 29e373d2e7b5..7ea90d25a3b6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -132,6 +132,86 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) } /** + * _dpu_plane_calc_bw - calculate bandwidth required for a plane + * @plane: Pointer to drm plane. + * @fb: Pointer to framebuffer mapped to the plane. + * Result: Updates calculated bandwidth in the plane state. + * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest) + * Prefill BW Equation: line src bytes * line_time + */ +static void _dpu_plane_calc_bw(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate; + struct drm_display_mode *mode; + const struct dpu_format *fmt = NULL; + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + int src_width, src_height, dst_height, fps; + u64 plane_prefill_bw; + u64 plane_bw; + u32 hw_latency_lines; + u64 scale_factor; + int vbp, vpw; + + pstate = to_dpu_plane_state(plane->state); + mode = &plane->state->crtc->mode; + + fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier); + + src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect); + src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect); + dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect); + fps = drm_mode_vrefresh(mode); + vbp = mode->vtotal - mode->vsync_end; + vpw = mode->vsync_end - mode->vsync_start; + hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines; + scale_factor = src_height > dst_height ? + mult_frac(src_height, 1, dst_height) : 1; + + plane_bw = + src_width * mode->vtotal * fps * fmt->bpp * + scale_factor; + + plane_prefill_bw = + src_width * hw_latency_lines * fps * fmt->bpp * + scale_factor * mode->vtotal; + + do_div(plane_prefill_bw, (vbp+vpw)); + + pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw); +} + +/** + * _dpu_plane_calc_clk - calculate clock required for a plane + * @plane: Pointer to drm plane. + * Result: Updates calculated clock in the plane state.
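To make the _dpu_plane_calc_bw() math above concrete, a worked example with hypothetical timings (1920-wide source, vtotal = 1125, 60 fps, 4 bytes per pixel, no downscaling, vbp + vpw = 40 lines, and hw_latency_lines = 24 as set for sc7180 in the catalog change earlier in this diff):

	plane_bw         = 1920 * 1125 * 60 * 4           = 518400000 B/s
	plane_prefill_bw = 1920 * 24 * 60 * 4 * 1125 / 40 = 311040000 B/s
	plane_fetch_bw   = max(plane_bw, plane_prefill_bw) = ~518 MB/s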
+ * Clock equation: dst_w * v_total * fps * (src_h / dst_h) + */ +static void _dpu_plane_calc_clk(struct drm_plane *plane) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate; + struct drm_display_mode *mode; + int dst_width, src_height, dst_height, fps; + + pstate = to_dpu_plane_state(plane->state); + mode = &plane->state->crtc->mode; + + src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect); + dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect); + dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect); + fps = drm_mode_vrefresh(mode); + + pstate->plane_clk = + dst_width * mode->vtotal * fps; + + if (src_height > dst_height) { + pstate->plane_clk *= src_height; + do_div(pstate->plane_clk, dst_height); + } +} + +/** * _dpu_plane_calc_fill_level - calculate fill level of the given source format * @plane: Pointer to drm plane * @fmt: Pointer to source buffer format @@ -1102,6 +1182,10 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) } _dpu_plane_set_qos_remap(plane); + + _dpu_plane_calc_bw(plane, fb); + + _dpu_plane_calc_clk(plane); } static void _dpu_plane_atomic_disable(struct drm_plane *plane) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index 456949713e90..ca83b8753d59 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -25,6 +25,8 @@ * @scaler3_cfg: configuration data for scaler3 * @pixel_ext: configuration data for pixel extensions * @cdp_cfg: CDP configuration + * @plane_fetch_bw: calculated BW per plane + * @plane_clk: calculated clk per plane */ struct dpu_plane_state { struct drm_plane_state base; @@ -39,6 +41,8 @@ struct dpu_plane_state { struct dpu_hw_pixel_ext pixel_ext; struct dpu_hw_pipe_cdp_cfg cdp_cfg; + u64 plane_fetch_bw; + u64 plane_clk; }; /** diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index a0253297bc76..34e3186e236d 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -264,7 +264,7 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc); @@ -284,7 +284,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, } static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc); @@ -307,7 +307,7 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc, } static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); DBG("%s: check", mdp4_crtc->name); @@ -316,14 +316,14 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, } static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); DBG("%s: begin", mdp4_crtc->name); } static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_device *dev = crtc->dev; diff --git 
a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c index 5d8956055286..88645dbc3785 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c @@ -25,54 +25,9 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -/* not ironically named at all.. no, really.. */ -static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) -{ - struct drm_device *dev = mdp4_dtv_encoder->base.dev; - struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0"); - - if (!dtv_pdata) { - DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n"); - return; - } - - if (dtv_pdata->bus_scale_table) { - mdp4_dtv_encoder->bsc = msm_bus_scale_register_client( - dtv_pdata->bus_scale_table); - DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc); - DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save); - if (dtv_pdata->lcdc_power_save) - dtv_pdata->lcdc_power_save(1); - } -} - -static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) -{ - if (mdp4_dtv_encoder->bsc) { - msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc); - mdp4_dtv_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) -{ - if (mdp4_dtv_encoder->bsc) { - DBG("set bus scaling: %d", idx); - msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} -static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {} -static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {} -#endif - static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) { struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - bs_fini(mdp4_dtv_encoder); drm_encoder_cleanup(encoder); kfree(mdp4_dtv_encoder); } @@ -162,8 +117,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); - bs_set(mdp4_dtv_encoder, 0); - mdp4_dtv_encoder->enabled = false; } @@ -185,8 +138,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) MDP4_DMA_CONFIG_PACK(0x21)); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1); - bs_set(mdp4_dtv_encoder, 1); - DBG("setting mdp_clk=%lu", pc); ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc); @@ -252,8 +203,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) goto fail; } - bs_init(mdp4_dtv_encoder); - return encoder; fail: diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h index 18933bd81c77..e8ee92ab7956 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h @@ -222,17 +222,4 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev) } #endif -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -/* bus scaling data is associated with extra pointless platform devices, - * "dtv", etc.. this is a bit of a hack, but we need a way for encoders - * to find their pdata to make the bus-scaling stuff work. - */ -static inline void *mdp4_find_pdata(const char *devname) -{ - struct device *dev; - dev = bus_find_device_by_name(&platform_bus_type, NULL, devname); - return dev ? 
dev->platform_data : NULL; -} -#endif - #endif /* __MDP4_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c index 871f3514ef69..10eb3e5b218e 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c @@ -30,51 +30,10 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) -{ - struct drm_device *dev = mdp4_lcdc_encoder->base.dev; - struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0"); - - if (!lcdc_pdata) { - DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n"); - return; - } - - if (lcdc_pdata->bus_scale_table) { - mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client( - lcdc_pdata->bus_scale_table); - DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc); - } -} - -static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) -{ - if (mdp4_lcdc_encoder->bsc) { - msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc); - mdp4_lcdc_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) -{ - if (mdp4_lcdc_encoder->bsc) { - DBG("set bus scaling: %d", idx); - msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} -static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {} -static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {} -#endif - static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder) { struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = to_mdp4_lcdc_encoder(encoder); - bs_fini(mdp4_lcdc_encoder); drm_encoder_cleanup(encoder); kfree(mdp4_lcdc_encoder); } @@ -348,8 +307,6 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret); } - bs_set(mdp4_lcdc_encoder, 0); - mdp4_lcdc_encoder->enabled = false; } @@ -382,8 +339,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) mdp4_crtc_set_config(encoder->crtc, config); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); - bs_set(mdp4_lcdc_encoder, 1); - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); if (ret) @@ -480,8 +435,6 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, } mdp4_lcdc_encoder->regs[2] = reg; - bs_init(mdp4_lcdc_encoder); - return encoder; fail: diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c index eeef41fcd4e1..ff2c1d583c79 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c @@ -14,27 +14,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -#include <linux/msm-bus.h> -#include <linux/msm-bus-board.h> - -static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) -{ - if (mdp5_cmd_enc->bsc) { - DBG("set bus scaling: %d", idx); - /* HACK: scaling down, and then immediately back up - * seems to leave things broken (underflow).. 
so - * never disable: - */ - idx = 1; - msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx); - } -} -#else -static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {} -#endif - #define VSYNC_CLK_RATE 19200000 static int pingpong_tearcheck_setup(struct drm_encoder *encoder, struct drm_display_mode *mode) @@ -146,8 +125,6 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) mdp5_ctl_set_encoder_state(ctl, pipeline, false); mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); - bs_set(mdp5_cmd_enc, 0); - mdp5_cmd_enc->enabled = false; } @@ -161,7 +138,6 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) if (WARN_ON(mdp5_cmd_enc->enabled)) return; - bs_set(mdp5_cmd_enc, 1); if (pingpong_tearcheck_enable(encoder)) return; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index c39dad151bb6..4a53d7b42e9c 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -7,6 +7,7 @@ #include <linux/sort.h> +#include <drm/drm_atomic.h> #include <drm/drm_mode.h> #include <drm/drm_crtc.h> #include <drm/drm_flip_work.h> @@ -483,7 +484,7 @@ static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc) } static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); @@ -529,7 +530,7 @@ static void mdp5_crtc_vblank_on(struct drm_crtc *crtc) } static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); @@ -682,15 +683,17 @@ static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, } static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; struct drm_device *dev = crtc->dev; struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; const struct drm_plane_state *pstate; - const struct drm_display_mode *mode = &state->adjusted_mode; + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; bool cursor_plane = false; bool need_right_mixer = false; int cnt = 0, i; @@ -699,7 +702,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, DBG("%s: check", crtc->name); - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { if (!pstate->visible) continue; @@ -731,7 +734,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, if (mode->hdisplay > hw_cfg->lm.max_width) need_right_mixer = true; - ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer); + ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer); if (ret) { DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret); return ret; @@ -744,7 +747,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, WARN_ON(cursor_plane && (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); - start = get_start_stage(crtc, state, &pstates[0].state->base); + start = get_start_stage(crtc, crtc_state, &pstates[0].state->base); /* verify that there are not too many planes attached 
to crtc * and that we don't have conflicting mixer stages: @@ -769,13 +772,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, } static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { DBG("%s: begin", crtc->name); } static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c index f48827283c2b..79d67c495780 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c @@ -16,72 +16,9 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -#include <mach/msm_bus.h> -#include <mach/msm_bus_board.h> -#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ - { \ - .src = MSM_BUS_MASTER_MDP_PORT0, \ - .dst = MSM_BUS_SLAVE_EBI_CH0, \ - .ab = (ab_val), \ - .ib = (ib_val), \ - } - -static struct msm_bus_vectors mdp_bus_vectors[] = { - MDP_BUS_VECTOR_ENTRY(0, 0), - MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), -}; -static struct msm_bus_paths mdp_bus_usecases[] = { { - .num_paths = 1, - .vectors = &mdp_bus_vectors[0], -}, { - .num_paths = 1, - .vectors = &mdp_bus_vectors[1], -} }; -static struct msm_bus_scale_pdata mdp_bus_scale_table = { - .usecase = mdp_bus_usecases, - .num_usecases = ARRAY_SIZE(mdp_bus_usecases), - .name = "mdss_mdp", -}; - -static void bs_init(struct mdp5_encoder *mdp5_encoder) -{ - mdp5_encoder->bsc = msm_bus_scale_register_client( - &mdp_bus_scale_table); - DBG("bus scale client: %08x", mdp5_encoder->bsc); -} - -static void bs_fini(struct mdp5_encoder *mdp5_encoder) -{ - if (mdp5_encoder->bsc) { - msm_bus_scale_unregister_client(mdp5_encoder->bsc); - mdp5_encoder->bsc = 0; - } -} - -static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) -{ - if (mdp5_encoder->bsc) { - DBG("set bus scaling: %d", idx); - /* HACK: scaling down, and then immediately back up - * seems to leave things broken (underflow).. 
so - * never disable: - */ - idx = 1; - msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx); - } -} -#else -static void bs_init(struct mdp5_encoder *mdp5_encoder) {} -static void bs_fini(struct mdp5_encoder *mdp5_encoder) {} -static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {} -#endif - static void mdp5_encoder_destroy(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - bs_fini(mdp5_encoder); drm_encoder_cleanup(encoder); kfree(mdp5_encoder); } @@ -222,8 +159,6 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) */ mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf)); - bs_set(mdp5_encoder, 0); - mdp5_encoder->enabled = false; } @@ -240,7 +175,6 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) if (WARN_ON(mdp5_encoder->enabled)) return; - bs_set(mdp5_encoder, 1); spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); @@ -426,8 +360,6 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); - bs_init(mdp5_encoder); - return encoder; fail: diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c new file mode 100644 index 000000000000..82a8673ab8da --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -0,0 +1,638 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/of_platform.h> + +#include <drm/drm_dp_helper.h> +#include <drm/drm_edid.h> + +#include "dp_catalog.h" +#include "dp_audio.h" +#include "dp_panel.h" +#include "dp_display.h" + +#define HEADER_BYTE_2_BIT 0 +#define PARITY_BYTE_2_BIT 8 +#define HEADER_BYTE_1_BIT 16 +#define PARITY_BYTE_1_BIT 24 +#define HEADER_BYTE_3_BIT 16 +#define PARITY_BYTE_3_BIT 24 + +struct dp_audio_private { + struct platform_device *audio_pdev; + struct platform_device *pdev; + struct dp_catalog *catalog; + struct dp_panel *panel; + + bool engine_on; + u32 channels; + + struct dp_audio dp_audio; +}; + +static u8 dp_audio_get_g0_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[3]; + g[1] = c[0] ^ c[3]; + g[2] = c[1]; + g[3] = c[2]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static u8 dp_audio_get_g1_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[0] ^ c[3]; + g[1] = c[0] ^ c[1] ^ c[3]; + g[2] = c[1] ^ c[2]; + g[3] = c[2] ^ c[3]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static u8 dp_audio_calculate_parity(u32 data) +{ + u8 x0 = 0; + u8 x1 = 0; + u8 ci = 0; + u8 iData = 0; + u8 i = 0; + u8 parity_byte; + u8 num_byte = (data & 0xFF00) > 0 ? 
8 : 2; + + for (i = 0; i < num_byte; i++) { + iData = (data >> i*4) & 0xF; + + ci = iData ^ x1; + x1 = x0 ^ dp_audio_get_g1_value(ci); + x0 = dp_audio_get_g0_value(ci); + } + + parity_byte = x1 | (x0 << 4); + + return parity_byte; +} + +static u32 dp_audio_get_header(struct dp_catalog *catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + dp_catalog_audio_get_header(catalog); + + return catalog->audio_data; +} + +static void dp_audio_set_header(struct dp_catalog *catalog, + u32 data, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + catalog->audio_data = data; + dp_catalog_audio_set_header(catalog); +} + +static void dp_audio_stream_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x02; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + new_value = value; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); + + new_value = audio->channels - 1; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x1; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x17; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = 
dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x84; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x1b; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + new_value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x05; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x0F; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); + + new_value = 0x0; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, 
parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x06; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x0F; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); +} + +static void dp_audio_setup_sdp(struct dp_audio_private *audio) +{ + dp_catalog_audio_config_sdp(audio->catalog); + + dp_audio_stream_sdp(audio); + dp_audio_timestamp_sdp(audio); + dp_audio_infoframe_sdp(audio); + dp_audio_copy_management_sdp(audio); + dp_audio_isrc_sdp(audio); +} + +static void dp_audio_setup_acr(struct dp_audio_private *audio) +{ + u32 select = 0; + struct dp_catalog *catalog = audio->catalog; + + switch (audio->dp_audio.bw_code) { + case DP_LINK_BW_1_62: + select = 0; + break; + case DP_LINK_BW_2_7: + select = 1; + break; + case DP_LINK_BW_5_4: + select = 2; + break; + case DP_LINK_BW_8_1: + select = 3; + break; + default: + DRM_DEBUG_DP("Unknown link rate\n"); + select = 0; + break; + } + + catalog->audio_data = select; + dp_catalog_audio_config_acr(catalog); +} + +static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 safe_to_exit_level = 0; + + switch (audio->dp_audio.lane_count) { + case 1: + safe_to_exit_level = 14; + break; + case 2: + safe_to_exit_level = 8; + break; + case 4: + safe_to_exit_level = 5; + break; + default: + DRM_DEBUG_DP("setting the default safe_to_exit_level = %u\n", + safe_to_exit_level); + safe_to_exit_level = 14; + break; + } + + catalog->audio_data = safe_to_exit_level; + dp_catalog_audio_sfe_level(catalog); +} + +static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +{ + struct dp_catalog *catalog = audio->catalog; + + catalog->audio_data = enable; + dp_catalog_audio_enable(catalog); + + audio->engine_on = enable; +} + +static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +{ + struct dp_audio *dp_audio; + struct msm_dp *dp_display; + + if (!pdev) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + dp_audio = dp_display->dp_audio; + + if (!dp_audio) { + DRM_ERROR("invalid dp_audio data\n"); + return ERR_PTR(-EINVAL); + } + + return container_of(dp_audio, struct dp_audio_private, dp_audio); +} + +static int dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = 
to_platform_device(dev); + if (!pdev) { + pr_err("invalid input\n"); + return -ENODEV; + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + pr_err("invalid input\n"); + return -ENODEV; + } + + return dp_display_set_plugged_cb(dp_display, fn, codec_dev); +} + +static int dp_audio_get_eld(struct device *dev, + void *data, uint8_t *buf, size_t len) +{ + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + + if (!pdev) { + DRM_ERROR("invalid input\n"); + return -ENODEV; + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return -ENODEV; + } + + memcpy(buf, dp_display->connector->eld, + min(sizeof(dp_display->connector->eld), len)); + + return 0; +} + +int dp_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + int rc = 0; + struct dp_audio_private *audio; + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + dp_display = platform_get_drvdata(pdev); + + /* + * There could be cases where the sound card is opened even + * before DP is connected, or while it is not connected at all. + * This can cause unclocked access, as the audio subsystem relies + * on the DP driver to maintain the correct state of clocks. To + * protect against such cases, check the connection status and + * bail out if not connected. + */ + if (!dp_display->power_on) { + rc = -EINVAL; + goto end; + } + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + audio->channels = params->channels; + + dp_audio_setup_sdp(audio); + dp_audio_setup_acr(audio); + dp_audio_safe_to_exit_level(audio); + dp_audio_enable(audio, true); + dp_display->audio_enabled = true; + +end: + return rc; +} + +static void dp_audio_shutdown(struct device *dev, void *data) +{ + struct dp_audio_private *audio; + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + dp_display = platform_get_drvdata(pdev); + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + DRM_ERROR("failed to get audio data\n"); + return; + } + + /* + * If audio was not enabled there is no need + * to execute the shutdown and we can bail out early. + * This also makes sure that we don't cause an unclocked + * access when the audio subsystem calls this without DP being + * connected.
is_connected cannot be used here as it's set + * to false earlier than this call + */ + if (!dp_display->audio_enabled) + return; + + dp_audio_enable(audio, false); + /* signal the dp display to safely shut down clocks */ + dp_display_signal_audio_complete(dp_display); +} + +static const struct hdmi_codec_ops dp_audio_codec_ops = { + .hw_params = dp_audio_hw_params, + .audio_shutdown = dp_audio_shutdown, + .get_eld = dp_audio_get_eld, + .hook_plugged_cb = dp_audio_hook_plugged_cb, +}; + +static struct hdmi_codec_pdata codec_data = { + .ops = &dp_audio_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, +}; + +int dp_register_audio_driver(struct device *dev, + struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio_priv; + + audio_priv = container_of(dp_audio, + struct dp_audio_private, dp_audio); + + audio_priv->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + return PTR_ERR_OR_ZERO(audio_priv->audio_pdev); +} + +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog *catalog) +{ + int rc = 0; + struct dp_audio_private *audio; + struct dp_audio *dp_audio; + + if (!pdev || !panel || !catalog) { + DRM_ERROR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL); + if (!audio) { + rc = -ENOMEM; + goto error; + } + + audio->pdev = pdev; + audio->panel = panel; + audio->catalog = catalog; + + dp_audio = &audio->dp_audio; + + dp_catalog_audio_init(catalog); + + return dp_audio; +error: + return ERR_PTR(rc); +} + +void dp_audio_put(struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio; + + if (!dp_audio) + return; + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + + devm_kfree(&audio->pdev->dev, audio); +} diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h new file mode 100644 index 000000000000..84e5f4a5d26b --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUDIO_H_ +#define _DP_AUDIO_H_ + +#include <linux/platform_device.h> + +#include "dp_panel.h" +#include "dp_catalog.h" +#include <sound/hdmi-codec.h> + +/** + * struct dp_audio + * @lane_count: number of lanes configured in current session + * @bw_code: link rate's bandwidth code for current session + */ +struct dp_audio { + u32 lane_count; + u32 bw_code; +}; + +/** + * dp_audio_get() + * + * Creates an instance of dp audio. + * + * @pdev: caller's platform device instance. + * @panel: an instance of dp_panel module. + * @catalog: an instance of dp_catalog module. + * + * Returns the error code in case of failure, otherwise + * an instance of the newly created dp_audio module. + */ +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog *catalog); + +/** + * dp_register_audio_driver() + * + * Registers DP device with hdmi_codec interface. + * + * @dev: DP device instance. + * @dp_audio: an instance of dp_audio module. + * + * Returns the error code in case of failure, otherwise + * zero on success. + */ +int dp_register_audio_driver(struct device *dev, + struct dp_audio *dp_audio); + +/** + * dp_audio_put() + * + * Cleans the dp_audio instance. + * + * @dp_audio: an instance of dp_audio.
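Taken together, the expected usage of this header from the DP display driver would look roughly like the following. This is only a sketch: the dp->panel and dp->catalog field names are assumptions about the caller, and error paths are trimmed.

	/* At probe: create the audio module and register the hdmi-codec device. */
	dp->dp_audio = dp_audio_get(pdev, dp->panel, dp->catalog);
	if (IS_ERR(dp->dp_audio))
		return PTR_ERR(dp->dp_audio);

	rc = dp_register_audio_driver(&pdev->dev, dp->dp_audio);
	if (rc)
		return rc;

	/* At teardown: release the instance created above. */
	dp_audio_put(dp->dp_audio);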
+ */ +void dp_audio_put(struct dp_audio *dp_audio); + +int dp_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params); + +#endif /* _DP_AUDIO_H_ */ + + diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c new file mode 100644 index 000000000000..19b35ae3e927 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/delay.h> +#include <drm/drm_print.h> + +#include "dp_reg.h" +#include "dp_aux.h" + +#define DP_AUX_ENUM_STR(x) #x + +struct dp_aux_private { + struct device *dev; + struct dp_catalog *catalog; + + struct mutex mutex; + struct completion comp; + + u32 aux_error_num; + u32 retry_cnt; + bool cmd_busy; + bool native; + bool read; + bool no_send_addr; + bool no_send_stop; + u32 offset; + u32 segment; + u32 isr; + + struct drm_dp_aux dp_aux; +}; + +static const char *dp_aux_get_error(u32 aux_error) +{ + switch (aux_error) { + case DP_AUX_ERR_NONE: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE); + case DP_AUX_ERR_ADDR: + return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR); + case DP_AUX_ERR_TOUT: + return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT); + case DP_AUX_ERR_NACK: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK); + case DP_AUX_ERR_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER); + case DP_AUX_ERR_NACK_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER); + default: + return "unknown"; + } +} + +static u32 dp_aux_write(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data[4], reg, len; + u8 *msgdata = msg->buffer; + int const AUX_CMD_FIFO_LEN = 128; + int i = 0; + + if (aux->read) + len = 4; + else + len = msg->size + 4; + + /* + * cmd fifo only has depth of 144 bytes + * limit buf length to 128 bytes here + */ + if (len > AUX_CMD_FIFO_LEN) { + DRM_ERROR("buf size greater than allowed size of 128 bytes\n"); + return 0; + } + + /* Pack cmd and write to HW */ + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ + if (aux->read) + data[0] |= BIT(4); /* R/W */ + + data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ + data[2] = msg->address & 0xff; /* addr[7:0] */ + data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ + + for (i = 0; i < len; i++) { + reg = (i < 4) ? data[i] : msgdata[i - 4]; + /* index = 0, write */ + reg = (((reg) << DP_AUX_DATA_OFFSET) + & DP_AUX_DATA_MASK) | DP_AUX_DATA_WRITE; + if (i == 0) + reg |= DP_AUX_DATA_INDEX_WRITE; + aux->catalog->aux_data = reg; + dp_catalog_aux_write_data(aux->catalog); + } + + dp_catalog_aux_clear_trans(aux->catalog, false); + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + + reg = 0; /* Transaction number == 1 */ + if (!aux->native) { /* i2c */ + reg |= DP_AUX_TRANS_CTRL_I2C; + + if (aux->no_send_addr) + reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR; + + if (aux->no_send_stop) + reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP; + } + + reg |= DP_AUX_TRANS_CTRL_GO; + aux->catalog->aux_data = reg; + dp_catalog_aux_write_trans(aux->catalog); + + return len; +} + +static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 ret, len, timeout; + int aux_timeout_ms = HZ/4; + + reinit_completion(&aux->comp); + + len = dp_aux_write(aux, msg); + if (len == 0) { + DRM_ERROR("DP AUX write failed\n"); + return -EINVAL; + } + + timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms); + if (!timeout) { + DRM_ERROR("aux %s timeout\n", (aux->read ? 
"read" : "write")); + return -ETIMEDOUT; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + ret = len; + } else { + DRM_ERROR_RATELIMITED("aux err: %s\n", + dp_aux_get_error(aux->aux_error_num)); + + ret = -EINVAL; + } + + return ret; +} + +static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data; + u8 *dp; + u32 i, actual_i; + u32 len = msg->size; + + dp_catalog_aux_clear_trans(aux->catalog, true); + + data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ + data |= DP_AUX_DATA_READ; /* read */ + + aux->catalog->aux_data = data; + dp_catalog_aux_write_data(aux->catalog); + + dp = msg->buffer; + + /* discard first byte */ + data = dp_catalog_aux_read_data(aux->catalog); + + for (i = 0; i < len; i++) { + data = dp_catalog_aux_read_data(aux->catalog); + *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff); + + actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF; + if (i != actual_i) + DRM_ERROR("Index mismatch: expected %d, found %d\n", + i, actual_i); + } +} + +static void dp_aux_native_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) + aux->aux_error_num = DP_AUX_ERR_NONE; + else if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + } + + complete(&aux->comp); +} + +static void dp_aux_i2c_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) { + if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER)) + aux->aux_error_num = DP_AUX_ERR_NACK; + else + aux->aux_error_num = DP_AUX_ERR_NONE; + } else { + if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; + if (isr & DP_INTR_I2C_NACK) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_I2C_DEFER) + aux->aux_error_num = DP_AUX_ERR_DEFER; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + } + } + + complete(&aux->comp); +} + +static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg) +{ + u32 edid_address = 0x50; + u32 segment_address = 0x30; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + u8 *data; + + if (aux->native || i2c_read || ((input_msg->address != edid_address) && + (input_msg->address != segment_address))) + return; + + + data = input_msg->buffer; + if (input_msg->address == segment_address) + aux->segment = *data; + else + aux->offset = *data; +} + +/** + * dp_aux_transfer_helper() - helper function for EDID read transactions + * + * @aux: DP AUX private structure + * @input_msg: input message from DRM upstream APIs + * @send_seg: send the segment to sink + * + * return: void + * + * This helper function is used to fix EDID reads for non-compliant + * sinks that do not handle the i2c middle-of-transaction flag correctly. 
+ */ +static void dp_aux_transfer_helper(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg, + bool send_seg) +{ + struct drm_dp_aux_msg helper_msg; + u32 message_size = 0x10; + u32 segment_address = 0x30; + u32 const edid_block_length = 0x80; + bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + if (!i2c_mot || !i2c_read || (input_msg->size == 0)) + return; + + /* + * The segment value and EDID offset are sent by the upstream DRM + * EDID driver for each block. Avoid duplicate AUX transactions + * related to this while reading the first 16 bytes of each block. + */ + if (!(aux->offset % edid_block_length) || !send_seg) + goto end; + + aux->read = false; + aux->cmd_busy = true; + aux->no_send_addr = true; + aux->no_send_stop = true; + + /* + * Send the segment address for every i2c read in which the + * middle-of-transaction flag is set. This is required to support EDID + * reads of more than 2 blocks as the segment address is reset to 0 + * since we are overriding the middle-of-transaction flag for read + * transactions. + */ + + if (aux->segment) { + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = segment_address; + helper_msg.buffer = &aux->segment; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + } + + /* + * Send the offset address for every i2c read in which the + * middle-of-transaction flag is set. This ensures that the sink + * updates its read pointer and returns the correct portion of the + * EDID buffer in the subsequent i2c read transaction triggered in the + * native AUX transfer function. + */ + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = input_msg->address; + helper_msg.buffer = &aux->offset; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + +end: + aux->offset += message_size; + if (aux->offset == 0x80 || aux->offset == 0x100) + aux->segment = 0x0; /* reset segment at end of block */ +} + +/* + * This function does the real work of processing an AUX transaction. + * It calls aux_reset() to reset the AUX channel if the wait + * times out. + */ +static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, + struct drm_dp_aux_msg *msg) +{ + ssize_t ret; + int const aux_cmd_native_max = 16; + int const aux_cmd_i2c_max = 128; + int const retry_count = 5; + struct dp_aux_private *aux = container_of(dp_aux, + struct dp_aux_private, dp_aux); + + mutex_lock(&aux->mutex); + + aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + + /* Ignore address only message */ + if ((msg->size == 0) || (msg->buffer == NULL)) { + msg->reply = aux->native ?
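/*
 * How EDID block numbers relate to the segment/offset values tracked
 * by the helper above -- a minimal sketch; edid_block_to_seg_off() is a
 * hypothetical illustration of E-DDC addressing, not a function in this
 * patch:
 *
 *	static void edid_block_to_seg_off(int block, u8 *seg, u8 *off)
 *	{
 *		*seg = block / 2;		// two 128-byte blocks per segment
 *		*off = (block % 2) * 128;	// 0x00 or 0x80 inside the segment
 *	}
 *
 * This is also why aux->segment is cleared once aux->offset reaches
 * 0x80 or 0x100 at the end of a block.
 */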
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + ret = msg->size; + goto unlock_exit; + } + + /* msg sanity check */ + if ((aux->native && (msg->size > aux_cmd_native_max)) || + (msg->size > aux_cmd_i2c_max)) { + DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n", + __func__, msg->size, msg->request); + ret = -EINVAL; + goto unlock_exit; + } + + dp_aux_update_offset_and_segment(aux, msg); + dp_aux_transfer_helper(aux, msg, true); + + aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + aux->cmd_busy = true; + + if (aux->read) { + aux->no_send_addr = true; + aux->no_send_stop = false; + } else { + aux->no_send_addr = true; + aux->no_send_stop = true; + } + + ret = dp_aux_cmd_fifo_tx(aux, msg); + + if (ret < 0) { + if (aux->native) { + aux->retry_cnt++; + if (!(aux->retry_cnt % retry_count)) + dp_catalog_aux_update_cfg(aux->catalog); + dp_catalog_aux_reset(aux->catalog); + } + usleep_range(400, 500); /* at least 400us to next try */ + goto unlock_exit; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + if (aux->read) + dp_aux_cmd_fifo_rx(aux, msg); + + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + } else { + /* Reply defer to retry */ + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + } + + /* Return requested size for success or retry */ + ret = msg->size; + aux->retry_cnt = 0; + +unlock_exit: + aux->cmd_busy = false; + mutex_unlock(&aux->mutex); + return ret; +} + +void dp_aux_isr(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->isr = dp_catalog_aux_get_irq(aux->catalog); + + if (!aux->cmd_busy) + return; + + if (aux->native) + dp_aux_native_handler(aux); + else + dp_aux_i2c_handler(aux); +} + +void dp_aux_reconfig(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + dp_catalog_aux_update_cfg(aux->catalog); + dp_catalog_aux_reset(aux->catalog); +} + +void dp_aux_init(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + dp_catalog_aux_enable(aux->catalog, true); + aux->retry_cnt = 0; +} + +void dp_aux_deinit(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + dp_catalog_aux_enable(aux->catalog, false); +} + +int dp_aux_register(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + int ret; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->dp_aux.name = "dpu_dp_aux"; + aux->dp_aux.dev = aux->dev; + aux->dp_aux.transfer = dp_aux_transfer; + ret = drm_dp_aux_register(&aux->dp_aux); + if (ret) { + DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, + ret); + return ret; + } + + return 0; +} + +void dp_aux_unregister(struct drm_dp_aux *dp_aux) +{ + drm_dp_aux_unregister(dp_aux); +} + +struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog) +{ + struct dp_aux_private *aux; + + if (!catalog) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); + if (!aux) + return ERR_PTR(-ENOMEM); + + init_completion(&aux->comp); + aux->cmd_busy = false; + mutex_init(&aux->mutex); + + aux->dev = 
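/*
 * Consumers normally never call dp_aux_transfer() directly; after
 * dp_aux_register() the DRM DPCD helpers route through it. A usage
 * sketch, assuming a registered struct drm_dp_aux *aux:
 *
 *	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 *	ssize_t len;
 *
 *	len = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
 *	if (len < 0)
 *		return len;	// propagated error from the transfer above
 *
 * drm_dp_dpcd_read() chunks the request into native AUX messages and
 * retries on DEFER replies before giving up.
 */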
dev; + aux->catalog = catalog; + aux->retry_cnt = 0; + + return &aux->dp_aux; +} + +void dp_aux_put(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) + return; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_destroy(&aux->mutex); + + devm_kfree(aux->dev, aux); +} diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h new file mode 100644 index 000000000000..f8b8ba919465 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_aux.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUX_H_ +#define _DP_AUX_H_ + +#include "dp_catalog.h" +#include <drm/drm_dp_helper.h> + +#define DP_AUX_ERR_NONE 0 +#define DP_AUX_ERR_ADDR -1 +#define DP_AUX_ERR_TOUT -2 +#define DP_AUX_ERR_NACK -3 +#define DP_AUX_ERR_DEFER -4 +#define DP_AUX_ERR_NACK_DEFER -5 +#define DP_AUX_ERR_PHY -6 + +int dp_aux_register(struct drm_dp_aux *dp_aux); +void dp_aux_unregister(struct drm_dp_aux *dp_aux); +void dp_aux_isr(struct drm_dp_aux *dp_aux); +void dp_aux_init(struct drm_dp_aux *dp_aux); +void dp_aux_deinit(struct drm_dp_aux *dp_aux); +void dp_aux_reconfig(struct drm_dp_aux *dp_aux); + +struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog); +void dp_aux_put(struct drm_dp_aux *aux); + +#endif /*__DP_AUX_H_*/ diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c new file mode 100644 index 000000000000..b15b4ce4ba35 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -0,0 +1,1019 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/rational.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> +#include <linux/rational.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_print.h> + +#include "dp_catalog.h" +#include "dp_reg.h" + +#define POLLING_SLEEP_US 1000 +#define POLLING_TIMEOUT_US 10000 + +#define SCRAMBLER_RESET_COUNT_VALUE 0xFC + +#define DP_INTERRUPT_STATUS_ACK_SHIFT 1 +#define DP_INTERRUPT_STATUS_MASK_SHIFT 2 + +#define MSM_DP_CONTROLLER_AHB_OFFSET 0x0000 +#define MSM_DP_CONTROLLER_AHB_SIZE 0x0200 +#define MSM_DP_CONTROLLER_AUX_OFFSET 0x0200 +#define MSM_DP_CONTROLLER_AUX_SIZE 0x0200 +#define MSM_DP_CONTROLLER_LINK_OFFSET 0x0400 +#define MSM_DP_CONTROLLER_LINK_SIZE 0x0C00 +#define MSM_DP_CONTROLLER_P0_OFFSET 0x1000 +#define MSM_DP_CONTROLLER_P0_SIZE 0x0400 + +#define DP_INTERRUPT_STATUS1 \ + (DP_INTR_AUX_I2C_DONE| \ + DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \ + DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \ + DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \ + DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR) + +#define DP_INTERRUPT_STATUS1_ACK \ + (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT) +#define DP_INTERRUPT_STATUS1_MASK \ + (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT) + +#define DP_INTERRUPT_STATUS2 \ + (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \ + DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED) + +#define DP_INTERRUPT_STATUS2_ACK \ + (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT) +#define DP_INTERRUPT_STATUS2_MASK \ + (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT) + +struct dp_catalog_private { + struct device *dev; + struct dp_io *io; + u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_catalog dp_catalog; + u8 
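/*
 * Note on the interrupt register layout encoded by the shift macros
 * above: each source in REG_DP_INTR_STATUS owns three adjacent bits --
 * status, ack (status << 1) and mask (status << 2). Acknowledging while
 * keeping sources enabled therefore looks like:
 *
 *	u32 intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
 *	u32 ack = (intr & DP_INTERRUPT_STATUS1)
 *			<< DP_INTERRUPT_STATUS_ACK_SHIFT;
 *
 *	dp_write_ahb(catalog, REG_DP_INTR_STATUS,
 *		     ack | DP_INTERRUPT_STATUS1_MASK);
 *
 * which is what dp_catalog_aux_get_irq() below does.
 */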
aux_lut_cfg_index[PHY_AUX_CFG_MAX]; +}; + +static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) +{ + offset += MSM_DP_CONTROLLER_AUX_OFFSET; + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_aux(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_AUX_OFFSET; + /* + * To make sure aux reg writes happens before any other operation, + * this function uses writel() instread of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset) +{ + offset += MSM_DP_CONTROLLER_AHB_OFFSET; + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_ahb(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_AHB_OFFSET; + /* + * To make sure phy reg writes happens before any other operation, + * this function uses writel() instread of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_p0(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_P0_OFFSET; + /* + * To make sure interface reg writes happens before any other operation, + * this function uses writel() instread of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +static inline u32 dp_read_p0(struct dp_catalog_private *catalog, + u32 offset) +{ + offset += MSM_DP_CONTROLLER_P0_OFFSET; + /* + * To make sure interface reg writes happens before any other operation, + * this function uses writel() instread of writel_relaxed() + */ + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) +{ + offset += MSM_DP_CONTROLLER_LINK_OFFSET; + return readl_relaxed(catalog->io->dp_controller.base + offset); +} + +static inline void dp_write_link(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + offset += MSM_DP_CONTROLLER_LINK_OFFSET; + /* + * To make sure link reg writes happens before any other operation, + * this function uses writel() instread of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.base + offset); +} + +/* aux related catalog functions */ +u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_aux(catalog, REG_DP_AUX_DATA); +} + +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data); + return 0; +} + +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data); + return 0; +} + +int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read) +{ + u32 data; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (read) { + data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); + data &= ~DP_AUX_TRANS_CTRL_GO; + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + } else { + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); + } + return 0; +} + +int 
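/*
 * On the barrier choice in the accessors above: writel() orders the
 * store against prior memory accesses, while writel_relaxed() and
 * readl_relaxed() do not. The write side is roughly equivalent to:
 *
 *	wmb();				// complete prior accesses first
 *	writel_relaxed(data, base + offset);
 *
 * Reads stay relaxed because nothing here depends on their ordering,
 * which avoids paying the barrier cost on the hot polling paths.
 */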
dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); + return 0; +} + +void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + + aux_ctrl |= DP_AUX_CTRL_RESET; + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); + usleep_range(1000, 1100); /* h/w recommended delay */ + + aux_ctrl &= ~DP_AUX_CTRL_RESET; + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); +} + +void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + + if (enable) { + dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); + dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); + aux_ctrl |= DP_AUX_CTRL_ENABLE; + } else { + aux_ctrl &= ~DP_AUX_CTRL_ENABLE; + } + + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); +} + +void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dp_io *dp_io = catalog->io; + struct phy *phy = dp_io->phy; + + phy_calibrate(phy); +} + +static void dump_regs(void __iomem *base, int len) +{ + int i; + u32 x0, x4, x8, xc; + u32 addr_off = 0; + + len = DIV_ROUND_UP(len, 16); + for (i = 0; i < len; i++) { + x0 = readl_relaxed(base + addr_off); + x4 = readl_relaxed(base + addr_off + 0x04); + x8 = readl_relaxed(base + addr_off + 0x08); + xc = readl_relaxed(base + addr_off + 0x0c); + + pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc); + addr_off += 16; + } +} + +void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) +{ + u32 offset, len; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + pr_info("AHB regs\n"); + offset = MSM_DP_CONTROLLER_AHB_OFFSET; + len = MSM_DP_CONTROLLER_AHB_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); + + pr_info("AUXCLK regs\n"); + offset = MSM_DP_CONTROLLER_AUX_OFFSET; + len = MSM_DP_CONTROLLER_AUX_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); + + pr_info("LCLK regs\n"); + offset = MSM_DP_CONTROLLER_LINK_OFFSET; + len = MSM_DP_CONTROLLER_LINK_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); + + pr_info("P0CLK regs\n"); + offset = MSM_DP_CONTROLLER_P0_OFFSET; + len = MSM_DP_CONTROLLER_P0_SIZE; + dump_regs(catalog->io->dp_controller.base + offset, len); +} + +int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 intr, intr_ack; + + intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS); + intr &= ~DP_INTERRUPT_STATUS1_MASK; + intr_ack = (intr & DP_INTERRUPT_STATUS1) + << DP_INTERRUPT_STATUS_ACK_SHIFT; + dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack | + DP_INTERRUPT_STATUS1_MASK); + + return intr; + +} + +/* controller related catalog functions */ +void 
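/*
 * dp_catalog_aux_update_cfg() above delegates AUX tuning to the
 * platform PHY driver through phy_calibrate(). The transfer path in
 * dp_aux.c uses it as escalating recovery -- every failed native
 * transfer resets the AUX block, and every fifth retry additionally
 * re-calibrates the PHY (quoted from dp_aux_transfer()):
 *
 *	if (aux->native) {
 *		aux->retry_cnt++;
 *		if (!(aux->retry_cnt % retry_count))
 *			dp_catalog_aux_update_cfg(aux->catalog);
 *		dp_catalog_aux_reset(aux->catalog);
 *	}
 */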
dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, + u32 dp_tu, u32 valid_boundary, + u32 valid_boundary2) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); + dp_write_link(catalog, REG_DP_TU, dp_tu); + dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); +} + +void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_STATE_CTRL, state); +} + +void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + DRM_DEBUG_DP("DP_CONFIGURATION_CTRL=0x%x\n", cfg); + + dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); +} + +void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */ + u32 ln_mapping; + + ln_mapping = ln_0 << LANE0_MAPPING_SHIFT; + ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT; + ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT; + ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT; + + dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, + ln_mapping); +} + +void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, + bool enable) +{ + u32 mainlink_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (enable) { + /* + * To make sure link reg writes happens before other operation, + * dp_write_link() function uses writel() + */ + mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + + mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET | + DP_MAINLINK_CTRL_ENABLE); + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl |= DP_MAINLINK_CTRL_RESET; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE | + DP_MAINLINK_FB_BOUNDARY_SEL); + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + } else { + mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + } +} + +void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, + u32 colorimetry_cfg, + u32 test_bits_depth) +{ + u32 misc_val; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0); + + /* clear bpp bits */ + misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT); + misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT; + misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT; + /* Configure clock to synchronous mode */ + misc_val |= DP_MISC0_SYNCHRONOUS_CLK; + + DRM_DEBUG_DP("misc settings = 0x%x\n", misc_val); + dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); +} + +void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, + u32 rate, u32 stream_rate_khz, + bool fixed_nvid) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid, pixel_div = 0, dispcc_input_rate; + u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE; + u32 
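/*
 * The MVID/NVID pair programmed below implements the DP MSA relation
 * Mvid / Nvid = stream_clock / link_clock. The rational approximation
 * step keeps both terms representable in the 16-bit fields of
 * REG_DP_SOFTWARE_MVID / REG_DP_SOFTWARE_NVID -- in simplified form:
 *
 *	rational_best_approximation(dispcc_input_rate, stream_rate_khz,
 *				    (1 << 16) - 1, (1 << 16) - 1,
 *				    &den, &num);
 *
 * with fixed-N normalisation to DP_LINK_CONSTANT_N_VALUE and HBR2/HBR3
 * scaling folded in afterwards. (A reading aid, not a faithful
 * re-derivation of the bit manipulation below.)
 */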
const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + unsigned long den, num; + + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (rate == link_rate_hbr3) + pixel_div = 6; + else if (rate == 1620000 || rate == 270000) + pixel_div = 2; + else if (rate == link_rate_hbr2) + pixel_div = 4; + else + DRM_ERROR("Invalid pixel mux divider\n"); + + dispcc_input_rate = (rate * 10) / pixel_div; + + rational_best_approximation(dispcc_input_rate, stream_rate_khz, + (unsigned long)(1 << 16) - 1, + (unsigned long)(1 << 16) - 1, &den, &num); + + den = ~(den - num); + den = den & 0xFFFF; + pixel_m = num; + pixel_n = den; + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; + mvid = (nvid_fixed / nvid) * mvid; + nvid = temp; + } + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + + DRM_DEBUG_DP("mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); + dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); + dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); +} + +int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, + u32 pattern) +{ + int bit, ret; + u32 data; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + bit = BIT(pattern - 1); + DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, pattern); + dp_catalog_ctrl_state_ctrl(dp_catalog, bit); + + bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; + + /* Poll for mainlink ready status */ + ret = readx_poll_timeout(readl, catalog->io->dp_controller.base + + MSM_DP_CONTROLLER_LINK_OFFSET + + REG_DP_MAINLINK_READY, + data, data & bit, + POLLING_SLEEP_US, POLLING_TIMEOUT_US); + if (ret < 0) { + DRM_ERROR("set pattern for link_train=%d failed\n", pattern); + return ret; + } + return 0; +} + +void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) +{ + u32 sw_reset; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET); + + sw_reset |= DP_SW_RESET; + dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + usleep_range(1000, 1100); /* h/w recommended delay */ + + sw_reset &= ~DP_SW_RESET; + dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); +} + +bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) +{ + u32 data; + int ret; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + /* Poll for mainlink ready status */ + ret = readl_poll_timeout(catalog->io->dp_controller.base + + MSM_DP_CONTROLLER_LINK_OFFSET + + REG_DP_MAINLINK_READY, + data, data & DP_MAINLINK_READY_FOR_VIDEO, + POLLING_SLEEP_US, POLLING_TIMEOUT_US); + if (ret < 0) { + DRM_ERROR("mainlink not ready\n"); + return false; + } + + return true; +} + +void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, + bool enable) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (enable) { + dp_write_ahb(catalog, REG_DP_INTR_STATUS, + DP_INTERRUPT_STATUS1_MASK); + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + DP_INTERRUPT_STATUS2_MASK); + } else { + dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); + } +} + +void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, + u32 intr_mask, bool en) +{ 
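/*
 * Pattern used by dp_catalog_ctrl_set_pattern() and
 * dp_catalog_ctrl_mainlink_ready() above: spin on a status register via
 * the iopoll helpers until a bit latches or the timeout expires.
 * General shape:
 *
 *	u32 data;
 *	int ret;
 *
 *	ret = readl_poll_timeout(base + REG_DP_MAINLINK_READY, data,
 *				 data & DP_MAINLINK_READY_FOR_VIDEO,
 *				 POLLING_SLEEP_US, POLLING_TIMEOUT_US);
 *	if (ret < 0)
 *		return ret;	// -ETIMEDOUT if the bit never set
 */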
+ struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + + config = (en ? config | intr_mask : config & ~intr_mask); + + dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, + config & DP_DP_HPD_INT_MASK); +} + +void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + + /* enable HPD interrupts */ + dp_catalog_hpd_config_intr(dp_catalog, + DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK + | DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true); + + /* Configure REFTIMER and enable it */ + reftimer |= DP_DP_HPD_REFTIMER_ENABLE; + dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + + /* Enable HPD */ + dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); +} + +u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + int isr = 0; + + isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, + (isr & DP_DP_HPD_INT_MASK)); + + return isr; +} + +int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 intr, intr_ack; + + intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2); + intr &= ~DP_INTERRUPT_STATUS2_MASK; + intr_ack = (intr & DP_INTERRUPT_STATUS2) + << DP_INTERRUPT_STATUS_ACK_SHIFT; + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + intr_ack | DP_INTERRUPT_STATUS2_MASK); + + return intr; +} + +void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_ahb(catalog, REG_DP_PHY_CTRL, + DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL); + usleep_range(1000, 1100); /* h/w recommended delay */ + dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); +} + +int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, + u8 v_level, u8 p_level) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dp_io *dp_io = catalog->io; + struct phy *phy = dp_io->phy; + struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + + /* TODO: Update for all lanes instead of just first one */ + opts_dp->voltage[0] = v_level; + opts_dp->pre[0] = p_level; + opts_dp->set_voltages = 1; + phy_configure(phy, &dp_io->phy_opts); + opts_dp->set_voltages = 0; + + return 0; +} + +void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, + u32 pattern) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 value = 0x0; + + /* Make sure to clear the current pattern before starting a new one */ + dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); + + switch (pattern) { + case DP_PHY_TEST_PATTERN_D10_2: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TRAINING_PATTERN1); + break; + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + value &= ~(1 << 16); + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + value |= SCRAMBLER_RESET_COUNT_VALUE; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + 
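/*
 * HPD summary for the helpers above: the controller debounces the HPD
 * pin against REG_DP_DP_HPD_REFTIMER and raises four maskable events
 * (plug, unplug, replug, irq_hpd). A caller enables a subset with the
 * read-modify-write helper:
 *
 *	dp_catalog_hpd_config_intr(dp_catalog,
 *				   DP_DP_HPD_PLUG_INT_MASK |
 *				   DP_DP_HPD_UNPLUG_INT_MASK, true);
 *
 * and the ISR side acknowledges by writing the status bits back to
 * REG_DP_DP_HPD_INT_ACK, as dp_catalog_hpd_get_intr_status() does.
 */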
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); + break; + case DP_PHY_TEST_PATTERN_PRBS7: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_PRBS7); + break; + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN); + /* 00111110000011111000001111100000 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, + 0x3E0F83E0); + /* 00001111100000111110000011111000 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, + 0x0F83E0F8); + /* 1111100000111110 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, + 0x0000F83E); + break; + case DP_PHY_TEST_PATTERN_CP2520: + value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + + value = DP_HBR2_ERM_PATTERN; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + value |= SCRAMBLER_RESET_COUNT_VALUE; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); + value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value |= DP_MAINLINK_CTRL_ENABLE; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + break; + case DP_PHY_TEST_PATTERN_SEL_MASK: + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, + DP_MAINLINK_CTRL_ENABLE); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TRAINING_PATTERN4); + break; + default: + DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern); + break; + } +} + +u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_link(catalog, REG_DP_MAINLINK_READY); +} + +/* panel related catalog functions */ +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, + dp_catalog->total); + dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, + dp_catalog->sync_start); + dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, + dp_catalog->width_blanking); + dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active); + return 0; +} + +void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, + struct drm_display_mode *drm_mode) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 hsync_period, vsync_period; + u32 display_v_start, display_v_end; + u32 hsync_start_x, hsync_end_x; + u32 v_sync_width; + u32 hsync_ctl; + u32 display_hctl; + + /* TPG config parameters*/ + hsync_period = drm_mode->htotal; + vsync_period = drm_mode->vtotal; + + display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) * + hsync_period); + display_v_end = ((vsync_period - (drm_mode->vsync_start - + drm_mode->vdisplay)) + * hsync_period) - 1; + + display_v_start += drm_mode->htotal - drm_mode->hsync_start; + display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay); + + hsync_start_x = drm_mode->htotal - drm_mode->hsync_start; + hsync_end_x = hsync_period - (drm_mode->hsync_start - + drm_mode->hdisplay) - 1; + + v_sync_width = drm_mode->vsync_end - 
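/*
 * All TPG timings here derive from the DRM mode, horizontal values in
 * pixels and vertical values in whole hsync periods. The two packed
 * registers written below, for instance:
 *
 *	hsync_ctl    = (htotal << 16) | (hsync_end - hsync_start);
 *	display_hctl = (hsync_end_x << 16) | hsync_start_x;
 *
 * where hsync_start_x/hsync_end_x bound the active pixels of a line,
 * counted from the leading edge of hsync.
 */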
drm_mode->vsync_start; + + hsync_ctl = (hsync_period << 16) | + (drm_mode->hsync_end - drm_mode->hsync_start); + display_hctl = (hsync_end_x << 16) | hsync_start_x; + + + dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); + dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * + hsync_period); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * + hsync_period); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); + dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); + dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); + + dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, + DP_TPG_CHECKERED_RECT_PATTERN); + dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, + DP_TPG_VIDEO_CONFIG_BPP_8BIT | + DP_TPG_VIDEO_CONFIG_RGB); + dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, + DP_BIST_ENABLE_DPBIST_EN); + dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, + DP_TIMING_ENGINE_EN_EN); + DRM_DEBUG_DP("%s: enabled tpg\n", __func__); +} + +void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); + dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); + dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); +} + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io) +{ + struct dp_catalog_private *catalog; + + if (!io) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); + if (!catalog) + return ERR_PTR(-ENOMEM); + + catalog->dev = dev; + catalog->io = io; + + return &catalog->dp_catalog; +} + +void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_map = catalog->audio_map; + sdp = dp_catalog->sdp_type; + header = dp_catalog->sdp_header; + + dp_catalog->audio_data = dp_read_link(catalog, + sdp_map[sdp][header]); +} + +void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + u32 data; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_map = catalog->audio_map; + sdp = dp_catalog->sdp_type; + header = dp_catalog->sdp_header; + data = dp_catalog->audio_data; + + dp_write_link(catalog, sdp_map[sdp][header], data); +} + +void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 acr_ctrl, select; + + 
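/*
 * The audio catalog entry points pass their operands through struct
 * dp_catalog rather than parameters: a caller fills sdp_type,
 * sdp_header and audio_data, and the function resolves the target
 * register through the 2-D map installed by dp_catalog_audio_init()
 * further down. Caller-side sketch (header_word is a placeholder
 * value):
 *
 *	dp_catalog->sdp_type = DP_AUDIO_SDP_STREAM;
 *	dp_catalog->sdp_header = DP_AUDIO_SDP_HEADER_1;
 *	dp_catalog->audio_data = header_word;
 *	dp_catalog_audio_set_header(dp_catalog);
 */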
if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + select = dp_catalog->audio_data; + acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); + + DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl); + + dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); +} + +void dp_catalog_audio_enable(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + bool enable; + u32 audio_ctrl; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + enable = !!dp_catalog->audio_data; + audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG); + + if (enable) + audio_ctrl |= BIT(0); + else + audio_ctrl &= ~BIT(0); + + DRM_DEBUG_DP("dp_audio_cfg = 0x%x\n", audio_ctrl); + + dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); + /* make sure audio engine is disabled */ + wmb(); +} + +void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 sdp_cfg = 0; + u32 sdp_cfg2 = 0; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); + /* AUDIO_TIMESTAMP_SDP_EN */ + sdp_cfg |= BIT(1); + /* AUDIO_STREAM_SDP_EN */ + sdp_cfg |= BIT(2); + /* AUDIO_COPY_MANAGEMENT_SDP_EN */ + sdp_cfg |= BIT(5); + /* AUDIO_ISRC_SDP_EN */ + sdp_cfg |= BIT(6); + /* AUDIO_INFOFRAME_SDP_EN */ + sdp_cfg |= BIT(20); + + DRM_DEBUG_DP("sdp_cfg = 0x%x\n", sdp_cfg); + + dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); + + sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); + /* IFRM_REGSRC -> Do not use reg values */ + sdp_cfg2 &= ~BIT(0); + /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ + sdp_cfg2 &= ~BIT(1); + + DRM_DEBUG_DP("sdp_cfg2 = 0x%x\n", sdp_cfg2); + + dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); +} + +void dp_catalog_audio_init(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + + static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { + { + MMSS_DP_AUDIO_STREAM_0, + MMSS_DP_AUDIO_STREAM_1, + MMSS_DP_AUDIO_STREAM_1, + }, + { + MMSS_DP_AUDIO_TIMESTAMP_0, + MMSS_DP_AUDIO_TIMESTAMP_1, + MMSS_DP_AUDIO_TIMESTAMP_1, + }, + { + MMSS_DP_AUDIO_INFOFRAME_0, + MMSS_DP_AUDIO_INFOFRAME_1, + MMSS_DP_AUDIO_INFOFRAME_1, + }, + { + MMSS_DP_AUDIO_COPYMANAGEMENT_0, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + }, + { + MMSS_DP_AUDIO_ISRC_0, + MMSS_DP_AUDIO_ISRC_1, + MMSS_DP_AUDIO_ISRC_1, + }, + }; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + catalog->audio_map = sdp_map; +} + +void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 mainlink_levels, safe_to_exit_level; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + safe_to_exit_level = dp_catalog->audio_data; + mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); + mainlink_levels &= 0xFE0; + mainlink_levels |= safe_to_exit_level; + + DRM_DEBUG_DP("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", + mainlink_levels, safe_to_exit_level); + + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); +} diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h new file mode 100644 index 000000000000..4b7666f1fe6f --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -0,0 +1,131 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_CATALOG_H_ +#define _DP_CATALOG_H_ + +#include <drm/drm_modes.h> + +#include "dp_parser.h" + +/* interrupts */ +#define DP_INTR_HPD BIT(0) +#define DP_INTR_AUX_I2C_DONE BIT(3) +#define DP_INTR_WRONG_ADDR BIT(6) +#define DP_INTR_TIMEOUT BIT(9) +#define DP_INTR_NACK_DEFER BIT(12) +#define DP_INTR_WRONG_DATA_CNT BIT(15) +#define DP_INTR_I2C_NACK BIT(18) +#define DP_INTR_I2C_DEFER BIT(21) +#define DP_INTR_PLL_UNLOCKED BIT(24) +#define DP_INTR_AUX_ERROR BIT(27) + +#define DP_INTR_READY_FOR_VIDEO BIT(0) +#define DP_INTR_IDLE_PATTERN_SENT BIT(3) +#define DP_INTR_FRAME_END BIT(6) +#define DP_INTR_CRC_UPDATED BIT(9) + +#define DP_AUX_CFG_MAX_VALUE_CNT 3 + +/* PHY AUX config registers */ +enum dp_phy_aux_config_type { + PHY_AUX_CFG0, + PHY_AUX_CFG1, + PHY_AUX_CFG2, + PHY_AUX_CFG3, + PHY_AUX_CFG4, + PHY_AUX_CFG5, + PHY_AUX_CFG6, + PHY_AUX_CFG7, + PHY_AUX_CFG8, + PHY_AUX_CFG9, + PHY_AUX_CFG_MAX, +}; + +enum dp_catalog_audio_sdp_type { + DP_AUDIO_SDP_STREAM, + DP_AUDIO_SDP_TIMESTAMP, + DP_AUDIO_SDP_INFOFRAME, + DP_AUDIO_SDP_COPYMANAGEMENT, + DP_AUDIO_SDP_ISRC, + DP_AUDIO_SDP_MAX, +}; + +enum dp_catalog_audio_header_type { + DP_AUDIO_SDP_HEADER_1, + DP_AUDIO_SDP_HEADER_2, + DP_AUDIO_SDP_HEADER_3, + DP_AUDIO_SDP_HEADER_MAX, +}; + +struct dp_catalog { + u32 aux_data; + u32 total; + u32 sync_start; + u32 width_blanking; + u32 dp_active; + enum dp_catalog_audio_sdp_type sdp_type; + enum dp_catalog_audio_header_type sdp_header; + u32 audio_data; +}; + +/* AUX APIs */ +u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog); +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog); +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog); +int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); +int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); +void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); +void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog); +int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog); + +/* DP Controller APIs */ +void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state); +void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config); +void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); +void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, + u32 stream_rate_khz, bool fixed_nvid); +int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern); +void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); +bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, + u32 intr_mask, bool en); +void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); +u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); +int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level, + u8 p_level); +int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, + u32 dp_tu, u32 
valid_boundary, + u32 valid_boundary2); +void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, + u32 pattern); +u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog); + +/* DP Panel APIs */ +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog); +void dp_catalog_dump_regs(struct dp_catalog *dp_catalog); +void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, + struct drm_display_mode *drm_mode); +void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog); + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io); + +/* DP Audio APIs */ +void dp_catalog_audio_get_header(struct dp_catalog *catalog); +void dp_catalog_audio_set_header(struct dp_catalog *catalog); +void dp_catalog_audio_config_acr(struct dp_catalog *catalog); +void dp_catalog_audio_enable(struct dp_catalog *catalog); +void dp_catalog_audio_enable(struct dp_catalog *catalog); +void dp_catalog_audio_config_sdp(struct dp_catalog *catalog); +void dp_catalog_audio_init(struct dp_catalog *catalog); +void dp_catalog_audio_sfe_level(struct dp_catalog *catalog); + +#endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c new file mode 100644 index 000000000000..2e3e1917351f --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -0,0 +1,1869 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> +#include <drm/drm_fixed.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_print.h> + +#include "dp_reg.h" +#include "dp_ctrl.h" +#include "dp_link.h" + +#define DP_KHZ_TO_HZ 1000 +#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */ +#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2) + +#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0) +#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3) + +#define MR_LINK_TRAINING1 0x8 +#define MR_LINK_SYMBOL_ERM 0x80 +#define MR_LINK_PRBS7 0x100 +#define MR_LINK_CUSTOM80 0x200 +#define MR_LINK_TRAINING4 0x40 + +enum { + DP_TRAINING_NONE, + DP_TRAINING_1, + DP_TRAINING_2, +}; + +struct dp_tu_calc_input { + u64 lclk; /* 162, 270, 540 and 810 */ + u64 pclk_khz; /* in KHz */ + u64 hactive; /* active h-width */ + u64 hporch; /* bp + fp + pulse */ + int nlanes; /* no.of.lanes */ + int bpp; /* bits */ + int pixel_enc; /* 444, 420, 422 */ + int dsc_en; /* dsc on/off */ + int async_en; /* async mode */ + int fec_en; /* fec */ + int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */ + int num_of_dsc_slices; /* number of slices per line */ +}; + +struct dp_vc_tu_mapping_table { + u32 vic; + u8 lanes; + u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ + u8 bpp; + u8 valid_boundary_link; + u16 delay_start_link; + bool boundary_moderation_en; + u8 valid_lower_boundary_link; + u8 upper_boundary_count; + u8 lower_boundary_count; + u8 tu_size_minus1; +}; + +struct dp_ctrl_private { + struct dp_ctrl dp_ctrl; + struct device *dev; + struct drm_dp_aux *aux; + struct dp_panel *panel; + struct dp_link *link; + struct dp_power *power; + struct dp_parser *parser; + struct dp_catalog *catalog; + + struct completion idle_comp; + struct completion video_comp; +}; + +struct dp_cr_status { + u8 lane_0_1; + u8 lane_2_3; +}; + +#define DP_LANE0_1_CR_DONE 0x11 + +static int dp_aux_link_configure(struct drm_dp_aux 
*aux, + struct dp_link_info *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} + +void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + reinit_completion(&ctrl->idle_comp); + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); + + if (!wait_for_completion_timeout(&ctrl->idle_comp, + IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES)) + pr_warn("PUSH_IDLE pattern timedout\n"); + + pr_debug("mainlink off done\n"); +} + +static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) +{ + u32 config = 0, tbd; + u8 *dpcd = ctrl->panel->dpcd; + + /* Default-> LSCLK DIV: 1/4 LCLK */ + config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT); + + /* Scrambler reset enable */ + if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP) + config |= DP_CONFIGURATION_CTRL_ASSR; + + tbd = dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->dp_mode.bpp); + + if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) { + pr_debug("BIT_DEPTH not set. Configure default\n"); + tbd = DP_TEST_BIT_DEPTH_8; + } + + config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; + + /* Num of Lanes */ + config |= ((ctrl->link->link_params.num_lanes - 1) + << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT); + + if (drm_dp_enhanced_frame_cap(dpcd)) + config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING; + + config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */ + + /* sync clock & static Mvid */ + config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN; + config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK; + + dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); +} + +static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl) +{ + u32 cc, tb; + + dp_catalog_ctrl_lane_mapping(ctrl->catalog); + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + + dp_ctrl_config_ctrl(ctrl); + + tb = dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->dp_mode.bpp); + cc = dp_link_get_colorimetry_config(ctrl->link); + dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); + dp_panel_timing_cfg(ctrl->panel); +} + +/* + * The structure and few functions present below are IP/Hardware + * specific implementation. 
Most of the implementation will not + * have coding comments + */ +struct tu_algo_data { + s64 lclk_fp; + s64 pclk_fp; + s64 lwidth; + s64 lwidth_fp; + s64 hbp_relative_to_pclk; + s64 hbp_relative_to_pclk_fp; + int nlanes; + int bpp; + int pixelEnc; + int dsc_en; + int async_en; + int bpc; + + uint delay_start_link_extra_pixclk; + int extra_buffer_margin; + s64 ratio_fp; + s64 original_ratio_fp; + + s64 err_fp; + s64 n_err_fp; + s64 n_n_err_fp; + int tu_size; + int tu_size_desired; + int tu_size_minus1; + + int valid_boundary_link; + s64 resulting_valid_fp; + s64 total_valid_fp; + s64 effective_valid_fp; + s64 effective_valid_recorded_fp; + int n_tus; + int n_tus_per_lane; + int paired_tus; + int remainder_tus; + int remainder_tus_upper; + int remainder_tus_lower; + int extra_bytes; + int filler_size; + int delay_start_link; + + int extra_pclk_cycles; + int extra_pclk_cycles_in_link_clk; + s64 ratio_by_tu_fp; + s64 average_valid2_fp; + int new_valid_boundary_link; + int remainder_symbols_exist; + int n_symbols; + s64 n_remainder_symbols_per_lane_fp; + s64 last_partial_tu_fp; + s64 TU_ratio_err_fp; + + int n_tus_incl_last_incomplete_tu; + int extra_pclk_cycles_tmp; + int extra_pclk_cycles_in_link_clk_tmp; + int extra_required_bytes_new_tmp; + int filler_size_tmp; + int lower_filler_size_tmp; + int delay_start_link_tmp; + + bool boundary_moderation_en; + int boundary_mod_lower_err; + int upper_boundary_count; + int lower_boundary_count; + int i_upper_boundary_count; + int i_lower_boundary_count; + int valid_lower_boundary_link; + int even_distribution_BF; + int even_distribution_legacy; + int even_distribution; + int min_hblank_violated; + s64 delay_start_time_fp; + s64 hbp_time_fp; + s64 hactive_time_fp; + s64 diff_abs_fp; + + s64 ratio; +}; + +static int _tu_param_compare(s64 a, s64 b) +{ + u32 a_sign; + u32 b_sign; + s64 a_temp, b_temp, minus_1; + + if (a == b) + return 0; + + minus_1 = drm_fixp_from_fraction(-1, 1); + + a_sign = (a >> 32) & 0x80000000 ? 1 : 0; + + b_sign = (b >> 32) & 0x80000000 ? 
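/*
 * The TU math below runs in the drm_fixed.h s64 32.32 fixed-point
 * format (signed integer part in the high 32 bits, fraction in the low
 * 32). _tu_param_compare() returns 0 for a == b, 1 for a > b and 2 for
 * a < b. The conversions used throughout:
 *
 *	s64 x = drm_fixp_from_fraction(3, 2);	// 1.5 in 32.32
 *	int t = drm_fixp2int(x);		// 1, truncating
 *	int c = drm_fixp2int_ceil(x);		// 2, rounding up
 */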
1 : 0; + + if (a_sign > b_sign) + return 2; + else if (b_sign > a_sign) + return 1; + + if (!a_sign && !b_sign) { /* positive */ + if (a > b) + return 1; + else + return 2; + } else { /* negative */ + a_temp = drm_fixp_mul(a, minus_1); + b_temp = drm_fixp_mul(b, minus_1); + + if (a_temp > b_temp) + return 2; + else + return 1; + } +} + +static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, + struct tu_algo_data *tu) +{ + int nlanes = in->nlanes; + int dsc_num_slices = in->num_of_dsc_slices; + int dsc_num_bytes = 0; + int numerator; + s64 pclk_dsc_fp; + s64 dwidth_dsc_fp; + s64 hbp_dsc_fp; + + int tot_num_eoc_symbols = 0; + int tot_num_hor_bytes = 0; + int tot_num_dummy_bytes = 0; + int dwidth_dsc_bytes = 0; + int eoc_bytes = 0; + + s64 temp1_fp, temp2_fp, temp3_fp; + + tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1); + tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000); + tu->lwidth = in->hactive; + tu->hbp_relative_to_pclk = in->hporch; + tu->nlanes = in->nlanes; + tu->bpp = in->bpp; + tu->pixelEnc = in->pixel_enc; + tu->dsc_en = in->dsc_en; + tu->async_en = in->async_en; + tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1); + tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1); + + if (tu->pixelEnc == 420) { + temp1_fp = drm_fixp_from_fraction(2, 1); + tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp); + tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp); + tu->hbp_relative_to_pclk_fp = + drm_fixp_div(tu->hbp_relative_to_pclk_fp, 2); + } + + if (tu->pixelEnc == 422) { + switch (tu->bpp) { + case 24: + tu->bpp = 16; + tu->bpc = 8; + break; + case 30: + tu->bpp = 20; + tu->bpc = 10; + break; + default: + tu->bpp = 16; + tu->bpc = 8; + break; + } + } else { + tu->bpc = tu->bpp/3; + } + + if (!in->dsc_en) + goto fec_check; + + temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100); + temp2_fp = drm_fixp_from_fraction(in->bpp, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp); + + temp1_fp = drm_fixp_from_fraction(8, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + + numerator = drm_fixp2int(temp3_fp); + + dsc_num_bytes = numerator / dsc_num_slices; + eoc_bytes = dsc_num_bytes % nlanes; + tot_num_eoc_symbols = nlanes * dsc_num_slices; + tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices; + tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices; + + if (dsc_num_bytes == 0) + pr_info("incorrect no of bytes per slice=%d\n", dsc_num_bytes); + + dwidth_dsc_bytes = (tot_num_hor_bytes + + tot_num_eoc_symbols + + (eoc_bytes == 0 ? 
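/*
 * The quantity everything here feeds is the link utilisation ratio
 * computed later in _dp_ctrl_calc_tu():
 *
 *	ratio = (pclk * bpp / 8) / (nlanes * lclk)
 *
 * i.e. pixel bytes produced per lane symbol slot. DSC first rescales
 * pclk and the active width by the compression ratio (this block), and
 * FEC derates lclk to 97.6% for its overhead (fec_check below).
 */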
0 : tot_num_dummy_bytes)); + + dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3); + + temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp); + temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp); + pclk_dsc_fp = temp1_fp; + + temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp); + temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp); + hbp_dsc_fp = temp2_fp; + + /* output */ + tu->pclk_fp = pclk_dsc_fp; + tu->lwidth_fp = dwidth_dsc_fp; + tu->hbp_relative_to_pclk_fp = hbp_dsc_fp; + +fec_check: + if (in->fec_en) { + temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */ + tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp); + } +} + +static void _tu_valid_boundary_calc(struct tu_algo_data *tu) +{ + s64 temp1_fp, temp2_fp, temp, temp1, temp2; + int compare_result_1, compare_result_2, compare_result_3; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + + tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp = (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link-1)); + tu->average_valid2_fp = drm_fixp_from_fraction(temp, + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + tu->n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu->n_tus += 1; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp); + temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu->n_remainder_symbols_per_lane_fp = temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + tu->last_partial_tu_fp = + drm_fixp_div(tu->n_remainder_symbols_per_lane_fp, + temp1_fp); + + if (tu->n_remainder_symbols_per_lane_fp != 0) + tu->remainder_symbols_exist = 1; + else + tu->remainder_symbols_exist = 0; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes); + tu->n_tus_per_lane = drm_fixp2int(temp1_fp); + + tu->paired_tus = (int)((tu->n_tus_per_lane) / + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus * + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count); + + if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) { + tu->remainder_tus_upper = tu->i_upper_boundary_count; + tu->remainder_tus_lower = tu->remainder_tus - + tu->i_upper_boundary_count; + } else { + tu->remainder_tus_upper = tu->remainder_tus; + tu->remainder_tus_lower = 0; + } + + temp = tu->paired_tus * (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link - 1)) + + (tu->remainder_tus_upper * + tu->new_valid_boundary_link) + + (tu->remainder_tus_lower * + (tu->new_valid_boundary_link - 1)); + tu->total_valid_fp = drm_fixp_from_fraction(temp, 1); + + if (tu->remainder_symbols_exist) { + temp1_fp = tu->total_valid_fp + + tu->n_remainder_symbols_per_lane_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp2_fp = temp2_fp + tu->last_partial_tu_fp; + temp1_fp = drm_fixp_div(temp1_fp, temp2_fp); + } else { + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp); + } + 
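/*
 * Boundary moderation in one line: rather than a single valid-symbol
 * count per TU, alternate between N and N - 1 valid symbols so the
 * long-run average tracks ratio * tu_size more closely. The candidate
 * average evaluated above is:
 *
 *	avg = (upper_cnt * N + lower_cnt * (N - 1)) /
 *	      (upper_cnt + lower_cnt);
 *
 * with N = new_valid_boundary_link and the counts taken from the
 * i_upper/i_lower search variables.
 */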
tu->effective_valid_fp = temp1_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_err_fp = tu->average_valid2_fp - temp2_fp; + + tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + + if (temp2_fp) + tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp); + else + tu->n_tus_incl_last_incomplete_tu = 0; + + temp1 = 0; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = tu->average_valid2_fp - temp2_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + temp1 = drm_fixp2int_ceil(temp1_fp); + + temp = tu->i_upper_boundary_count * tu->nlanes; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp2_fp) + temp2 = drm_fixp2int_ceil(temp2_fp); + else + temp2 = 0; + tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2); + + temp1_fp = drm_fixp_from_fraction(8, tu->bpp); + temp2_fp = drm_fixp_from_fraction( + tu->extra_required_bytes_new_tmp, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_tmp = 0; + + temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1); + temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_in_link_clk_tmp = + drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_in_link_clk_tmp = 0; + + tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link; + + tu->lower_filler_size_tmp = tu->filler_size_tmp + 1; + + tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp + + tu->lower_filler_size_tmp + + tu->extra_buffer_margin; + + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); + + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp); + if (compare_result_1 == 2) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp); + if (compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + compare_result_3 = _tu_param_compare(tu->hbp_time_fp, + tu->delay_start_time_fp); + if (compare_result_3 == 2) + compare_result_3 = 0; + else + compare_result_3 = 1; + + if (((tu->even_distribution == 1) || + ((tu->even_distribution_BF == 0) && + (tu->even_distribution_legacy == 0))) && + tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 && + compare_result_2 && + (compare_result_1 || (tu->min_hblank_violated == 1)) && + (tu->new_valid_boundary_link - 1) > 0 && + compare_result_3 && + (tu->delay_start_link_tmp <= 1023)) { + tu->upper_boundary_count = tu->i_upper_boundary_count; + tu->lower_boundary_count = tu->i_lower_boundary_count; + tu->err_fp = 
tu->n_n_err_fp; + tu->boundary_moderation_en = true; + tu->tu_size_desired = tu->tu_size; + tu->valid_boundary_link = tu->new_valid_boundary_link; + tu->effective_valid_recorded_fp = tu->effective_valid_fp; + tu->even_distribution_BF = 1; + tu->delay_start_link = tu->delay_start_link_tmp; + } else if (tu->boundary_mod_lower_err == 0) { + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, + tu->diff_abs_fp); + if (compare_result_1 == 2) + tu->boundary_mod_lower_err = 1; + } +} + +static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct tu_algo_data tu; + int compare_result_1, compare_result_2; + u64 temp = 0; + s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0; + + s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */ + s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */ + s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */ + s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000); + + u8 DP_BRUTE_FORCE = 1; + s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */ + uint EXTRA_PIXCLK_CYCLE_DELAY = 4; + uint HBLANK_MARGIN = 4; + + memset(&tu, 0, sizeof(tu)); + + dp_panel_update_tu_timings(in, &tu); + + tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ + + temp1_fp = drm_fixp_from_fraction(4, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp); + temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp); + tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp); + + tu.original_ratio_fp = tu.ratio_fp; + tu.boundary_moderation_en = false; + tu.upper_boundary_count = 0; + tu.lower_boundary_count = 0; + tu.i_upper_boundary_count = 0; + tu.i_lower_boundary_count = 0; + tu.valid_lower_boundary_link = 0; + tu.even_distribution_BF = 0; + tu.even_distribution_legacy = 0; + tu.even_distribution = 0; + tu.delay_start_time_fp = 0; + + tu.err_fp = drm_fixp_from_fraction(1000, 1); + tu.n_err_fp = 0; + tu.n_n_err_fp = 0; + + tu.ratio = drm_fixp2int(tu.ratio_fp); + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + div64_u64_rem(tu.lwidth_fp, temp1_fp, &temp2_fp); + if (temp2_fp != 0 && + !tu.ratio && tu.dsc_en == 0) { + tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp); + tu.ratio = drm_fixp2int(tu.ratio_fp); + if (tu.ratio) + tu.ratio_fp = drm_fixp_from_fraction(1, 1); + } + + if (tu.ratio > 1) + tu.ratio = 1; + + if (tu.ratio == 1) + goto tu_size_calc; + + compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp); + if (!compare_result_1 || compare_result_1 == 1) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp); + if (!compare_result_2 || compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + if (tu.dsc_en && compare_result_1 && compare_result_2) { + HBLANK_MARGIN += 4; + DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n", + HBLANK_MARGIN); + } + +tu_size_calc: + for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { + temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + temp = drm_fixp2int_ceil(temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + tu.n_err_fp = temp1_fp - temp2_fp; + + if (tu.n_err_fp < tu.err_fp) { + tu.err_fp = tu.n_err_fp; + tu.tu_size_desired = tu.tu_size; + } + } + + 
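+	/* tu_size_desired now holds the TU size in [32, 64] with the +	 * smallest rounding error between ratio * tu_size and its ceiling. +	 */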
tu.tu_size_minus1 = tu.tu_size_desired - 1; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = tu.lwidth_fp; + temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu.n_tus += 1; + + tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0; + DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n", + tu.valid_boundary_link, tu.n_tus); + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu.n_tus + 1, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + temp = drm_fixp2int(temp2_fp); + if (temp && temp2_fp) + tu.extra_bytes = drm_fixp2int_ceil(temp2_fp); + else + tu.extra_bytes = 0; + + temp1_fp = drm_fixp_from_fraction(tu.extra_bytes, 1); + temp2_fp = drm_fixp_from_fraction(8, tu.bpp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp && temp1_fp) + tu.extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp); + else + tu.extra_pclk_cycles = drm_fixp2int(temp1_fp); + + temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp); + temp2_fp = drm_fixp_from_fraction(tu.extra_pclk_cycles, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu.extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp); + else + tu.extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp); + + tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + + tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk + + tu.filler_size + tu.extra_buffer_margin; + + tu.resulting_valid_fp = + drm_fixp_from_fraction(tu.valid_boundary_link, 1); + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + + temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1); + temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp; + tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp); + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + compare_result_1 = _tu_param_compare(tu.hbp_time_fp, + tu.delay_start_time_fp); + if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */ + tu.min_hblank_violated = 1; + + tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp); + + compare_result_2 = _tu_param_compare(tu.hactive_time_fp, + tu.delay_start_time_fp); + if (compare_result_2 == 2) + tu.min_hblank_violated = 1; + + tu.delay_start_time_fp = 0; + + /* brute force */ + + tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY; + tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp; + + temp = drm_fixp2int(tu.diff_abs_fp); + if (!temp && tu.diff_abs_fp <= 0xffff) + tu.diff_abs_fp = 0; + + /* if(diff_abs < 0) diff_abs *= -1 */ + if (tu.diff_abs_fp < 0) + tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1); + + tu.boundary_mod_lower_err = 0; + if ((tu.diff_abs_fp != 0 && + ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) || + 
(tu.even_distribution_legacy == 0) || + (DP_BRUTE_FORCE == 1))) || + (tu.min_hblank_violated == 1)) { + do { + tu.err_fp = drm_fixp_from_fraction(1000, 1); + + temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp); + temp2_fp = drm_fixp_from_fraction( + tu.delay_start_link_extra_pixclk, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu.extra_buffer_margin = + drm_fixp2int_ceil(temp1_fp); + else + tu.extra_buffer_margin = 0; + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + + if (temp1_fp) + tu.n_symbols = drm_fixp2int_ceil(temp1_fp); + else + tu.n_symbols = 0; + + for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { + for (tu.i_upper_boundary_count = 1; + tu.i_upper_boundary_count <= 15; + tu.i_upper_boundary_count++) { + for (tu.i_lower_boundary_count = 1; + tu.i_lower_boundary_count <= 15; + tu.i_lower_boundary_count++) { + _tu_valid_boundary_calc(&tu); + } + } + } + tu.delay_start_link_extra_pixclk--; + } while (tu.boundary_moderation_en != true && + tu.boundary_mod_lower_err == 1 && + tu.delay_start_link_extra_pixclk != 0); + + if (tu.boundary_moderation_en == true) { + temp1_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count * + tu.valid_boundary_link + + tu.lower_boundary_count * + (tu.valid_boundary_link - 1)), 1); + temp2_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count + + tu.lower_boundary_count), 1); + tu.resulting_valid_fp = + drm_fixp_div(temp1_fp, temp2_fp); + + temp1_fp = drm_fixp_from_fraction( + tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = + drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + + tu.valid_lower_boundary_link = + tu.valid_boundary_link - 1; + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, + tu.resulting_valid_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + + tu.tu_size_minus1 = tu.tu_size_desired - 1; + tu.even_distribution_BF = 1; + + temp1_fp = + drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = + drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + } + } + + temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp); + + if (temp2_fp) + temp = drm_fixp2int_ceil(temp2_fp); + else + temp = 0; + + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = drm_fixp_div(temp1_fp, temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp = drm_fixp2int(temp2_fp); + + if (tu.async_en) + tu.delay_start_link += (int)temp; + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + /* OUTPUTS */ + tu_table->valid_boundary_link = tu.valid_boundary_link; + tu_table->delay_start_link = tu.delay_start_link; + tu_table->boundary_moderation_en = tu.boundary_moderation_en; + tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link; + tu_table->upper_boundary_count = tu.upper_boundary_count; + tu_table->lower_boundary_count = tu.lower_boundary_count; + tu_table->tu_size_minus1 = tu.tu_size_minus1; + + DRM_DEBUG_DP("TU: valid_boundary_link: %d\n", + tu_table->valid_boundary_link); + DRM_DEBUG_DP("TU: delay_start_link: %d\n", + tu_table->delay_start_link); + DRM_DEBUG_DP("TU: boundary_moderation_en: %d\n", + tu_table->boundary_moderation_en); + DRM_DEBUG_DP("TU: valid_lower_boundary_link: %d\n", + 
tu_table->valid_lower_boundary_link); + DRM_DEBUG_DP("TU: upper_boundary_count: %d\n", + tu_table->upper_boundary_count); + DRM_DEBUG_DP("TU: lower_boundary_count: %d\n", + tu_table->lower_boundary_count); + DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1); +} + +static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct dp_tu_calc_input in; + struct drm_display_mode *drm_mode; + + drm_mode = &ctrl->panel->dp_mode.drm_mode; + + in.lclk = ctrl->link->link_params.rate / 1000; + in.pclk_khz = drm_mode->clock; + in.hactive = drm_mode->hdisplay; + in.hporch = drm_mode->htotal - drm_mode->hdisplay; + in.nlanes = ctrl->link->link_params.num_lanes; + in.bpp = ctrl->panel->dp_mode.bpp; + in.pixel_enc = 444; + in.dsc_en = 0; + in.async_en = 0; + in.fec_en = 0; + in.num_of_dsc_slices = 0; + in.compress_ratio = 100; + + _dp_ctrl_calc_tu(&in, tu_table); +} + +static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl) +{ + u32 dp_tu = 0x0; + u32 valid_boundary = 0x0; + u32 valid_boundary2 = 0x0; + struct dp_vc_tu_mapping_table tu_calc_table; + + dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table); + + dp_tu |= tu_calc_table.tu_size_minus1; + valid_boundary |= tu_calc_table.valid_boundary_link; + valid_boundary |= (tu_calc_table.delay_start_link << 16); + + valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1); + valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16); + valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20); + + if (tu_calc_table.boundary_moderation_en) + valid_boundary2 |= BIT(0); + + pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n", + dp_tu, valid_boundary, valid_boundary2); + + dp_catalog_ctrl_update_transfer_unit(ctrl->catalog, + dp_tu, valid_boundary, valid_boundary2); +} + +static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (!wait_for_completion_timeout(&ctrl->video_comp, + WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) { + DRM_ERROR("wait4video timed out\n"); + ret = -ETIMEDOUT; + } + return ret; +} + +static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl) +{ + struct dp_link *link = ctrl->link; + int ret = 0, lane, lane_cnt; + u8 buf[4]; + u32 max_level_reached = 0; + u32 voltage_swing_level = link->phy_params.v_level; + u32 pre_emphasis_level = link->phy_params.p_level; + + ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog, + voltage_swing_level, pre_emphasis_level); + + if (ret) + return ret; + + if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) { + DRM_DEBUG_DP("max. voltage swing level reached %d\n", + voltage_swing_level); + max_level_reached |= DP_TRAIN_MAX_SWING_REACHED; + } + + if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) { + DRM_DEBUG_DP("max. 
pre-emphasis level reached %d\n", + pre_emphasis_level); + max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } + + pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + lane_cnt = ctrl->link->link_params.num_lanes; + for (lane = 0; lane < lane_cnt; lane++) + buf[lane] = voltage_swing_level | pre_emphasis_level + | max_level_reached; + + DRM_DEBUG_DP("sink: p|v=0x%x\n", voltage_swing_level + | pre_emphasis_level); + ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET, + buf, lane_cnt); + if (ret == lane_cnt) + ret = 0; + + return ret; +} + +static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, + u8 pattern) +{ + u8 buf; + int ret = 0; + + DRM_DEBUG_DP("sink: pattern=%x\n", pattern); + + buf = pattern; + + if (pattern && pattern != DP_TRAINING_PATTERN_4) + buf |= DP_LINK_SCRAMBLING_DISABLE; + + ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf); + return ret == 1; +} + +static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, + u8 *link_status) +{ + int len = 0; + u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS; + u32 link_status_read_max_retries = 100; + + while (--link_status_read_max_retries) { + len = drm_dp_dpcd_read_link_status(ctrl->aux, + link_status); + if (len != DP_LINK_STATUS_SIZE) { + DRM_ERROR("DP link status read failed, err: %d\n", len); + return len; + } + + if (!(link_status[offset] & DP_LINK_STATUS_UPDATED)) + return 0; + } + + return -ETIMEDOUT; +} + +static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int tries, old_v_level, ret = 0; + u8 link_status[DP_LINK_STATUS_SIZE]; + int const maximum_retries = 4; + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + *training_step = DP_TRAINING_1; + + ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, DP_TRAINING_PATTERN_1); + if (ret) + return ret; + dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE); + + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + for (tries = 0; tries < maximum_retries; tries++) { + drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + return ret; + + cr->lane_0_1 = link_status[0]; + cr->lane_2_3 = link_status[1]; + + if (drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.num_lanes)) { + return 0; + } + + if (ctrl->link->phy_params.v_level >= + DP_TRAIN_VOLTAGE_SWING_MAX) { + DRM_ERROR_RATELIMITED("max v_level reached\n"); + return -EAGAIN; + } + + if (old_v_level != ctrl->link->phy_params.v_level) { + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + } + + DRM_DEBUG_DP("clock recovery not done, adjusting vx px\n"); + + dp_link_adjust_levels(ctrl->link, link_status); + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + } + + DRM_ERROR("max tries reached\n"); + return -ETIMEDOUT; +} + +static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + switch (ctrl->link->link_params.rate) { + case 810000: + ctrl->link->link_params.rate = 540000; + break; + case 540000: + ctrl->link->link_params.rate = 270000; + break; + case 270000: + ctrl->link->link_params.rate = 162000; + break; + case 162000: + default: + ret = -EINVAL; + break; + } + + if (!ret) + DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate); + + return ret; +} + +static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) +{ + + if 
(ctrl->link->link_params.num_lanes == 1) + return -1; + + ctrl->link->link_params.num_lanes /= 2; + ctrl->link->link_params.rate = ctrl->panel->link_info.rate; + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + return 0; +} + +static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +{ + dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); +} + +static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int tries = 0, ret = 0; + char pattern; + int const maximum_retries = 5; + u8 link_status[DP_LINK_STATUS_SIZE]; + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + *training_step = DP_TRAINING_2; + + if (drm_dp_tps3_supported(ctrl->panel->dpcd)) + pattern = DP_TRAINING_PATTERN_3; + else + pattern = DP_TRAINING_PATTERN_2; + + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern); + if (ret) + return ret; + + dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN); + + for (tries = 0; tries <= maximum_retries; tries++) { + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + return ret; + cr->lane_0_1 = link_status[0]; + cr->lane_2_3 = link_status[1]; + + if (drm_dp_channel_eq_ok(link_status, + ctrl->link->link_params.num_lanes)) { + return 0; + } + + dp_link_adjust_levels(ctrl->link, link_status); + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + } + + return -ETIMEDOUT; +} + +static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl); + +static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int ret = 0; + u8 encoding = DP_SET_ANSI_8B10B; + struct dp_link_info link_info = {0}; + + dp_ctrl_config_ctrl(ctrl); + + link_info.num_lanes = ctrl->link->link_params.num_lanes; + link_info.rate = ctrl->link->link_params.rate; + link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; + + dp_aux_link_configure(ctrl->aux, &link_info); + drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, + &encoding, 1); + + ret = dp_ctrl_link_train_1(ctrl, cr, training_step); + if (ret) { + DRM_ERROR("link training #1 failed. ret=%d\n", ret); + goto end; + } + + /* print success info as this is a result of user initiated action */ + DRM_DEBUG_DP("link training #1 successful\n"); + + ret = dp_ctrl_link_train_2(ctrl, cr, training_step); + if (ret) { + DRM_ERROR("link training #2 failed. ret=%d\n", ret); + goto end; + } + + /* print success info as this is a result of user initiated action */ + DRM_DEBUG_DP("link training #2 successful\n"); + +end: + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + return ret; +} + +static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, + struct dp_cr_status *cr, int *training_step) +{ + int ret = 0; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + return ret; + + /* + * As part of previous calls, DP controller state might have + * transitioned to PUSH_IDLE. In order to start transmitting + * a link training pattern, we have to first do soft reset. 
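+	 * dp_catalog_ctrl_reset() below performs that soft reset.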
+ */ + dp_catalog_ctrl_reset(ctrl->catalog); + + ret = dp_ctrl_link_train(ctrl, cr, training_step); + + return ret; +} + +static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl, + enum dp_pm_type module, char *name, unsigned long rate) +{ + u32 num = ctrl->parser->mp[module].num_clk; + struct dss_clk *cfg = ctrl->parser->mp[module].clk_config; + + while (num && strcmp(cfg->clk_name, name)) { + num--; + cfg++; + } + + DRM_DEBUG_DP("setting rate=%lu on clk=%s\n", rate, name); + + if (num) + cfg->rate = rate; + else + DRM_ERROR("%s clock doesn't exist to set rate %lu\n", + name, rate); +} + +static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + struct dp_io *dp_io = &ctrl->parser->io; + struct phy *phy = dp_io->phy; + struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + + opts_dp->lanes = ctrl->link->link_params.num_lanes; + opts_dp->link_rate = ctrl->link->link_params.rate / 100; + dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link", + ctrl->link->link_params.rate * 1000); + + phy_configure(phy, &dp_io->phy_opts); + phy_power_on(phy); + + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true); + if (ret) + DRM_ERROR("Unable to start link clocks. ret=%d\n", ret); + + DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n", + ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate); + + return ret; +} + +static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", + ctrl->dp_ctrl.pixel_rate * 1000); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true); + if (ret) + DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret); + + DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n", + ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate); + + return ret; +} + +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) +{ + struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; + + if (!dp_ctrl) { + DRM_ERROR("Invalid input data\n"); + return -EINVAL; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + ctrl->dp_ctrl.orientation = flip; + + dp_catalog_ctrl_phy_reset(ctrl->catalog); + phy_init(phy); + dp_catalog_ctrl_enable_irq(ctrl->catalog, true); + + return 0; +} + +/** + * dp_ctrl_host_deinit() - Uninitialize DP controller + * @dp_ctrl: Display Port Driver data + * + * Perform required steps to uninitialize DP controller + * and its resources. + */ +void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DRM_ERROR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + dp_catalog_ctrl_enable_irq(ctrl->catalog, false); + + DRM_DEBUG_DP("Host deinitialized successfully\n"); +} + +static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl) +{ + u8 *dpcd = ctrl->panel->dpcd; + u32 edid_quirks = 0; + + edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid); + /* + * For better interop experience, use a fixed NVID=0x8000 + * whenever connected to a VGA dongle downstream. 
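+	 * Branch devices carrying the DP_DPCD_QUIRK_CONSTANT_N quirk +	 * are treated this way below.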
+ */ + if (drm_dp_is_branch(dpcd)) + return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks, + DP_DPCD_QUIRK_CONSTANT_N)); + + return false; +} + +static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + struct dp_io *dp_io = &ctrl->parser->io; + struct phy *phy = dp_io->phy; + struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + opts_dp->lanes = ctrl->link->link_params.num_lanes; + phy_configure(phy, &dp_io->phy_opts); + /* + * Disable and re-enable the mainlink clock since the + * link clock might have been adjusted as part of the + * link maintenance. + */ + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable clocks. ret=%d\n", ret); + return ret; + } + phy_power_off(phy); + /* hw recommended delay before re-enabling clocks */ + msleep(20); + + ret = dp_ctrl_enable_mainlink_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret); + return ret; + } + + return ret; +} + +static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + struct dp_cr_status cr; + int training_step = DP_TRAINING_NONE; + + dp_ctrl_push_idle(&ctrl->dp_ctrl); + dp_catalog_ctrl_reset(ctrl->catalog); + + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + + ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step); + if (ret) + goto end; + + dp_ctrl_clear_training_pattern(ctrl); + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + + ret = dp_ctrl_wait4video_ready(ctrl); +end: + return ret; +} + +static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (!ctrl->link->phy_params.phy_test_pattern_sel) { + DRM_DEBUG_DP("no test pattern selected by sink\n"); + return ret; + } + + /* + * The global reset will need DP link related clocks to be + * running. Add the global reset just before disabling the + * link clocks and core clocks. 
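+	 * dp_ctrl_off() below does that: it resets the controller and +	 * then turns the stream and link clocks off.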
+ */ + ret = dp_ctrl_off(&ctrl->dp_ctrl); + if (ret) { + DRM_ERROR("failed to disable DP controller\n"); + return ret; + } + + ret = dp_ctrl_on_link(&ctrl->dp_ctrl); + if (!ret) + ret = dp_ctrl_on_stream(&ctrl->dp_ctrl); + else + DRM_ERROR("failed to enable DP link controller\n"); + + return ret; +} + +static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +{ + bool success = false; + u32 pattern_sent = 0x0; + u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel; + + DRM_DEBUG_DP("request: 0x%x\n", pattern_requested); + + if (dp_catalog_ctrl_update_vx_px(ctrl->catalog, + ctrl->link->phy_params.v_level, + ctrl->link->phy_params.p_level)) { + DRM_ERROR("Failed to set v/p levels\n"); + return false; + } + dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); + dp_ctrl_update_vx_px(ctrl); + dp_link_send_test_response(ctrl->link); + + pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); + + switch (pattern_sent) { + case MR_LINK_TRAINING1: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_D10_2); + break; + case MR_LINK_SYMBOL_ERM: + success = ((pattern_requested == + DP_PHY_TEST_PATTERN_ERROR_COUNT) || + (pattern_requested == + DP_PHY_TEST_PATTERN_CP2520)); + break; + case MR_LINK_PRBS7: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_PRBS7); + break; + case MR_LINK_CUSTOM80: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_80BIT_CUSTOM); + break; + case MR_LINK_TRAINING4: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_SEL_MASK); + break; + default: + success = false; + } + + DRM_DEBUG_DP("%s: test->0x%x\n", success ? "success" : "failed", + pattern_requested); + return success; +} + +void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + u32 sink_request = 0x0; + + if (!dp_ctrl) { + DRM_ERROR("invalid input\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + sink_request = ctrl->link->sink_request; + + if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + DRM_DEBUG_DP("PHY_TEST_PATTERN request\n"); + if (dp_ctrl_process_phy_test_request(ctrl)) { + DRM_ERROR("process phy_test_req failed\n"); + return; + } + } + + if (sink_request & DP_LINK_STATUS_UPDATED) { + if (dp_ctrl_link_maintenance(ctrl)) { + DRM_ERROR("LM failed: LINK_STATUS_UPDATED\n"); + return; + } + } + + if (sink_request & DP_TEST_LINK_TRAINING) { + dp_link_send_test_response(ctrl->link); + if (dp_ctrl_link_maintenance(ctrl)) { + DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); + return; + } + } +} + +int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) +{ + int rc = 0; + struct dp_ctrl_private *ctrl; + u32 rate = 0; + int link_train_max_retries = 5; + u32 const phy_cts_pixel_clk_khz = 148500; + struct dp_cr_status cr; + unsigned int training_step; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + rate = ctrl->panel->link_info.rate; + + dp_power_clk_enable(ctrl->power, DP_CORE_PM, true); + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + DRM_DEBUG_DP("using phy test link parameters\n"); + if (!ctrl->panel->dp_mode.drm_mode.clock) + ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz; + } else { + ctrl->link->link_params.rate = rate; + ctrl->link->link_params.num_lanes = + ctrl->panel->link_info.num_lanes; + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + } + + DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n", + ctrl->link->link_params.rate, + ctrl->link->link_params.num_lanes, 
ctrl->dp_ctrl.pixel_rate); + + rc = dp_ctrl_enable_mainlink_clocks(ctrl); + if (rc) + return rc; + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + while (--link_train_max_retries && + !atomic_read(&ctrl->dp_ctrl.aborted)) { + rc = dp_ctrl_reinitialize_mainlink(ctrl); + if (rc) { + DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", + rc); + break; + } + + training_step = DP_TRAINING_NONE; + rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step); + if (rc == 0) { + /* training completed successfully */ + break; + } else if (training_step == DP_TRAINING_1) { + /* link train_1 failed */ + rc = dp_ctrl_link_rate_down_shift(ctrl); + if (rc < 0) { /* already at RBR = 1.62G */ + if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) { + /* + * some lanes are ready, + * reduce lane number + */ + rc = dp_ctrl_link_lane_down_shift(ctrl); + if (rc < 0) { /* lane == 1 already */ + /* end with failure */ + break; + } + } else { + /* end with failure */ + break; /* lane == 1 already */ + } + } + } else if (training_step == DP_TRAINING_2) { + /* link train_2 failed, lower lane rate */ + rc = dp_ctrl_link_lane_down_shift(ctrl); + if (rc < 0) { + /* end with failure */ + break; /* lane == 1 already */ + } + } + } + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + return rc; + + /* stop sending the training pattern */ + dp_ctrl_clear_training_pattern(ctrl); + + /* + * keep transmitting the idle pattern until video is ready, + * so the main link does not lose sync + */ + if (rc == 0) /* link trained successfully */ + dp_ctrl_push_idle(dp_ctrl); + + return rc; +} + +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) +{ + u32 rate = 0; + int ret = 0; + bool mainlink_ready = false; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + rate = ctrl->panel->link_info.rate; + + ctrl->link->link_params.rate = rate; + ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes; + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + + DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n", + ctrl->link->link_params.rate, + ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate); + + if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */ + ret = dp_ctrl_enable_mainlink_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to start link clocks. ret=%d\n", ret); + goto end; + } + } + + ret = dp_ctrl_enable_stream_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret); + goto end; + } + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + dp_ctrl_send_phy_test_pattern(ctrl); + return 0; + } + + /* + * Set up transfer unit values and set controller state to send + * video. + */ + dp_ctrl_configure_source_params(ctrl); + + dp_catalog_ctrl_config_msa(ctrl->catalog, + ctrl->link->link_params.rate, + ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl)); + + reinit_completion(&ctrl->video_comp); + + dp_ctrl_setup_tr_unit(ctrl); + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + + ret = dp_ctrl_wait4video_ready(ctrl); + if (ret) + return ret; + + mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog); + DRM_DEBUG_DP("mainlink %s\n", mainlink_ready ? 
"READY" : "NOT READY"); + +end: + return ret; +} + +int dp_ctrl_off(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; + int ret = 0; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + dp_catalog_ctrl_reset(ctrl->catalog); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false); + if (ret) + DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret); + + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret); + } + + phy_power_off(phy); + phy_exit(phy); + + DRM_DEBUG_DP("DP off done\n"); + return ret; +} + +void dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + u32 isr; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog); + + if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) { + DRM_DEBUG_DP("dp_video_ready\n"); + complete(&ctrl->video_comp); + } + + if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) { + DRM_DEBUG_DP("idle_patterns_sent\n"); + complete(&ctrl->idle_comp); + } +} + +struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, + struct dp_panel *panel, struct drm_dp_aux *aux, + struct dp_power *power, struct dp_catalog *catalog, + struct dp_parser *parser) +{ + struct dp_ctrl_private *ctrl; + + if (!dev || !panel || !aux || + !link || !catalog) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + DRM_ERROR("Mem allocation failure\n"); + return ERR_PTR(-ENOMEM); + } + + init_completion(&ctrl->idle_comp); + init_completion(&ctrl->video_comp); + + /* in parameters */ + ctrl->parser = parser; + ctrl->panel = panel; + ctrl->power = power; + ctrl->aux = aux; + ctrl->link = link; + ctrl->catalog = catalog; + ctrl->dev = dev; + + return &ctrl->dp_ctrl; +} + +void dp_ctrl_put(struct dp_ctrl *dp_ctrl) +{ +} diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h new file mode 100644 index 000000000000..f60ba93c8678 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_CTRL_H_ +#define _DP_CTRL_H_ + +#include "dp_aux.h" +#include "dp_panel.h" +#include "dp_link.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" + +struct dp_ctrl { + bool orientation; + atomic_t aborted; + u32 pixel_rate; +}; + +int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip); +void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl); +int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); +int dp_ctrl_off(struct dp_ctrl *dp_ctrl); +void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl); +void dp_ctrl_isr(struct dp_ctrl *dp_ctrl); +void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl); +struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, + struct dp_panel *panel, struct drm_dp_aux *aux, + struct dp_power *power, struct dp_catalog *catalog, + struct dp_parser *parser); +void dp_ctrl_put(struct dp_ctrl *dp_ctrl); + +#endif /* _DP_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c new file mode 100644 index 000000000000..84670bcdcfea --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__ + +#include <linux/debugfs.h> +#include <drm/drm_connector.h> +#include <drm/drm_file.h> + +#include "dp_parser.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_ctrl.h" +#include "dp_debug.h" +#include "dp_display.h" + +#define DEBUG_NAME "msm_dp" + +struct dp_debug_private { + struct dentry *root; + + struct dp_usbpd *usbpd; + struct dp_link *link; + struct dp_panel *panel; + struct drm_connector **connector; + struct device *dev; + struct drm_device *drm_dev; + + struct dp_debug dp_debug; +}; + +static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len) +{ + if (rc >= *max_size) { + DRM_ERROR("buffer overflow\n"); + return -EINVAL; + } + *len += rc; + *max_size = SZ_4K - *len; + + return 0; +} + +static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, + size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, rc = 0; + u64 lclk = 0; + u32 max_size = SZ_4K; + u32 link_params_rate; + struct drm_display_mode *drm_mode; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + drm_mode = &debug->panel->dp_mode.drm_mode; + + rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tdp_panel\n\t\tmax_pclk_khz = %d\n", + debug->panel->max_pclk_khz); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tdrm_dp_link\n\t\trate = %u\n", + debug->panel->link_info.rate); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tnum_lanes = %u\n", + debug->panel->link_info.num_lanes); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tcapabilities = %lu\n", + debug->panel->link_info.capabilities); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tdp_panel_info:\n\t\tactive = %dx%d\n", + drm_mode->hdisplay, + drm_mode->vdisplay); + if 
(dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tback_porch = %dx%d\n", + drm_mode->htotal - drm_mode->hsync_end, + drm_mode->vtotal - drm_mode->vsync_end); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tfront_porch = %dx%d\n", + drm_mode->hsync_start - drm_mode->hdisplay, + drm_mode->vsync_start - drm_mode->vdisplay); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tsync_width = %dx%d\n", + drm_mode->hsync_end - drm_mode->hsync_start, + drm_mode->vsync_end - drm_mode->vsync_start); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tactive_low = %dx%d\n", + debug->panel->dp_mode.h_active_low, + debug->panel->dp_mode.v_active_low); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\th_skew = %d\n", + drm_mode->hskew); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\trefresh rate = %d\n", + drm_mode_vrefresh(drm_mode)); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tpixel clock khz = %d\n", + drm_mode->clock); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tbpp = %d\n", + debug->panel->dp_mode.bpp); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + /* Link Information */ + rc = snprintf(buf + len, max_size, + "\tdp_link:\n\t\ttest_requested = %d\n", + debug->link->sink_request); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tnum_lanes = %d\n", + debug->link->link_params.num_lanes); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + link_params_rate = debug->link->link_params.rate; + rc = snprintf(buf + len, max_size, + "\t\tbw_code = %d\n", + drm_dp_link_rate_to_bw_code(link_params_rate)); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + lclk = debug->link->link_params.rate * 1000; + rc = snprintf(buf + len, max_size, + "\t\tlclk = %lld\n", lclk); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tv_level = %d\n", + debug->link->phy_params.v_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\t\tp_level = %d\n", + debug->link->phy_params.p_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + if (copy_to_user(user_buff, buf, len)) + goto error; + + *ppos += len; + + kfree(buf); + return len; + error: + kfree(buf); + return -EINVAL; +} + +static int dp_test_data_show(struct seq_file *m, void *data) +{ + struct drm_device *dev; + struct dp_debug_private *debug; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + u32 bpc; + + debug = m->private; + dev = debug->drm_dev; + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) { + bpc = debug->link->test_video.test_bit_depth; + seq_printf(m, "hdisplay: %d\n", + 
debug->link->test_video.test_h_width); + seq_printf(m, "vdisplay: %d\n", + debug->link->test_video.test_v_height); + seq_printf(m, "bpc: %u\n", + dp_link_bit_depth_to_bpc(bpc)); + } else + seq_puts(m, "0"); + } + + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dp_test_data); + +static int dp_test_type_show(struct seq_file *m, void *data) +{ + struct dp_debug_private *debug = m->private; + struct drm_device *dev = debug->drm_dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) + seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN); + else + seq_puts(m, "0"); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dp_test_type); + +static ssize_t dp_test_active_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + char *input_buffer; + int status = 0; + struct dp_debug_private *debug; + struct drm_device *dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + int val = 0; + + debug = ((struct seq_file *)file->private_data)->private; + dev = debug->drm_dev; + + if (len == 0) + return 0; + + input_buffer = memdup_user_nul(ubuf, len); + if (IS_ERR(input_buffer)) + return PTR_ERR(input_buffer); + + DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) { + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) + break; + DRM_DEBUG_DRIVER("Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 here + */ + if (val == 1) + debug->panel->video_test = true; + else + debug->panel->video_test = false; + } + } + drm_connector_list_iter_end(&conn_iter); + kfree(input_buffer); + if (status < 0) + return status; + + *offp += len; + return len; +} + +static int dp_test_active_show(struct seq_file *m, void *data) +{ + struct dp_debug_private *debug = m->private; + struct drm_device *dev = debug->drm_dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + if (connector->status == connector_status_connected) { + if (debug->panel->video_test) + seq_puts(m, "1"); + else + seq_puts(m, "0"); + } else + seq_puts(m, "0"); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} + +static int dp_test_active_open(struct inode *inode, + struct file *file) +{ + return single_open(file, dp_test_active_show, + inode->i_private); +} + +static const struct file_operations dp_debug_fops = { + .open = simple_open, + .read = dp_debug_read_info, +}; + +static const struct file_operations test_active_fops = { + .owner = THIS_MODULE, + .open = dp_test_active_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = dp_test_active_write +}; + +static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) +{ + int rc = 0; + 
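+	/* All nodes are created under the DRM minor's debugfs root and +	 * dp_debug_deinit() later removes them recursively. +	 */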
struct dp_debug_private *debug = container_of(dp_debug, + struct dp_debug_private, dp_debug); + struct dentry *file; + struct dentry *test_active; + struct dentry *test_data, *test_type; + + file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root, + debug, &dp_debug_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DRM_ERROR("[%s] debugfs create file failed, rc=%d\n", + DEBUG_NAME, rc); + } + + test_active = debugfs_create_file("msm_dp_test_active", 0444, + minor->debugfs_root, + debug, &test_active_fops); + if (IS_ERR_OR_NULL(test_active)) { + rc = PTR_ERR(test_active); + DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n", + DEBUG_NAME, rc); + } + + test_data = debugfs_create_file("msm_dp_test_data", 0444, + minor->debugfs_root, + debug, &dp_test_data_fops); + if (IS_ERR_OR_NULL(test_data)) { + rc = PTR_ERR(test_data); + DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n", + DEBUG_NAME, rc); + } + + test_type = debugfs_create_file("msm_dp_test_type", 0444, + minor->debugfs_root, + debug, &dp_test_type_fops); + if (IS_ERR_OR_NULL(test_type)) { + rc = PTR_ERR(test_type); + DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n", + DEBUG_NAME, rc); + } + + debug->root = minor->debugfs_root; + + return rc; +} + +struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, + struct dp_usbpd *usbpd, struct dp_link *link, + struct drm_connector **connector, struct drm_minor *minor) +{ + int rc = 0; + struct dp_debug_private *debug; + struct dp_debug *dp_debug; + + if (!dev || !panel || !usbpd || !link) { + DRM_ERROR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL); + if (!debug) { + rc = -ENOMEM; + goto error; + } + + debug->dp_debug.debug_en = false; + debug->usbpd = usbpd; + debug->link = link; + debug->panel = panel; + debug->dev = dev; + debug->drm_dev = minor->dev; + debug->connector = connector; + + dp_debug = &debug->dp_debug; + dp_debug->vdisplay = 0; + dp_debug->hdisplay = 0; + dp_debug->vrefresh = 0; + + rc = dp_debug_init(dp_debug, minor); + if (rc) { + devm_kfree(dev, debug); + goto error; + } + + return dp_debug; + error: + return ERR_PTR(rc); +} + +static int dp_debug_deinit(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return -EINVAL; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + debugfs_remove_recursive(debug->root); + + return 0; +} + +void dp_debug_put(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + dp_debug_deinit(dp_debug); + + devm_kfree(debug->dev, debug); +} diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h new file mode 100644 index 000000000000..7eaedfbb149c --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_DEBUG_H_ +#define _DP_DEBUG_H_ + +#include "dp_panel.h" +#include "dp_link.h" + +/** + * struct dp_debug + * @debug_en: specifies whether debug mode is enabled + * @aspect_ratio: used to filter out aspect_ratio value + * @vdisplay: used to filter out vdisplay value + * @hdisplay: used to filter out hdisplay value + * @vrefresh: used to filter out vrefresh value + */ +struct dp_debug { + bool debug_en; + int aspect_ratio; + int vdisplay; + int hdisplay; + int vrefresh; +}; + +#if defined(CONFIG_DEBUG_FS) + +/** + * dp_debug_get() - configure and get the DisplayPort debug module data + * + * @dev: device instance of the caller + * @panel: instance of panel module + * @usbpd: instance of usbpd module + * @link: instance of link module + * @connector: double pointer to display connector + * @minor: pointer to drm minor number after device registration + * return: pointer to allocated debug module data + * + * This function sets up the debug module and provides a way + * for debugfs input to be communicated to existing modules + */ +struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, + struct dp_usbpd *usbpd, struct dp_link *link, + struct drm_connector **connector, + struct drm_minor *minor); + +/** + * dp_debug_put() + * + * Cleans up dp_debug instance + * + * @dp_debug: instance of dp_debug + */ +void dp_debug_put(struct dp_debug *dp_debug); + +#else + +static inline +struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, + struct dp_usbpd *usbpd, struct dp_link *link, + struct drm_connector **connector, struct drm_minor *minor) +{ + return ERR_PTR(-EINVAL); +} + +static inline void dp_debug_put(struct dp_debug *dp_debug) +{ +} + +#endif /* defined(CONFIG_DEBUG_FS) */ + +#endif /* _DP_DEBUG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c new file mode 100644 index 000000000000..e175aa3fd3a9 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -0,0 +1,1463 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/component.h> +#include <linux/of_irq.h> +#include <linux/delay.h> + +#include "msm_drv.h" +#include "msm_kms.h" +#include "dp_hpd.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_reg.h" +#include "dp_link.h" +#include "dp_panel.h" +#include "dp_ctrl.h" +#include "dp_display.h" +#include "dp_drm.h" +#include "dp_audio.h" +#include "dp_debug.h" + +static struct msm_dp *g_dp_display; +#define HPD_STRING_SIZE 30 + +enum { + ISR_DISCONNECTED, + ISR_CONNECT_PENDING, + ISR_CONNECTED, + ISR_HPD_REPLUG_COUNT, + ISR_IRQ_HPD_PULSE_COUNT, + ISR_HPD_LO_GLITH_COUNT, +}; + +/* event thread connection state */ +enum { + ST_DISCONNECTED, + ST_CONNECT_PENDING, + ST_CONNECTED, + ST_DISCONNECT_PENDING, + ST_SUSPEND_PENDING, + ST_SUSPENDED, +}; + +enum { + EV_NO_EVENT, + /* hpd events */ + EV_HPD_INIT_SETUP, + EV_HPD_PLUG_INT, + EV_IRQ_HPD_INT, + EV_HPD_REPLUG_INT, + EV_HPD_UNPLUG_INT, + EV_USER_NOTIFICATION, + EV_CONNECT_PENDING_TIMEOUT, + EV_DISCONNECT_PENDING_TIMEOUT, +}; + +#define EVENT_TIMEOUT (HZ/10) /* 100ms */ +#define DP_EVENT_Q_MAX 8 + +#define DP_TIMEOUT_5_SECOND (5000/EVENT_TIMEOUT) +#define DP_TIMEOUT_NONE 0 + +#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2) + +struct dp_event { + u32 event_id; + u32 data; + u32 delay; +}; + +struct dp_display_private { + char *name; + int irq; + + /* state variables */ + bool core_initialized; + bool hpd_irq_on; + bool audio_supported; + + struct platform_device *pdev; + struct dentry *root; + + struct dp_usbpd *usbpd; + struct dp_parser *parser; + struct dp_power *power; + struct dp_catalog *catalog; + struct drm_dp_aux *aux; + struct dp_link *link; + struct dp_panel *panel; + struct dp_ctrl *ctrl; + struct dp_debug *debug; + + struct dp_usbpd_cb usbpd_cb; + struct dp_display_mode dp_mode; + struct msm_dp dp_display; + + /* wait for audio signaling */ + struct completion audio_comp; + + /* event related only access by event thread */ + struct mutex event_mutex; + wait_queue_head_t event_q; + atomic_t hpd_state; + u32 event_pndx; + u32 event_gndx; + struct dp_event event_list[DP_EVENT_Q_MAX]; + spinlock_t event_lock; + + struct completion resume_comp; + + struct dp_audio *audio; +}; + +static const struct of_device_id dp_dt_match[] = { + {.compatible = "qcom,sc7180-dp"}, + {} +}; + +static int dp_add_event(struct dp_display_private *dp_priv, u32 event, + u32 data, u32 delay) +{ + unsigned long flag; + struct dp_event *todo; + int pndx; + + spin_lock_irqsave(&dp_priv->event_lock, flag); + pndx = dp_priv->event_pndx + 1; + pndx %= DP_EVENT_Q_MAX; + if (pndx == dp_priv->event_gndx) { + pr_err("event_q is full: pndx=%d gndx=%d\n", + dp_priv->event_pndx, dp_priv->event_gndx); + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + return -EPERM; + } + todo = &dp_priv->event_list[dp_priv->event_pndx++]; + dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo->event_id = event; + todo->data = data; + todo->delay = delay; + wake_up(&dp_priv->event_q); + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + return 0; +} + +static int dp_del_event(struct dp_display_private *dp_priv, u32 event) +{ + unsigned long flag; + struct dp_event *todo; + u32 gndx; + + spin_lock_irqsave(&dp_priv->event_lock, flag); + if (dp_priv->event_pndx == dp_priv->event_gndx) { + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + return -ENOENT; + } + + gndx = dp_priv->event_gndx; + while 
(dp_priv->event_pndx != gndx) { + todo = &dp_priv->event_list[gndx]; + if (todo->event_id == event) { + todo->event_id = EV_NO_EVENT; /* deleted */ + todo->delay = 0; + } + gndx++; + gndx %= DP_EVENT_Q_MAX; + } + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + return 0; +} + +void dp_display_signal_audio_complete(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + complete_all(&dp->audio_comp); +} + +static int dp_display_bind(struct device *dev, struct device *master, + void *data) +{ + int rc = 0; + struct dp_display_private *dp; + struct drm_device *drm; + struct msm_drm_private *priv; + + drm = dev_get_drvdata(master); + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("DP driver bind failed. Invalid driver data\n"); + return -EINVAL; + } + + dp->dp_display.drm_dev = drm; + priv = drm->dev_private; + priv->dp = &(dp->dp_display); + + rc = dp->parser->parse(dp->parser); + if (rc) { + DRM_ERROR("device tree parsing failed\n"); + goto end; + } + + rc = dp_aux_register(dp->aux); + if (rc) { + DRM_ERROR("DRM DP AUX register failed\n"); + goto end; + } + + rc = dp_power_client_init(dp->power); + if (rc) { + DRM_ERROR("Power client create failed\n"); + goto end; + } + + rc = dp_register_audio_driver(dev, dp->audio); + if (rc) + DRM_ERROR("Audio registration Dp failed\n"); + +end: + return rc; +} + +static void dp_display_unbind(struct device *dev, struct device *master, + void *data) +{ + struct dp_display_private *dp; + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("Invalid DP driver data\n"); + return; + } + + dp_power_client_deinit(dp->power); + dp_aux_unregister(dp->aux); + priv->dp = NULL; +} + +static const struct component_ops dp_display_comp_ops = { + .bind = dp_display_bind, + .unbind = dp_display_unbind, +}; + +static bool dp_display_is_ds_bridge(struct dp_panel *panel) +{ + return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT); +} + +static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) +{ + return dp_display_is_ds_bridge(dp->panel) && + (dp->link->sink_count == 0); +} + +static void dp_display_send_hpd_event(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + struct drm_connector *connector; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + connector = dp->dp_display.connector; + drm_helper_hpd_irq_event(connector->dev); +} + +static int dp_display_send_hpd_notification(struct dp_display_private *dp, + bool hpd) +{ + static bool encoder_mode_set; + struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private; + struct msm_kms *kms = priv->kms; + + if ((hpd && dp->dp_display.is_connected) || + (!hpd && !dp->dp_display.is_connected)) { + DRM_DEBUG_DP("HPD already %s\n", (hpd ? 
"on" : "off")); + return 0; + } + + /* reset video pattern flag on disconnect */ + if (!hpd) + dp->panel->video_test = false; + + dp->dp_display.is_connected = hpd; + + if (dp->dp_display.is_connected && dp->dp_display.encoder + && !encoder_mode_set + && kms->funcs->set_encoder_mode) { + kms->funcs->set_encoder_mode(kms, + dp->dp_display.encoder, false); + DRM_DEBUG_DP("set_encoder_mode() Completed\n"); + encoder_mode_set = true; + } + + dp_display_send_hpd_event(&dp->dp_display); + + return 0; +} + +static int dp_display_process_hpd_high(struct dp_display_private *dp) +{ + int rc = 0; + struct edid *edid; + + dp->panel->max_dp_lanes = dp->parser->max_dp_lanes; + + rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector); + if (rc) + goto end; + + dp_link_process_request(dp->link); + + edid = dp->panel->edid; + + dp->audio_supported = drm_detect_monitor_audio(edid); + dp_panel_handle_sink_request(dp->panel); + + dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ; + dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes; + + rc = dp_ctrl_on_link(dp->ctrl); + if (rc) { + DRM_ERROR("failed to complete DP link training\n"); + goto end; + } + + dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); + + +end: + return rc; +} + +static void dp_display_host_init(struct dp_display_private *dp) +{ + bool flip = false; + + if (dp->core_initialized) { + DRM_DEBUG_DP("DP core already initialized\n"); + return; + } + + if (dp->usbpd->orientation == ORIENTATION_CC2) + flip = true; + + dp_power_init(dp->power, flip); + dp_ctrl_host_init(dp->ctrl, flip); + dp_aux_init(dp->aux); + dp->core_initialized = true; +} + +static int dp_display_usbpd_configure_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + DRM_ERROR("invalid dev\n"); + rc = -EINVAL; + goto end; + } + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("no driver data found\n"); + rc = -ENODEV; + goto end; + } + + dp_display_host_init(dp); + + /* + * set sink to normal operation mode -- D0 + * before dpcd read + */ + dp_link_psm_config(dp->link, &dp->panel->link_info, false); + rc = dp_display_process_hpd_high(dp); +end: + return rc; +} + +static int dp_display_usbpd_disconnect_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + DRM_ERROR("invalid dev\n"); + rc = -EINVAL; + return rc; + } + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("no driver data found\n"); + rc = -ENODEV; + return rc; + } + + dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + + return rc; +} + +static void dp_display_handle_video_request(struct dp_display_private *dp) +{ + if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { + dp->panel->video_test = true; + dp_link_send_test_response(dp->link); + } +} + +static int dp_display_handle_irq_hpd(struct dp_display_private *dp) +{ + u32 sink_request; + + sink_request = dp->link->sink_request; + + if (sink_request & DS_PORT_STATUS_CHANGED) { + dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + if (dp_display_is_sink_count_zero(dp)) { + DRM_DEBUG_DP("sink count is zero, nothing to do\n"); + return 0; + } + + return dp_display_process_hpd_high(dp); + } + + dp_ctrl_handle_sink_request(dp->ctrl); + + if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) + dp_display_handle_video_request(dp); + + return 0; +} + +static int dp_display_usbpd_attention_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + 
DRM_ERROR("invalid dev\n"); + return -EINVAL; + } + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + if (!dp) { + DRM_ERROR("no driver data found\n"); + return -ENODEV; + } + + /* check for any test request issued by sink */ + rc = dp_link_process_request(dp->link); + if (!rc) + dp_display_handle_irq_hpd(dp); + + return rc; +} + +static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) +{ + struct dp_usbpd *hpd = dp->usbpd; + u32 state; + u32 tout = DP_TIMEOUT_5_SECOND; + int ret; + + if (!hpd) + return 0; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_CONNECT_PENDING || state == ST_CONNECTED) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_DISCONNECT_PENDING) { + /* wait until ST_DISCONNECTED */ + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */ + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_SUSPENDED) + tout = DP_TIMEOUT_NONE; + + atomic_set(&dp->hpd_state, ST_CONNECT_PENDING); + + hpd->hpd_high = 1; + + ret = dp_display_usbpd_configure_cb(&dp->pdev->dev); + if (ret) { /* failed */ + hpd->hpd_high = 0; + atomic_set(&dp->hpd_state, ST_DISCONNECTED); + } + + /* start sanity checking */ + dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout); + + mutex_unlock(&dp->event_mutex); + + /* uevent will complete connection part */ + return 0; +}; + +static int dp_display_enable(struct dp_display_private *dp, u32 data); +static int dp_display_disable(struct dp_display_private *dp, u32 data); + +static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data) +{ + u32 state; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_CONNECT_PENDING) { + dp_display_enable(dp, 0); + atomic_set(&dp->hpd_state, ST_CONNECTED); + } + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static void dp_display_handle_plugged_change(struct msm_dp *dp_display, + bool plugged) +{ + if (dp_display->plugged_cb && dp_display->codec_dev) + dp_display->plugged_cb(dp_display->codec_dev, plugged); +} + +static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) +{ + struct dp_usbpd *hpd = dp->usbpd; + u32 state; + + if (!hpd) + return 0; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_CONNECT_PENDING) { + /* wait until CONNECTED */ + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 1); /* delay = 1 */ + mutex_unlock(&dp->event_mutex); + return 0; + } + + atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING); + + /* disable HPD plug interrupt until disconnect is done */ + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK + | DP_DP_IRQ_HPD_INT_MASK, false); + + hpd->hpd_high = 0; + + /* + * We don't need separate work for disconnect as + * connect/attention interrupts are disabled + */ + dp_display_usbpd_disconnect_cb(&dp->pdev->dev); + + /* start sanity checking */ + dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); + + /* signal the disconnect event early to ensure proper teardown */ + dp_display_handle_plugged_change(g_dp_display, false); + reinit_completion(&dp->audio_comp); + + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK | + 
DP_DP_IRQ_HPD_INT_MASK, true); + + /* uevent will complete disconnection part */ + mutex_unlock(&dp->event_mutex); + return 0; +} + +static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data) +{ + u32 state; + + mutex_lock(&dp->event_mutex); + + state = atomic_read(&dp->hpd_state); + if (state == ST_DISCONNECT_PENDING) { + dp_display_disable(dp, 0); + atomic_set(&dp->hpd_state, ST_DISCONNECTED); + } + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) +{ + u32 state; + + mutex_lock(&dp->event_mutex); + + /* irq_hpd can happen at either connected or disconnected state */ + state = atomic_read(&dp->hpd_state); + if (state == ST_SUSPEND_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + dp_display_usbpd_attention_cb(&dp->pdev->dev); + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +{ + dp_debug_put(dp->debug); + dp_ctrl_put(dp->ctrl); + dp_panel_put(dp->panel); + dp_aux_put(dp->aux); + dp_audio_put(dp->audio); +} + +static int dp_init_sub_modules(struct dp_display_private *dp) +{ + int rc = 0; + struct device *dev = &dp->pdev->dev; + struct dp_usbpd_cb *cb = &dp->usbpd_cb; + struct dp_panel_in panel_in = { + .dev = dev, + }; + + /* Callback APIs used for cable status change event */ + cb->configure = dp_display_usbpd_configure_cb; + cb->disconnect = dp_display_usbpd_disconnect_cb; + cb->attention = dp_display_usbpd_attention_cb; + + dp->usbpd = dp_hpd_get(dev, cb); + if (IS_ERR(dp->usbpd)) { + rc = PTR_ERR(dp->usbpd); + DRM_ERROR("failed to initialize hpd, rc = %d\n", rc); + dp->usbpd = NULL; + goto error; + } + + dp->parser = dp_parser_get(dp->pdev); + if (IS_ERR(dp->parser)) { + rc = PTR_ERR(dp->parser); + DRM_ERROR("failed to initialize parser, rc = %d\n", rc); + dp->parser = NULL; + goto error; + } + + dp->catalog = dp_catalog_get(dev, &dp->parser->io); + if (IS_ERR(dp->catalog)) { + rc = PTR_ERR(dp->catalog); + DRM_ERROR("failed to initialize catalog, rc = %d\n", rc); + dp->catalog = NULL; + goto error; + } + + dp->power = dp_power_get(dp->parser); + if (IS_ERR(dp->power)) { + rc = PTR_ERR(dp->power); + DRM_ERROR("failed to initialize power, rc = %d\n", rc); + dp->power = NULL; + goto error; + } + + dp->aux = dp_aux_get(dev, dp->catalog); + if (IS_ERR(dp->aux)) { + rc = PTR_ERR(dp->aux); + DRM_ERROR("failed to initialize aux, rc = %d\n", rc); + dp->aux = NULL; + goto error; + } + + dp->link = dp_link_get(dev, dp->aux); + if (IS_ERR(dp->link)) { + rc = PTR_ERR(dp->link); + DRM_ERROR("failed to initialize link, rc = %d\n", rc); + dp->link = NULL; + goto error_link; + } + + panel_in.aux = dp->aux; + panel_in.catalog = dp->catalog; + panel_in.link = dp->link; + + dp->panel = dp_panel_get(&panel_in); + if (IS_ERR(dp->panel)) { + rc = PTR_ERR(dp->panel); + DRM_ERROR("failed to initialize panel, rc = %d\n", rc); + dp->panel = NULL; + goto error_link; + } + + dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, + dp->power, dp->catalog, dp->parser); + if (IS_ERR(dp->ctrl)) { + rc = PTR_ERR(dp->ctrl); + DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc); + dp->ctrl = NULL; + goto error_ctrl; + } + + dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog); + if (IS_ERR(dp->audio)) { + rc = PTR_ERR(dp->audio); + pr_err("failed to initialize audio, rc = %d\n", rc); + dp->audio = NULL; + goto error_audio; + } + + return rc; + +error_audio: + dp_ctrl_put(dp->ctrl); +error_ctrl: + dp_panel_put(dp->panel); 
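+	/*
+	 * Unwind in reverse order of creation: the labels above drop the
+	 * ctrl and panel references, and error_link below drops the aux
+	 * reference. The remaining sub-modules take no explicit put on
+	 * this path.
+	 */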
+error_link: + dp_aux_put(dp->aux); +error: + return rc; +} + +static int dp_display_set_mode(struct msm_dp *dp_display, + struct dp_display_mode *mode) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp->panel->dp_mode.drm_mode = mode->drm_mode; + dp->panel->dp_mode.bpp = mode->bpp; + dp->panel->dp_mode.capabilities = mode->capabilities; + dp_panel_init_panel_info(dp->panel); + return 0; +} + +static int dp_display_prepare(struct msm_dp *dp) +{ + return 0; +} + +static int dp_display_enable(struct dp_display_private *dp, u32 data) +{ + int rc = 0; + struct msm_dp *dp_display; + + dp_display = g_dp_display; + + if (dp_display->power_on) { + DRM_DEBUG_DP("Link already setup, return\n"); + return 0; + } + + rc = dp_ctrl_on_stream(dp->ctrl); + if (!rc) + dp_display->power_on = true; + + /* complete resume_comp regardless it is armed or not */ + complete(&dp->resume_comp); + return rc; +} + +static int dp_display_post_enable(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + u32 rate; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + rate = dp->link->link_params.rate; + + if (dp->audio_supported) { + dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate); + dp->audio->lane_count = dp->link->link_params.num_lanes; + } + + /* signal the connect event late to synchronize video and display */ + dp_display_handle_plugged_change(dp_display, true); + return 0; +} + +static int dp_display_disable(struct dp_display_private *dp, u32 data) +{ + struct msm_dp *dp_display; + + dp_display = g_dp_display; + + if (!dp_display->power_on) + return -EINVAL; + + /* wait only if audio was enabled */ + if (dp_display->audio_enabled) { + if (!wait_for_completion_timeout(&dp->audio_comp, + HZ * 5)) + DRM_ERROR("audio comp timeout\n"); + } + + dp_display->audio_enabled = false; + + dp_ctrl_off(dp->ctrl); + + dp->core_initialized = false; + + dp_display->power_on = false; + + return 0; +} + +static int dp_display_unprepare(struct msm_dp *dp) +{ + return 0; +} + +int dp_display_set_plugged_cb(struct msm_dp *dp_display, + hdmi_codec_plugged_cb fn, struct device *codec_dev) +{ + bool plugged; + + dp_display->plugged_cb = fn; + dp_display->codec_dev = codec_dev; + plugged = dp_display->is_connected; + dp_display_handle_plugged_change(dp_display, plugged); + + return 0; +} + +int dp_display_validate_mode(struct msm_dp *dp, u32 mode_pclk_khz) +{ + const u32 num_components = 3, default_bpp = 24; + struct dp_display_private *dp_display; + struct dp_link_info *link_info; + u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; + + if (!dp || !mode_pclk_khz || !dp->connector) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + link_info = &dp_display->panel->link_info; + + mode_bpp = dp->connector->display_info.bpc * num_components; + if (!mode_bpp) + mode_bpp = default_bpp; + + mode_bpp = dp_panel_get_mode_bpp(dp_display->panel, + mode_bpp, mode_pclk_khz); + + mode_rate_khz = mode_pclk_khz * mode_bpp; + supported_rate_khz = link_info->num_lanes * link_info->rate * 8; + + if (mode_rate_khz > supported_rate_khz) + return MODE_BAD; + + return MODE_OK; +} + +int dp_display_get_modes(struct msm_dp *dp, + struct dp_display_mode *dp_mode) +{ + struct dp_display_private *dp_display; + int ret = 0; + + if (!dp) { + DRM_ERROR("invalid params\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + ret = 
dp_panel_get_modes(dp_display->panel, + dp->connector, dp_mode); + if (dp_mode->drm_mode.clock) + dp->max_pclk_khz = dp_mode->drm_mode.clock; + return ret; +} + +bool dp_display_check_video_test(struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + return dp_display->panel->video_test; +} + +int dp_display_get_test_bpp(struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + if (!dp) { + DRM_ERROR("invalid params\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + return dp_link_bit_depth_to_bpp( + dp_display->link->test_video.test_bit_depth); +} + +static void dp_display_config_hpd(struct dp_display_private *dp) +{ + + dp_display_host_init(dp); + dp_catalog_ctrl_hpd_config(dp->catalog); + + /* Enable interrupt first time + * we are leaving dp clocks on during disconnect + * and never disable interrupt + */ + enable_irq(dp->irq); +} + +static int hpd_event_thread(void *data) +{ + struct dp_display_private *dp_priv; + unsigned long flag; + struct dp_event *todo; + int timeout_mode = 0; + + dp_priv = (struct dp_display_private *)data; + + while (1) { + if (timeout_mode) { + wait_event_timeout(dp_priv->event_q, + (dp_priv->event_pndx == dp_priv->event_gndx), + EVENT_TIMEOUT); + } else { + wait_event_interruptible(dp_priv->event_q, + (dp_priv->event_pndx != dp_priv->event_gndx)); + } + spin_lock_irqsave(&dp_priv->event_lock, flag); + todo = &dp_priv->event_list[dp_priv->event_gndx]; + if (todo->delay) { + struct dp_event *todo_next; + + dp_priv->event_gndx++; + dp_priv->event_gndx %= DP_EVENT_Q_MAX; + + /* re enter delay event into q */ + todo_next = &dp_priv->event_list[dp_priv->event_pndx++]; + dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo_next->event_id = todo->event_id; + todo_next->data = todo->data; + todo_next->delay = todo->delay - 1; + + /* clean up older event */ + todo->event_id = EV_NO_EVENT; + todo->delay = 0; + + /* switch to timeout mode */ + timeout_mode = 1; + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + continue; + } + + /* timeout with no events in q */ + if (dp_priv->event_pndx == dp_priv->event_gndx) { + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + continue; + } + + dp_priv->event_gndx++; + dp_priv->event_gndx %= DP_EVENT_Q_MAX; + timeout_mode = 0; + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + switch (todo->event_id) { + case EV_HPD_INIT_SETUP: + dp_display_config_hpd(dp_priv); + break; + case EV_HPD_PLUG_INT: + dp_hpd_plug_handle(dp_priv, todo->data); + break; + case EV_HPD_UNPLUG_INT: + dp_hpd_unplug_handle(dp_priv, todo->data); + break; + case EV_IRQ_HPD_INT: + dp_irq_hpd_handle(dp_priv, todo->data); + break; + case EV_HPD_REPLUG_INT: + /* do nothing */ + break; + case EV_USER_NOTIFICATION: + dp_display_send_hpd_notification(dp_priv, + todo->data); + break; + case EV_CONNECT_PENDING_TIMEOUT: + dp_connect_pending_timeout(dp_priv, + todo->data); + break; + case EV_DISCONNECT_PENDING_TIMEOUT: + dp_disconnect_pending_timeout(dp_priv, + todo->data); + break; + default: + break; + } + } + + return 0; +} + +static void dp_hpd_event_setup(struct dp_display_private *dp_priv) +{ + init_waitqueue_head(&dp_priv->event_q); + spin_lock_init(&dp_priv->event_lock); + + kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); +} + +static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) +{ + struct dp_display_private *dp = dev_id; + irqreturn_t ret = IRQ_HANDLED; + u32 hpd_isr_status; + + if (!dp) { + 
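+		/*
+		 * dev_id is wired up by dp_display_request_irq(); a missing
+		 * private pointer means the interrupt is not ours.
+		 */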
DRM_ERROR("invalid data\n"); + return IRQ_NONE; + } + + hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); + + if (hpd_isr_status & 0x0F) { + /* hpd related interrupts */ + if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK || + hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + } + + if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { + /* delete connect pending event first */ + dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT); + dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); + } + + if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) + dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0); + + if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + } + + /* DP controller isr */ + dp_ctrl_isr(dp->ctrl); + + /* DP aux isr */ + dp_aux_isr(dp->aux); + + return ret; +} + +int dp_display_request_irq(struct msm_dp *dp_display) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0); + if (dp->irq < 0) { + rc = dp->irq; + DRM_ERROR("failed to get irq: %d\n", rc); + return rc; + } + + rc = devm_request_irq(&dp->pdev->dev, dp->irq, + dp_display_irq_handler, + IRQF_TRIGGER_HIGH, "dp_display_isr", dp); + if (rc < 0) { + DRM_ERROR("failed to request IRQ%u: %d\n", + dp->irq, rc); + return rc; + } + disable_irq(dp->irq); + + return 0; +} + +static int dp_display_probe(struct platform_device *pdev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!pdev || !pdev->dev.of_node) { + DRM_ERROR("pdev not found\n"); + return -ENODEV; + } + + dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); + if (!dp) + return -ENOMEM; + + dp->pdev = pdev; + dp->name = "drm_dp"; + + rc = dp_init_sub_modules(dp); + if (rc) { + DRM_ERROR("init sub module failed\n"); + return -EPROBE_DEFER; + } + + mutex_init(&dp->event_mutex); + + init_completion(&dp->resume_comp); + + g_dp_display = &dp->dp_display; + + /* Store DP audio handle inside DP display */ + g_dp_display->dp_audio = dp->audio; + + init_completion(&dp->audio_comp); + + platform_set_drvdata(pdev, g_dp_display); + + rc = component_add(&pdev->dev, &dp_display_comp_ops); + if (rc) { + DRM_ERROR("component add failed, rc=%d\n", rc); + dp_display_deinit_sub_modules(dp); + } + + return rc; +} + +static int dp_display_remove(struct platform_device *pdev) +{ + struct dp_display_private *dp; + + dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + + dp_display_deinit_sub_modules(dp); + + component_del(&pdev->dev, &dp_display_comp_ops); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int dp_pm_resume(struct device *dev) +{ + return 0; +} + +static int dp_pm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dp_display_private *dp = platform_get_drvdata(pdev); + + if (!dp) { + DRM_ERROR("DP driver bind failed. 
Invalid driver data\n"); + return -EINVAL; + } + + atomic_set(&dp->hpd_state, ST_SUSPENDED); + + return 0; +} + +static int dp_pm_prepare(struct device *dev) +{ + return 0; +} + +static void dp_pm_complete(struct device *dev) +{ + +} + +static const struct dev_pm_ops dp_pm_ops = { + .suspend = dp_pm_suspend, + .resume = dp_pm_resume, + .prepare = dp_pm_prepare, + .complete = dp_pm_complete, +}; + +static struct platform_driver dp_display_driver = { + .probe = dp_display_probe, + .remove = dp_display_remove, + .driver = { + .name = "msm-dp-display", + .of_match_table = dp_dt_match, + .suppress_bind_attrs = true, + .pm = &dp_pm_ops, + }, +}; + +int __init msm_dp_register(void) +{ + int ret; + + ret = platform_driver_register(&dp_display_driver); + if (ret) + DRM_ERROR("Dp display driver register failed"); + + return ret; +} + +void __exit msm_dp_unregister(void) +{ + platform_driver_unregister(&dp_display_driver); +} + +void msm_dp_irq_postinstall(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + if (!dp_display) + return; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp_hpd_event_setup(dp); + + dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100); +} + +void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) +{ + struct dp_display_private *dp; + struct device *dev; + int rc; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dev = &dp->pdev->dev; + + dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd, + dp->link, &dp->dp_display.connector, + minor); + if (IS_ERR(dp->debug)) { + rc = PTR_ERR(dp->debug); + DRM_ERROR("failed to initialize debug, rc = %d\n", rc); + dp->debug = NULL; + } +} + +int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct msm_drm_private *priv; + int ret; + + if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev)) + return -EINVAL; + + priv = dev->dev_private; + dp_display->drm_dev = dev; + + ret = dp_display_request_irq(dp_display); + if (ret) { + DRM_ERROR("request_irq failed, ret=%d\n", ret); + return ret; + } + + dp_display->encoder = encoder; + + dp_display->connector = dp_drm_connector_init(dp_display); + if (IS_ERR(dp_display->connector)) { + ret = PTR_ERR(dp_display->connector); + DRM_DEV_ERROR(dev->dev, + "failed to create dp connector: %d\n", ret); + dp_display->connector = NULL; + return ret; + } + + priv->connectors[priv->num_connectors++] = dp_display->connector; + return 0; +} + +static int dp_display_wait4resume_done(struct dp_display_private *dp) +{ + int ret = 0; + + reinit_completion(&dp->resume_comp); + if (!wait_for_completion_timeout(&dp->resume_comp, + WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) { + DRM_ERROR("wait4resume_done timedout\n"); + ret = -ETIMEDOUT; + } + return ret; +} + +int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) +{ + int rc = 0; + struct dp_display_private *dp_display; + u32 state; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + if (!dp_display->dp_mode.drm_mode.clock) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + mutex_lock(&dp_display->event_mutex); + + rc = dp_display_set_mode(dp, &dp_display->dp_mode); + if (rc) { + DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); + mutex_unlock(&dp_display->event_mutex); + return rc; + } + + rc = dp_display_prepare(dp); + if (rc) { + DRM_ERROR("DP display prepare failed, rc=%d\n", rc); + mutex_unlock(&dp_display->event_mutex); + return rc; + } + + state = 
atomic_read(&dp_display->hpd_state); + if (state == ST_SUSPENDED) { + /* start link training */ + dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0); + mutex_unlock(&dp_display->event_mutex); + + /* wait until dp interface is up */ + goto resume_done; + } + + dp_display_enable(dp_display, 0); + + rc = dp_display_post_enable(dp); + if (rc) { + DRM_ERROR("DP display post enable failed, rc=%d\n", rc); + dp_display_disable(dp_display, 0); + dp_display_unprepare(dp); + } + + dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT); + + if (state == ST_SUSPEND_PENDING) + dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); + + /* completed connection */ + atomic_set(&dp_display->hpd_state, ST_CONNECTED); + + mutex_unlock(&dp_display->event_mutex); + + return rc; + +resume_done: + dp_display_wait4resume_done(dp_display); + return rc; +} + +int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + dp_ctrl_push_idle(dp_display->ctrl); + + return 0; +} + +int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder) +{ + int rc = 0; + u32 state; + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + mutex_lock(&dp_display->event_mutex); + + dp_display_disable(dp_display, 0); + + rc = dp_display_unprepare(dp); + if (rc) + DRM_ERROR("DP display unprepare failed, rc=%d\n", rc); + + dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT); + + state = atomic_read(&dp_display->hpd_state); + if (state == ST_DISCONNECT_PENDING) { + /* completed disconnection */ + atomic_set(&dp_display->hpd_state, ST_DISCONNECTED); + } else { + atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING); + } + + mutex_unlock(&dp_display->event_mutex); + return rc; +} + +void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode)); + + if (dp_display_check_video_test(dp)) + dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp); + else /* Default num_components per pixel = 3 */ + dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3; + + if (!dp_display->dp_mode.bpp) + dp_display->dp_mode.bpp = 24; /* Default bpp */ + + drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode); + + dp_display->dp_mode.v_active_low = + !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); + + dp_display->dp_mode.h_active_low = + !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); +} diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h new file mode 100644 index 000000000000..6092ba1ed85e --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_DISPLAY_H_ +#define _DP_DISPLAY_H_ + +#include "dp_panel.h" +#include <sound/hdmi-codec.h> + +struct msm_dp { + struct drm_device *drm_dev; + struct device *codec_dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + bool is_connected; + bool audio_enabled; + bool power_on; + + hdmi_codec_plugged_cb plugged_cb; + + u32 max_pclk_khz; + + u32 max_dp_lanes; + struct dp_audio *dp_audio; +}; + +int dp_display_set_plugged_cb(struct msm_dp *dp_display, + hdmi_codec_plugged_cb fn, struct device *codec_dev); +int dp_display_validate_mode(struct msm_dp *dp_display, u32 mode_pclk_khz); +int dp_display_get_modes(struct msm_dp *dp_display, + struct dp_display_mode *dp_mode); +int dp_display_request_irq(struct msm_dp *dp_display); +bool dp_display_check_video_test(struct msm_dp *dp_display); +int dp_display_get_test_bpp(struct msm_dp *dp_display); +void dp_display_signal_audio_complete(struct msm_dp *dp_display); + +#endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c new file mode 100644 index 000000000000..764f4b81017e --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_crtc.h> + +#include "msm_drv.h" +#include "msm_kms.h" +#include "dp_drm.h" + +struct dp_connector { + struct drm_connector base; + struct msm_dp *dp_display; +}; +#define to_dp_connector(x) container_of(x, struct dp_connector, base) + +/** + * dp_connector_detect - callback to determine if connector is connected + * @conn: Pointer to drm connector structure + * @force: Force detect setting from drm framework + * Returns: Connector 'is connected' status + */ +static enum drm_connector_status dp_connector_detect(struct drm_connector *conn, + bool force) +{ + struct msm_dp *dp; + + dp = to_dp_connector(conn)->dp_display; + + DRM_DEBUG_DP("is_connected = %s\n", + (dp->is_connected) ? "true" : "false"); + + return (dp->is_connected) ? connector_status_connected : + connector_status_disconnected; +} + +/** + * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add() + * @connector: Pointer to drm connector structure + * Returns: Number of modes added + */ +static int dp_connector_get_modes(struct drm_connector *connector) +{ + int rc = 0; + struct msm_dp *dp; + struct dp_display_mode *dp_mode = NULL; + struct drm_display_mode *m, drm_mode; + + if (!connector) + return 0; + + dp = to_dp_connector(connector)->dp_display; + + dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL); + if (!dp_mode) + return 0; + + /* pluggable case assumes EDID is read when HPD */ + if (dp->is_connected) { + /* + *The get_modes() function might return one mode that is stored + * in dp_mode when compliance test is in progress. 
If not, the
+		 * return value is equal to the total number of modes supported
+		 * by the sink.
+		 */
+		rc = dp_display_get_modes(dp, dp_mode);
+		if (rc <= 0) {
+			DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
+			kfree(dp_mode);
+			return rc;
+		}
+		if (dp_mode->drm_mode.clock) { /* valid DP mode */
+			memset(&drm_mode, 0x0, sizeof(drm_mode));
+			drm_mode_copy(&drm_mode, &dp_mode->drm_mode);
+			m = drm_mode_duplicate(connector->dev, &drm_mode);
+			if (!m) {
+				DRM_ERROR("failed to add mode %ux%u\n",
+						drm_mode.hdisplay,
+						drm_mode.vdisplay);
+				kfree(dp_mode);
+				return 0;
+			}
+			drm_mode_probed_add(connector, m);
+		}
+	} else {
+		DRM_DEBUG_DP("No sink connected\n");
+	}
+	kfree(dp_mode);
+	return rc;
+}
+
+/**
+ * dp_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * Returns: Validity status for specified mode
+ */
+static enum drm_mode_status dp_connector_mode_valid(
+		struct drm_connector *connector,
+		struct drm_display_mode *mode)
+{
+	struct msm_dp *dp_disp;
+
+	dp_disp = to_dp_connector(connector)->dp_display;
+
+	if ((dp_disp->max_pclk_khz <= 0) ||
+			(dp_disp->max_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) ||
+			(mode->clock > dp_disp->max_pclk_khz))
+		return MODE_BAD;
+
+	return dp_display_validate_mode(dp_disp, mode->clock);
+}
+
+static const struct drm_connector_funcs dp_connector_funcs = {
+	.detect = dp_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_connector_cleanup,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dp_connector_helper_funcs = {
+	.get_modes = dp_connector_get_modes,
+	.mode_valid = dp_connector_mode_valid,
+};
+
+/* connector initialization */
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
+{
+	struct drm_connector *connector = NULL;
+	struct dp_connector *dp_connector;
+	int ret;
+
+	dp_connector = devm_kzalloc(dp_display->drm_dev->dev,
+					sizeof(*dp_connector),
+					GFP_KERNEL);
+	if (!dp_connector)
+		return ERR_PTR(-ENOMEM);
+
+	dp_connector->dp_display = dp_display;
+
+	connector = &dp_connector->base;
+
+	ret = drm_connector_init(dp_display->drm_dev, connector,
+			&dp_connector_funcs,
+			DRM_MODE_CONNECTOR_DisplayPort);
+	if (ret)
+		return ERR_PTR(ret);
+
+	drm_connector_helper_add(connector, &dp_connector_helper_funcs);
+
+	/*
+	 * Enable HPD so that hpd events are handled when the cable is
+	 * connected.
+	 */
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	drm_connector_attach_encoder(connector, dp_display->encoder);
+
+	return connector;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644
index 000000000000..c27bfceefdf0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
+
+#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
new file mode 100644
index 000000000000..5b8fe32022b5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "dp_hpd.h"
+
+/* DP specific VDM commands */
+#define DP_USBPD_VDM_STATUS	0x10
+#define DP_USBPD_VDM_CONFIGURE	0x11
+
+/* USBPD-TypeC specific Macros */
+#define VDM_VERSION		0x0
+#define USB_C_DP_SID		0xFF01
+
+struct dp_hpd_private {
+	struct device *dev;
+	struct dp_usbpd_cb *dp_cb;
+	struct dp_usbpd dp_usbpd;
+};
+
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+{
+	int rc = 0;
+	struct dp_hpd_private *hpd_priv;
+
+	hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
+					dp_usbpd);
+
+	dp_usbpd->hpd_high = hpd;
+
+	if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
+				|| !hpd_priv->dp_cb->disconnect) {
+		pr_err("hpd dp_cb not initialized\n");
+		return -EINVAL;
+	}
+	if (hpd)
+		hpd_priv->dp_cb->configure(hpd_priv->dev);
+	else
+		hpd_priv->dp_cb->disconnect(hpd_priv->dev);
+
+	return rc;
+}
+
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
+{
+	struct dp_hpd_private *dp_hpd;
+
+	if (!cb) {
+		pr_err("invalid cb data\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
+	if (!dp_hpd)
+		return ERR_PTR(-ENOMEM);
+
+	dp_hpd->dev = dev;
+	dp_hpd->dp_cb = cb;
+
+	dp_hpd->dp_usbpd.connect = dp_hpd_connect;
+
+	return &dp_hpd->dp_usbpd;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
new file mode 100644
index 000000000000..5bc5bb64680f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_HPD_H_
+#define _DP_HPD_H_
+
+//#include <linux/usb/usbpd.h>
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum plug_orientation {
+	ORIENTATION_NONE,
+	ORIENTATION_CC1,
+	ORIENTATION_CC2,
+};
+
+/**
+ * struct dp_usbpd - DisplayPort status
+ *
+ * @orientation: plug orientation configuration
+ * @low_pow_st: low power state
+ * @adaptor_dp_en: adaptor functionality enabled
+ * @multi_func: multi-function preferred
+ * @usb_config_req: request to switch to usb
+ * @exit_dp_mode: request exit from displayport mode
+ * @hpd_high: Hot Plug Detect signal is high.
+ * @hpd_irq: Change in the status since last message + * @alt_mode_cfg_done: bool to specify alt mode status + * @debug_en: bool to specify debug mode + * @connect: simulate disconnect or connect for debug mode + */ +struct dp_usbpd { + enum plug_orientation orientation; + bool low_pow_st; + bool adaptor_dp_en; + bool multi_func; + bool usb_config_req; + bool exit_dp_mode; + bool hpd_high; + bool hpd_irq; + bool alt_mode_cfg_done; + bool debug_en; + + int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd); +}; + +/** + * struct dp_usbpd_cb - callback functions provided by the client + * + * @configure: called by usbpd module when PD communication has + * been completed and the usb peripheral has been configured on + * dp mode. + * @disconnect: notify the cable disconnect issued by usb. + * @attention: notify any attention message issued by usb. + */ +struct dp_usbpd_cb { + int (*configure)(struct device *dev); + int (*disconnect)(struct device *dev); + int (*attention)(struct device *dev); +}; + +/** + * dp_hpd_get() - setup hpd module + * + * @dev: device instance of the caller + * @cb: struct containing callback function pointers. + * + * This function allows the client to initialize the usbpd + * module. The module will communicate with HPD module. + */ +struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb); + +int dp_hpd_register(struct dp_usbpd *dp_usbpd); +void dp_hpd_unregister(struct dp_usbpd *dp_usbpd); +int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd); + +#endif /* _DP_HPD_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c new file mode 100644 index 000000000000..c811da515fb3 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -0,0 +1,1210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <drm/drm_print.h> + +#include "dp_link.h" +#include "dp_panel.h" + +#define DP_TEST_REQUEST_MASK 0x7F + +enum audio_sample_rate { + AUDIO_SAMPLE_RATE_32_KHZ = 0x00, + AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01, + AUDIO_SAMPLE_RATE_48_KHZ = 0x02, + AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03, + AUDIO_SAMPLE_RATE_96_KHZ = 0x04, + AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05, + AUDIO_SAMPLE_RATE_192_KHZ = 0x06, +}; + +enum audio_pattern_type { + AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00, + AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, +}; + +struct dp_link_request { + u32 test_requested; + u32 test_link_rate; + u32 test_lane_count; +}; + +struct dp_link_private { + u32 prev_sink_count; + struct device *dev; + struct drm_dp_aux *aux; + struct dp_link dp_link; + + struct dp_link_request request; + struct mutex psm_mutex; + u8 link_status[DP_LINK_STATUS_SIZE]; +}; + +static int dp_aux_link_power_up(struct drm_dp_aux *aux, + struct dp_link_info *link) +{ + u8 value; + int err; + + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + usleep_range(1000, 2000); + + return 0; +} + +static int dp_aux_link_power_down(struct drm_dp_aux *aux, + struct dp_link_info *link) +{ + u8 value; + int err; + + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} + +static int dp_link_get_period(struct dp_link_private *link, int const addr) +{ + int ret = 0; + u8 data; + u32 const max_audio_period = 0xA; + + /* TEST_AUDIO_PERIOD_CH_XX */ + if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) { + DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr); + ret = -EINVAL; + goto exit; + } + + /* Period - Bits 3:0 */ + data = data & 0xF; + if ((int)data > max_audio_period) { + DRM_ERROR("invalid test_audio_period_ch_1 = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + ret = data; +exit: + return ret; +} + +static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +{ + int ret = 0; + struct dp_link_test_audio *req = &link->dp_link.test_audio; + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_1 = ret; + DRM_DEBUG_DP("test_audio_period_ch_1 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_2 = ret; + DRM_DEBUG_DP("test_audio_period_ch_2 = 0x%x\n", ret); + + /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_3 = ret; + DRM_DEBUG_DP("test_audio_period_ch_3 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_4 = ret; + DRM_DEBUG_DP("test_audio_period_ch_4 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_5 = ret; + DRM_DEBUG_DP("test_audio_period_ch_5 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + if (ret == -EINVAL) + goto 
exit; + + req->test_audio_period_ch_6 = ret; + DRM_DEBUG_DP("test_audio_period_ch_6 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_7 = ret; + DRM_DEBUG_DP("test_audio_period_ch_7 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_8 = ret; + DRM_DEBUG_DP("test_audio_period_ch_8 = 0x%x\n", ret); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + int const max_audio_pattern_type = 0x1; + + rlen = drm_dp_dpcd_readb(link->aux, + DP_TEST_AUDIO_PATTERN_TYPE, &data); + if (rlen < 0) { + DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen); + return rlen; + } + + /* Audio Pattern Type - Bits 7:0 */ + if ((int)data > max_audio_pattern_type) { + DRM_ERROR("invalid audio pattern type = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_pattern_type = data; + DRM_DEBUG_DP("audio pattern type = 0x%x\n", data); +exit: + return ret; +} + +static int dp_link_parse_audio_mode(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + int const max_audio_sampling_rate = 0x6; + int const max_audio_channel_count = 0x8; + int sampling_rate = 0x0; + int channel_count = 0x0; + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data); + if (rlen < 0) { + DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen); + return rlen; + } + + /* Sampling Rate - Bits 3:0 */ + sampling_rate = data & 0xF; + if (sampling_rate > max_audio_sampling_rate) { + DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n", + sampling_rate, max_audio_sampling_rate); + ret = -EINVAL; + goto exit; + } + + /* Channel Count - Bits 7:4 */ + channel_count = ((data & 0xF0) >> 4) + 1; + if (channel_count > max_audio_channel_count) { + DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n", + channel_count, max_audio_channel_count); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->dp_link.test_audio.test_audio_channel_count = channel_count; + DRM_DEBUG_DP("sampling_rate = 0x%x, channel_count = 0x%x\n", + sampling_rate, channel_count); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + + ret = dp_link_parse_audio_mode(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_pattern_type(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_channel_period(link); + +exit: + return ret; +} + +static bool dp_link_is_video_pattern_valid(u32 pattern) +{ + switch (pattern) { + case DP_NO_TEST_PATTERN: + case DP_COLOR_RAMP: + case DP_BLACK_AND_WHITE_VERTICAL_LINES: + case DP_COLOR_SQUARE: + return true; + default: + return false; + } +} + +/** + * dp_link_is_bit_depth_valid() - validates the bit depth requested + * @tbd: bit depth requested by the sink + * + * Returns true if the requested bit depth is supported. 
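+ * Only DP_TEST_BIT_DEPTH_6, DP_TEST_BIT_DEPTH_8 and DP_TEST_BIT_DEPTH_10
+ * are accepted; any other value is rejected.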
+ */ +static bool dp_link_is_bit_depth_valid(u32 tbd) +{ + /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + case DP_TEST_BIT_DEPTH_8: + case DP_TEST_BIT_DEPTH_10: + return true; + default: + return false; + } +} + +static int dp_link_parse_timing_params1(struct dp_link_private *link, + int addr, int len, u32 *val) +{ + u8 bp[2]; + int rlen; + + if (len != 2) + return -EINVAL; + + /* Read the requested video link pattern (Byte 0x221). */ + rlen = drm_dp_dpcd_read(link->aux, addr, bp, len); + if (rlen < len) { + DRM_ERROR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val = bp[1] | (bp[0] << 8); + + return 0; +} + +static int dp_link_parse_timing_params2(struct dp_link_private *link, + int addr, int len, + u32 *val1, u32 *val2) +{ + u8 bp[2]; + int rlen; + + if (len != 2) + return -EINVAL; + + /* Read the requested video link pattern (Byte 0x221). */ + rlen = drm_dp_dpcd_read(link->aux, addr, bp, len); + if (rlen < len) { + DRM_ERROR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val1 = (bp[0] & BIT(7)) >> 7; + *val2 = bp[1] | ((bp[0] & 0x7F) << 8); + + return 0; +} + +static int dp_link_parse_timing_params3(struct dp_link_private *link, + int addr, u32 *val) +{ + u8 bp; + u32 len = 1; + int rlen; + + rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len); + if (rlen < 1) { + DRM_ERROR("failed to read 0x%x\n", addr); + return -EINVAL; + } + *val = bp; + + return 0; +} + +/** + * dp_parse_video_pattern_params() - parses video pattern parameters from DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the video link pattern and the link + * bit depth requested by the sink and, and if the values parsed are valid. + */ +static int dp_link_parse_video_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + ssize_t rlen; + u8 bp; + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read link video pattern. rlen=%zd\n", + rlen); + return rlen; + } + + if (!dp_link_is_video_pattern_valid(bp)) { + DRM_ERROR("invalid link video pattern = 0x%x\n", bp); + ret = -EINVAL; + return ret; + } + + link->dp_link.test_video.test_video_pattern = bp; + + /* Read the requested color bit depth and dynamic range (Byte 0x232) */ + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read link bit depth. 
rlen=%zd\n", rlen); + return rlen; + } + + /* Dynamic Range */ + link->dp_link.test_video.test_dyn_range = + (bp & DP_TEST_DYNAMIC_RANGE_CEA); + + /* Color bit depth */ + bp &= DP_TEST_BIT_DEPTH_MASK; + if (!dp_link_is_bit_depth_valid(bp)) { + DRM_ERROR("invalid link bit depth = 0x%x\n", bp); + ret = -EINVAL; + return ret; + } + + link->dp_link.test_video.test_bit_depth = bp; + + /* resolution timing params */ + ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->dp_link.test_video.test_h_total); + if (ret) { + DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->dp_link.test_video.test_v_total); + if (ret) { + DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->dp_link.test_video.test_h_start); + if (ret) { + DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->dp_link.test_video.test_v_start); + if (ret) { + DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->dp_link.test_video.test_hsync_pol, + &link->dp_link.test_video.test_hsync_width); + if (ret) { + DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->dp_link.test_video.test_vsync_pol, + &link->dp_link.test_video.test_vsync_width); + if (ret) { + DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->dp_link.test_video.test_h_width); + if (ret) { + DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->dp_link.test_video.test_v_height); + if (ret) { + DRM_ERROR("failed to parse test_v_height\n"); + return ret; + } + + ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->dp_link.test_video.test_rr_d); + link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + if (ret) { + DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); + return ret; + } + + ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->dp_link.test_video.test_rr_n); + if (ret) { + DRM_ERROR("failed to parse test_rr_n\n"); + return ret; + } + + DRM_DEBUG_DP("link video pattern = 0x%x\n" + "link dynamic range = 0x%x\n" + "link bit depth = 0x%x\n" + "TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n" + "TEST_H_START = %d, TEST_V_START = %d\n" + "TEST_HSYNC_POL = %d\n" + "TEST_HSYNC_WIDTH = %d\n" + "TEST_VSYNC_POL = %d\n" + "TEST_VSYNC_WIDTH = %d\n" + "TEST_H_WIDTH = %d\n" + "TEST_V_HEIGHT = %d\n" + "TEST_REFRESH_DENOMINATOR = %d\n" + "TEST_REFRESH_NUMERATOR = %d\n", + link->dp_link.test_video.test_video_pattern, + link->dp_link.test_video.test_dyn_range, + link->dp_link.test_video.test_bit_depth, + link->dp_link.test_video.test_h_total, + link->dp_link.test_video.test_v_total, + link->dp_link.test_video.test_h_start, + link->dp_link.test_video.test_v_start, + link->dp_link.test_video.test_hsync_pol, + link->dp_link.test_video.test_hsync_width, + link->dp_link.test_video.test_vsync_pol, + link->dp_link.test_video.test_vsync_width, + link->dp_link.test_video.test_h_width, + 
link->dp_link.test_video.test_v_height,
+		link->dp_link.test_video.test_rr_d,
+		link->dp_link.test_video.test_rr_n);
+
+	return ret;
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if these parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+	u8 bp;
+	ssize_t rlen;
+
+	rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE, &bp);
+	if (rlen < 0) {
+		DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen);
+		return rlen;
+	}
+
+	if (!is_link_rate_valid(bp)) {
+		DRM_ERROR("invalid link rate = 0x%x\n", bp);
+		return -EINVAL;
+	}
+
+	link->request.test_link_rate = bp;
+	DRM_DEBUG_DP("link rate = 0x%x\n", link->request.test_link_rate);
+
+	rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp);
+	if (rlen < 0) {
+		DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen);
+		return rlen;
+	}
+	bp &= DP_MAX_LANE_COUNT_MASK;
+
+	if (!is_lane_count_valid(bp)) {
+		DRM_ERROR("invalid lane count = 0x%x\n", bp);
+		return -EINVAL;
+	}
+
+	link->request.test_lane_count = bp;
+	DRM_DEBUG_DP("lane count = 0x%x\n", link->request.test_lane_count);
+	return 0;
+}
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy test parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+	u8 data;
+	ssize_t rlen;
+
+	rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN,
+					&data);
+	if (rlen < 0) {
+		DRM_ERROR("failed to read phy test pattern. rlen=%zd\n", rlen);
+		return rlen;
+	}
+
+	link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
+
+	DRM_DEBUG_DP("phy_test_pattern_sel = 0x%x\n", data);
+
+	switch (data) {
+	case DP_PHY_TEST_PATTERN_SEL_MASK:
+	case DP_PHY_TEST_PATTERN_NONE:
+	case DP_PHY_TEST_PATTERN_D10_2:
+	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+	case DP_PHY_TEST_PATTERN_PRBS7:
+	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+	case DP_PHY_TEST_PATTERN_CP2520:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test request bitmask sent by the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+	u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
+				DP_TEST_LINK_AUDIO_PATTERN |
+				DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+
+	return ((link & video_audio_test) &&
+		!(link & ~video_audio_test));
+}
+
+/**
+ * dp_link_parse_request() - parses link request parameters from sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 data;
+	ssize_t rlen;
+
+	/*
+	 * Read the device service IRQ vector (Byte 0x201) to determine
+	 * whether an automated test has been requested by the sink.
+	 */
+	rlen = drm_dp_dpcd_readb(link->aux,
+				DP_DEVICE_SERVICE_IRQ_VECTOR, &data);
+	if (rlen < 0) {
+		DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+		return rlen;
+	}
+
+	DRM_DEBUG_DP("device service irq vector = 0x%x\n", data);
+
+	if (!(data & DP_AUTOMATED_TEST_REQUEST)) {
+		DRM_DEBUG_DP("no test requested\n");
+		return 0;
+	}
+
+	/*
+	 * Read the test request byte (Byte 0x218) to determine what type
+	 * of automated test has been requested by the sink.
+	 */
+	rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data);
+	if (rlen < 0) {
+		DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+		return rlen;
+	}
+
+	if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) {
+		DRM_DEBUG_DP("test 0x%x not supported\n", data);
+		goto end;
+	}
+
+	DRM_DEBUG_DP("Test:(0x%x) requested\n", data);
+	link->request.test_requested = data;
+	if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
+		ret = dp_link_parse_phy_test_params(link);
+		if (ret)
+			goto end;
+		ret = dp_link_parse_link_training_params(link);
+		if (ret)
+			goto end;
+	}
+
+	if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
+		ret = dp_link_parse_link_training_params(link);
+		if (ret)
+			goto end;
+	}
+
+	if (dp_link_is_video_audio_test_requested(
+			link->request.test_requested)) {
+		ret = dp_link_parse_video_pattern_params(link);
+		if (ret)
+			goto end;
+
+		ret = dp_link_parse_audio_pattern_params(link);
+	}
+end:
+	/*
+	 * Send a DP_TEST_ACK if all test parameters are valid, otherwise send
+	 * a DP_TEST_NAK.
+	 */
+	if (ret) {
+		link->dp_link.test_response = DP_TEST_NAK;
+	} else {
+		if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
+			link->dp_link.test_response = DP_TEST_ACK;
+		else
+			link->dp_link.test_response =
+				DP_TEST_EDID_CHECKSUM_WRITE;
+	}
+
+	return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @dp_link: pointer to link module data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static int dp_link_parse_sink_count(struct dp_link *dp_link)
+{
+	ssize_t rlen;
+	bool cp_ready;
+
+	struct dp_link_private *link = container_of(dp_link,
+			struct dp_link_private, dp_link);
+
+	rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
+				 &link->dp_link.sink_count);
+	if (rlen < 0) {
+		DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
+		return rlen;
+	}
+
+	cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
+
+	link->dp_link.sink_count =
+		DP_GET_SINK_COUNT(link->dp_link.sink_count);
+
+	DRM_DEBUG_DP("sink_count = 0x%x, cp_ready = 0x%x\n",
+		link->dp_link.sink_count, cp_ready);
+	return 0;
+}
+
+static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+	int len = 0;
+
+	link->prev_sink_count = link->dp_link.sink_count;
+	dp_link_parse_sink_count(&link->dp_link);
+
+	len = drm_dp_dpcd_read_link_status(link->aux,
+		link->link_status);
+	if (len < DP_LINK_STATUS_SIZE)
+		DRM_ERROR("DP link status read failed\n");
+	dp_link_parse_request(link);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and link
+ * rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
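+ *
+ * A caller-side sketch (hypothetical, mirroring the dispatch done in
+ * dp_link_process_request() later in this file):
+ *
+ *	ret = dp_link_process_link_training_request(link);
+ *	if (!ret)
+ *		dp_link->sink_request |= DP_TEST_LINK_TRAINING;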
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+	if (link->request.test_requested != DP_TEST_LINK_TRAINING)
+		return -EINVAL;
+
+	DRM_DEBUG_DP("Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
+			DP_TEST_LINK_TRAINING,
+			link->request.test_link_rate,
+			link->request.test_lane_count);
+
+	link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+	link->dp_link.link_params.rate = link->request.test_link_rate;
+
+	return 0;
+}
+
+bool dp_link_send_test_response(struct dp_link *dp_link)
+{
+	struct dp_link_private *link = NULL;
+	int ret = 0;
+
+	if (!dp_link) {
+		DRM_ERROR("invalid input\n");
+		return false;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
+			dp_link->test_response);
+
+	return ret == 1;
+}
+
+int dp_link_psm_config(struct dp_link *dp_link,
+	struct dp_link_info *link_info, bool enable)
+{
+	struct dp_link_private *link = NULL;
+	int ret = 0;
+
+	if (!dp_link) {
+		DRM_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	mutex_lock(&link->psm_mutex);
+	if (enable)
+		ret = dp_aux_link_power_down(link->aux, link_info);
+	else
+		ret = dp_aux_link_power_up(link->aux, link_info);
+
+	if (ret)
+		DRM_ERROR("Failed to %s low power mode\n", enable ?
+							"enter" : "exit");
+	else
+		dp_link->psm_enabled = enable;
+
+	mutex_unlock(&link->psm_mutex);
+	return ret;
+}
+
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+{
+	struct dp_link_private *link = NULL;
+	int ret = 0;
+
+	if (!dp_link) {
+		DRM_ERROR("invalid input\n");
+		return false;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
+						checksum);
+	return ret == 1;
+}
+
+static int dp_link_parse_vx_px(struct dp_link_private *link)
+{
+	int ret = 0;
+
+	DRM_DEBUG_DP("vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
+		drm_dp_get_adjust_request_voltage(link->link_status, 0),
+		drm_dp_get_adjust_request_voltage(link->link_status, 1),
+		drm_dp_get_adjust_request_voltage(link->link_status, 2),
+		drm_dp_get_adjust_request_voltage(link->link_status, 3));
+
+	DRM_DEBUG_DP("px: 0=%d, 1=%d, 2=%d, 3=%d\n",
+		drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0),
+		drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1),
+		drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2),
+		drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3));
+
+	/*
+	 * Update the voltage and pre-emphasis levels as per DPCD request
+	 * vector.
+	 */
+	DRM_DEBUG_DP("Current: v_level = 0x%x, p_level = 0x%x\n",
+			link->dp_link.phy_params.v_level,
+			link->dp_link.phy_params.p_level);
+	link->dp_link.phy_params.v_level =
+		drm_dp_get_adjust_request_voltage(link->link_status, 0);
+	link->dp_link.phy_params.p_level =
+		drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
+	DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n",
+			link->dp_link.phy_params.v_level,
+			link->dp_link.phy_params.p_level);
+
+	return ret;
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy test requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy test pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy test pattern has been
+ * processed, otherwise it will return -EINVAL.
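+ *
+ * On success the requested rate and lane count have been copied into
+ * link->dp_link.link_params and the requested voltage-swing and
+ * pre-emphasis levels into link->dp_link.phy_params. A hypothetical
+ * caller then flags the request, e.g.:
+ *
+ *	if (!dp_link_process_phy_test_pattern_request(link))
+ *		dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;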
+ */
+static int dp_link_process_phy_test_pattern_request(
+		struct dp_link_private *link)
+{
+	int ret = 0;
+
+	if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
+		DRM_DEBUG_DP("no phy test\n");
+		return -EINVAL;
+	}
+
+	if (!is_link_rate_valid(link->request.test_link_rate) ||
+		!is_lane_count_valid(link->request.test_lane_count)) {
+		DRM_ERROR("Invalid: link rate = 0x%x, lane count = 0x%x\n",
+				link->request.test_link_rate,
+				link->request.test_lane_count);
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_DP("Current: rate = 0x%x, lane count = 0x%x\n",
+			link->dp_link.link_params.rate,
+			link->dp_link.link_params.num_lanes);
+
+	DRM_DEBUG_DP("Requested: rate = 0x%x, lane count = 0x%x\n",
+			link->request.test_link_rate,
+			link->request.test_lane_count);
+
+	link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+	link->dp_link.link_params.rate = link->request.test_link_rate;
+
+	ret = dp_link_parse_vx_px(link);
+
+	if (ret)
+		DRM_ERROR("parse_vx_px failed. ret=%d\n", ret);
+
+	return ret;
+}
+
+static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+	if (!(get_link_status(link->link_status,
+				DP_LANE_ALIGN_STATUS_UPDATED) &
+				DP_LINK_STATUS_UPDATED) ||
+			(drm_dp_clock_recovery_ok(link->link_status,
+				link->dp_link.link_params.num_lanes) &&
+			drm_dp_channel_eq_ok(link->link_status,
+				link->dp_link.link_params.num_lanes)))
+		return -EINVAL;
+
+	DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n",
+			drm_dp_channel_eq_ok(link->link_status,
+				link->dp_link.link_params.num_lanes),
+			drm_dp_clock_recovery_ok(link->link_status,
+				link->dp_link.link_params.num_lanes));
+
+	return 0;
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
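+ *
+ * For example (hypothetical values): if prev_sink_count was 1 and the
+ * freshly parsed dp_link.sink_count is 0, this function returns 0 and
+ * the caller sets DS_PORT_STATUS_CHANGED in sink_request, causing the
+ * EDID to be re-read.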
+ */ +static int dp_link_process_ds_port_status_change(struct dp_link_private *link) +{ + if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & + DP_DOWNSTREAM_PORT_STATUS_CHANGED) + goto reset; + + if (link->prev_sink_count == link->dp_link.sink_count) + return -EINVAL; + +reset: + /* reset prev_sink_count */ + link->prev_sink_count = link->dp_link.sink_count; + + return 0; +} + +static bool dp_link_is_video_pattern_requested(struct dp_link_private *link) +{ + return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN) + && !(link->request.test_requested & + DP_TEST_LINK_AUDIO_DISABLED_VIDEO); +} + +static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link) +{ + return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN); +} + +static void dp_link_reset_data(struct dp_link_private *link) +{ + link->request = (const struct dp_link_request){ 0 }; + link->dp_link.test_video = (const struct dp_link_test_video){ 0 }; + link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; + link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 }; + link->dp_link.phy_params.phy_test_pattern_sel = 0; + link->dp_link.sink_request = 0; + link->dp_link.test_response = 0; +} + +/** + * dp_link_process_request() - handle HPD IRQ transition to HIGH + * @dp_link: pointer to link module data + * + * This function will handle the HPD IRQ state transitions from LOW to HIGH + * (including cases when there are back to back HPD IRQ HIGH) indicating + * the start of a new link training request or sink status update. + */ +int dp_link_process_request(struct dp_link *dp_link) +{ + int ret = 0; + struct dp_link_private *link; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + dp_link_reset_data(link); + + dp_link_parse_sink_status_field(link); + + if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { + dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + return ret; + } + + ret = dp_link_process_ds_port_status_change(link); + if (!ret) { + dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + return ret; + } + + ret = dp_link_process_link_training_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_TRAINING; + return ret; + } + + ret = dp_link_process_phy_test_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + return ret; + } + + ret = dp_link_process_link_status_update(link); + if (!ret) { + dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + return ret; + } + + if (dp_link_is_video_pattern_requested(link)) { + ret = 0; + dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + } + + if (dp_link_is_audio_pattern_requested(link)) { + dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + return -EINVAL; + } + + return ret; +} + +int dp_link_get_colorimetry_config(struct dp_link *dp_link) +{ + u32 cc; + struct dp_link_private *link; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* + * Unless a video pattern CTS test is ongoing, use RGB_VESA + * Only RGB_VESA and RGB_CEA supported for now + */ + if (dp_link_is_video_pattern_requested(link)) + cc = link->dp_link.test_video.test_dyn_range; + else + cc = DP_TEST_DYNAMIC_RANGE_VESA; + + return cc; +} + +int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +{ + int i; + int v_max = 0, p_max = 0; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); 
+ return -EINVAL; + } + + /* use the max level across lanes */ + for (i = 0; i < dp_link->link_params.num_lanes; i++) { + u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i); + u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status, + i); + DRM_DEBUG_DP("lane=%d req_vol_swing=%d req_pre_emphasis=%d\n", + i, data_v, data_p); + if (v_max < data_v) + v_max = data_v; + if (p_max < data_p) + p_max = data_p; + } + + dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + + /** + * Adjust the voltage swing and pre-emphasis level combination to within + * the allowable range. + */ + if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) { + DRM_DEBUG_DP("Requested vSwingLevel=%d, change to %d\n", + dp_link->phy_params.v_level, + DP_TRAIN_VOLTAGE_SWING_MAX); + dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX; + } + + if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) { + DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n", + dp_link->phy_params.p_level, + DP_TRAIN_PRE_EMPHASIS_MAX); + dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX; + } + + if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1) + && (dp_link->phy_params.v_level == + DP_TRAIN_VOLTAGE_SWING_LVL_2)) { + DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n", + dp_link->phy_params.p_level, + DP_TRAIN_PRE_EMPHASIS_LVL_1); + dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1; + } + + DRM_DEBUG_DP("adjusted: v_level=%d, p_level=%d\n", + dp_link->phy_params.v_level, dp_link->phy_params.p_level); + + return 0; +} + +u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +{ + u32 tbd; + + /* + * Few simplistic rules and assumptions made here: + * 1. Test bit depth is bit depth per color component + * 2. Assume 3 color components + */ + switch (bpp) { + case 18: + tbd = DP_TEST_BIT_DEPTH_6; + break; + case 24: + tbd = DP_TEST_BIT_DEPTH_8; + break; + case 30: + tbd = DP_TEST_BIT_DEPTH_10; + break; + default: + tbd = DP_TEST_BIT_DEPTH_UNKNOWN; + break; + } + + if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); + + return tbd; +} + +struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) +{ + struct dp_link_private *link; + struct dp_link *dp_link; + + if (!dev || !aux) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL); + if (!link) + return ERR_PTR(-ENOMEM); + + link->dev = dev; + link->aux = aux; + + mutex_init(&link->psm_mutex); + dp_link = &link->dp_link; + + return dp_link; +} diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h new file mode 100644 index 000000000000..49811b6221e5 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+#define DS_PORT_STATUS_CHANGED 0x200
+#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
+#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+
+struct dp_link_info {
+	unsigned char revision;
+	unsigned int rate;
+	unsigned int num_lanes;
+	unsigned long capabilities;
+};
+
+enum dp_link_voltage_level {
+	DP_TRAIN_VOLTAGE_SWING_LVL_0	= 0,
+	DP_TRAIN_VOLTAGE_SWING_LVL_1	= 1,
+	DP_TRAIN_VOLTAGE_SWING_LVL_2	= 2,
+	DP_TRAIN_VOLTAGE_SWING_MAX	= DP_TRAIN_VOLTAGE_SWING_LVL_2,
+};
+
+enum dp_link_preemphasis_level {
+	DP_TRAIN_PRE_EMPHASIS_LVL_0	= 0,
+	DP_TRAIN_PRE_EMPHASIS_LVL_1	= 1,
+	DP_TRAIN_PRE_EMPHASIS_LVL_2	= 2,
+	DP_TRAIN_PRE_EMPHASIS_MAX	= DP_TRAIN_PRE_EMPHASIS_LVL_2,
+};
+
+struct dp_link_test_video {
+	u32 test_video_pattern;
+	u32 test_bit_depth;
+	u32 test_dyn_range;
+	u32 test_h_total;
+	u32 test_v_total;
+	u32 test_h_start;
+	u32 test_v_start;
+	u32 test_hsync_pol;
+	u32 test_hsync_width;
+	u32 test_vsync_pol;
+	u32 test_vsync_width;
+	u32 test_h_width;
+	u32 test_v_height;
+	u32 test_rr_d;
+	u32 test_rr_n;
+};
+
+struct dp_link_test_audio {
+	u32 test_audio_sampling_rate;
+	u32 test_audio_channel_count;
+	u32 test_audio_pattern_type;
+	u32 test_audio_period_ch_1;
+	u32 test_audio_period_ch_2;
+	u32 test_audio_period_ch_3;
+	u32 test_audio_period_ch_4;
+	u32 test_audio_period_ch_5;
+	u32 test_audio_period_ch_6;
+	u32 test_audio_period_ch_7;
+	u32 test_audio_period_ch_8;
+};
+
+struct dp_link_phy_params {
+	u32 phy_test_pattern_sel;
+	u8 v_level;
+	u8 p_level;
+};
+
+struct dp_link {
+	u32 sink_request;
+	u32 test_response;
+	bool psm_enabled;
+
+	u8 sink_count;
+	struct dp_link_test_video test_video;
+	struct dp_link_test_audio test_audio;
+	struct dp_link_phy_params phy_params;
+	struct dp_link_info link_params;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+	/*
+	 * A few simplistic rules and assumptions made here:
+	 *    1. Bit depth is per color component
+	 *    2. If bit depth is unknown return 0
+	 *    3. Assume 3 color components
+	 */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		return 18;
+	case DP_TEST_BIT_DEPTH_8:
+		return 24;
+	case DP_TEST_BIT_DEPTH_10:
+		return 30;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		return 0;
+	}
+}
+
+/**
+ * dp_link_bit_depth_to_bpc() - convert test bit depth to bpc
+ * @tbd: test bit depth
+ *
+ * Returns the bits per component (bpc) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
+ * already been validated.
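+ *
+ * For example, dp_link_bit_depth_to_bpc(DP_TEST_BIT_DEPTH_8) returns 8,
+ * consistent with dp_link_bit_depth_to_bpp(DP_TEST_BIT_DEPTH_8)
+ * returning 24 for the assumed 3 color components.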
+ */
+static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
+{
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		return 6;
+	case DP_TEST_BIT_DEPTH_8:
+		return 8;
+	case DP_TEST_BIT_DEPTH_10:
+		return 10;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		return 0;
+	}
+}
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
+int dp_link_process_request(struct dp_link *dp_link);
+int dp_link_get_colorimetry_config(struct dp_link *dp_link);
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
+bool dp_link_send_test_response(struct dp_link *dp_link);
+int dp_link_psm_config(struct dp_link *dp_link,
+		struct dp_link_info *link_info, bool enable);
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+
+/**
+ * dp_link_get() - get a handle to the dp link module
+ * @dev: device instance of the caller
+ * @aux: DP aux handle used for DPCD transactions
+ *
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644
index 000000000000..18cec4fc5e0b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_panel.h"
+
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
+struct dp_panel_private {
+	struct device *dev;
+	struct dp_panel dp_panel;
+	struct drm_dp_aux *aux;
+	struct dp_link *link;
+	struct dp_catalog *catalog;
+	bool panel_on;
+	bool aux_cfg_update_done;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	size_t len;
+	ssize_t rlen;
+	struct dp_panel_private *panel;
+	struct dp_link_info *link_info;
+	u8 *dpcd, major = 0, minor = 0, temp;
+	u32 offset = DP_DPCD_REV;
+
+	dpcd = dp_panel->dpcd;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	link_info = &dp_panel->link_info;
+
+	rlen = drm_dp_dpcd_read(panel->aux, offset,
+			dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+		DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+		if (rlen == -ETIMEDOUT)
+			rc = rlen;
+		else
+			rc = -EINVAL;
+
+		goto end;
+	}
+
+	temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
+
+	/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
+	if (temp & BIT(7)) {
+		DRM_DEBUG_DP("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
+		offset = DPRX_EXTENDED_DPCD_FIELD;
+	}
+
+	rlen = drm_dp_dpcd_read(panel->aux, offset,
+			dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+		DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+		if (rlen == -ETIMEDOUT)
+			rc = rlen;
+		else
+			rc = -EINVAL;
+
+		goto end;
+	}
+
+	link_info->revision = dpcd[DP_DPCD_REV];
+	major = (link_info->revision >> 4) & 0x0f;
+	minor = link_info->revision & 0x0f;
+
+	link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+	link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	if (link_info->num_lanes > dp_panel->max_dp_lanes)
+		link_info->num_lanes = dp_panel->max_dp_lanes;
+
+	/* Limit support up to HBR2 until HBR3 support is added */
+	if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
+		link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+
+	DRM_DEBUG_DP("version: %d.%d\n", major, minor);
+	DRM_DEBUG_DP("link_rate=%d\n", link_info->rate);
+	DRM_DEBUG_DP("lane_count=%d\n", link_info->num_lanes);
+
+	if (drm_dp_enhanced_frame_cap(dpcd))
+		link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+	dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
+	dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
+
+	if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
+		dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
+		dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
+		len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
+
+		rlen = drm_dp_dpcd_read(panel->aux,
+			DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
+		if (rlen < len) {
+			DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+		u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+	struct dp_link_info *link_info;
+	const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+	u32 bpp = 0, data_rate_khz = 0;
+
+	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+
+	link_info = &dp_panel->link_info;
+	data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+	while (bpp > min_supported_bpp) {
+		if (mode_pclk_khz * bpp <= data_rate_khz)
+			break;
+		bpp -= 6;
+	}
+
+	return bpp;
+}
+
+static int dp_panel_update_modes(struct drm_connector *connector,
+	struct edid *edid)
+{
+	int rc = 0;
+
+	if (edid) {
+		rc = drm_connector_update_edid_property(connector, edid);
+		if (rc) {
+			DRM_ERROR("failed to update edid property %d\n", rc);
+			return rc;
+		}
+		rc = drm_add_edid_modes(connector, edid);
+		DRM_DEBUG_DP("%s -", __func__);
+		return rc;
+	}
+
+	rc = drm_connector_update_edid_property(connector, NULL);
+	if (rc)
+		DRM_ERROR("failed to update edid property %d\n", rc);
+
+	return rc;
+}
+
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+	struct drm_connector *connector)
+{
+	int rc = 0, bw_code;
+	int rlen;
+	u8 count;
+	struct dp_panel_private *panel;
+
+	if (!dp_panel || !connector) {
+		DRM_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	rc = dp_panel_read_dpcd(dp_panel);
+	bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+	if (rc || !is_link_rate_valid(bw_code) ||
+			!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
+			(bw_code > dp_panel->max_bw_code)) {
+		DRM_ERROR("read dpcd failed %d\n", rc);
+		return rc;
+	}
+
+	if (dp_panel->dfp_present) {
+		rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
+				&count, 1);
+		if (rlen == 1) {
+			count = DP_GET_SINK_COUNT(count);
+			if (!count) {
+				DRM_ERROR("no downstream ports connected\n");
+				panel->link->sink_count = 0;
+				rc = -ENOTCONN;
+				goto end;
+			}
+		}
+	}
+
+	kfree(dp_panel->edid);
+	dp_panel->edid = NULL;
+
+	dp_panel->edid = drm_get_edid(connector,
+				      &panel->aux->ddc);
+	if (!dp_panel->edid) {
+		DRM_ERROR("panel edid read failed\n");
+
+		/* fail safe edid */
+		mutex_lock(&connector->dev->mode_config.mutex);
+		if (drm_add_modes_noedid(connector, 640, 480))
+			drm_set_preferred_mode(connector, 640, 480);
+		mutex_unlock(&connector->dev->mode_config.mutex);
+	}
+
+	if (panel->aux_cfg_update_done) {
+		DRM_DEBUG_DP("read DPCD with updated AUX config\n");
+		rc = dp_panel_read_dpcd(dp_panel);
+		bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+		if (rc || !is_link_rate_valid(bw_code) ||
+			!is_lane_count_valid(dp_panel->link_info.num_lanes)
+			|| (bw_code > dp_panel->max_bw_code)) {
+			DRM_ERROR("read dpcd failed %d\n", rc);
+			return rc;
+		}
+		panel->aux_cfg_update_done = false;
+	}
+end:
+	return rc;
+}
+
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+		u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
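+	/*
+	 * Bandwidth check done by dp_panel_get_supported_bpp(), with
+	 * hypothetical numbers: 2 lanes at HBR2 give data_rate_khz =
+	 * 2 * 540000 * 8 = 8640000; a 300000 kHz pixel clock at 30 bpp
+	 * needs 300000 * 30 = 9000000 > 8640000, so bpp steps down by 6
+	 * and 24 bpp (7200000 <= 8640000) is returned.
+	 */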
struct dp_panel_private *panel; + u32 bpp = mode_edid_bpp; + + if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { + DRM_ERROR("invalid input\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->video_test) + bpp = dp_link_bit_depth_to_bpp( + panel->link->test_video.test_bit_depth); + else + bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp, + mode_pclk_khz); + + return bpp; +} + +int dp_panel_get_modes(struct dp_panel *dp_panel, + struct drm_connector *connector, struct dp_display_mode *mode) +{ + if (!dp_panel) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + if (dp_panel->edid) + return dp_panel_update_modes(connector, dp_panel->edid); + + return 0; +} + +static u8 dp_panel_get_edid_checksum(struct edid *edid) +{ + struct edid *last_block; + u8 *raw_edid; + bool is_edid_corrupt; + + if (!edid) { + DRM_ERROR("invalid edid input\n"); + return 0; + } + + raw_edid = (u8 *)edid; + raw_edid += (edid->extensions * EDID_LENGTH); + last_block = (struct edid *)raw_edid; + + /* block type extension */ + drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt); + if (!is_edid_corrupt) + return last_block->checksum; + + DRM_ERROR("Invalid block, no checksum\n"); + return 0; +} + +void dp_panel_handle_sink_request(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DRM_ERROR("invalid input\n"); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) { + u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid); + + dp_link_send_edid_checksum(panel->link, checksum); + dp_link_send_test_response(panel->link); + } +} + +void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable) +{ + struct dp_catalog *catalog; + struct dp_panel_private *panel; + + if (!dp_panel) { + DRM_ERROR("invalid input\n"); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + if (!panel->panel_on) { + DRM_DEBUG_DP("DP panel not enabled, handle TPG on next on\n"); + return; + } + + if (!enable) { + dp_catalog_panel_tpg_disable(catalog); + return; + } + + DRM_DEBUG_DP("%s: calling catalog tpg_enable\n", __func__); + dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode); +} + +void dp_panel_dump_regs(struct dp_panel *dp_panel) +{ + struct dp_catalog *catalog; + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + dp_catalog_dump_regs(catalog); +} + +int dp_panel_timing_cfg(struct dp_panel *dp_panel) +{ + int rc = 0; + u32 data, total_ver, total_hor; + struct dp_catalog *catalog; + struct dp_panel_private *panel; + struct drm_display_mode *drm_mode; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + drm_mode = &panel->dp_panel.dp_mode.drm_mode; + + DRM_DEBUG_DP("width=%d hporch= %d %d %d\n", + drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end, + drm_mode->hsync_start - drm_mode->hdisplay, + drm_mode->hsync_end - drm_mode->hsync_start); + + DRM_DEBUG_DP("height=%d vporch= %d %d %d\n", + drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end, + drm_mode->vsync_start - drm_mode->vdisplay, + drm_mode->vsync_end - drm_mode->vsync_start); + + total_hor = drm_mode->htotal; + + total_ver = drm_mode->vtotal; + + data = total_ver; + data <<= 16; + data |= total_hor; + + catalog->total = data; + + data = 
(drm_mode->vtotal - drm_mode->vsync_start); + data <<= 16; + data |= (drm_mode->htotal - drm_mode->hsync_start); + + catalog->sync_start = data; + + data = drm_mode->vsync_end - drm_mode->vsync_start; + data <<= 16; + data |= (panel->dp_panel.dp_mode.v_active_low << 31); + data |= drm_mode->hsync_end - drm_mode->hsync_start; + data |= (panel->dp_panel.dp_mode.h_active_low << 15); + + catalog->width_blanking = data; + + data = drm_mode->vdisplay; + data <<= 16; + data |= drm_mode->hdisplay; + + catalog->dp_active = data; + + dp_catalog_panel_timing_cfg(catalog); + panel->panel_on = true; + + return rc; +} + +int dp_panel_init_panel_info(struct dp_panel *dp_panel) +{ + int rc = 0; + struct drm_display_mode *drm_mode; + + drm_mode = &dp_panel->dp_mode.drm_mode; + + /* + * print resolution info as this is a result + * of user initiated action of cable connection + */ + DRM_DEBUG_DP("SET NEW RESOLUTION:\n"); + DRM_DEBUG_DP("%dx%d@%dfps\n", drm_mode->hdisplay, + drm_mode->vdisplay, drm_mode_vrefresh(drm_mode)); + DRM_DEBUG_DP("h_porches(back|front|width) = (%d|%d|%d)\n", + drm_mode->htotal - drm_mode->hsync_end, + drm_mode->hsync_start - drm_mode->hdisplay, + drm_mode->hsync_end - drm_mode->hsync_start); + DRM_DEBUG_DP("v_porches(back|front|width) = (%d|%d|%d)\n", + drm_mode->vtotal - drm_mode->vsync_end, + drm_mode->vsync_start - drm_mode->vdisplay, + drm_mode->vsync_end - drm_mode->vsync_start); + DRM_DEBUG_DP("pixel clock (KHz)=(%d)\n", drm_mode->clock); + DRM_DEBUG_DP("bpp = %d\n", dp_panel->dp_mode.bpp); + + dp_panel->dp_mode.bpp = max_t(u32, 18, + min_t(u32, dp_panel->dp_mode.bpp, 30)); + DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp); + + return rc; +} + +struct dp_panel *dp_panel_get(struct dp_panel_in *in) +{ + struct dp_panel_private *panel; + struct dp_panel *dp_panel; + + if (!in->dev || !in->catalog || !in->aux || !in->link) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL); + if (!panel) + return ERR_PTR(-ENOMEM); + + panel->dev = in->dev; + panel->aux = in->aux; + panel->catalog = in->catalog; + panel->link = in->link; + + dp_panel = &panel->dp_panel; + dp_panel->max_bw_code = DP_LINK_BW_8_1; + panel->aux_cfg_update_done = false; + + return dp_panel; +} + +void dp_panel_put(struct dp_panel *dp_panel) +{ + if (!dp_panel) + return; + + kfree(dp_panel->edid); +} diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h new file mode 100644 index 000000000000..9023e5bb4b8b --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include <drm/msm_drm.h>
+
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_hpd.h"
+
+struct edid;
+
+#define DPRX_EXTENDED_DPCD_FIELD	0x2200
+
+#define DP_DOWNSTREAM_PORTS		4
+#define DP_DOWNSTREAM_CAP_SIZE		4
+
+struct dp_display_mode {
+	struct drm_display_mode drm_mode;
+	u32 capabilities;
+	u32 bpp;
+	u32 h_active_low;
+	u32 v_active_low;
+};
+
+struct dp_panel_in {
+	struct device *dev;
+	struct drm_dp_aux *aux;
+	struct dp_link *link;
+	struct dp_catalog *catalog;
+};
+
+struct dp_panel {
+	/* dpcd raw data */
+	u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+	u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
+	u32 ds_port_cnt;
+	u32 dfp_present;
+
+	struct dp_link_info link_info;
+	struct drm_dp_desc desc;
+	struct edid *edid;
+	struct drm_connector *connector;
+	struct dp_display_mode dp_mode;
+	bool video_test;
+
+	u32 vic;
+	u32 max_pclk_khz;
+	u32 max_dp_lanes;
+
+	u32 max_bw_code;
+};
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel);
+int dp_panel_deinit(struct dp_panel *dp_panel);
+int dp_panel_timing_cfg(struct dp_panel *dp_panel);
+void dp_panel_dump_regs(struct dp_panel *dp_panel);
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+		struct drm_connector *connector);
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_edid_bpp,
+		u32 mode_pclk_khz);
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+		struct drm_connector *connector, struct dp_display_mode *mode);
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+
+/**
+ * is_link_rate_valid() - validates the link rate
+ * @bw_code: link rate bandwidth code requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static inline bool is_link_rate_valid(u32 bw_code)
+{
+	return (bw_code == DP_LINK_BW_1_62 ||
+		bw_code == DP_LINK_BW_2_7 ||
+		bw_code == DP_LINK_BW_5_4 ||
+		bw_code == DP_LINK_BW_8_1);
+}
+
+/**
+ * is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static inline bool is_lane_count_valid(u32 lane_count)
+{
+	return (lane_count == 1 ||
+		lane_count == 2 ||
+		lane_count == 4);
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
new file mode 100644
index 000000000000..0519dd3ac3c3
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */ + +#include <linux/of_gpio.h> +#include <linux/phy/phy.h> + +#include <drm/drm_print.h> + +#include "dp_parser.h" +#include "dp_reg.h" + +static const struct dp_regulator_cfg sdm845_dp_reg_cfg = { + .num = 2, + .regs = { + {"vdda-1p2", 21800, 4 }, /* 1.2 V */ + {"vdda-0p9", 36000, 32 }, /* 0.9 V */ + }, +}; + +static int msm_dss_ioremap(struct platform_device *pdev, + struct dss_io_data *io_data) +{ + struct resource *res = NULL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + DRM_ERROR("%pS->%s: msm_dss_get_res failed\n", + __builtin_return_address(0), __func__); + return -ENODEV; + } + + io_data->len = (u32)resource_size(res); + io_data->base = ioremap(res->start, io_data->len); + if (!io_data->base) { + DRM_ERROR("%pS->%s: ioremap failed\n", + __builtin_return_address(0), __func__); + return -EIO; + } + + return 0; +} + +static void msm_dss_iounmap(struct dss_io_data *io_data) +{ + if (io_data->base) { + iounmap(io_data->base); + io_data->base = NULL; + } + io_data->len = 0; +} + +static void dp_parser_unmap_io_resources(struct dp_parser *parser) +{ + struct dp_io *io = &parser->io; + + msm_dss_iounmap(&io->dp_controller); +} + +static int dp_parser_ctrl_res(struct dp_parser *parser) +{ + int rc = 0; + struct platform_device *pdev = parser->pdev; + struct dp_io *io = &parser->io; + + rc = msm_dss_ioremap(pdev, &io->dp_controller); + if (rc) { + DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc); + goto err; + } + + io->phy = devm_phy_get(&pdev->dev, "dp"); + if (IS_ERR(io->phy)) { + rc = PTR_ERR(io->phy); + goto err; + } + + return 0; +err: + dp_parser_unmap_io_resources(parser); + return rc; +} + +static int dp_parser_misc(struct dp_parser *parser) +{ + struct device_node *of_node = parser->pdev->dev.of_node; + int len = 0; + const char *data_lane_property = "data-lanes"; + + len = of_property_count_elems_of_size(of_node, + data_lane_property, sizeof(u32)); + if (len < 0) { + DRM_WARN("Invalid property %s, default max DP lanes = %d\n", + data_lane_property, DP_MAX_NUM_DP_LANES); + len = DP_MAX_NUM_DP_LANES; + } + + parser->max_dp_lanes = len; + return 0; +} + +static inline bool dp_parser_check_prefix(const char *clk_prefix, + const char *clk_name) +{ + return !strncmp(clk_prefix, clk_name, strlen(clk_prefix)); +} + +static int dp_parser_init_clk_data(struct dp_parser *parser) +{ + int num_clk, i, rc; + int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0; + const char *clk_name; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; + struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM]; + struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM]; + + num_clk = of_property_count_strings(dev->of_node, "clock-names"); + if (num_clk <= 0) { + DRM_ERROR("no clocks are defined\n"); + return -EINVAL; + } + + for (i = 0; i < num_clk; i++) { + rc = of_property_read_string_index(dev->of_node, + "clock-names", i, &clk_name); + if (rc < 0) + return rc; + + if (dp_parser_check_prefix("core", clk_name)) + core_clk_count++; + + if (dp_parser_check_prefix("ctrl", clk_name)) + ctrl_clk_count++; + + if (dp_parser_check_prefix("stream", clk_name)) + stream_clk_count++; + } + + /* Initialize the CORE power module */ + if (core_clk_count == 0) { + DRM_ERROR("no core clocks are defined\n"); + return -EINVAL; + } + + core_power->num_clk = core_clk_count; + core_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * core_power->num_clk, + GFP_KERNEL); + if 
(!core_power->clk_config)
+		return -ENOMEM;
+
+	/* Initialize the CTRL power module */
+	if (ctrl_clk_count == 0) {
+		DRM_ERROR("no ctrl clocks are defined\n");
+		return -EINVAL;
+	}
+
+	ctrl_power->num_clk = ctrl_clk_count;
+	ctrl_power->clk_config = devm_kzalloc(dev,
+			sizeof(struct dss_clk) * ctrl_power->num_clk,
+			GFP_KERNEL);
+	if (!ctrl_power->clk_config) {
+		ctrl_power->num_clk = 0;
+		return -ENOMEM;
+	}
+
+	/* Initialize the STREAM power module */
+	if (stream_clk_count == 0) {
+		DRM_ERROR("no stream (pixel) clocks are defined\n");
+		return -EINVAL;
+	}
+
+	stream_power->num_clk = stream_clk_count;
+	stream_power->clk_config = devm_kzalloc(dev,
+			sizeof(struct dss_clk) * stream_power->num_clk,
+			GFP_KERNEL);
+	if (!stream_power->clk_config) {
+		stream_power->num_clk = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int dp_parser_clock(struct dp_parser *parser)
+{
+	int rc = 0, i = 0;
+	int num_clk = 0;
+	int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
+	int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+	const char *clk_name;
+	struct device *dev = &parser->pdev->dev;
+	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+	struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+	rc = dp_parser_init_clk_data(parser);
+	if (rc) {
+		DRM_ERROR("failed to initialize clock data %d\n", rc);
+		return -EINVAL;
+	}
+
+	core_clk_count = core_power->num_clk;
+	ctrl_clk_count = ctrl_power->num_clk;
+	stream_clk_count = stream_power->num_clk;
+
+	num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
+
+	for (i = 0; i < num_clk; i++) {
+		rc = of_property_read_string_index(dev->of_node, "clock-names",
+				i, &clk_name);
+		if (rc) {
+			DRM_ERROR("error reading clock-names %d\n", rc);
+			return rc;
+		}
+		if (dp_parser_check_prefix("core", clk_name) &&
+				core_clk_index < core_clk_count) {
+			struct dss_clk *clk =
+				&core_power->clk_config[core_clk_index];
+			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+			clk->type = DSS_CLK_AHB;
+			core_clk_index++;
+		} else if (dp_parser_check_prefix("stream", clk_name) &&
+				stream_clk_index < stream_clk_count) {
+			struct dss_clk *clk =
+				&stream_power->clk_config[stream_clk_index];
+			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+			clk->type = DSS_CLK_PCLK;
+			stream_clk_index++;
+		} else if (dp_parser_check_prefix("ctrl", clk_name) &&
+				ctrl_clk_index < ctrl_clk_count) {
+			struct dss_clk *clk =
+				&ctrl_power->clk_config[ctrl_clk_index];
+			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+			ctrl_clk_index++;
+			if (dp_parser_check_prefix("ctrl_link", clk_name) ||
+			    dp_parser_check_prefix("stream_pixel", clk_name))
+				clk->type = DSS_CLK_PCLK;
+			else
+				clk->type = DSS_CLK_AHB;
+		}
+	}
+
+	DRM_DEBUG_DP("clock parsing successful\n");
+
+	return 0;
+}
+
+static int dp_parser_parse(struct dp_parser *parser)
+{
+	int rc = 0;
+
+	if (!parser) {
+		DRM_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	rc = dp_parser_ctrl_res(parser);
+	if (rc)
+		return rc;
+
+	rc = dp_parser_misc(parser);
+	if (rc)
+		return rc;
+
+	rc = dp_parser_clock(parser);
+	if (rc)
+		return rc;
+
+	/* Map the corresponding regulator information according to
+	 * version. Currently, since we only have one supported platform,
+	 * map the regulator config directly.
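+	 *
+	 * Once a second platform is supported, a per-SoC lookup would slot
+	 * in here instead, e.g. (hypothetical):
+	 *
+	 *	parser->regulator_cfg =
+	 *		of_device_get_match_data(&parser->pdev->dev);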
+ */
+	parser->regulator_cfg = &sdm845_dp_reg_cfg;
+
+	return 0;
+}
+
+struct dp_parser *dp_parser_get(struct platform_device *pdev)
+{
+	struct dp_parser *parser;
+
+	parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
+	if (!parser)
+		return ERR_PTR(-ENOMEM);
+
+	parser->parse = dp_parser_parse;
+	parser->pdev = pdev;
+
+	return parser;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
new file mode 100644
index 000000000000..34b49628bbaf
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+
+#include "dpu_io_util.h"
+#include "msm_drv.h"
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define DP_MAX_PIXEL_CLK_KHZ	675000
+#define DP_MAX_NUM_DP_LANES	4
+
+enum dp_pm_type {
+	DP_CORE_PM,
+	DP_CTRL_PM,
+	DP_STREAM_PM,
+	DP_PHY_PM,
+	DP_MAX_PM
+};
+
+struct dss_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+	switch (module) {
+	case DP_CORE_PM:	return "DP_CORE_PM";
+	case DP_CTRL_PM:	return "DP_CTRL_PM";
+	case DP_STREAM_PM:	return "DP_STREAM_PM";
+	case DP_PHY_PM:		return "DP_PHY_PM";
+	default:		return "???";
+	}
+}
+
+/**
+ * struct dp_display_data - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node: reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+	struct device_node *ctrl_node;
+	struct device_node *phy_node;
+	bool is_active;
+	const char *name;
+	const char *display_type;
+};
+
+/**
+ * struct dp_io - controller's IO related data
+ *
+ * @dp_controller: Display Port controller mapped memory address
+ * @phy: handle to the DP PHY
+ * @phy_opts: configuration options passed to the DP PHY
+ */
+struct dp_io {
+	struct dss_io_data dp_controller;
+	struct phy *phy;
+	union phy_configure_opts phy_opts;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+	struct pinctrl *pin;
+	struct pinctrl_state *state_active;
+	struct pinctrl_state *state_hpd_active;
+	struct pinctrl_state *state_suspend;
+};
+
+#define DP_DEV_REGULATOR_MAX	4
+
+/* Regulators for DP devices */
+struct dp_reg_entry {
+	char name[32];
+	int enable_load;
+	int disable_load;
+};
+
+struct dp_regulator_cfg {
+	int num;
+	struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX];
+};
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @mp: gpio, regulator and clock related data
+ * @pinctrl: pin-control related data
+ * @io: controller and phy IO resources
+ * @disp_data: controller's display related data
+ * @regulator_cfg: regulator configuration for the current platform
+ * @max_dp_lanes: maximum number of DP data lanes described in devicetree
+ * @parse: function to be called by client to parse device tree.
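+ *
+ * Typical client usage (hypothetical sketch; both calls are declared in
+ * this header):
+ *
+ *	struct dp_parser *parser = dp_parser_get(pdev);
+ *
+ *	if (!IS_ERR(parser))
+ *		rc = parser->parse(parser);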
+ */ +struct dp_parser { + struct platform_device *pdev; + struct dss_module_power mp[DP_MAX_PM]; + struct dp_pinctrl pinctrl; + struct dp_io io; + struct dp_display_data disp_data; + const struct dp_regulator_cfg *regulator_cfg; + u32 max_dp_lanes; + + int (*parse)(struct dp_parser *parser); +}; + +/** + * dp_parser_get() - get the DP's device tree parser module + * + * @pdev: platform data of the client + * return: pointer to dp_parser structure. + * + * This function provides client capability to parse the + * device tree and populate the data structures. The data + * related to clock, regulators, pin-control and other + * can be parsed using this module. + */ +struct dp_parser *dp_parser_get(struct platform_device *pdev); + +#endif diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c new file mode 100644 index 000000000000..17c1fc6a2d44 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_power.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/regulator/consumer.h> +#include "dp_power.h" +#include "msm_drv.h" + +struct dp_power_private { + struct dp_parser *parser; + struct platform_device *pdev; + struct clk *link_clk_src; + struct clk *pixel_provider; + struct clk *link_provider; + struct regulator_bulk_data supplies[DP_DEV_REGULATOR_MAX]; + + struct dp_power dp_power; +}; + +static void dp_power_regulator_disable(struct dp_power_private *power) +{ + struct regulator_bulk_data *s = power->supplies; + const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs; + int num = power->parser->regulator_cfg->num; + int i; + + DBG(""); + for (i = num - 1; i >= 0; i--) + if (regs[i].disable_load >= 0) + regulator_set_load(s[i].consumer, + regs[i].disable_load); + + regulator_bulk_disable(num, s); +} + +static int dp_power_regulator_enable(struct dp_power_private *power) +{ + struct regulator_bulk_data *s = power->supplies; + const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs; + int num = power->parser->regulator_cfg->num; + int ret, i; + + DBG(""); + for (i = 0; i < num; i++) { + if (regs[i].enable_load >= 0) { + ret = regulator_set_load(s[i].consumer, + regs[i].enable_load); + if (ret < 0) { + pr_err("regulator %d set op mode failed, %d\n", + i, ret); + goto fail; + } + } + } + + ret = regulator_bulk_enable(num, s); + if (ret < 0) { + pr_err("regulator enable failed, %d\n", ret); + goto fail; + } + + return 0; + +fail: + for (i--; i >= 0; i--) + regulator_set_load(s[i].consumer, regs[i].disable_load); + return ret; +} + +static int dp_power_regulator_init(struct dp_power_private *power) +{ + struct regulator_bulk_data *s = power->supplies; + const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs; + struct platform_device *pdev = power->pdev; + int num = power->parser->regulator_cfg->num; + int i, ret; + + for (i = 0; i < num; i++) + s[i].supply = regs[i].name; + + ret = devm_regulator_bulk_get(&pdev->dev, num, s); + if (ret < 0) { + pr_err("%s: failed to init regulator, ret=%d\n", + __func__, ret); + return ret; + } + + return 0; +} + +static int dp_power_clk_init(struct dp_power_private *power) +{ + int rc = 0; + struct dss_module_power *core, *ctrl, *stream; + struct device *dev = &power->pdev->dev; + + core = &power->parser->mp[DP_CORE_PM]; + ctrl = &power->parser->mp[DP_CTRL_PM]; + stream = 
&power->parser->mp[DP_STREAM_PM];
+
+	rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk);
+	if (rc) {
+		DRM_ERROR("failed to get %s clk. err=%d\n",
+			dp_parser_pm_name(DP_CORE_PM), rc);
+		return rc;
+	}
+
+	rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk);
+	if (rc) {
+		DRM_ERROR("failed to get %s clk. err=%d\n",
+			dp_parser_pm_name(DP_CTRL_PM), rc);
+		msm_dss_put_clk(core->clk_config, core->num_clk);
+		return -ENODEV;
+	}
+
+	rc = msm_dss_get_clk(dev, stream->clk_config, stream->num_clk);
+	if (rc) {
+		DRM_ERROR("failed to get %s clk. err=%d\n",
+			dp_parser_pm_name(DP_STREAM_PM), rc);
+		msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+		msm_dss_put_clk(core->clk_config, core->num_clk);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int dp_power_clk_deinit(struct dp_power_private *power)
+{
+	struct dss_module_power *core, *ctrl, *stream;
+
+	core = &power->parser->mp[DP_CORE_PM];
+	ctrl = &power->parser->mp[DP_CTRL_PM];
+	stream = &power->parser->mp[DP_STREAM_PM];
+
+	if (!core || !ctrl || !stream) {
+		DRM_ERROR("invalid power_data\n");
+		return -EINVAL;
+	}
+
+	msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+	msm_dss_put_clk(core->clk_config, core->num_clk);
+	msm_dss_put_clk(stream->clk_config, stream->num_clk);
+	return 0;
+}
+
+static int dp_power_clk_set_rate(struct dp_power_private *power,
+		enum dp_pm_type module, bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp = &power->parser->mp[module];
+
+	if (enable) {
+		rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+		if (rc) {
+			DRM_ERROR("failed to set clks rate.\n");
+			return rc;
+		}
+	}
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+	if (rc) {
+		DRM_ERROR("failed to %s clks, err: %d\n",
+			enable ? "enable" : "disable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
+{
+	if (pm_type == DP_CORE_PM)
+		return dp_power->core_clks_on;
+
+	if (pm_type == DP_CTRL_PM)
+		return dp_power->link_clks_on;
+
+	if (pm_type == DP_STREAM_PM)
+		return dp_power->stream_clks_on;
+
+	return 0;
+}
+
+int dp_power_clk_enable(struct dp_power *dp_power,
+		enum dp_pm_type pm_type, bool enable)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
+			pm_type != DP_STREAM_PM) {
+		DRM_ERROR("unsupported power module: %s\n",
+				dp_parser_pm_name(pm_type));
+		return -EINVAL;
+	}
+
+	if (enable) {
+		if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
+			DRM_DEBUG_DP("core clks already enabled\n");
+			return 0;
+		}
+
+		if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
+			DRM_DEBUG_DP("link clks already enabled\n");
+			return 0;
+		}
+
+		if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
+			DRM_DEBUG_DP("pixel clks already enabled\n");
+			return 0;
+		}
+
+		if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
+			DRM_DEBUG_DP("Enable core clks before link clks\n");
+
+			rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
+			if (rc) {
+				DRM_ERROR("fail to enable clks: %s. err=%d\n",
+					dp_parser_pm_name(DP_CORE_PM), rc);
+				return rc;
+			}
+			dp_power->core_clks_on = true;
+		}
+	}
+
+	rc = dp_power_clk_set_rate(power, pm_type, enable);
+	if (rc) {
+		DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
+			enable ? "enable" : "disable",
+			dp_parser_pm_name(pm_type), rc);
+		return rc;
+	}
+
+	if (pm_type == DP_CORE_PM)
+		dp_power->core_clks_on = enable;
+	else if (pm_type == DP_STREAM_PM)
+		dp_power->stream_clks_on = enable;
+	else
+		dp_power->link_clks_on = enable;
+
+	DRM_DEBUG_DP("%s clocks for %s\n",
+		enable ? "enable" : "disable",
+		dp_parser_pm_name(pm_type));
+	DRM_DEBUG_DP("stream_clks:%s link_clks:%s core_clks:%s\n",
+		dp_power->stream_clks_on ? "on" : "off",
+		dp_power->link_clks_on ? "on" : "off",
+		dp_power->core_clks_on ? "on" : "off");
+
+	return 0;
+}
+
+int dp_power_client_init(struct dp_power *dp_power)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		DRM_ERROR("invalid power data\n");
+		return -EINVAL;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	pm_runtime_enable(&power->pdev->dev);
+
+	rc = dp_power_regulator_init(power);
+	if (rc) {
+		DRM_ERROR("failed to init regulators %d\n", rc);
+		goto error;
+	}
+
+	rc = dp_power_clk_init(power);
+	if (rc) {
+		DRM_ERROR("failed to init clocks %d\n", rc);
+		goto error;
+	}
+	return 0;
+
+error:
+	pm_runtime_disable(&power->pdev->dev);
+	return rc;
+}
+
+void dp_power_client_deinit(struct dp_power *dp_power)
+{
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		DRM_ERROR("invalid power data\n");
+		return;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	dp_power_clk_deinit(power);
+	pm_runtime_disable(&power->pdev->dev);
+}
+
+int dp_power_init(struct dp_power *dp_power, bool flip)
+{
+	int rc = 0;
+	struct dp_power_private *power = NULL;
+
+	if (!dp_power) {
+		DRM_ERROR("invalid power data\n");
+		return -EINVAL;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	pm_runtime_get_sync(&power->pdev->dev);
+	rc = dp_power_regulator_enable(power);
+	if (rc) {
+		DRM_ERROR("failed to enable regulators, %d\n", rc);
+		goto exit;
+	}
+
+	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
+	if (rc) {
+		DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
+		goto err_clk;
+	}
+
+	return 0;
+
+err_clk:
+	dp_power_regulator_disable(power);
+exit:
+	pm_runtime_put_sync(&power->pdev->dev);
+	return rc;
+}
+
+int dp_power_deinit(struct dp_power *dp_power)
+{
+	struct dp_power_private *power;
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+	dp_power_regulator_disable(power);
+	pm_runtime_put_sync(&power->pdev->dev);
+	return 0;
+}
+
+struct dp_power *dp_power_get(struct dp_parser *parser)
+{
+	struct dp_power_private *power;
+	struct dp_power *dp_power;
+
+	if (!parser) {
+		DRM_ERROR("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
+	if (!power)
+		return ERR_PTR(-ENOMEM);
+
+	power->parser = parser;
+	power->pdev = parser->pdev;
+
+	dp_power = &power->dp_power;
+
+	return dp_power;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
new file mode 100644
index 000000000000..76743d755833
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @core_clks_on: true while the core clocks are enabled
+ * @link_clks_on: true while the link (ctrl) clocks are enabled
+ * @stream_clks_on: true while the stream (pixel) clocks are enabled
+ */
+struct dp_power {
+	bool core_clks_on;
+	bool link_clks_on;
+	bool stream_clks_on;
+};
+
+/**
+ * dp_power_init() - enable power supplies for display controller
+ *
+ * @power: instance of power module
+ * @flip: bool for flipping gpio direction
+ * return: 0 if success or error if failure.
+ *
+ * This API will turn on the regulators and configure the aux/hpd
+ * gpios.
+ */
+int dp_power_init(struct dp_power *power, bool flip);
+
+/**
+ * dp_power_deinit() - turn off regulators and gpios.
+ *
+ * @power: instance of power module
+ * return: 0 for success
+ *
+ * This API turns off power and regulators.
+ */
+int dp_power_deinit(struct dp_power *power);
+
+/**
+ * dp_power_clk_status() - display controller clocks status
+ *
+ * @dp_power: instance of power module
+ * @pm_type: type of pm, core/ctrl/stream
+ * return: status of power clocks
+ *
+ * This API returns the status of the DP clocks
+ */
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
+
+/**
+ * dp_power_clk_enable() - enable display controller clocks
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/stream
+ * @enable: enables or disables
+ * return: 0 for success, error for failure.
+ *
+ * This API will set the clock rates and enable/disable the DP clocks
+ */
+int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
+				bool enable);
+
+/**
+ * dp_power_client_init() - initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will configure the DisplayPort's clocks and regulator
+ * modules.
+ */
+int dp_power_client_init(struct dp_power *power);
+
+/**
+ * dp_power_client_deinit() - de-initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ *
+ * This API will de-initialize the DisplayPort's clocks and regulator
+ * modules.
+ */
+void dp_power_client_deinit(struct dp_power *power);
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provides
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct dp_parser *parser);
+
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644
index 000000000000..43042ff90a19
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */ + +#ifndef _DP_REG_H_ +#define _DP_REG_H_ + +/* DP_TX Registers */ +#define REG_DP_HW_VERSION (0x00000000) + +#define REG_DP_SW_RESET (0x00000010) +#define DP_SW_RESET (0x00000001) + +#define REG_DP_PHY_CTRL (0x00000014) +#define DP_PHY_CTRL_SW_RESET_PLL (0x00000001) +#define DP_PHY_CTRL_SW_RESET (0x00000004) + +#define REG_DP_CLK_CTRL (0x00000018) +#define REG_DP_CLK_ACTIVE (0x0000001C) +#define REG_DP_INTR_STATUS (0x00000020) +#define REG_DP_INTR_STATUS2 (0x00000024) +#define REG_DP_INTR_STATUS3 (0x00000028) + +#define REG_DP_DP_HPD_CTRL (0x00000000) +#define DP_DP_HPD_CTRL_HPD_EN (0x00000001) + +#define REG_DP_DP_HPD_INT_STATUS (0x00000004) + +#define REG_DP_DP_HPD_INT_ACK (0x00000008) +#define DP_DP_HPD_PLUG_INT_ACK (0x00000001) +#define DP_DP_IRQ_HPD_INT_ACK (0x00000002) +#define DP_DP_HPD_REPLUG_INT_ACK (0x00000004) +#define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008) + +#define REG_DP_DP_HPD_INT_MASK (0x0000000C) +#define DP_DP_HPD_PLUG_INT_MASK (0x00000001) +#define DP_DP_IRQ_HPD_INT_MASK (0x00000002) +#define DP_DP_HPD_REPLUG_INT_MASK (0x00000004) +#define DP_DP_HPD_UNPLUG_INT_MASK (0x00000008) +#define DP_DP_HPD_INT_MASK (DP_DP_HPD_PLUG_INT_MASK | \ + DP_DP_IRQ_HPD_INT_MASK | \ + DP_DP_HPD_REPLUG_INT_MASK | \ + DP_DP_HPD_UNPLUG_INT_MASK) +#define DP_DP_HPD_STATE_STATUS_CONNECTED (0x40000000) +#define DP_DP_HPD_STATE_STATUS_PENDING (0x20000000) +#define DP_DP_HPD_STATE_STATUS_DISCONNECTED (0x00000000) +#define DP_DP_HPD_STATE_STATUS_MASK (0xE0000000) + +#define REG_DP_DP_HPD_REFTIMER (0x00000018) +#define DP_DP_HPD_REFTIMER_ENABLE (1 << 16) + +#define REG_DP_DP_HPD_EVENT_TIME_0 (0x0000001C) +#define REG_DP_DP_HPD_EVENT_TIME_1 (0x00000020) +#define DP_DP_HPD_EVENT_TIME_0_VAL (0x3E800FA) +#define DP_DP_HPD_EVENT_TIME_1_VAL (0x1F407D0) + +#define REG_DP_AUX_CTRL (0x00000030) +#define DP_AUX_CTRL_ENABLE (0x00000001) +#define DP_AUX_CTRL_RESET (0x00000002) + +#define REG_DP_AUX_DATA (0x00000034) +#define DP_AUX_DATA_READ (0x00000001) +#define DP_AUX_DATA_WRITE (0x00000000) +#define DP_AUX_DATA_OFFSET (0x00000008) +#define DP_AUX_DATA_INDEX_OFFSET (0x00000010) +#define DP_AUX_DATA_MASK (0x0000ff00) +#define DP_AUX_DATA_INDEX_WRITE (0x80000000) + +#define REG_DP_AUX_TRANS_CTRL (0x00000038) +#define DP_AUX_TRANS_CTRL_I2C (0x00000100) +#define DP_AUX_TRANS_CTRL_GO (0x00000200) +#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR (0x00000400) +#define DP_AUX_TRANS_CTRL_NO_SEND_STOP (0x00000800) + +#define REG_DP_TIMEOUT_COUNT (0x0000003C) +#define REG_DP_AUX_LIMITS (0x00000040) +#define REG_DP_AUX_STATUS (0x00000044) + +#define DP_DPCD_CP_IRQ (0x201) +#define DP_DPCD_RXSTATUS (0x69493) + +#define DP_INTERRUPT_TRANS_NUM (0x000000A0) + +#define REG_DP_MAINLINK_CTRL (0x00000000) +#define DP_MAINLINK_CTRL_ENABLE (0x00000001) +#define DP_MAINLINK_CTRL_RESET (0x00000002) +#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER (0x00000010) +#define DP_MAINLINK_FB_BOUNDARY_SEL (0x02000000) + +#define REG_DP_STATE_CTRL (0x00000004) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1 (0x00000001) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2 (0x00000002) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3 (0x00000004) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4 (0x00000008) +#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE (0x00000010) +#define DP_STATE_CTRL_LINK_PRBS7 (0x00000020) +#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040) +#define DP_STATE_CTRL_SEND_VIDEO (0x00000080) +#define DP_STATE_CTRL_PUSH_IDLE (0x00000100) + +#define REG_DP_CONFIGURATION_CTRL (0x00000008) +#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK 
(0x00000001) +#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002) +#define DP_CONFIGURATION_CTRL_P_INTERLACED (0x00000004) +#define DP_CONFIGURATION_CTRL_INTERLACED_BTF (0x00000008) +#define DP_CONFIGURATION_CTRL_NUM_OF_LANES (0x00000010) +#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040) +#define DP_CONFIGURATION_CTRL_SEND_VSC (0x00000080) +#define DP_CONFIGURATION_CTRL_BPC (0x00000100) +#define DP_CONFIGURATION_CTRL_ASSR (0x00000400) +#define DP_CONFIGURATION_CTRL_RGB_YUV (0x00000800) +#define DP_CONFIGURATION_CTRL_LSCLK_DIV (0x00002000) +#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT (0x04) +#define DP_CONFIGURATION_CTRL_BPC_SHIFT (0x08) +#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT (0x0D) + +#define REG_DP_SOFTWARE_MVID (0x00000010) +#define REG_DP_SOFTWARE_NVID (0x00000018) +#define REG_DP_TOTAL_HOR_VER (0x0000001C) +#define REG_DP_START_HOR_VER_FROM_SYNC (0x00000020) +#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024) +#define REG_DP_ACTIVE_HOR_VER (0x00000028) + +#define REG_DP_MISC1_MISC0 (0x0000002C) +#define DP_MISC0_SYNCHRONOUS_CLK (0x00000001) +#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001) +#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005) + +#define REG_DP_VALID_BOUNDARY (0x00000030) +#define REG_DP_VALID_BOUNDARY_2 (0x00000034) + +#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038) +#define LANE0_MAPPING_SHIFT (0x00000000) +#define LANE1_MAPPING_SHIFT (0x00000002) +#define LANE2_MAPPING_SHIFT (0x00000004) +#define LANE3_MAPPING_SHIFT (0x00000006) + +#define REG_DP_MAINLINK_READY (0x00000040) +#define DP_MAINLINK_READY_FOR_VIDEO (0x00000001) +#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT (0x00000003) + +#define REG_DP_MAINLINK_LEVELS (0x00000044) +#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2 (0x00000002) + + +#define REG_DP_TU (0x0000004C) + +#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054) +#define DP_HBR2_ERM_PATTERN (0x00010000) + +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0) +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4) +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8) + +#define MMSS_DP_MISC1_MISC0 (0x0000002C) +#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080) +#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084) +#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088) +#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C) +#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090) +#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094) +#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098) + +#define MMSS_DP_PSR_CRC_RG (0x00000154) +#define MMSS_DP_PSR_CRC_B (0x00000158) + +#define REG_DP_COMPRESSION_MODE_CTRL (0x00000180) + +#define MMSS_DP_AUDIO_CFG (0x00000200) +#define MMSS_DP_AUDIO_STATUS (0x00000204) +#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208) +#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C) +#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210) +#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214) + +#define MMSS_DP_SDP_CFG (0x00000228) +#define MMSS_DP_SDP_CFG2 (0x0000022C) +#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230) +#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234) + +#define MMSS_DP_AUDIO_STREAM_0 (0x00000240) +#define MMSS_DP_AUDIO_STREAM_1 (0x00000244) + +#define MMSS_DP_EXTENSION_0 (0x00000250) +#define MMSS_DP_EXTENSION_1 (0x00000254) +#define MMSS_DP_EXTENSION_2 (0x00000258) +#define MMSS_DP_EXTENSION_3 (0x0000025C) +#define MMSS_DP_EXTENSION_4 (0x00000260) +#define MMSS_DP_EXTENSION_5 (0x00000264) +#define MMSS_DP_EXTENSION_6 (0x00000268) +#define MMSS_DP_EXTENSION_7 (0x0000026C) +#define MMSS_DP_EXTENSION_8 (0x00000270) 
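
Taken together, the HPD block definitions above (REG_DP_DP_HPD_CTRL, the *_INT_ACK bits, and DP_DP_HPD_INT_MASK) describe a conventional enable/ack/unmask sequence. As a rough orientation aid (not part of this patch), here is a minimal sketch of that sequence, assuming generic readl()/writel() accessors and a mapped dp_ahb base; the real driver routes these accesses through its own catalog helpers:

#include <linux/io.h>

/*
 * Illustrative sketch only: bring up hot-plug detection using the
 * register definitions from dp_reg.h above. "dp_ahb" is an assumed
 * pointer to the mapped HPD/AHB register region.
 */
static void dp_hpd_enable_sketch(void __iomem *dp_ahb)
{
	u32 reftimer = readl(dp_ahb + REG_DP_DP_HPD_REFTIMER);

	/* debounce the HPD line with the reference timer */
	writel(reftimer | DP_DP_HPD_REFTIMER_ENABLE,
	       dp_ahb + REG_DP_DP_HPD_REFTIMER);

	/* ack any stale events, then unmask plug/irq/replug/unplug */
	writel(DP_DP_HPD_PLUG_INT_ACK | DP_DP_IRQ_HPD_INT_ACK |
	       DP_DP_HPD_REPLUG_INT_ACK | DP_DP_HPD_UNPLUG_INT_ACK,
	       dp_ahb + REG_DP_DP_HPD_INT_ACK);
	writel(DP_DP_HPD_INT_MASK, dp_ahb + REG_DP_DP_HPD_INT_MASK);

	/* finally enable the HPD state machine */
	writel(DP_DP_HPD_CTRL_HPD_EN, dp_ahb + REG_DP_DP_HPD_CTRL);
}
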
+#define MMSS_DP_EXTENSION_9 (0x00000274) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C) +#define MMSS_DP_AUDIO_ISRC_0 (0x00000290) +#define MMSS_DP_AUDIO_ISRC_1 (0x00000294) +#define MMSS_DP_AUDIO_ISRC_2 (0x00000298) +#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C) +#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0) +#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4) +#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8) +#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC) +#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0) + +#define MMSS_DP_GENERIC0_0 (0x00000300) +#define MMSS_DP_GENERIC0_1 (0x00000304) +#define MMSS_DP_GENERIC0_2 (0x00000308) +#define MMSS_DP_GENERIC0_3 (0x0000030C) +#define MMSS_DP_GENERIC0_4 (0x00000310) +#define MMSS_DP_GENERIC0_5 (0x00000314) +#define MMSS_DP_GENERIC0_6 (0x00000318) +#define MMSS_DP_GENERIC0_7 (0x0000031C) +#define MMSS_DP_GENERIC0_8 (0x00000320) +#define MMSS_DP_GENERIC0_9 (0x00000324) +#define MMSS_DP_GENERIC1_0 (0x00000328) +#define MMSS_DP_GENERIC1_1 (0x0000032C) +#define MMSS_DP_GENERIC1_2 (0x00000330) +#define MMSS_DP_GENERIC1_3 (0x00000334) +#define MMSS_DP_GENERIC1_4 (0x00000338) +#define MMSS_DP_GENERIC1_5 (0x0000033C) +#define MMSS_DP_GENERIC1_6 (0x00000340) +#define MMSS_DP_GENERIC1_7 (0x00000344) +#define MMSS_DP_GENERIC1_8 (0x00000348) +#define MMSS_DP_GENERIC1_9 (0x0000034C) + +#define MMSS_DP_VSCEXT_0 (0x000002D0) +#define MMSS_DP_VSCEXT_1 (0x000002D4) +#define MMSS_DP_VSCEXT_2 (0x000002D8) +#define MMSS_DP_VSCEXT_3 (0x000002DC) +#define MMSS_DP_VSCEXT_4 (0x000002E0) +#define MMSS_DP_VSCEXT_5 (0x000002E4) +#define MMSS_DP_VSCEXT_6 (0x000002E8) +#define MMSS_DP_VSCEXT_7 (0x000002EC) +#define MMSS_DP_VSCEXT_8 (0x000002F0) +#define MMSS_DP_VSCEXT_9 (0x000002F4) + +#define MMSS_DP_BIST_ENABLE (0x00000000) +#define DP_BIST_ENABLE_DPBIST_EN (0x00000001) + +#define MMSS_DP_TIMING_ENGINE_EN (0x00000010) +#define DP_TIMING_ENGINE_EN_EN (0x00000001) + +#define MMSS_DP_INTF_CONFIG (0x00000014) +#define MMSS_DP_INTF_HSYNC_CTL (0x00000018) +#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C) +#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028) +#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C) +#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030) +#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034) +#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038) +#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C) +#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040) +#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044) +#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048) +#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C) +#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050) +#define MMSS_DP_INTF_POLARITY_CTL (0x00000058) + +#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060) +#define MMSS_DP_DSC_DTO (0x0000007C) +#define DP_TPG_CHECKERED_RECT_PATTERN (0x00000100) + +#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064) +#define DP_TPG_VIDEO_CONFIG_BPP_8BIT (0x00000001) +#define DP_TPG_VIDEO_CONFIG_RGB (0x00000004) + +#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088) + +#define REG_DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C) +#define REG_DP_PHY_AUX_BIST_CFG (0x00000050) +#define REG_DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC) + +/* DP HDCP 1.3 registers */ +#define 
DP_HDCP_CTRL (0x0A0) +#define DP_HDCP_STATUS (0x0A4) +#define DP_HDCP_SW_UPPER_AKSV (0x098) +#define DP_HDCP_SW_LOWER_AKSV (0x09C) +#define DP_HDCP_ENTROPY_CTRL0 (0x350) +#define DP_HDCP_ENTROPY_CTRL1 (0x35C) +#define DP_HDCP_SHA_STATUS (0x0C8) +#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0) +#define DP_HDCP_RCVPORT_DATA3 (0x0A4) +#define DP_HDCP_RCVPORT_DATA4 (0x0A8) +#define DP_HDCP_RCVPORT_DATA5 (0x0C0) +#define DP_HDCP_RCVPORT_DATA6 (0x0C4) + +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020) + +#endif /* _DP_REG_H_ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 4de771d6f0be..78ef5d4ed922 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -30,6 +30,8 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_8960, MSM_DSI_PHY_14NM, MSM_DSI_PHY_10NM, + MSM_DSI_PHY_7NM, + MSM_DSI_PHY_7NM_V4_1, MSM_DSI_PHY_MAX }; diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 8e536e060070..50eb4d1b8fdd 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -1886,5 +1886,428 @@ static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000 #define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0 +#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000 + +#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004 + +#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008 + +#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c + +#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010 + +#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014 + +#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018 + +#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c + +#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020 + +#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024 + +#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028 + +#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c + +#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030 + +#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034 + +#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038 + +#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c + +#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8 + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac + +#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc + +#define 
REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4 + +#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8 + +#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec + +#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0 + +#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4 + +#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8 + +#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc + +#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100 + +#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104 + +#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108 + +#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c + +#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110 + +#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114 + +#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128 + +#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140 + +#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148 + +#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c + +static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; } + +static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; } + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004 + +#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008 + +#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018 + +#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c + +#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020 + +#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024 + +#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028 + +#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c + +#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030 + +#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034 + +#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038 + +#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c + +#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040 + +#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c + +#define 
REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088 + +#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c + +#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090 + +#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094 + +#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098 + +#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c + +#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0 + +#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4 + +#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8 + +#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac + +#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0 + +#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4 + +#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8 + +#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc + +#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0 + +#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4 + +#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8 + +#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8 + +#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc + +#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec + +#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8 + +#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc + +#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118 + +#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138 + +#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140 + +#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148 + +#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c + +#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150 + +#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c 
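
The FRAC_DIV_START_{LOW,MID,HIGH}_1 registers listed above carry an 18-bit fractional feedback-divider word split 8/8/2, which is how dsi_pll_calc_dec_frac() and dsi_pll_commit() later in this patch use them. A hedged sketch of the packing (the helper name is invented for illustration; pll_write() is the driver's own MMIO accessor):

/*
 * Illustrative sketch only: split an 18-bit fractional divider across
 * the LOW/MID/HIGH register triplet (8 + 8 + 2 bits).
 */
static void dsi_pll_write_frac_sketch(void __iomem *mmio, u32 frac)
{
	pll_write(mmio + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
		  frac & 0xff);		/* bits  7:0  */
	pll_write(mmio + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
		  (frac >> 8) & 0xff);	/* bits 15:8  */
	pll_write(mmio + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
		  (frac >> 16) & 0x3);	/* bits 17:16 */
}
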
+ +#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160 + +#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164 + +#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168 + +#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c + +#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170 + +#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174 + +#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178 + +#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c + +#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180 + +#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184 + +#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188 + +#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194 + +#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198 + +#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c + +#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0 + +#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4 + +#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8 + +#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac + +#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0 + +#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4 + +#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8 + +#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc + +#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0 + +#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4 + +#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8 + +#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc + +#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0 + +#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4 + +#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8 + +#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc + +#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0 + +#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4 + +#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8 + +#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec + +#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0 + +#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4 + +#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208 + +#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c + +#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210 + +#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214 + +#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218 + +#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c + +#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220 + +#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224 + +#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228 + +#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c + +#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230 + +#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234 + +#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238 + +#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c + +#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240 + +#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244 + +#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248 + +#define 
REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c + +#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250 + +#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258 + +#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c + +#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260 #endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index f892f2cbe8bb..b2ff68a15791 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -265,9 +265,12 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, - }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index efd469d1db45..ade9b609c7d9 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -21,6 +21,8 @@ #define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000 #define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 #define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 +#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 +#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 #define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 #define MSM_DSI_V2_VER_MINOR_8064 0x0 diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 009f5b843dd1..e8c1a727179c 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -364,6 +364,102 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, return 0; } +int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8; + s32 tmax, tmin; + s32 pcnt_clk_prep = 50; + s32 pcnt_clk_zero = 2; + s32 pcnt_clk_trail = 30; + s32 pcnt_hs_prep = 50; + s32 pcnt_hs_zero = 10; + s32 pcnt_hs_trail = 30; + s32 pcnt_hs_exit = 10; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + hb_en = 0; + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + + /* TODO: verify these calculations against latest downstream driver + * everything except clk_post/clk_pre uses calculations from v3 based + * on the downstream driver having the same calculations for v3 and v4 + */ + + temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false); + + temp = 300 * coeff - (timing->clk_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = (tmin > 255) ? 
511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false); + + temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui; + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1; + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp / ui_x8) - 1; + timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false); + + /* recommended min + * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1 + */ + temp = 60 * coeff + 52 * ui + + (timing->hs_trail + 1) * ui_x8; + tmin = DIV_ROUND_UP(temp, 16 * ui) - 1; + tmax = 255; + timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false); + + /* recommended min + * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns) + * val2 = (16 * bit_clk_ns) + * final = roundup(val1/val2, 0) - 1 + */ + temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff; + tmin = DIV_ROUND_UP(temp, 16 * ui) - 1; + tmax = 255; + timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst); + + return 0; +} + void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask) { @@ -508,6 +604,12 @@ static const struct of_device_id dsi_phy_dt_match[] = { { .compatible = "qcom,dsi-phy-10nm-8998", .data = &dsi_phy_10nm_8998_cfgs }, #endif +#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY + { .compatible = "qcom,dsi-phy-7nm", + .data = &dsi_phy_7nm_cfgs }, + { .compatible = "qcom,dsi-phy-7nm-8150", + .data = &dsi_phy_7nm_8150_cfgs }, +#endif {} }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index ef8672d7b123..d2bd74b6f357 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -48,10 +48,10 @@ extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs; struct msm_dsi_dphy_timing { - u32 clk_pre; - u32 clk_post; u32 clk_zero; u32 clk_trail; u32 clk_prepare; @@ -102,6 +102,8 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing 
*timing,
+			struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
 				u32 bit_mask);
 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
new file mode 100644
index 000000000000..255b5f5ab2ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -0,0 +1,255 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+	void __iomem *base = phy->base;
+	u32 data = 0;
+
+	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+	mb(); /* make sure read happened */
+
+	return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *lane_base = phy->lane_base;
+	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */
+
+	/*
+	 * LPRX and CDRX need to be enabled only for the physical data lane
+	 * corresponding to the logical data lane 0
+	 */
+	if (enable)
+		dsi_phy_write(lane_base +
+			      REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+	else
+		dsi_phy_write(lane_base +
+			      REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+	int i;
+	const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+	const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+	const u8 *tx_dctrl = tx_dctrl_0;
+	void __iomem *lane_base = phy->lane_base;
+
+	if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
+		tx_dctrl = tx_dctrl_1;
+
+	/* Strength ctrl settings */
+	for (i = 0; i < 5; i++) {
+		/*
+		 * Disable LPRX and CDRX for all lanes. And later on, it will
+		 * be enabled only for the physical data lane corresponding
+		 * to the logical data lane 0
+		 */
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
+	}
+
+	dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+	/* other settings */
+	for (i = 0; i < 5; i++) {
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
+		dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
+	}
+}
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+			      struct msm_dsi_phy_clk_request *clk_req)
+{
+	int ret;
+	u32 status;
+	u32 const delay_us = 5;
+	u32 const timeout_us = 1000;
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	void __iomem *base = phy->base;
+	bool less_than_1500_mhz;
+	u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+	u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+	u32 data;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
+		DRM_DEV_ERROR(&phy->pdev->dev,
+			      "%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dsi_phy_hw_v4_0_is_pll_on(phy))
+		pr_warn("PLL turned on before configuring PHY\n");
+
+	/* wait for REFGEN READY */
+	ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+					status, (status & BIT(0)),
+					delay_us, timeout_us);
+	if (ret) {
+		pr_err("Ref gen not ready. Aborting\n");
+		return -EINVAL;
+	}
+
+	/* TODO: CPHY enable path (this is for DPHY only) */
+
+	/* Alter PHY configurations if the data rate is less than 1.5 GHz */
+	less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+	if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
+		vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+		glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
+		glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
+		glbl_str_swi_cal_sel_ctrl = 0x00;
+		glbl_hstx_str_ctrl_0 = 0x88;
+	} else {
+		vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+		glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+		glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+		glbl_rescode_top_ctrl = 0x03;
+		glbl_rescode_bot_ctrl = 0x3c;
+	}
+
+	/* de-assert digital and pll power down */
+	data = BIT(6) | BIT(5);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+
+	/* Assert PLL core reset */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+	/* turn off resync FIFO */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+	/* program CMN_CTRL_4 for minor_ver 2 chipsets */
+	data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
+	data = data & (0xf0);
+	if (data == 0x20)
+		dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
+
+	/* Configure PHY lane swap (TODO: we need to calculate this) */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+
+	/* Enable LDO */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+		      glbl_str_swi_cal_sel_ctrl);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
+		      glbl_hstx_str_ctrl_0);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+		      glbl_rescode_top_ctrl);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+		      glbl_rescode_bot_ctrl);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+	/* Remove power down from all blocks */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
+
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
+
+	/* Select full-rate mode */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+
+	ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+	if (ret) {
+		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+			      __func__, ret);
+		return ret;
+	}
+
+	/* DSI PHY timings */
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+	dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11,
0x00); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12, + timing->shared_timings.clk_pre); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13, + timing->shared_timings.clk_post); + + /* DSI lane settings */ + dsi_phy_hw_v4_0_lane_settings(phy); + + DBG("DSI%d PHY enabled", phy->id); + + return 0; +} + +static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) +{ + /* TODO */ +} + +static int dsi_7nm_phy_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + + phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", + "DSI_PHY_LANE"); + if (IS_ERR(phy->lane_base)) { + DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = { + .type = MSM_DSI_PHY_7NM_V4_1, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vdds", 36000, 32}, + }, + }, + .ops = { + .enable = dsi_7nm_phy_enable, + .disable = dsi_7nm_phy_disable, + .init = dsi_7nm_phy_init, + }, + .io_start = { 0xae94400, 0xae96400 }, + .num_dsi_phy = 2, +}; + +const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = { + .type = MSM_DSI_PHY_7NM, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vdds", 36000, 32}, + }, + }, + .ops = { + .enable = dsi_7nm_phy_enable, + .disable = dsi_7nm_phy_disable, + .init = dsi_7nm_phy_init, + }, + .io_start = { 0xae94400, 0xae96400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 4a4aa3c61d71..a45fe95aff49 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -161,6 +161,10 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, case MSM_DSI_PHY_10NM: pll = msm_dsi_pll_10nm_init(pdev, id); break; + case MSM_DSI_PHY_7NM: + case MSM_DSI_PHY_7NM_V4_1: + pll = msm_dsi_pll_7nm_init(pdev, id); + break; default: pll = ERR_PTR(-ENXIO); break; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index c6a3623f905d..3405982a092c 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -116,5 +116,15 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) return ERR_PTR(-ENODEV); } #endif +#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id); +#else +static inline struct msm_dsi_pll * +msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif + #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c new file mode 100644 index 000000000000..de0dfb815125 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c @@ -0,0 +1,904 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/iopoll.h> + +#include "dsi_pll.h" +#include "dsi.xml.h" + +/* + * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: updated CPHY diagram + * + * dsi0_pll_out_div_clk dsi0_pll_bit_clk + * | | + * | | + * +---------+ | +----------+ | +----+ + * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk + * +---------+ | +----------+ | +----+ + * | | + * | | dsi0_pll_by_2_bit_clk + * | | | + * | | +----+ | |\ dsi0_pclk_mux + * | |--| /2 |--o--| \ | + * | | +----+ | \ | +---------+ + * | 
--------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk + * |------------------------------| / +---------+ + * | +-----+ | / + * -----------| /4? |--o----------|/ + * +-----+ | | + * | |dsiclk_sel + * | + * dsi0_pll_post_out_div_clk + */ + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 +#define NUM_PROVIDED_CLKS 2 + +#define VCO_REF_CLK_RATE 19200000 + +struct dsi_pll_regs { + u32 pll_prop_gain_rate; + u32 pll_lockdet_rate; + u32 decimal_div_start; + u32 frac_div_start_low; + u32 frac_div_start_mid; + u32 frac_div_start_high; + u32 pll_clock_inverters; + u32 ssc_stepsize_low; + u32 ssc_stepsize_high; + u32 ssc_div_per_low; + u32 ssc_div_per_high; + u32 ssc_adjper_low; + u32 ssc_adjper_high; + u32 ssc_control; +}; + +struct dsi_pll_config { + u32 ref_freq; + bool div_override; + u32 output_div; + bool ignore_frac; + bool disable_prescaler; + bool enable_ssc; + bool ssc_center; + u32 dec_bits; + u32 frac_bits; + u32 lock_timer; + u32 ssc_freq; + u32 ssc_offset; + u32 ssc_adj_per; + u32 thresh_cycles; + u32 refclk_cycles; +}; + +struct pll_7nm_cached_state { + unsigned long vco_rate; + u8 bit_clk_div; + u8 pix_clk_div; + u8 pll_out_div; + u8 pll_mux; +}; + +struct dsi_pll_7nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + + void __iomem *phy_cmn_mmio; + void __iomem *mmio; + + u64 vco_ref_clk_rate; + u64 vco_current_rate; + + /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + int vco_delay; + struct dsi_pll_config pll_configuration; + struct dsi_pll_regs reg_setup; + + /* private clocks: */ + struct clk_hw *out_div_clk_hw; + struct clk_hw *bit_clk_hw; + struct clk_hw *byte_clk_hw; + struct clk_hw *by_2_bit_clk_hw; + struct clk_hw *post_out_div_clk_hw; + struct clk_hw *pclk_mux_hw; + struct clk_hw *out_dsiclk_hw; + + /* clock-provider: */ + struct clk_hw_onecell_data *hw_data; + + struct pll_7nm_cached_state cached_state; + + enum msm_dsi_phy_usecase uc; + struct dsi_pll_7nm *slave; +}; + +#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, base) + +/* + * Global list of private DSI PLL struct pointers. 
We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = pll->vco_ref_clk_rate;
+	config->output_div = 1;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 4800;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->div_override = false;
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+
+	/* TODO: ssc enable */
+	config->enable_ssc = false;
+	config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = pll->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = pll->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	div_u64_rem(dec_multiple, multiplier, &frac);
+
+	dec = div_u64(dec_multiple, multiplier);
+
+	if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
+		regs->pll_clock_inverters = 0x28;
+	else if (pll_freq <= 1000000000ULL)
+		regs->pll_clock_inverters = 0xa0;
+	else if (pll_freq <= 2500000000ULL)
+		regs->pll_clock_inverters = 0x20;
+	else if (pll_freq <= 3020000000ULL)
+		regs->pll_clock_inverters = 0x00;
+	else
+		regs->pll_clock_inverters = 0x40;
+
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
+#define SSC_CENTER	BIT(0)
+#define SSC_EN		BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		DBG("SSC not enabled\n");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
+
+	frac = regs->frac_div_start_low |
+	       (regs->frac_div_start_mid << 8) |
+	       (regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n",
+		 regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+		 ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+			  regs->ssc_stepsize_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+			  regs->ssc_stepsize_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+			  regs->ssc_div_per_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+			  regs->ssc_div_per_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+			  regs->ssc_adjper_low);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+			  regs->ssc_adjper_high);
+		pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+			  SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+		if (pll->vco_current_rate >= 3100000000ULL)
+			analog_controls_five_1 = 0x03;
+
+		if (pll->vco_current_rate < 1520000000ULL)
+			vco_config_1 = 0x08;
+		else if (pll->vco_current_rate < 2990000000ULL)
+			vco_config_1 = 0x01;
+	}
+
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+		  analog_controls_five_1);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+		  pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
+
+	if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+		pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+		if (pll->slave)
+			pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+	}
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll)
+{
+	void __iomem *base = pll->mmio;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+	pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
+	    parent_rate);
+
+	pll_7nm->vco_current_rate = rate;
+	pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+	dsi_pll_setup_config(pll_7nm);
+
+	dsi_pll_calc_dec_frac(pll_7nm);
+
+	dsi_pll_calc_ssc(pll_7nm);
+
+	dsi_pll_commit(pll_7nm);
+
+	dsi_pll_config_hzindep_reg(pll_7nm);
+
+	dsi_pll_ssc_commit(pll_7nm);
+
+	/* flush, ensure all register writes are done */
+	wmb();
+
+	return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+	int rc;
+	u32 status = 0;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = readl_poll_timeout_atomic(pll->mmio +
+				       REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc)
+		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+		       pll->id, status);
+
+	return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+	u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+	pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+	u32 data;
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+	u32 data;
+
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+	data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+		  data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+	/*
+	 * Reset the PHY digital domain. This would be needed when
+	 * coming out of a CX or analog rail power collapse while
+	 * ensuring that the pads maintain LP00 or LP11 state
+	 */
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+	wmb(); /* Ensure that the reset is deasserted */
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+	wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	int rc;
+
+	dsi_pll_enable_pll_bias(pll_7nm);
+	if (pll_7nm->slave)
+		dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+	/* Start PLL */
+	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_7nm_lock_status(pll_7nm);
+	if (rc) {
+		pr_err("PLL(%d) lock failed\n", pll_7nm->id);
+		goto error;
+	}
+
+	pll->pll_on = true;
+
+	/*
+	 * assert power on reset for PHY digital in case the PLL is
+	 * enabled after a CX or analog domain power collapse. This needs
+	 * to be done before enabling the global clk.
+	 */
+	dsi_pll_phy_dig_reset(pll_7nm);
+	if (pll_7nm->slave)
+		dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+	dsi_pll_enable_global_clk(pll_7nm);
+	if (pll_7nm->slave)
+		dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+	return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+	pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+	/*
+	 * To avoid any stray glitches while abruptly powering down the PLL
+	 * make sure to gate the clock using the clock enable bit before
+	 * powering down the PLL
+	 */
+	dsi_pll_disable_global_clk(pll_7nm);
+	pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(pll_7nm);
+	if (pll_7nm->slave) {
+		dsi_pll_disable_global_clk(pll_7nm->slave);
+		dsi_pll_disable_sub(pll_7nm->slave);
+	}
+	/* flush, ensure all register writes are done */
+	wmb();
+	pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+						 unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+	void __iomem *base = pll_7nm->mmio;
+	u64 ref_clk = pll_7nm->vco_ref_clk_rate;
+	u64 vco_rate = 0x0;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u64 pll_freq, tmp64;
+
+	dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+	dec &= 0xff;
+
+	frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+		  0xff) << 8);
+	frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+		  0x3) << 16);
+
+	/*
+	 * TODO:
+	 * 1. Assumes prescaler is disabled
+	 * 2. Multiplier is 2^18.
it should be 2^(num_of_frac_bits) + */ + multiplier = 1 << 18; + pll_freq = dec * (ref_clk * 2); + tmp64 = (ref_clk * 2 * frac); + pll_freq += div_u64(tmp64, multiplier); + + vco_rate = pll_freq; + + DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x", + pll_7nm->id, (unsigned long)vco_rate, dec, frac); + + return (unsigned long)vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_7nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_7nm_vco_set_rate, + .recalc_rate = dsi_pll_7nm_vco_recalc_rate, + .prepare = dsi_pll_7nm_vco_prepare, + .unprepare = dsi_pll_7nm_vco_unprepare, +}; + +/* + * PLL Callbacks + */ + +static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; + void __iomem *phy_base = pll_7nm->phy_cmn_mmio; + u32 cmn_clk_cfg0, cmn_clk_cfg1; + + cached->pll_out_div = pll_read(pll_7nm->mmio + + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + cached->pll_out_div &= 0x3; + + cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); + cached->bit_clk_div = cmn_clk_cfg0 & 0xf; + cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4; + + cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + cached->pll_mux = cmn_clk_cfg1 & 0x3; + + DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x", + pll_7nm->id, cached->pll_out_div, cached->bit_clk_div, + cached->pix_clk_div, cached->pll_mux); +} + +static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; + void __iomem *phy_base = pll_7nm->phy_cmn_mmio; + u32 val; + + val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; + val |= cached->pll_out_div; + pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val); + + pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + cached->bit_clk_div | (cached->pix_clk_div << 4)); + + val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + val &= ~0x3; + val |= cached->pll_mux; + pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val); + + DBG("DSI PLL%d", pll_7nm->id); + + return 0; +} + +static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + void __iomem *base = pll_7nm->phy_cmn_mmio; + u32 data = 0x0; /* internal PLL */ + + DBG("DSI PLL%d", pll_7nm->id); + + switch (uc) { + case MSM_DSI_PHY_STANDALONE: + break; + case MSM_DSI_PHY_MASTER: + pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + data = 0x1; /* external PLL */ + break; + default: + return -EINVAL; + } + + /* set PLL src */ + pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2)); + + pll_7nm->uc = uc; + + return 0; +} + +static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data; + + DBG("DSI PLL%d", pll_7nm->id); + + if (byte_clk_provider) + *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; + if (pixel_clk_provider) + *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; + + return 0; +} + +static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); + struct device *dev = &pll_7nm->pdev->dev; + + DBG("DSI PLL%d", pll_7nm->id); 
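A minimal sketch of the goto-unwind idiom that pll_7nm_register() below relies on, and that this teardown mirrors in reverse: each successfully registered clk_hw gains a matching err_* label, and a later failure jumps to the label that unregisters everything registered so far. example_register_two() and its parameters are illustrative only, not part of the driver.

#include <linux/clk-provider.h>

static int example_register_two(struct device *dev,
				struct clk_hw *a, struct clk_hw *b)
{
	int ret;

	ret = clk_hw_register(dev, a);
	if (ret)
		return ret;

	ret = clk_hw_register(dev, b);
	if (ret)
		goto err_unregister_a;

	return 0;

err_unregister_a:
	/* unwind in reverse registration order */
	clk_hw_unregister(a);
	return ret;
}

The unconditional unregister chain that follows is the same walk over all eight clocks: leaf dividers and muxes first, the VCO clk_hw last.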
+ of_clk_del_provider(dev->of_node); + + clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw); + clk_hw_unregister_mux(pll_7nm->pclk_mux_hw); + clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw); + clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw); + clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw); + clk_hw_unregister_divider(pll_7nm->bit_clk_hw); + clk_hw_unregister_divider(pll_7nm->out_div_clk_hw); + clk_hw_unregister(&pll_7nm->base.clk_hw); +} + +/* + * The post dividers and mux clocks are created using the standard divider and + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux + * state to follow the master PLL's divider/mux state. Therefore, we don't + * require special clock ops that also configure the slave PLL registers + */ +static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm) +{ + char clk_name[32], parent[32], vco_name[32]; + char parent2[32], parent3[32], parent4[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .name = vco_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_7nm_vco, + }; + struct device *dev = &pll_7nm->pdev->dev; + struct clk_hw_onecell_data *hw_data; + struct clk_hw *hw; + int ret; + + DBG("DSI%d", pll_7nm->id); + + hw_data = devm_kzalloc(dev, sizeof(*hw_data) + + NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id); + pll_7nm->base.clk_hw.init = &vco_init; + + ret = clk_hw_register(dev, &pll_7nm->base.clk_hw); + if (ret) + return ret; + + snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id); + + hw = clk_hw_register_divider(dev, clk_name, + parent, CLK_SET_RATE_PARENT, + pll_7nm->mmio + + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_base_clk_hw; + } + + pll_7nm->out_div_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + + /* BIT CLK: DIV_CTRL_3_0 */ + hw = clk_hw_register_divider(dev, clk_name, parent, + CLK_SET_RATE_PARENT, + pll_7nm->phy_cmn_mmio + + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, + &pll_7nm->postdiv_lock); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_out_div_clk_hw; + } + + pll_7nm->bit_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + + /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_bit_clk_hw; + } + + pll_7nm->byte_clk_hw = hw; + hw_data->hws[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 2); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_byte_clk_hw; + } + + pll_7nm->by_2_bit_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 4); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_by_2_bit_clk_hw; + } + + pll_7nm->post_out_div_clk_hw = hw; + + snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id); + 
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id); + snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id); + snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id); + snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id); + + hw = clk_hw_register_mux(dev, clk_name, + ((const char *[]){ + parent, parent2, parent3, parent4 + }), 4, 0, pll_7nm->phy_cmn_mmio + + REG_DSI_7nm_PHY_CMN_CLK_CFG1, + 0, 2, 0, NULL); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_post_out_div_clk_hw; + } + + pll_7nm->pclk_mux_hw = hw; + + snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id); + snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id); + + /* PIX CLK DIV : DIV_CTRL_7_4*/ + hw = clk_hw_register_divider(dev, clk_name, parent, + 0, pll_7nm->phy_cmn_mmio + + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, + &pll_7nm->postdiv_lock); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); + goto err_pclk_mux_hw; + } + + pll_7nm->out_dsiclk_hw = hw; + hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; + + hw_data->num = NUM_PROVIDED_CLKS; + pll_7nm->hw_data = hw_data; + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + pll_7nm->hw_data); + if (ret) { + DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret); + goto err_dsiclk_hw; + } + + return 0; + +err_dsiclk_hw: + clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw); +err_pclk_mux_hw: + clk_hw_unregister_mux(pll_7nm->pclk_mux_hw); +err_post_out_div_clk_hw: + clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw); +err_by_2_bit_clk_hw: + clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw); +err_byte_clk_hw: + clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw); +err_bit_clk_hw: + clk_hw_unregister_divider(pll_7nm->bit_clk_hw); +err_out_div_clk_hw: + clk_hw_unregister_divider(pll_7nm->out_div_clk_hw); +err_base_clk_hw: + clk_hw_unregister(&pll_7nm->base.clk_hw); + + return ret; +} + +struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) +{ + struct dsi_pll_7nm *pll_7nm; + struct msm_dsi_pll *pll; + int ret; + + pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL); + if (!pll_7nm) + return ERR_PTR(-ENOMEM); + + DBG("DSI PLL%d", id); + + pll_7nm->pdev = pdev; + pll_7nm->id = id; + pll_7nm_list[id] = pll_7nm; + + pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) { + DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n"); + return ERR_PTR(-ENOMEM); + } + + pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_7nm->mmio)) { + DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n"); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&pll_7nm->postdiv_lock); + + pll = &pll_7nm->base; + pll->min_rate = 1000000000UL; + pll->max_rate = 3500000000UL; + if (pll->type == MSM_DSI_PHY_7NM_V4_1) { + pll->min_rate = 600000000UL; + pll->max_rate = (unsigned long)5000000000ULL; + /* workaround for max rate overflowing on 32-bit builds: */ + pll->max_rate = max(pll->max_rate, 0xffffffffUL); + } + pll->get_provider = dsi_pll_7nm_get_provider; + pll->destroy = dsi_pll_7nm_destroy; + pll->save_state = dsi_pll_7nm_save_state; + pll->restore_state = dsi_pll_7nm_restore_state; + pll->set_usecase = dsi_pll_7nm_set_usecase; + + pll_7nm->vco_delay = 1; + + ret = pll_7nm_register(pll_7nm); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); + return ERR_PTR(ret); + } + + /* TODO: Remove this when we have proper display handover support */ + msm_dsi_pll_save_state(pll); + + 
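The tree registered above reduces to simple divider arithmetic: per the comment in pll_7nm_register(), the DSI byte clock is VCO_CLK / OUT_DIV / BIT_DIV / 8, and the pixel clock instead runs the pclk mux output through the DIV_CTRL_7_4 divider. A minimal sketch, assuming the divider values have already been read back from PLL_OUTDIV_RATE and CMN_CLK_CFG0 (the helper is illustrative, not driver code):

static unsigned long example_dsi_byte_clk_hz(unsigned long vco_hz,
					     unsigned int out_div,
					     unsigned int bit_div)
{
	/* DSI byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
	return vco_hz / out_div / bit_div / 8;
}

For example, a 3 GHz VCO with out_div = 2 and bit_div = 1 yields a 1.5 GHz per-lane bit clock and a 187.5 MHz byte clock.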
return pll; +} diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 79333842f70a..aa4509766d64 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -453,15 +453,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (ret) goto err_msm_uninit; - if (!dev->dma_parms) { - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), - GFP_KERNEL); - if (!dev->dma_parms) { - ret = -ENOMEM; - goto err_msm_uninit; - } - } - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(dev, UINT_MAX); msm_gem_shrinker_init(ddev); @@ -594,9 +586,10 @@ static int context_init(struct drm_device *dev, struct drm_file *file) if (!ctx) return -ENOMEM; + kref_init(&ctx->ref); msm_submitqueue_init(dev, ctx); - ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL; + ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current); file->driver_priv = ctx; return 0; @@ -615,7 +608,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file) static void context_close(struct msm_file_private *ctx) { msm_submitqueue_close(ctx); - kfree(ctx); + msm_file_private_put(ctx); } static void msm_postclose(struct drm_device *dev, struct drm_file *file) @@ -779,18 +772,19 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, } static int msm_ioctl_gem_info_iova(struct drm_device *dev, - struct drm_gem_object *obj, uint64_t *iova) + struct drm_file *file, struct drm_gem_object *obj, + uint64_t *iova) { - struct msm_drm_private *priv = dev->dev_private; + struct msm_file_private *ctx = file->driver_priv; - if (!priv->gpu) + if (!ctx->aspace) return -EINVAL; /* * Don't pin the memory here - just get an address so that userspace can * be productive */ - return msm_gem_get_iova(obj, priv->gpu->aspace, iova); + return msm_gem_get_iova(obj, ctx->aspace, iova); } static int msm_ioctl_gem_info(struct drm_device *dev, void *data, @@ -829,7 +823,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, args->value = msm_gem_mmap_offset(obj); break; case MSM_INFO_GET_IOVA: - ret = msm_ioctl_gem_info_iova(dev, obj, &args->value); + ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value); break; case MSM_INFO_SET_NAME: /* length check should leave room for terminating null: */ @@ -978,12 +972,6 @@ static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW), }; -static const struct vm_operations_struct vm_ops = { - .fault = msm_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct file_operations fops = { .owner = THIS_MODULE, .open = drm_open, @@ -1009,18 +997,11 @@ static struct drm_driver msm_driver = { .irq_preinstall = msm_irq_preinstall, .irq_postinstall = msm_irq_postinstall, .irq_uninstall = msm_irq_uninstall, - .gem_free_object_unlocked = msm_gem_free_object, - .gem_vm_ops = &vm_ops, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_pin = msm_gem_prime_pin, - .gem_prime_unpin = msm_gem_prime_unpin, - .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, - .gem_prime_vmap = msm_gem_prime_vmap, - .gem_prime_vunmap = msm_gem_prime_vunmap, .gem_prime_mmap = msm_gem_prime_mmap, #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, @@ -1358,6 +1339,7 @@ static int __init 
msm_drm_register(void) msm_dsi_register(); msm_edp_register(); msm_hdmi_register(); + msm_dp_register(); adreno_register(); return platform_driver_register(&msm_platform_driver); } @@ -1366,6 +1348,7 @@ static void __exit msm_drm_unregister(void) { DBG("fini"); platform_driver_unregister(&msm_platform_driver); + msm_dp_unregister(); msm_hdmi_unregister(); adreno_unregister(); msm_edp_unregister(); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index af259b0573ea..c45789f36e48 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -57,6 +57,7 @@ struct msm_file_private { struct list_head submitqueues; int queueid; struct msm_gem_address_space *aspace; + struct kref ref; }; enum msm_mdp_plane_property { @@ -159,6 +160,8 @@ struct msm_drm_private { /* DSI is shared by mdp4 and mdp5 */ struct msm_dsi *dsi[2]; + struct msm_dp *dp; + /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; struct msm_file_private *lastctx; @@ -248,6 +251,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace, void msm_gem_close_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma); + +struct msm_gem_address_space * +msm_gem_address_space_get(struct msm_gem_address_space *aspace); + void msm_gem_address_space_put(struct msm_gem_address_space *aspace); struct msm_gem_address_space * @@ -269,7 +276,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev); int msm_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); -vm_fault_t msm_gem_fault(struct vm_fault *vmf); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_get_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova); @@ -302,9 +308,8 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj); int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive); -void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence); -void msm_gem_move_to_inactive(struct drm_gem_object *obj); +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu); +void msm_gem_active_put(struct drm_gem_object *obj); int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); int msm_gem_cpu_fini(struct drm_gem_object *obj); void msm_gem_free_object(struct drm_gem_object *obj); @@ -378,6 +383,63 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, } #endif +#ifdef CONFIG_DRM_MSM_DP +int __init msm_dp_register(void); +void __exit msm_dp_unregister(void); +int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder); +int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder); +int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder); +int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder); +void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void msm_dp_irq_postinstall(struct msm_dp *dp_display); + +void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor); + +#else +static inline int __init msm_dp_register(void) +{ + return -EINVAL; +} +static inline void __exit msm_dp_unregister(void) +{ +} +static inline int 
msm_dp_modeset_init(struct msm_dp *dp_display, + struct drm_device *dev, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +static inline int msm_dp_display_enable(struct msm_dp *dp, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +static inline int msm_dp_display_disable(struct msm_dp *dp, + struct drm_encoder *encoder) +{ + return -EINVAL; +} +static inline void msm_dp_display_mode_set(struct msm_dp *dp, + struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display) +{ +} + +static inline void msm_dp_debugfs_init(struct msm_dp *dp_display, + struct drm_minor *minor) +{ +} + +#endif + void __init msm_mdp_register(void); void __exit msm_mdp_unregister(void); void __init msm_dpu_register(void); @@ -398,8 +460,9 @@ void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); #else static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } __printf(3, 4) -static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, - const char *fmt, ...) {} +static inline void msm_rd_dump_submit(struct msm_rd_state *rd, + struct msm_gem_submit *submit, + const char *fmt, ...) {} static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {} #endif @@ -419,7 +482,8 @@ struct msm_gpu_submitqueue; int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, u32 id); -int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, +int msm_submitqueue_create(struct drm_device *drm, + struct msm_file_private *ctx, u32 prio, u32 flags, u32 *id); int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx, struct drm_msm_submitqueue_query *args); @@ -428,6 +492,26 @@ void msm_submitqueue_close(struct msm_file_private *ctx); void msm_submitqueue_destroy(struct kref *kref); +static inline void __msm_file_private_destroy(struct kref *kref) +{ + struct msm_file_private *ctx = container_of(kref, + struct msm_file_private, ref); + + msm_gem_address_space_put(ctx->aspace); + kfree(ctx); +} + +static inline void msm_file_private_put(struct msm_file_private *ctx) +{ + kref_put(&ctx->ref, __msm_file_private_destroy); +} + +static inline struct msm_file_private *msm_file_private_get( + struct msm_file_private *ctx) +{ + kref_get(&ctx->ref); + return ctx; +} #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) 
if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b2f49152b4d4..2e1bce7c0b19 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -4,6 +4,7 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/dma-map-ops.h> #include <linux/spinlock.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> @@ -52,26 +53,14 @@ static void sync_for_device(struct msm_gem_object *msm_obj) { struct device *dev = msm_obj->base.dev->dev; - if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { - dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } else { - dma_map_sg(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } + dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); } static void sync_for_cpu(struct msm_gem_object *msm_obj) { struct device *dev = msm_obj->base.dev->dev; - if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { - dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } else { - dma_unmap_sg(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - } + dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); } /* allocate pages from VRAM carveout, used when no IOMMU: */ @@ -126,7 +115,7 @@ static struct page **get_pages(struct drm_gem_object *obj) msm_obj->pages = p; - msm_obj->sgt = drm_prime_pages_to_sg(p, npages); + msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages); if (IS_ERR(msm_obj->sgt)) { void *ptr = ERR_CAST(msm_obj->sgt); @@ -247,7 +236,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) return msm_gem_mmap_obj(vma->vm_private_data, vma); } -vm_fault_t msm_gem_fault(struct vm_fault *vmf) +static vm_fault_t msm_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; @@ -753,31 +742,31 @@ int msm_gem_sync_object(struct drm_gem_object *obj, return 0; } -void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence) +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); - msm_obj->gpu = gpu; - if (exclusive) - dma_resv_add_excl_fence(obj->resv, fence); - else - dma_resv_add_shared_fence(obj->resv, fence); - list_del_init(&msm_obj->mm_list); - list_add_tail(&msm_obj->mm_list, &gpu->active_list); + + if (!atomic_fetch_inc(&msm_obj->active_count)) { + msm_obj->gpu = gpu; + list_del_init(&msm_obj->mm_list); + list_add_tail(&msm_obj->mm_list, &gpu->active_list); + } } -void msm_gem_move_to_inactive(struct drm_gem_object *obj) +void msm_gem_active_put(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; - struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct msm_drm_private *priv = obj->dev->dev_private; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); - msm_obj->gpu = NULL; - list_del_init(&msm_obj->mm_list); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + if (!atomic_dec_return(&msm_obj->active_count)) { + msm_obj->gpu = NULL; + list_del_init(&msm_obj->mm_list); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + } } int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) @@ -852,11 +841,28 @@ 
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) seq_puts(m, " vmas:"); - list_for_each_entry(vma, &msm_obj->vmas, list) - seq_printf(m, " [%s: %08llx,%s,inuse=%d]", - vma->aspace != NULL ? vma->aspace->name : NULL, - vma->iova, vma->mapped ? "mapped" : "unmapped", + list_for_each_entry(vma, &msm_obj->vmas, list) { + const char *name, *comm; + if (vma->aspace) { + struct msm_gem_address_space *aspace = vma->aspace; + struct task_struct *task = + get_pid_task(aspace->pid, PIDTYPE_PID); + if (task) { + comm = kstrdup(task->comm, GFP_KERNEL); + } else { + comm = NULL; + } + name = aspace->name; + } else { + name = comm = NULL; + } + seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]", + name, comm ? ":" : "", comm ? comm : "", + vma->aspace, vma->iova, + vma->mapped ? "mapped" : "unmapped", vma->inuse); + kfree(comm); + } seq_puts(m, "\n"); } @@ -994,6 +1000,22 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, return ret; } +static const struct vm_operations_struct vm_ops = { + .fault = msm_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs msm_gem_object_funcs = { + .free = msm_gem_free_object, + .pin = msm_gem_prime_pin, + .unpin = msm_gem_prime_unpin, + .get_sg_table = msm_gem_prime_get_sg_table, + .vmap = msm_gem_prime_vmap, + .vunmap = msm_gem_prime_vunmap, + .vm_ops = &vm_ops, +}; + static int msm_gem_new_impl(struct drm_device *dev, uint32_t size, uint32_t flags, struct drm_gem_object **obj) @@ -1024,6 +1046,7 @@ static int msm_gem_new_impl(struct drm_device *dev, INIT_LIST_HEAD(&msm_obj->vmas); *obj = &msm_obj->base; + (*obj)->funcs = &msm_gem_object_funcs; return 0; } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 972490b14ba5..a1bf741b9b89 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -24,6 +24,11 @@ struct msm_gem_address_space { spinlock_t lock; /* Protects drm_mm node allocation/removal */ struct msm_mmu *mmu; struct kref kref; + + /* For address spaces associated with a specific process, this + * will be non-NULL: + */ + struct pid *pid; }; struct msm_gem_vma { @@ -83,12 +88,14 @@ struct msm_gem_object { struct mutex lock; /* Protects resources associated with bo */ char name[32]; /* Identifier to print for the debugfs files */ + + atomic_t active_count; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) static inline bool is_active(struct msm_gem_object *msm_obj) { - return msm_obj->gpu != NULL; + return atomic_read(&msm_obj->active_count); } static inline bool is_purgeable(struct msm_gem_object *msm_obj) @@ -142,6 +149,7 @@ struct msm_gem_submit { bool valid; /* true if no cmdstream patching needed */ bool in_rb; /* "sudo" mode, copy cmds into RB */ struct msm_ringbuffer *ring; + struct msm_file_private *ctx; unsigned int nr_cmds; unsigned int nr_bos; u32 ident; /* A "identifier" for the submit for logging */ diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index d7c8948427fe..515ef80816a0 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -19,7 +19,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) if (WARN_ON(!msm_obj->pages)) /* should have already pinned! 
*/ return NULL; - return drm_prime_pages_to_sg(msm_obj->pages, npages); + return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages); } void *msm_gem_prime_vmap(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 722d61668a97..482576d7a39a 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -6,6 +6,7 @@ #include "msm_drv.h" #include "msm_gem.h" +#include "msm_gpu_trace.h" static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) { @@ -87,7 +88,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) mutex_unlock(&dev->struct_mutex); if (freed > 0) - pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT); + trace_msm_gem_purge(freed << PAGE_SHIFT); return freed; } @@ -123,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) *(unsigned long *)ptr += unmapped; if (unmapped > 0) - pr_info_ratelimited("Purging %u vmaps\n", unmapped); + trace_msm_gem_purge_vmaps(unmapped); return NOTIFY_DONE; } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 8cb9aa15ff90..aa5c60a7132d 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -27,7 +27,7 @@ #define BO_PINNED 0x2000 static struct msm_gem_submit *submit_create(struct drm_device *dev, - struct msm_gpu *gpu, struct msm_gem_address_space *aspace, + struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue, uint32_t nr_bos, uint32_t nr_cmds) { @@ -43,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return NULL; submit->dev = dev; - submit->aspace = aspace; + submit->aspace = queue->ctx->aspace; submit->gpu = gpu; submit->fence = NULL; submit->cmd = (void *)&submit->bos[nr_bos]; @@ -677,7 +677,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } } - submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos, + submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); if (!submit) { ret = -ENOMEM; @@ -785,7 +785,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } } - msm_gpu_submit(gpu, submit, ctx); + msm_gpu_submit(gpu, submit); args->fence = submit->fence->seqno; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 5f6a11211b64..f914ddbaea89 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -17,6 +17,7 @@ msm_gem_address_space_destroy(struct kref *kref) drm_mm_takedown(&aspace->mm); if (aspace->mmu) aspace->mmu->funcs->destroy(aspace->mmu); + put_pid(aspace->pid); kfree(aspace); } @@ -27,6 +28,15 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace) kref_put(&aspace->kref, msm_gem_address_space_destroy); } +struct msm_gem_address_space * +msm_gem_address_space_get(struct msm_gem_address_space *aspace) +{ + if (!IS_ERR_OR_NULL(aspace)) + kref_get(&aspace->kref); + + return aspace; +} + /* Actually unmap memory for the vma */ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma) @@ -78,8 +88,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot); - if (ret) + if (ret) { vma->mapped = false; + vma->inuse--; + } return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 57ddc9438351..55d16489d0f3 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ 
b/drivers/gpu/drm/msm/msm_gpu.c @@ -24,7 +24,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); + struct msm_gpu *gpu = dev_to_gpu(dev); struct dev_pm_opp *opp; opp = devfreq_recommended_opp(dev, freq, flags); @@ -32,6 +32,8 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, if (IS_ERR(opp)) return PTR_ERR(opp); + trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp)); + if (gpu->funcs->gpu_set_freq) gpu->funcs->gpu_set_freq(gpu, opp); else @@ -45,7 +47,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, static int msm_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *status) { - struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); + struct msm_gpu *gpu = dev_to_gpu(dev); ktime_t time; if (gpu->funcs->gpu_get_freq) @@ -64,7 +66,7 @@ static int msm_devfreq_get_dev_status(struct device *dev, static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) { - struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev)); + struct msm_gpu *gpu = dev_to_gpu(dev); if (gpu->funcs->gpu_get_freq) *freq = gpu->funcs->gpu_get_freq(gpu); @@ -200,6 +202,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu) int ret; DBG("%s", gpu->name); + trace_msm_gpu_resume(0); ret = enable_pwrrail(gpu); if (ret) @@ -225,6 +228,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) int ret; DBG("%s", gpu->name); + trace_msm_gpu_suspend(0); devfreq_suspend_device(gpu->devfreq.devfreq); @@ -520,7 +524,7 @@ static void recover_worker(struct work_struct *work) struct msm_ringbuffer *ring = gpu->rb[i]; list_for_each_entry(submit, &ring->submits, node) - gpu->funcs->submit(gpu, submit, NULL); + gpu->funcs->submit(gpu, submit); } } @@ -694,8 +698,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - /* move to inactive: */ - msm_gem_move_to_inactive(&msm_obj->base); + + msm_gem_active_put(&msm_obj->base); msm_gem_unpin_iova(&msm_obj->base, submit->aspace); drm_gem_object_put_locked(&msm_obj->base); } @@ -747,8 +751,7 @@ void msm_gpu_retire(struct msm_gpu *gpu) } /* add bo's to gpu's ring, and kick gpu: */ -void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx) +void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct drm_device *dev = gpu->dev; struct msm_drm_private *priv = dev->dev_private; @@ -771,6 +774,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; + struct drm_gem_object *drm_obj = &msm_obj->base; uint64_t iova; /* can't happen yet.. 
but when we add 2d support we'll have @@ -783,13 +787,15 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova); if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) - msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); + dma_resv_add_excl_fence(drm_obj->resv, submit->fence); else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) - msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); + dma_resv_add_shared_fence(drm_obj->resv, submit->fence); + + msm_gem_active_get(drm_obj, gpu); } - gpu->funcs->submit(gpu, submit, ctx); - priv->lastctx = ctx; + gpu->funcs->submit(gpu, submit); + priv->lastctx = submit->queue->ctx; hangcheck_timer_reset(gpu); } @@ -824,6 +830,30 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) return 0; } +/* Return a new address space for a msm_drm_private instance */ +struct msm_gem_address_space * +msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task) +{ + struct msm_gem_address_space *aspace = NULL; + if (!gpu) + return NULL; + + /* + * If the target doesn't support private address spaces then return + * the global one + */ + if (gpu->funcs->create_private_address_space) { + aspace = gpu->funcs->create_private_address_space(gpu); + if (!IS_ERR(aspace)) + aspace->pid = get_pid(task_pid(task)); + } + + if (IS_ERR_OR_NULL(aspace)) + aspace = msm_gem_address_space_get(gpu->aspace); + + return aspace; +} + int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config) @@ -892,7 +922,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->gpu_cx = NULL; gpu->pdev = pdev; - platform_set_drvdata(pdev, gpu); + platform_set_drvdata(pdev, &gpu->adreno_smmu); msm_devfreq_init(gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 37cffac4cbe3..6c9e1fdc1a76 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -7,6 +7,7 @@ #ifndef __MSM_GPU_H__ #define __MSM_GPU_H__ +#include <linux/adreno-smmu-priv.h> #include <linux/clk.h> #include <linux/interconnect.h> #include <linux/pm_opp.h> @@ -45,8 +46,7 @@ struct msm_gpu_funcs { int (*hw_init)(struct msm_gpu *gpu); int (*pm_suspend)(struct msm_gpu *gpu); int (*pm_resume)(struct msm_gpu *gpu); - void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx); + void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit); void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); irqreturn_t (*irq)(struct msm_gpu *irq); struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); @@ -66,6 +66,9 @@ struct msm_gpu_funcs { void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp); struct msm_gem_address_space *(*create_address_space) (struct msm_gpu *gpu, struct platform_device *pdev); + struct msm_gem_address_space *(*create_private_address_space) + (struct msm_gpu *gpu); + uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); }; struct msm_gpu { @@ -74,6 +77,8 @@ struct msm_gpu { struct platform_device *pdev; const struct msm_gpu_funcs *funcs; + struct adreno_smmu_priv adreno_smmu; + /* performance counters (hw & sw): */ spinlock_t perf_lock; bool perfcntr_active; @@ -144,6 +149,12 @@ struct msm_gpu { bool hw_apriv; }; +static inline struct msm_gpu *dev_to_gpu(struct device *dev) +{ + struct adreno_smmu_priv 
*adreno_smmu = dev_get_drvdata(dev); + return container_of(adreno_smmu, struct msm_gpu, adreno_smmu); +} + /* It turns out that all targets use the same ringbuffer size */ #define MSM_GPU_RINGBUFFER_SZ SZ_32K #define MSM_GPU_RINGBUFFER_BLKSIZE 32 @@ -184,6 +195,7 @@ struct msm_gpu_submitqueue { u32 flags; u32 prio; int faults; + struct msm_file_private *ctx; struct list_head node; struct kref ref; }; @@ -283,13 +295,15 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs); void msm_gpu_retire(struct msm_gpu *gpu); -void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, - struct msm_file_private *ctx); +void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit); int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config); +struct msm_gem_address_space * +msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task); + void msm_gpu_cleanup(struct msm_gpu *gpu); struct msm_gpu *adreno_load_gpu(struct drm_device *dev); diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h index 122b84789238..03e0c2536b94 100644 --- a/drivers/gpu/drm/msm/msm_gpu_trace.h +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h @@ -83,6 +83,89 @@ TRACE_EVENT(msm_gpu_submit_retired, __entry->start_ticks, __entry->end_ticks) ); + +TRACE_EVENT(msm_gpu_freq_change, + TP_PROTO(u32 freq), + TP_ARGS(freq), + TP_STRUCT__entry( + __field(u32, freq) + ), + TP_fast_assign( + /* trace freq in MHz to match intel_gpu_freq_change, to make life easier + * for userspace + */ + __entry->freq = DIV_ROUND_UP(freq, 1000000); + ), + TP_printk("new_freq=%u", __entry->freq) +); + + +TRACE_EVENT(msm_gmu_freq_change, + TP_PROTO(u32 freq, u32 perf_index), + TP_ARGS(freq, perf_index), + TP_STRUCT__entry( + __field(u32, freq) + __field(u32, perf_index) + ), + TP_fast_assign( + __entry->freq = freq; + __entry->perf_index = perf_index; + ), + TP_printk("freq=%u, perf_index=%u", __entry->freq, __entry->perf_index) +); + + +TRACE_EVENT(msm_gem_purge, + TP_PROTO(u32 bytes), + TP_ARGS(bytes), + TP_STRUCT__entry( + __field(u32, bytes) + ), + TP_fast_assign( + __entry->bytes = bytes; + ), + TP_printk("Purging %u bytes", __entry->bytes) +); + + +TRACE_EVENT(msm_gem_purge_vmaps, + TP_PROTO(u32 unmapped), + TP_ARGS(unmapped), + TP_STRUCT__entry( + __field(u32, unmapped) + ), + TP_fast_assign( + __entry->unmapped = unmapped; + ), + TP_printk("Purging %u vmaps", __entry->unmapped) +); + + +TRACE_EVENT(msm_gpu_suspend, + TP_PROTO(int dummy), + TP_ARGS(dummy), + TP_STRUCT__entry( + __field(u32, dummy) + ), + TP_fast_assign( + __entry->dummy = dummy; + ), + TP_printk("%u", __entry->dummy) +); + + +TRACE_EVENT(msm_gpu_resume, + TP_PROTO(int dummy), + TP_ARGS(dummy), + TP_STRUCT__entry( + __field(u32, dummy) + ), + TP_fast_assign( + __entry->dummy = dummy; + ), + TP_printk("%u", __entry->dummy) +); + #endif #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c index 310a31b05faa..379496186c7f 100644 --- a/drivers/gpu/drm/msm/msm_gpummu.c +++ b/drivers/gpu/drm/msm/msm_gpummu.c @@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, { struct msm_gpummu *gpummu = to_msm_gpummu(mmu); unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; - struct scatterlist *sg; + struct sg_dma_page_iter dma_iter; unsigned prot_bits = 0; - 
unsigned i, j; if (prot & IOMMU_WRITE) prot_bits |= 1; if (prot & IOMMU_READ) prot_bits |= 2; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - dma_addr_t addr = sg->dma_address; - for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) { - gpummu->table[idx] = addr | prot_bits; - addr += GPUMMU_PAGE_SIZE; - } + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + dma_addr_t addr = sg_page_iter_dma_address(&dma_iter); + int i; + + for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE) + gpummu->table[idx++] = (addr + i) | prot_bits; } /* we can improve by deferring flush for multiple map() */ @@ -102,7 +101,7 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu) } gpummu->gpu = gpu; - msm_mmu_init(&gpummu->base, dev, &funcs); + msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU); return &gpummu->base; } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 3a381a9674c9..22ac7c692a81 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -4,15 +4,210 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/adreno-smmu-priv.h> +#include <linux/io-pgtable.h> #include "msm_drv.h" #include "msm_mmu.h" struct msm_iommu { struct msm_mmu base; struct iommu_domain *domain; + atomic_t pagetables; }; + #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) +struct msm_iommu_pagetable { + struct msm_mmu base; + struct msm_mmu *parent; + struct io_pgtable_ops *pgtbl_ops; + phys_addr_t ttbr; + u32 asid; +}; +static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu) +{ + return container_of(mmu, struct msm_iommu_pagetable, base); +} + +static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova, + size_t size) +{ + struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); + struct io_pgtable_ops *ops = pagetable->pgtbl_ops; + size_t unmapped = 0, left = size; + + /* Unmap the block one page at a time */ + while (left) { + unmapped += ops->unmap(ops, iova, 4096, NULL); + iova += 4096; + left -= 4096; + } + + iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain); + + return (unmapped == size) ?
0 : -EINVAL; +} + +static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, + struct sg_table *sgt, size_t len, int prot) +{ + struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); + struct io_pgtable_ops *ops = pagetable->pgtbl_ops; + struct scatterlist *sg; + size_t mapped = 0; + u64 addr = iova; + unsigned int i; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t size = sg->length; + phys_addr_t phys = sg_phys(sg); + + /* Map the block one page at a time */ + while (size) { + if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) { + msm_iommu_pagetable_unmap(mmu, iova, mapped); + return -EINVAL; + } + + phys += 4096; + addr += 4096; + size -= 4096; + mapped += 4096; + } + } + + return 0; +} + +static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu) +{ + struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); + struct msm_iommu *iommu = to_msm_iommu(pagetable->parent); + struct adreno_smmu_priv *adreno_smmu = + dev_get_drvdata(pagetable->parent->dev); + + /* + * If this is the last attached pagetable for the parent, + * disable TTBR0 in the arm-smmu driver + */ + if (atomic_dec_return(&iommu->pagetables) == 0) + adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL); + + free_io_pgtable_ops(pagetable->pgtbl_ops); + kfree(pagetable); +} + +int msm_iommu_pagetable_params(struct msm_mmu *mmu, + phys_addr_t *ttbr, int *asid) +{ + struct msm_iommu_pagetable *pagetable; + + if (mmu->type != MSM_MMU_IOMMU_PAGETABLE) + return -EINVAL; + + pagetable = to_pagetable(mmu); + + if (ttbr) + *ttbr = pagetable->ttbr; + + if (asid) + *asid = pagetable->asid; + + return 0; +} + +static const struct msm_mmu_funcs pagetable_funcs = { + .map = msm_iommu_pagetable_map, + .unmap = msm_iommu_pagetable_unmap, + .destroy = msm_iommu_pagetable_destroy, +}; + +static void msm_iommu_tlb_flush_all(void *cookie) +{ +} + +static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ +} + +static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie) +{ +} + +static const struct iommu_flush_ops null_tlb_ops = { + .tlb_flush_all = msm_iommu_tlb_flush_all, + .tlb_flush_walk = msm_iommu_tlb_flush_walk, + .tlb_flush_leaf = msm_iommu_tlb_flush_walk, + .tlb_add_page = msm_iommu_tlb_add_page, +}; + +struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) +{ + struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev); + struct msm_iommu *iommu = to_msm_iommu(parent); + struct msm_iommu_pagetable *pagetable; + const struct io_pgtable_cfg *ttbr1_cfg = NULL; + struct io_pgtable_cfg ttbr0_cfg; + int ret; + + /* Get the pagetable configuration from the domain */ + if (adreno_smmu->cookie) + ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); + if (!ttbr1_cfg) + return ERR_PTR(-ENODEV); + + pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL); + if (!pagetable) + return ERR_PTR(-ENOMEM); + + msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs, + MSM_MMU_IOMMU_PAGETABLE); + + /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */ + ttbr0_cfg = *ttbr1_cfg; + + /* The incoming cfg will have the TTBR1 quirk enabled */ + ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1; + ttbr0_cfg.tlb = &null_tlb_ops; + + pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, + &ttbr0_cfg, iommu->domain); + + if (!pagetable->pgtbl_ops) { + kfree(pagetable); + return ERR_PTR(-ENOMEM); + } + + /* + * If this is the first pagetable that we've allocated, send it back to + * the 
arm-smmu driver as a trigger to set up TTBR0 + */ + if (atomic_inc_return(&iommu->pagetables) == 1) { + ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg); + if (ret) { + free_io_pgtable_ops(pagetable->pgtbl_ops); + kfree(pagetable); + return ERR_PTR(ret); + } + } + + /* Needed later for TLB flush */ + pagetable->parent = parent; + pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; + + /* + * TODO we would like each set of page tables to have a unique ASID + * to optimize TLB invalidation. But iommu_flush_iotlb_all() will + * end up flushing the ASID used for TTBR1 pagetables, which is not + * what we want. So for now just use the same ASID as TTBR1. + */ + pagetable->asid = 0; + + return &pagetable->base; +} + static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg) { @@ -36,7 +231,11 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, struct msm_iommu *iommu = to_msm_iommu(mmu); size_t ret; - ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); + /* The arm-smmu driver expects the addresses to be sign extended */ + if (iova & BIT_ULL(48)) + iova |= GENMASK_ULL(63, 49); + + ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot); WARN_ON(!ret); return (ret == len) ? 0 : -EINVAL; @@ -46,6 +245,9 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) { struct msm_iommu *iommu = to_msm_iommu(mmu); + if (iova & BIT_ULL(48)) + iova |= GENMASK_ULL(63, 49); + iommu_unmap(iommu->domain, iova, len); return 0; @@ -78,9 +280,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) return ERR_PTR(-ENOMEM); iommu->domain = domain; - msm_mmu_init(&iommu->base, dev, &funcs); + msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU); iommu_set_fault_handler(domain, msm_fault_handler, iommu); + atomic_set(&iommu->pagetables, 0); + ret = iommu_attach_device(iommu->domain, dev); if (ret) { kfree(iommu); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 3a534ee59bf6..61ade89d9e48 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -17,18 +17,26 @@ struct msm_mmu_funcs { void (*destroy)(struct msm_mmu *mmu); }; +enum msm_mmu_type { + MSM_MMU_GPUMMU, + MSM_MMU_IOMMU, + MSM_MMU_IOMMU_PAGETABLE, +}; + struct msm_mmu { const struct msm_mmu_funcs *funcs; struct device *dev; int (*handler)(void *arg, unsigned long iova, int flags); void *arg; + enum msm_mmu_type type; }; static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, - const struct msm_mmu_funcs *funcs) + const struct msm_mmu_funcs *funcs, enum msm_mmu_type type) { mmu->dev = dev; mmu->funcs = funcs; + mmu->type = type; } struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); @@ -41,7 +49,13 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, mmu->handler = handler; } +struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent); + void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, dma_addr_t *tran_error); + +int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr, + int *asid); + #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 7764373d0ed2..0987d6bf848c 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -31,6 +31,7 @@ struct msm_rbmemptrs { volatile uint32_t fence; volatile struct msm_gpu_submit_stats 
stats[MSM_GPU_SUBMIT_STATS_COUNT]; + volatile u64 ttbr0; }; struct msm_ringbuffer { diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index a1d94be7883a..c3d206105d28 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -12,6 +12,8 @@ void msm_submitqueue_destroy(struct kref *kref) struct msm_gpu_submitqueue *queue = container_of(kref, struct msm_gpu_submitqueue, ref); + msm_file_private_put(queue->ctx); + kfree(queue); } @@ -49,8 +51,10 @@ void msm_submitqueue_close(struct msm_file_private *ctx) * No lock needed in close and there won't * be any more user ioctls coming our way */ - list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) + list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) { + list_del(&entry->node); msm_submitqueue_put(entry); + } } int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, @@ -81,6 +85,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, write_lock(&ctx->queuelock); + queue->ctx = msm_file_private_get(ctx); queue->id = ctx->queueid++; if (id) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 8c549c3931af..35122aef037b 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -21,6 +21,7 @@ #include <drm/drm_connector.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> @@ -81,8 +82,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb) clk_disable_unprepare(mxsfb->clk_axi); } +static struct drm_framebuffer * +mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + const struct drm_format_info *info; + + info = drm_get_format_info(dev, mode_cmd); + if (!info) + return ERR_PTR(-EINVAL); + + if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) { + dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n"); + return ERR_PTR(-EINVAL); + } + + return drm_gem_fb_create(dev, file_priv, mode_cmd); +} + static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = { - .fb_create = drm_gem_fb_create, + .fb_create = mxsfb_fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index b721b8b262ce..9040835289a8 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -269,21 +269,23 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) } static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - bool has_primary = state->plane_mask & + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + bool has_primary = crtc_state->plane_mask & drm_plane_mask(crtc->primary); /* The primary plane has to be enabled when the CRTC is active. */ - if (state->active && !has_primary) + if (crtc_state->active && !has_primary) return -EINVAL; /* TODO: Is this needed ? 
*/ - return drm_atomic_add_affected_planes(state->state, crtc); + return drm_atomic_add_affected_planes(state, crtc); } static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event; @@ -302,7 +304,7 @@ static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc, } static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev); struct drm_device *drm = mxsfb->drm; @@ -326,7 +328,7 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc, } static void mxsfb_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev); struct drm_device *drm = mxsfb->drm; diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 6416b6907aeb..f9e962fd94d0 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -615,7 +615,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); int ret; - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret == 0) { if (disp->image[nv_crtc->index]) nouveau_bo_unpin(disp->image[nv_crtc->index]); @@ -1172,7 +1172,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -ENOMEM; if (new_bo != old_bo) { - ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true); if (ret) goto fail_free; } @@ -1336,10 +1336,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100, - TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo); if (!ret) { - ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, + NOUVEAU_GEM_DOMAIN_VRAM, false); if (!ret) { ret = nouveau_bo_map(nv_crtc->cursor.nvbo); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 3ee836dc5058..7739f46470d3 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -134,7 +134,7 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime) if (!fb || !fb->obj[0]) continue; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true); if (ret) NV_ERROR(drm, "Could not pin framebuffer\n"); } @@ -144,7 +144,8 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime) if (!nv_crtc->cursor.nvbo) continue; - ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, + NOUVEAU_GEM_DOMAIN_VRAM, true); if (!ret && nv_crtc->cursor.set_offset) ret = nouveau_bo_map(nv_crtc->cursor.nvbo); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 193ba9498f3d..37e63e98cd08 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -142,7 +142,7 @@ nv10_update_plane(struct 
drm_plane *plane, struct drm_crtc *crtc, return ret; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) return ret; @@ -387,7 +387,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, return ret; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h index 498622c0c670..f75088186fba 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core.h +++ b/drivers/gpu/drm/nouveau/dispnv50/core.h @@ -44,6 +44,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32, struct nv50_core **); int core507d_init(struct nv50_core *); void core507d_ntfy_init(struct nouveau_bo *, u32); +int core507d_read_caps(struct nv50_disp *disp); int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *); int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *); int core507d_update(struct nv50_core *, u32 *, bool); @@ -55,6 +56,7 @@ extern const struct nv50_outp_func pior507d; int core827d_new(struct nouveau_drm *, s32, struct nv50_core **); int core907d_new(struct nouveau_drm *, s32, struct nv50_core **); +int core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp); extern const struct nv50_outp_func dac907d; extern const struct nv50_outp_func sor907d; diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c index 248edf69e168..e6f16a7750f0 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c @@ -78,19 +78,56 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset) } int -core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp) +core507d_read_caps(struct nv50_disp *disp) { struct nvif_push *push = disp->core->chan.push; int ret; - if ((ret = PUSH_WAIT(push, 2))) + ret = PUSH_WAIT(push, 6); + if (ret) return ret; + PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL, + NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) | + NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) | + NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE)); + PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000); + + PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL, + NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE)); + return PUSH_KICK(push); } int +core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp) +{ + struct nv50_core *core = disp->core; + struct nouveau_bo *bo = disp->sync; + s64 time; + int ret; + + NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, + NVDEF(NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, FALSE)); + + ret = core507d_read_caps(disp); + if (ret < 0) + return ret; + + time = nvif_msec(core->chan.base.device, 2000ULL, + if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY, + NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE)) + break; + usleep_range(1, 2); + ); + if (time < 0) + NV_ERROR(drm, "core caps notifier timeout\n"); + + return 0; +} + +int core507d_init(struct nv50_core *core) { struct nvif_push *push = core->chan.push; diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c index b17c03529c78..8564d4dffaff 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core907d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c @@ -22,11 +22,45 @@ 
#include "core.h" #include "head.h" +#include <nvif/push507c.h> +#include <nvif/timer.h> + +#include <nvhw/class/cl907d.h> + +#include "nouveau_bo.h" + +int +core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp) +{ + struct nv50_core *core = disp->core; + struct nouveau_bo *bo = disp->sync; + s64 time; + int ret; + + NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, + NVDEF(NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, FALSE)); + + ret = core507d_read_caps(disp); + if (ret < 0) + return ret; + + time = nvif_msec(core->chan.base.device, 2000ULL, + if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY, + NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, ==, TRUE)) + break; + usleep_range(1, 2); + ); + if (time < 0) + NV_ERROR(drm, "core caps notifier timeout\n"); + + return 0; +} + static const struct nv50_core_func core907d = { .init = core507d_init, .ntfy_init = core507d_ntfy_init, - .caps_init = core507d_caps_init, + .caps_init = core907d_caps_init, .ntfy_wait_done = core507d_ntfy_wait_done, .update = core507d_update, .head = &head907d, diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c index 66846f372080..1cd3a2a35dfb 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c @@ -26,7 +26,7 @@ static const struct nv50_core_func core917d = { .init = core507d_init, .ntfy_init = core507d_ntfy_init, - .caps_init = core507d_caps_init, + .caps_init = core907d_caps_init, .ntfy_wait_done = core507d_ntfy_wait_done, .update = core507d_update, .head = &head917d, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 7799530e07c1..b111fe24a06b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2069,6 +2069,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_wait_for_fences(dev, state, false); drm_atomic_helper_wait_for_dependencies(state); drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_atomic_helper_calc_timestamping_constants(state); if (atom->lock_core) mutex_lock(&disp->mutex); @@ -2622,10 +2623,11 @@ nv50_display_create(struct drm_device *dev) dev->mode_config.normalize_zpos = true; /* small shared memory area we use for notifiers and semaphores */ - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(&drm->client, 4096, 0x1000, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &disp->sync); if (!ret) { - ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true); if (!ret) { ret = nouveau_bo_map(disp->sync); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 841edfaf5b9d..537c1ef2e464 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -30,6 +30,7 @@ #include <nvif/event.h> #include <nvif/cl0046.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_vblank.h> @@ -310,12 +311,16 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) } static int -nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) +nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); + struct drm_crtc_state *crtc_state = 
drm_atomic_get_new_crtc_state(state, + crtc); struct nouveau_drm *drm = nouveau_drm(crtc->dev); struct nv50_head *head = nv50_head(crtc); - struct nv50_head_atom *armh = nv50_head_atom(crtc->state); - struct nv50_head_atom *asyh = nv50_head_atom(state); + struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state); + struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); struct nouveau_conn_atom *asyc = NULL; struct drm_connector_state *conns; struct drm_connector *conn; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 447ecc9fec42..0356474ad6f6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -542,7 +542,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) return 0; nvbo = nouveau_gem_object(fb->obj[0]); - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h index 2e444bac701d..6a463f308b64 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h @@ -32,7 +32,10 @@ #define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_TRUE 0x00000001 #define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_R0 15:1 #define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_TIMESTAMP 29:16 - +#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1 0x00000001 +#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE 0:0 +#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_FALSE 0x00000000 +#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_TRUE 0x00000001 // class methods #define NV507D_UPDATE (0x00000080) diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h index 34bc3eafac7d..79aff6ff3138 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h @@ -24,6 +24,10 @@ #ifndef _cl907d_h_ #define _cl907d_h_ +#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 #define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 #define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 #define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 21537ca1dd39..9a5be6f32424 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -328,7 +328,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, 0, 0, &chan->ntfy); if (ret == 0) - ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false); + ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART, + false); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 9140387f30dc..8133377d865d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -28,7 +28,6 @@ */ #include <linux/dma-mapping.h> -#include <linux/swiotlb.h> #include "nouveau_drv.h" #include "nouveau_chan.h" @@ -44,6 +43,10 @@ #include <nvif/if500b.h> #include <nvif/if900b.h> +static 
int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, + struct ttm_resource *reg); +static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm); + /* * NV10-NV40 tiling helpers */ @@ -136,7 +139,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) struct drm_device *dev = drm->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); - WARN_ON(nvbo->pin_refcnt > 0); + WARN_ON(nvbo->bo.pin_count > 0); + nouveau_bo_del_io_reserve_lru(bo); nv10_bo_put_tile_region(dev, nvbo->tile, NULL); /* @@ -158,8 +162,7 @@ roundup_64(u64 x, u32 y) } static void -nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, - int *align, u64 *size) +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct nvif_device *device = &drm->client.device; @@ -192,7 +195,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, } struct nouveau_bo * -nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, +nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, u32 tile_mode, u32 tile_flags) { struct nouveau_drm *drm = cli->drm; @@ -218,7 +221,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated * into in nouveau_gem_new(). */ - if (flags & TTM_PL_FLAG_UNCACHED) { + if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) { /* Determine if we can get a cache-coherent map, forcing * uncached mapping if we can't. */ @@ -258,9 +261,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, * Skip page sizes that can't support needed domains. */ if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && - (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram) + (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) continue; - if ((flags & TTM_PL_FLAG_TT) && + if ((domain & NOUVEAU_GEM_DOMAIN_GART) && (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) continue; @@ -287,13 +290,13 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, } nvbo->page = vmm->page[pi].shift; - nouveau_bo_fixup_align(nvbo, flags, align, size); + nouveau_bo_fixup_align(nvbo, align, size); return nvbo; } int -nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags, +nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain, struct sg_table *sg, struct dma_resv *robj) { int type = sg ? 
ttm_bo_type_sg : ttm_bo_type_device; @@ -303,7 +306,8 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags, acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo)); nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; - nouveau_bo_placement_set(nvbo, flags, 0); + nouveau_bo_placement_set(nvbo, domain, 0); + INIT_LIST_HEAD(&nvbo->io_reserve_lru); ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, &nvbo->placement, align >> PAGE_SHIFT, false, @@ -318,19 +322,19 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags, int nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, - uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, + uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, struct sg_table *sg, struct dma_resv *robj, struct nouveau_bo **pnvbo) { struct nouveau_bo *nvbo; int ret; - nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode, + nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, tile_flags); if (IS_ERR(nvbo)) return PTR_ERR(nvbo); - ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj); + ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj); if (ret) return ret; @@ -339,27 +343,35 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, } static void -set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) +set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain) { *n = 0; - if (type & TTM_PL_FLAG_VRAM) - pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; - if (type & TTM_PL_FLAG_TT) - pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; - if (type & TTM_PL_FLAG_SYSTEM) - pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; + if (domain & NOUVEAU_GEM_DOMAIN_VRAM) { + pl[*n].mem_type = TTM_PL_VRAM; + pl[*n].flags = 0; + (*n)++; + } + if (domain & NOUVEAU_GEM_DOMAIN_GART) { + pl[*n].mem_type = TTM_PL_TT; + pl[*n].flags = 0; + (*n)++; + } + if (domain & NOUVEAU_GEM_DOMAIN_CPU) { + pl[*n].mem_type = TTM_PL_SYSTEM; + pl[(*n)++].flags = 0; + } } static void -set_placement_range(struct nouveau_bo *nvbo, uint32_t type) +set_placement_range(struct nouveau_bo *nvbo, uint32_t domain) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT; unsigned i, fpfn, lpfn; if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && - nvbo->mode && (type & TTM_PL_FLAG_VRAM) && + nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && nvbo->bo.mem.num_pages < vram_pages / 4) { /* * Make sure that the color and depth buffers are handled @@ -386,26 +398,23 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) } void -nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain, + uint32_t busy) { struct ttm_placement *pl = &nvbo->placement; - uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED : - TTM_PL_MASK_CACHING) | - (nvbo->pin_refcnt ? 
TTM_PL_FLAG_NO_EVICT : 0); pl->placement = nvbo->placements; - set_placement_list(nvbo->placements, &pl->num_placement, - type, flags); + set_placement_list(nvbo->placements, &pl->num_placement, domain); pl->busy_placement = nvbo->busy_placements; set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, - type | busy, flags); + domain | busy); - set_placement_range(nvbo, type); + set_placement_range(nvbo, domain); } int -nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) +nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; @@ -417,7 +426,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) return ret; if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && - memtype == TTM_PL_FLAG_VRAM && contig) { + domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) { if (!nvbo->contig) { nvbo->contig = true; force = true; @@ -425,36 +434,42 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) } } - if (nvbo->pin_refcnt) { - if (!(memtype & (1 << bo->mem.mem_type)) || evict) { + if (nvbo->bo.pin_count) { + bool error = evict; + + switch (bo->mem.mem_type) { + case TTM_PL_VRAM: + error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM); + break; + case TTM_PL_TT: + error |= !(domain & NOUVEAU_GEM_DOMAIN_GART); + default: + break; + } + + if (error) { NV_ERROR(drm, "bo %p pinned elsewhere: " "0x%08x vs 0x%08x\n", bo, - 1 << bo->mem.mem_type, memtype); + bo->mem.mem_type, domain); ret = -EBUSY; } - nvbo->pin_refcnt++; + ttm_bo_pin(&nvbo->bo); goto out; } if (evict) { - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); ret = nouveau_bo_validate(nvbo, false, false); if (ret) goto out; } - nvbo->pin_refcnt++; - nouveau_bo_placement_set(nvbo, memtype, 0); - - /* drop pin_refcnt temporarily, so we don't trip the assertion - * in nouveau_bo_move() that makes sure we're not trying to - * move a pinned buffer - */ - nvbo->pin_refcnt--; + nouveau_bo_placement_set(nvbo, domain, 0); ret = nouveau_bo_validate(nvbo, false, false); if (ret) goto out; - nvbo->pin_refcnt++; + + ttm_bo_pin(&nvbo->bo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -479,21 +494,14 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret, ref; + int ret; ret = ttm_bo_reserve(bo, false, false, NULL); if (ret) return ret; - ref = --nvbo->pin_refcnt; - WARN_ON_ONCE(ref < 0); - if (ref) - goto out; - - nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); - - ret = nouveau_bo_validate(nvbo, false, false); - if (ret == 0) { + ttm_bo_unpin(&nvbo->bo); + if (!nvbo->bo.pin_count) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: drm->gem.vram_available += bo->mem.size; @@ -506,9 +514,8 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) } } -out: ttm_bo_unreserve(bo); - return ret; + return 0; } int @@ -539,7 +546,7 @@ void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; + struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; int i; if (!ttm_dma) @@ -549,7 +556,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->ttm.num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; i++) dma_sync_single_for_device(drm->dev->dev, 
ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE); @@ -559,7 +566,7 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; + struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; int i; if (!ttm_dma) @@ -569,11 +576,31 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->ttm.num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; i++) dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE); } +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + + mutex_lock(&drm->ttm.io_reserve_mutex); + list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); + mutex_unlock(&drm->ttm.io_reserve_mutex); +} + +void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + + mutex_lock(&drm->ttm.io_reserve_mutex); + list_del_init(&nvbo->io_reserve_lru); + mutex_unlock(&drm->ttm.io_reserve_mutex); +} + int nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, bool no_wait_gpu) @@ -646,6 +673,36 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) return nouveau_sgdma_create_ttm(bo, page_flags); } +static int +nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, + struct ttm_resource *reg) +{ +#if IS_ENABLED(CONFIG_AGP) + struct nouveau_drm *drm = nouveau_bdev(bdev); +#endif + if (!reg) + return -EINVAL; +#if IS_ENABLED(CONFIG_AGP) + if (drm->agp.bridge) + return ttm_agp_bind(ttm, reg); +#endif + return nouveau_sgdma_bind(bdev, ttm, reg); +} + +static void +nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct nouveau_drm *drm = nouveau_bdev(bdev); + + if (drm->agp.bridge) { + ttm_agp_unbind(ttm); + return; + } +#endif + nouveau_sgdma_unbind(bdev, ttm); +} + static void nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) { @@ -653,11 +710,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) switch (bo->mem.mem_type) { case TTM_PL_VRAM: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, - TTM_PL_FLAG_SYSTEM); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, + NOUVEAU_GEM_DOMAIN_CPU); break; default: - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0); break; } @@ -697,8 +754,9 @@ done: } static int -nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_gpu, struct ttm_resource *new_reg) +nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_reg) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_channel *chan = drm->ttm.chan; @@ -717,7 +775,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, } mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); - ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr); + ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); if (ret == 0) { ret = drm->ttm.move(chan, bo, &bo->mem, new_reg); if (ret == 0) { @@ -725,7 +783,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, if (ret == 0) { ret = 
ttm_bo_move_accel_cleanup(bo, &fence->base, - evict, + evict, false, new_reg); nouveau_fence_unref(&fence); } @@ -804,14 +862,15 @@ nouveau_bo_move_init(struct nouveau_drm *drm) } static int -nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_resource *new_reg) +nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_reg) { - struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + .mem_type = TTM_PL_TT, + .flags = 0 }; struct ttm_placement placement; struct ttm_resource tmp_reg; @@ -822,33 +881,44 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_reg = *new_reg; tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx); + ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx); if (ret) return ret; - ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx); + ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (ret) + goto out; + + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); + if (ret) + goto out; + + ret = nouveau_bo_move_m2mf(bo, true, ctx, &tmp_reg); if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) goto out; - ret = ttm_bo_move_ttm(bo, &ctx, new_reg); + nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); + ttm_bo_assign_mem(bo, &tmp_reg); out: ttm_resource_free(bo, &tmp_reg); return ret; } static int -nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_resource *new_reg) +nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_reg) { - struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + .mem_type = TTM_PL_TT, + .flags = 0 }; struct ttm_placement placement; struct ttm_resource tmp_reg; @@ -859,15 +929,20 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_reg = *new_reg; tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx); + ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx); if (ret) return ret; - ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg); - if (ret) - goto out; + ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (unlikely(ret != 0)) + return ret; + + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); + if (unlikely(ret != 0)) + return ret; - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg); + ttm_bo_assign_mem(bo, &tmp_reg); + ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg); if (ret) goto out; @@ -888,6 +963,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, if (bo->destroy != nouveau_bo_del_ttm) return; + nouveau_bo_del_io_reserve_lru(bo); + if (mem && new_reg->mem_type != TTM_PL_SYSTEM && mem->mem.page == nvbo->page) { list_for_each_entry(vma, &nvbo->vma_list, head) { @@ -954,49 +1031,63 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + if (new_reg->mem_type == TTM_PL_TT) { + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); + if (ret) + return ret; + } + + nouveau_bo_move_ntfy(bo, evict, new_reg); + ret = ttm_bo_wait_ctx(bo, ctx); if 
(ret) - return ret; + goto out_ntfy; - if (nvbo->pin_refcnt) + if (nvbo->bo.pin_count) NV_WARN(drm, "Moving pinned object %p!\n", nvbo); if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile); if (ret) - return ret; + goto out_ntfy; } /* Fake bo copy. */ if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { - BUG_ON(bo->mem.mm_node != NULL); - bo->mem = *new_reg; - new_reg->mm_node = NULL; + ttm_bo_move_null(bo, new_reg); + goto out; + } + + if (old_reg->mem_type == TTM_PL_SYSTEM && + new_reg->mem_type == TTM_PL_TT) { + ttm_bo_move_null(bo, new_reg); + goto out; + } + + if (old_reg->mem_type == TTM_PL_TT && + new_reg->mem_type == TTM_PL_SYSTEM) { + nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); + ttm_bo_assign_mem(bo, new_reg); goto out; } /* Hardware assisted copy. */ if (drm->ttm.move) { if (new_reg->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flipd(bo, evict, - ctx->interruptible, - ctx->no_wait_gpu, new_reg); + ret = nouveau_bo_move_flipd(bo, evict, ctx, + new_reg); else if (old_reg->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flips(bo, evict, - ctx->interruptible, - ctx->no_wait_gpu, new_reg); + ret = nouveau_bo_move_flips(bo, evict, ctx, + new_reg); else - ret = nouveau_bo_move_m2mf(bo, evict, - ctx->interruptible, - ctx->no_wait_gpu, new_reg); + ret = nouveau_bo_move_m2mf(bo, evict, ctx, + new_reg); if (!ret) goto out; } /* Fallback to software copy. */ - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); - if (ret == 0) - ret = ttm_bo_move_memcpy(bo, ctx, new_reg); + ret = ttm_bo_move_memcpy(bo, ctx, new_reg); out: if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { @@ -1005,7 +1096,12 @@ out: else nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); } - +out_ntfy: + if (ret) { + swap(*new_reg, bo->mem); + nouveau_bo_move_ntfy(bo, false, new_reg); + swap(*new_reg, bo->mem); + } return ret; } @@ -1018,33 +1114,72 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) filp->private_data); } +static void +nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, + struct ttm_resource *reg) +{ + struct nouveau_mem *mem = nouveau_mem(reg); + + if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { + switch (reg->mem_type) { + case TTM_PL_TT: + if (mem->kind) + nvif_object_unmap_handle(&mem->mem.object); + break; + case TTM_PL_VRAM: + nvif_object_unmap_handle(&mem->mem.object); + break; + default: + break; + } + } +} + static int nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); struct nvkm_device *device = nvxx_device(&drm->client.device); struct nouveau_mem *mem = nouveau_mem(reg); + struct nvif_mmu *mmu = &drm->client.mmu; + const u8 type = mmu->type[drm->ttm.type_vram].type; + int ret; + mutex_lock(&drm->ttm.io_reserve_mutex); +retry: switch (reg->mem_type) { case TTM_PL_SYSTEM: /* System memory */ - return 0; + ret = 0; + goto out; case TTM_PL_TT: #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { - reg->bus.offset = reg->start << PAGE_SHIFT; - reg->bus.base = drm->agp.base; + reg->bus.offset = (reg->start << PAGE_SHIFT) + + drm->agp.base; reg->bus.is_iomem = !drm->agp.cma; + reg->bus.caching = ttm_write_combined; } #endif - if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind) + if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || + !mem->kind) { /* untiled */ + ret = 0; break; + } fallthrough; /* tiled memory */ case TTM_PL_VRAM: - reg->bus.offset = 
reg->start << PAGE_SHIFT; - reg->bus.base = device->func->resource_addr(device, 1); + reg->bus.offset = (reg->start << PAGE_SHIFT) + + device->func->resource_addr(device, 1); reg->bus.is_iomem = true; + + /* Some BARs do not support being ioremapped WC */ + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && + type & NVIF_MEM_UNCACHED) + reg->bus.caching = ttm_uncached; + else + reg->bus.caching = ttm_write_combined; + if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { union { struct nv50_mem_map_v0 nv50; @@ -1052,7 +1187,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) } args; u64 handle, length; u32 argc = 0; - int ret; switch (mem->mem.object.oclass) { case NVIF_CLASS_MEM_NV50: @@ -1078,43 +1212,49 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) &handle, &length); if (ret != 1) { if (WARN_ON(ret == 0)) - return -EINVAL; - return ret; + ret = -EINVAL; + goto out; } - reg->bus.base = 0; reg->bus.offset = handle; + ret = 0; } break; default: - return -EINVAL; + ret = -EINVAL; } - return 0; + +out: + if (ret == -ENOSPC) { + struct nouveau_bo *nvbo; + + nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, + typeof(*nvbo), + io_reserve_lru); + if (nvbo) { + list_del_init(&nvbo->io_reserve_lru); + drm_vma_node_unmap(&nvbo->bo.base.vma_node, + bdev->dev_mapping); + nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem); + goto retry; + } + + } + mutex_unlock(&drm->ttm.io_reserve_mutex); + return ret; } static void nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg) { struct nouveau_drm *drm = nouveau_bdev(bdev); - struct nouveau_mem *mem = nouveau_mem(reg); - if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { - switch (reg->mem_type) { - case TTM_PL_TT: - if (mem->kind) - nvif_object_unmap_handle(&mem->mem.object); - break; - case TTM_PL_VRAM: - nvif_object_unmap_handle(&mem->mem.object); - break; - default: - break; - } - } + mutex_lock(&drm->ttm.io_reserve_mutex); + nouveau_ttm_io_mem_free_locked(drm, reg); + mutex_unlock(&drm->ttm.io_reserve_mutex); } -static int -nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) +vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); @@ -1130,75 +1270,69 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) !nvbo->kind) return 0; - if (bo->mem.mem_type == TTM_PL_SYSTEM) { - nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); + if (bo->mem.mem_type != TTM_PL_SYSTEM) + return 0; + + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); - ret = nouveau_bo_validate(nvbo, false, false); - if (ret) - return ret; + } else { + /* make sure bo is in mappable vram */ + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || + bo->mem.start + bo->mem.num_pages < mappable) + return 0; + + for (i = 0; i < nvbo->placement.num_placement; ++i) { + nvbo->placements[i].fpfn = 0; + nvbo->placements[i].lpfn = mappable; } - return 0; - } - /* make sure bo is in mappable vram */ - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || - bo->mem.start + bo->mem.num_pages < mappable) - return 0; + for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { + nvbo->busy_placements[i].fpfn = 0; + nvbo->busy_placements[i].lpfn = mappable; + } - for (i = 0; i < nvbo->placement.num_placement; ++i) { - nvbo->placements[i].fpfn = 0; - nvbo->placements[i].lpfn = mappable; + nouveau_bo_placement_set(nvbo, 
NOUVEAU_GEM_DOMAIN_VRAM, 0); } - for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { - nvbo->busy_placements[i].fpfn = 0; - nvbo->busy_placements[i].lpfn = mappable; - } + ret = nouveau_bo_validate(nvbo, false, false); + if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS)) + return VM_FAULT_NOPAGE; + else if (unlikely(ret)) + return VM_FAULT_SIGBUS; - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); - return nouveau_bo_validate(nvbo, false, false); + ttm_bo_move_to_lru_tail_unlocked(bo); + return 0; } static int -nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) +nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { - struct ttm_dma_tt *ttm_dma = (void *)ttm; + struct ttm_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; struct device *dev; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); - if (ttm->state != tt_unpopulated) + if (ttm_tt_is_populated(ttm)) return 0; if (slave && ttm->sg) { /* make userspace faulting work */ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, ttm_dma->dma_address, ttm->num_pages); - ttm->state = tt_unbound; return 0; } - drm = nouveau_bdev(ttm->bdev); + drm = nouveau_bdev(bdev); dev = drm->dev->dev; -#if IS_ENABLED(CONFIG_AGP) - if (drm->agp.bridge) { - return ttm_agp_tt_populate(ttm, ctx); - } -#endif - -#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) - if (swiotlb_nr_tbl()) { - return ttm_dma_populate((void *)ttm, dev, ctx); - } -#endif - return ttm_populate_and_map_pages(dev, ttm_dma, ctx); + return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); } static void -nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) +nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - struct ttm_dma_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; struct device *dev; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -1206,24 +1340,26 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) if (slave) return; - drm = nouveau_bdev(ttm->bdev); + drm = nouveau_bdev(bdev); dev = drm->dev->dev; + return ttm_pool_free(&drm->ttm.bdev.pool, ttm); +} + +static void +nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) +{ #if IS_ENABLED(CONFIG_AGP) + struct nouveau_drm *drm = nouveau_bdev(bdev); if (drm->agp.bridge) { - ttm_agp_tt_unpopulate(ttm); - return; - } -#endif - -#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) - if (swiotlb_nr_tbl()) { - ttm_dma_unpopulate((void *)ttm, dev); + ttm_agp_unbind(ttm); + ttm_tt_destroy_common(bdev, ttm); + ttm_agp_destroy(ttm); return; } #endif - - ttm_unmap_and_unpopulate_pages(dev, ttm_dma); + nouveau_sgdma_destroy(bdev, ttm); } void @@ -1237,16 +1373,22 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl dma_resv_add_shared_fence(resv, &fence->base); } +static void +nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo) +{ + nouveau_bo_move_ntfy(bo, false, NULL); +} + struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, + .ttm_tt_destroy = &nouveau_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = nouveau_bo_evict_flags, - .move_notify = nouveau_bo_move_ntfy, + .delete_mem_notify = nouveau_bo_delete_mem_notify, .move = nouveau_bo_move, .verify_access = nouveau_bo_verify_access, - .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = 
&nouveau_ttm_io_mem_free, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index aecb7481df0d..641ef6298a0e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -18,6 +18,7 @@ struct nouveau_bo { bool force_coherent; struct ttm_bo_kmap_obj kmap; struct list_head head; + struct list_head io_reserve_lru; /* protected by ttm_bo_reserve() */ struct drm_file *reserved_by; @@ -39,9 +40,6 @@ struct nouveau_bo { struct nouveau_drm_tile *tile; - /* protect by the ttm reservation lock */ - int pin_refcnt; - struct ttm_bo_kmap_obj dma_buf_vmap; }; @@ -76,10 +74,10 @@ extern struct ttm_bo_driver nouveau_bo_driver; void nouveau_bo_move_init(struct nouveau_drm *); struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align, - u32 flags, u32 tile_mode, u32 tile_flags); -int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags, + u32 domain, u32 tile_mode, u32 tile_flags); +int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain, struct sg_table *sg, struct dma_resv *robj); -int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, +int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain, u32 tile_mode, u32 tile_flags, struct sg_table *sg, struct dma_resv *robj, struct nouveau_bo **); @@ -91,11 +89,14 @@ void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy); void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val); u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index); void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); +vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo); void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive); int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, bool no_wait_gpu); void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo); void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo); +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo); +void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo); /* TODO: submit equivalent to TTM generic API upstream? 
*/ static inline void __iomem * @@ -119,13 +120,13 @@ nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo) } static inline int -nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags, +nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain, struct nouveau_bo **pnvbo) { - int ret = nouveau_bo_new(cli, size, align, flags, + int ret = nouveau_bo_new(cli, size, align, domain, 0, 0, NULL, NULL, pnvbo); if (ret == 0) { - ret = nouveau_bo_pin(*pnvbo, flags, true); + ret = nouveau_bo_pin(*pnvbo, domain, true); if (ret == 0) { ret = nouveau_bo_map(*pnvbo); if (ret == 0) diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index b80e4ebf14a6..5d191e58edf1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -107,7 +107,7 @@ nouveau_channel_del(struct nouveau_channel **pchan) nvif_object_dtor(&chan->push.ctxdma); nouveau_vma_del(&chan->push.vma); nouveau_bo_unmap(chan->push.buffer); - if (chan->push.buffer && chan->push.buffer->pin_refcnt) + if (chan->push.buffer && chan->push.buffer->bo.pin_count) nouveau_bo_unpin(chan->push.buffer); nouveau_bo_ref(NULL, &chan->push.buffer); kfree(chan); @@ -163,9 +163,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, atomic_set(&chan->killed, 0); /* allocate memory for dma push buffer */ - target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; + target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT; if (nouveau_vram_pushbuf) - target = TTM_PL_FLAG_VRAM; + target = NOUVEAU_GEM_DOMAIN_VRAM; ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL, &chan->push.buffer); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 49dd0cbc332f..6f21f36719fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1023,29 +1023,6 @@ get_tmds_link_bandwidth(struct drm_connector *connector) return 112000 * duallink_scale; } -enum drm_mode_status -nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode, - const unsigned min_clock, - const unsigned max_clock, - unsigned int *clock_out) -{ - unsigned int clock = mode->clock; - - if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == - DRM_MODE_FLAG_3D_FRAME_PACKING) - clock *= 2; - - if (clock < min_clock) - return MODE_CLOCK_LOW; - if (clock > max_clock) - return MODE_CLOCK_HIGH; - - if (clock_out) - *clock_out = clock; - - return MODE_OK; -} - static enum drm_mode_status nouveau_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) @@ -1053,7 +1030,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; struct drm_encoder *encoder = to_drm_encoder(nv_encoder); - unsigned min_clock = 25000, max_clock = min_clock; + unsigned int min_clock = 25000, max_clock = min_clock, clock = mode->clock; switch (nv_encoder->dcb->type) { case DCB_OUTPUT_LVDS: @@ -1082,8 +1059,15 @@ nouveau_connector_mode_valid(struct drm_connector *connector, return MODE_BAD; } - return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock, - NULL); + if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) + clock *= 2; + + if (clock < min_clock) + return MODE_CLOCK_LOW; + if (clock > max_clock) + return MODE_CLOCK_HIGH; + + return MODE_OK; } static struct drm_encoder * diff --git 
a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 4e8112fde3e6..92987daa5e17 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -101,7 +101,7 @@ unsigned long nouveau_dmem_page_addr(struct page *page) { struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) - - chunk->pagemap.res.start; + chunk->pagemap.range.start; return chunk->bo->offset + off; } @@ -249,17 +249,19 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage) chunk->drm = drm; chunk->pagemap.type = MEMORY_DEVICE_PRIVATE; - chunk->pagemap.res = *res; + chunk->pagemap.range.start = res->start; + chunk->pagemap.range.end = res->end; + chunk->pagemap.nr_range = 1; chunk->pagemap.ops = &nouveau_dmem_pagemap_ops; chunk->pagemap.owner = drm->dev; ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0, - TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL, &chunk->bo); if (ret) goto out_release; - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) goto out_bo_free; @@ -273,7 +275,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage) list_add(&chunk->list, &drm->dmem->chunks); mutex_unlock(&drm->dmem->mutex); - pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT; + pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT; page = pfn_to_page(pfn_first); spin_lock(&drm->dmem->lock); for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) { @@ -294,8 +296,7 @@ out_bo_unpin: out_bo_free: nouveau_bo_ref(NULL, &chunk->bo); out_release: - release_mem_region(chunk->pagemap.res.start, - resource_size(&chunk->pagemap.res)); + release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range)); out_free: kfree(chunk); out: @@ -346,7 +347,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm) mutex_lock(&drm->dmem->mutex); list_for_each_entry(chunk, &drm->dmem->chunks, list) { - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); /* FIXME handle pin failure */ WARN_ON(ret); } @@ -382,8 +383,8 @@ nouveau_dmem_fini(struct nouveau_drm *drm) nouveau_bo_ref(NULL, &chunk->bo); list_del(&chunk->list); memunmap_pages(&chunk->pagemap); - release_mem_region(chunk->pagemap.res.start, - resource_size(&chunk->pagemap.res)); + release_mem_region(chunk->pagemap.range.start, + range_len(&chunk->pagemap.range)); kfree(chunk); } diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 810bf6956568..040ed88d362d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -231,23 +231,30 @@ nv50_dp_mode_valid(struct drm_connector *connector, const struct drm_display_mode *mode, unsigned *out_clock) { - const unsigned min_clock = 25000; - unsigned max_clock, ds_clock, clock; - enum drm_mode_status ret; + const unsigned int min_clock = 25000; + unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock; + const u8 bpp = connector->display_info.bpc * 3; if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace) return MODE_NO_INTERLACE; - max_clock = outp->dp.link_nr * outp->dp.link_bw; - ds_clock = drm_dp_downstream_max_clock(outp->dp.dpcd, - outp->dp.downstream_ports); - if (ds_clock) - max_clock = min(max_clock, ds_clock); + if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) + 
clock *= 2; + + max_rate = outp->dp.link_nr * outp->dp.link_bw; + mode_rate = DIV_ROUND_UP(clock * bpp, 8); + if (mode_rate > max_rate) + return MODE_CLOCK_HIGH; + + ds_max_dotclock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, outp->dp.downstream_ports); + if (ds_max_dotclock && clock > ds_max_dotclock) + return MODE_CLOCK_HIGH; + + if (clock < min_clock) + return MODE_CLOCK_LOW; - clock = mode->clock * (connector->display_info.bpc * 3) / 10; - ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock, - &clock); if (out_clock) *out_clock = clock; - return ret; + + return MODE_OK; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 42fc5c813a9b..d141a5f004af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -820,6 +820,7 @@ static int nouveau_do_suspend(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); + struct ttm_resource_manager *man; int ret; nouveau_svm_suspend(drm); @@ -836,7 +837,9 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) } NV_DEBUG(drm, "evicting buffers...\n"); - ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); + + man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + ttm_resource_manager_evict_all(&drm->ttm.bdev, man); NV_DEBUG(drm, "waiting for kernel channels to go idle...\n"); if (drm->cechan) { @@ -1207,16 +1210,7 @@ driver_stub = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_pin = nouveau_gem_prime_pin, - .gem_prime_unpin = nouveau_gem_prime_unpin, - .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table, .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, - .gem_prime_vmap = nouveau_gem_prime_vmap, - .gem_prime_vunmap = nouveau_gem_prime_vunmap, - - .gem_free_object_unlocked = nouveau_gem_object_del, - .gem_open_object = nouveau_gem_object_open, - .gem_close_object = nouveau_gem_object_close, .dumb_create = nouveau_display_dumb_create, .dumb_map_offset = nouveau_display_dumb_map_offset, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 73ebf5fba2fc..9d04d1b36434 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -56,7 +56,6 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_memory.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/drm_audio_component.h> @@ -164,6 +163,8 @@ struct nouveau_drm { int type_vram; int type_host[2]; int type_ncoh[2]; + struct mutex io_reserve_mutex; + struct list_head io_reserve_lru; } ttm; /* GEM interface support */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index fad8030ec1f8..24ec5339efb4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, if (ret) goto out_unref; - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (ret) { NV_ERROR(drm, "failed to pin fb: %d\n", ret); goto out_unref; @@ -378,8 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; info->fbops = &nouveau_fbcon_sw_ops; - info->fix.smem_start = nvbo->bo.mem.bus.base + - nvbo->bo.mem.bus.offset; + info->fix.smem_start = nvbo->bo.mem.bus.offset; info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT; info->screen_base = 
nvbo_kmap_obj_iovirtual(nvbo); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 81f111ad3f4f..dd51cd0ae20c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -169,6 +169,17 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) ttm_bo_unreserve(&nvbo->bo); } +const struct drm_gem_object_funcs nouveau_gem_object_funcs = { + .free = nouveau_gem_object_del, + .open = nouveau_gem_object_open, + .close = nouveau_gem_object_close, + .pin = nouveau_gem_prime_pin, + .unpin = nouveau_gem_prime_unpin, + .get_sg_table = nouveau_gem_prime_get_sg_table, + .vmap = nouveau_gem_prime_vmap, + .vunmap = nouveau_gem_prime_vunmap, +}; + int nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, @@ -176,33 +187,28 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, { struct nouveau_drm *drm = cli->drm; struct nouveau_bo *nvbo; - u32 flags = 0; int ret; - if (domain & NOUVEAU_GEM_DOMAIN_VRAM) - flags |= TTM_PL_FLAG_VRAM; - if (domain & NOUVEAU_GEM_DOMAIN_GART) - flags |= TTM_PL_FLAG_TT; - if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) - flags |= TTM_PL_FLAG_SYSTEM; - - if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) - flags |= TTM_PL_FLAG_UNCACHED; + if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART))) + domain |= NOUVEAU_GEM_DOMAIN_CPU; - nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode, + nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, tile_flags); if (IS_ERR(nvbo)) return PTR_ERR(nvbo); + nvbo->bo.base.funcs = &nouveau_gem_object_funcs; + /* Initialize the embedded gem-object. We return a single gem-reference * to the caller, instead of a normal nouveau_bo ttm reference. */ ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size); if (ret) { - nouveau_bo_ref(NULL, &nvbo); + drm_gem_object_release(&nvbo->bo.base); + kfree(nvbo); return ret; } - ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL); + ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL); if (ret) { nouveau_bo_ref(NULL, &nvbo); return ret; @@ -217,7 +223,6 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) nvbo->valid_domains &= domain; - nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp; *pnvbo = nvbo; return 0; } @@ -296,32 +301,28 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains, struct ttm_buffer_object *bo = &nvbo->bo; uint32_t domains = valid_domains & nvbo->valid_domains & (write_domains ? 
write_domains : read_domains); - uint32_t pref_flags = 0, valid_flags = 0; + uint32_t pref_domains = 0; if (!domains) return -EINVAL; - if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) - valid_flags |= TTM_PL_FLAG_VRAM; - - if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) - valid_flags |= TTM_PL_FLAG_TT; + valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART); if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && bo->mem.mem_type == TTM_PL_VRAM) - pref_flags |= TTM_PL_FLAG_VRAM; + pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM; else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && bo->mem.mem_type == TTM_PL_TT) - pref_flags |= TTM_PL_FLAG_TT; + pref_domains |= NOUVEAU_GEM_DOMAIN_GART; else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) - pref_flags |= TTM_PL_FLAG_VRAM; + pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM; else - pref_flags |= TTM_PL_FLAG_TT; + pref_domains |= NOUVEAU_GEM_DOMAIN_GART; - nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); + nouveau_bo_placement_set(nvbo, pref_domains, valid_domains); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h index 978e07591990..b35c180322e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.h +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h @@ -5,6 +5,8 @@ #include "nouveau_drv.h" #include "nouveau_bo.h" +extern const struct drm_gem_object_funcs nouveau_gem_object_funcs; + static inline struct nouveau_bo * nouveau_gem_object(struct drm_gem_object *gem) { diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 269d8707acc3..a1049e9feee1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem) } int -nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt) +nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) { struct nouveau_mem *mem = nouveau_mem(reg); struct nouveau_cli *cli = mem->cli; @@ -116,8 +116,10 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt) mem->comp = 0; } - if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl; - else args.dma = tt->dma_address; + if (tt->sg) + args.sgl = tt->sg->sgl; + else + args.dma = tt->dma_address; mutex_lock(&drm->master.lock); cli->base.super = true; @@ -176,6 +178,8 @@ void nouveau_mem_del(struct ttm_resource *reg) { struct nouveau_mem *mem = nouveau_mem(reg); + if (!mem) + return; nouveau_mem_fini(mem); kfree(reg->mm_node); reg->mm_node = NULL; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h index 3fe1cfed57a1..7df3848e85aa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.h +++ b/drivers/gpu/drm/nouveau/nouveau_mem.h @@ -1,7 +1,7 @@ #ifndef __NOUVEAU_MEM_H__ #define __NOUVEAU_MEM_H__ #include <drm/ttm/ttm_bo_api.h> -struct ttm_dma_tt; +struct ttm_tt; #include <nvif/mem.h> #include <nvif/vmm.h> @@ -24,7 +24,7 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp, struct ttm_resource *); void nouveau_mem_del(struct ttm_resource *); int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page); -int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *); +int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *); void nouveau_mem_fini(struct nouveau_mem *); int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index bae6a3eccee0..a8264aebf3d4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++
b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -32,7 +32,7 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) struct nouveau_bo *nvbo = nouveau_gem_object(obj); int npages = nvbo->bo.num_pages; - return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); + return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages); } void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) @@ -64,14 +64,12 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, struct nouveau_bo *nvbo; struct dma_resv *robj = attach->dmabuf->resv; u64 size = attach->dmabuf->size; - u32 flags = 0; int align = 0; int ret; - flags = TTM_PL_FLAG_TT; - dma_resv_lock(robj, NULL); - nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0); + nvbo = nouveau_bo_alloc(&drm->client, &size, &align, + NOUVEAU_GEM_DOMAIN_GART, 0, 0); if (IS_ERR(nvbo)) { obj = ERR_CAST(nvbo); goto unlock; @@ -79,6 +77,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; + nvbo->bo.base.funcs = &nouveau_gem_object_funcs; + /* Initialize the embedded gem-object. We return a single gem-reference * to the caller, instead of a normal nouveau_bo ttm reference. */ ret = drm_gem_object_init(dev, &nvbo->bo.base, size); @@ -88,7 +88,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, goto unlock; } - ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj); + ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART, + sg, robj); if (ret) { nouveau_bo_ref(NULL, &nvbo); obj = ERR_PTR(ret); @@ -108,7 +109,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj) int ret; /* pin buffer into GTT */ - ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false); + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false); if (ret) return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index eef75c53a197..a2e23fd4906c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -5,99 +5,88 @@ #include "nouveau_drv.h" #include "nouveau_mem.h" #include "nouveau_ttm.h" +#include "nouveau_bo.h" struct nouveau_sgdma_be { /* this has to be the first field so populate/unpopulated in * nouve_bo.c works properly, otherwise have to move them here */ - struct ttm_dma_tt ttm; + struct ttm_tt ttm; struct nouveau_mem *mem; }; -static void -nouveau_sgdma_destroy(struct ttm_tt *ttm) +void +nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; if (ttm) { - ttm_dma_tt_fini(&nvbe->ttm); + nouveau_sgdma_unbind(bdev, ttm); + ttm_tt_destroy_common(bdev, ttm); + ttm_tt_fini(&nvbe->ttm); kfree(nvbe); } } -static int -nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) +int +nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; + struct nouveau_drm *drm = nouveau_bdev(bdev); struct nouveau_mem *mem = nouveau_mem(reg); int ret; + if (nvbe->mem) + return 0; + ret = nouveau_mem_host(reg, &nvbe->ttm); if (ret) return ret; - ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]); - if (ret) { - nouveau_mem_fini(mem); - return ret; + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]); + if (ret) { + nouveau_mem_fini(mem); + return ret; + } } 
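/*
 * [Editor's note, illustration only, not part of the patch] The
 * nv04/nv50 sgdma backends above collapse into generation-agnostic
 * helpers, with nvbe->mem doubling as an "already bound" guard.
 * Minimal lifecycle sketch using only helpers from this diff:
 *
 *	struct ttm_tt *tt = nouveau_sgdma_create_ttm(bo, page_flags);
 *
 *	ret = nouveau_sgdma_bind(bo->bdev, tt, reg);
 *	if (ret == 0) {
 *		// pre-Tesla, bind also maps through cli->vmm
 *		nouveau_sgdma_unbind(bo->bdev, tt);  // fini + guard reset
 *	}
 *	nouveau_sgdma_destroy(bo->bdev, tt);  // re-unbind is a no-op
 *
 * The guard makes bind idempotent, which is what allows the reworked
 * move paths in nouveau_bo.c to call nouveau_ttm_tt_bind()
 * opportunistically.
 */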
nvbe->mem = mem; return 0; } -static void -nv04_sgdma_unbind(struct ttm_tt *ttm) -{ - struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; - nouveau_mem_fini(nvbe->mem); -} - -static struct ttm_backend_func nv04_sgdma_backend = { - .bind = nv04_sgdma_bind, - .unbind = nv04_sgdma_unbind, - .destroy = nouveau_sgdma_destroy -}; - -static int -nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg) +void +nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; - struct nouveau_mem *mem = nouveau_mem(reg); - int ret; - - ret = nouveau_mem_host(reg, &nvbe->ttm); - if (ret) - return ret; - - nvbe->mem = mem; - return 0; + if (nvbe->mem) { + nouveau_mem_fini(nvbe->mem); + nvbe->mem = NULL; + } } -static struct ttm_backend_func nv50_sgdma_backend = { - .bind = nv50_sgdma_bind, - .unbind = nv04_sgdma_unbind, - .destroy = nouveau_sgdma_destroy -}; - struct ttm_tt * nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_sgdma_be *nvbe; + enum ttm_caching caching; + + if (nvbo->force_coherent) + caching = ttm_uncached; + else if (drm->agp.bridge) + caching = ttm_write_combined; + else + caching = ttm_cached; nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); if (!nvbe) return NULL; - if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) - nvbe->ttm.ttm.func = &nv04_sgdma_backend; - else - nvbe->ttm.ttm.func = &nv50_sgdma_backend; - - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) { kfree(nvbe); return NULL; } - return &nvbe->ttm.ttm; + return &nvbe->ttm; } diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 2df1c0460559..4f69e4c3dafd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -105,11 +105,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, struct nouveau_cli *cli = nouveau_cli(file_priv); struct drm_nouveau_svm_bind *args = data; unsigned target, cmd, priority; - unsigned long addr, end, size; + unsigned long addr, end; struct mm_struct *mm; args->va_start &= PAGE_MASK; - args->va_end &= PAGE_MASK; + args->va_end = ALIGN(args->va_end, PAGE_SIZE); /* Sanity check arguments */ if (args->reserved0 || args->reserved1) @@ -118,8 +118,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, return -EINVAL; if (args->va_start >= args->va_end) return -EINVAL; - if (!args->npages) - return -EINVAL; cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT; cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK; @@ -151,12 +149,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, if (args->stride) return -EINVAL; - size = ((unsigned long)args->npages) << PAGE_SHIFT; - if ((args->va_start + size) <= args->va_start) - return -EINVAL; - if ((args->va_start + size) > args->va_end) - return -EINVAL; - /* * Ok we are ask to do something sane, for now we only support migrate * commands but we will add things like memory policy (what to do on @@ -171,7 +163,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, return -EINVAL; } - for (addr = args->va_start, end = args->va_start + size; addr < end;) { + for (addr = args->va_start, end = args->va_end; addr < end;) { struct vm_area_struct *vma; unsigned long next; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 53c6f8827322..a37bc3d7b38b 
100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -22,6 +22,10 @@ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#include <linux/limits.h> +#include <linux/swiotlb.h> + #include "nouveau_drv.h" #include "nouveau_gem.h" #include "nouveau_mem.h" @@ -108,7 +112,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man, return ret; ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0, - reg->num_pages << PAGE_SHIFT, &mem->vma[0]); + (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]); if (ret) { nouveau_mem_del(reg); return ret; @@ -123,13 +127,53 @@ const struct ttm_resource_manager_func nv04_gart_manager = { .free = nouveau_manager_del, }; +static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ttm_buffer_object *bo = vma->vm_private_data; + pgprot_t prot; + vm_fault_t ret; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + ret = nouveau_ttm_fault_reserve_notify(bo); + if (ret) + goto error_unlock; + + nouveau_bo_del_io_reserve_lru(bo); + prot = vm_get_page_prot(vma->vm_flags); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); + nouveau_bo_add_io_reserve_lru(bo); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + +error_unlock: + dma_resv_unlock(bo->base.resv); + return ret; +} + +static struct vm_operations_struct nouveau_ttm_vm_ops = { + .fault = nouveau_ttm_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close, + .access = ttm_bo_vm_access +}; + int nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv = filp->private_data; struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); + int ret; - return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); + ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev); + if (ret) + return ret; + + vma->vm_ops = &nouveau_ttm_vm_ops; + return 0; } static int @@ -156,24 +200,13 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind) static int nouveau_ttm_init_vram(struct nouveau_drm *drm) { - struct nvif_mmu *mmu = &drm->client.mmu; if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - /* Some BARs do not support being ioremapped WC */ - const u8 type = mmu->type[drm->ttm.type_vram].type; struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL); + if (!man) return -ENOMEM; - man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - - if (type & NVIF_MEM_UNCACHED) { - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; - } - man->func = &nouveau_vram_manager; - man->use_io_reserve_lru = true; ttm_resource_manager_init(man, drm->gem.vram_available >> PAGE_SHIFT); @@ -181,9 +214,7 @@ nouveau_ttm_init_vram(struct nouveau_drm *drm) ttm_resource_manager_set_used(man, true); return 0; } else { - return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, - TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, - TTM_PL_FLAG_WC, false, + return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false, drm->gem.vram_available >> PAGE_SHIFT); } } @@ -195,7 +226,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm) if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { ttm_resource_manager_set_used(man, false); - ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_evict_all(&drm->ttm.bdev, man); ttm_resource_manager_cleanup(man); 
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL); kfree(man); @@ -208,25 +239,14 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) { struct ttm_resource_manager *man; unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT; - unsigned available_caching, default_caching; const struct ttm_resource_manager_func *func = NULL; - if (drm->agp.bridge) { - available_caching = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_WC; - default_caching = TTM_PL_FLAG_WC; - } else { - available_caching = TTM_PL_MASK_CACHING; - default_caching = TTM_PL_FLAG_CACHED; - } if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) func = &nouveau_gart_manager; else if (!drm->agp.bridge) func = &nv04_gart_manager; else - return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, - available_caching, default_caching, - true, + return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true, size_pages); man = kzalloc(sizeof(*man), GFP_KERNEL); @@ -234,8 +254,6 @@ nouveau_ttm_init_gtt(struct nouveau_drm *drm) return -ENOMEM; man->func = func; - man->available_caching = available_caching; - man->default_caching = default_caching; man->use_tt = true; ttm_resource_manager_init(man, size_pages); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man); @@ -253,7 +271,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm) ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT); else { ttm_resource_manager_set_used(man, false); - ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_evict_all(&drm->ttm.bdev, man); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL); kfree(man); @@ -267,6 +285,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) struct nvkm_pci *pci = device->pci; struct nvif_mmu *mmu = &drm->client.mmu; struct drm_device *dev = drm->dev; + bool need_swiotlb = false; int typei, ret; ret = nouveau_ttm_init_host(drm, 0); @@ -301,11 +320,14 @@ nouveau_ttm_init(struct nouveau_drm *drm) drm->agp.cma = pci->agp.cma; } - ret = ttm_bo_device_init(&drm->ttm.bdev, - &nouveau_bo_driver, - dev->anon_inode->i_mapping, - dev->vma_offset_manager, - drm->client.mmu.dmabits <= 32 ? 
true : false); +#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) + need_swiotlb = !!swiotlb_nr_tbl(); +#endif + + ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver, + drm->dev->dev, dev->anon_inode->i_mapping, + dev->vma_offset_manager, need_swiotlb, + drm->client.mmu.dmabits <= 32); if (ret) { NV_ERROR(drm, "error initialising bo driver, %d\n", ret); return ret; @@ -339,6 +361,9 @@ nouveau_ttm_init(struct nouveau_drm *drm) return ret; } + mutex_init(&drm->ttm.io_reserve_mutex); + INIT_LIST_HEAD(&drm->ttm.io_reserve_lru); + NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20)); NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20)); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h index eaf25461cd91..69552049bb96 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.h +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h @@ -22,4 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *); int nouveau_ttm_global_init(struct nouveau_drm *); void nouveau_ttm_global_release(struct nouveau_drm *); +int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg); +void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm); #endif diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 6b697ee6bc0e..1253fdec712d 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -130,10 +130,11 @@ nv17_fence_create(struct nouveau_drm *drm) priv->base.context_del = nv10_fence_context_del; spin_lock_init(&priv->lock); - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(&drm->client, 4096, 0x1000, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &priv->bo); if (!ret) { - ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (!ret) { ret = nouveau_bo_map(priv->bo); if (ret) diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 49b46f51073c..447238e3cbe7 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -81,10 +81,11 @@ nv50_fence_create(struct nouveau_drm *drm) priv->base.context_del = nv10_fence_context_del; spin_lock_init(&priv->lock); - ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, + ret = nouveau_bo_new(&drm->client, 4096, 0x1000, + NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL, &priv->bo); if (!ret) { - ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); + ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false); if (!ret) { ret = nouveau_bo_map(priv->bo); if (ret) diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 7ed36b3a6b7d..7c9c928c3196 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -209,12 +209,13 @@ nv84_fence_create(struct nouveau_drm *drm) mutex_init(&priv->mutex); /* Use VRAM if there is any ; otherwise fallback to system memory */ - domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : - /* - * fences created in sysmem must be non-cached or we - * will lose CPU/GPU coherency! - */ - TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; + domain = drm->client.device.info.ram_size != 0 ? 
+ NOUVEAU_GEM_DOMAIN_VRAM : + /* + * fences created in sysmem must be non-cached or we + * will lose CPU/GPU coherency! + */ + NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT; ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0, domain, 0, 0, NULL, NULL, &priv->bo); if (ret == 0) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 9f4ac2672cf2..7851bec5f0e5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2924,17 +2924,34 @@ nvkm_device_del(struct nvkm_device **pdevice) } } +/* returns true if the GPU is in the CPU native byte order */ static inline bool nvkm_device_endianness(struct nvkm_device *device) { - u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001; #ifdef __BIG_ENDIAN - if (!boot1) - return false; + const bool big_endian = true; #else - if (boot1) - return false; + const bool big_endian = false; #endif + + /* Read NV_PMC_BOOT_1, and assume non-functional endian switch if it + * doesn't contain the expected values. + */ + u32 pmc_boot_1 = nvkm_rd32(device, 0x000004); + if (pmc_boot_1 && pmc_boot_1 != 0x01000001) + return !big_endian; /* Assume GPU is LE in this case. */ + + /* 0 means LE and 0x01000001 means BE GPU. Condition is true when + * GPU/CPU endianness doesn't match. + */ + if (big_endian == !pmc_boot_1) { + nvkm_wr32(device, 0x000004, 0x01000001); + nvkm_rd32(device, 0x000000); + if (nvkm_rd32(device, 0x000004) != (big_endian ? 0x01000001 : 0x00000000)) + return !big_endian; /* Assume GPU is LE on any unexpected read-back. */ + } + + /* CPU/GPU endianness should (hopefully) match. */ return true; } @@ -2987,14 +3004,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func, if (detect) { /* switch mmio to cpu's native endianness */ if (!nvkm_device_endianness(device)) { - nvkm_wr32(device, 0x000004, 0x01000001); - nvkm_rd32(device, 0x000000); - if (!nvkm_device_endianness(device)) { - nvdev_error(device, - "GPU not supported on big-endian\n"); - ret = -ENOSYS; - goto done; - } + nvdev_error(device, + "Couldn't switch GPU to CPU's endianness\n"); + ret = -ENOSYS; + goto done; } boot0 = nvkm_rd32(device, 0x000000); @@ -3149,6 +3162,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; default: nvdev_error(device, "unknown chipset (%08x)\n", boot0); + ret = -ENODEV; goto done; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index 985f2990ab0d..13d4d7ac0697 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -594,8 +594,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index, nvkm_info(&imem->base.subdev, "using IOMMU\n"); } else { - imem->attrs = DMA_ATTR_NON_CONSISTENT | - DMA_ATTR_WEAK_ORDERING | + imem->attrs = DMA_ATTR_WEAK_ORDERING | DMA_ATTR_WRITE_COMBINE; nvkm_info(&imem->base.subdev, "using DMA API\n"); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 328a4a74f534..49621b2e1ab5 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -436,7 +436,7 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc) } static void omap_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct omap_drm_private *priv = crtc->dev->dev_private; struct omap_crtc *omap_crtc =
to_omap_crtc(crtc); @@ -462,7 +462,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, } static void omap_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct omap_drm_private *priv = crtc->dev->dev_private; struct omap_crtc *omap_crtc = to_omap_crtc(crtc); @@ -569,22 +569,25 @@ static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc) } static int omap_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_plane_state *pri_state; - if (state->color_mgmt_changed && state->gamma_lut) { - unsigned int length = state->gamma_lut->length / + if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) { + unsigned int length = crtc_state->gamma_lut->length / sizeof(struct drm_color_lut); if (length < 2) return -EINVAL; } - pri_state = drm_atomic_get_new_plane_state(state->state, crtc->primary); + pri_state = drm_atomic_get_new_plane_state(state, + crtc->primary); if (pri_state) { struct omap_crtc_state *omap_crtc_state = - to_omap_crtc_state(state); + to_omap_crtc_state(crtc_state); /* Mirror new values for zpos and rotation in omap_crtc_state */ omap_crtc_state->zpos = pri_state->zpos; @@ -598,12 +601,12 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc, } static void omap_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } static void omap_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct omap_drm_private *priv = crtc->dev->dev_private; struct omap_crtc *omap_crtc = to_omap_crtc(crtc); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 53d5e184ee77..2e598b8b72af 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -521,12 +521,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file) return 0; } -static const struct vm_operations_struct omap_gem_vm_ops = { - .fault = omap_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct file_operations omapdriver_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -549,10 +543,7 @@ static struct drm_driver omap_drm_driver = { #endif .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = omap_gem_prime_export, .gem_prime_import = omap_gem_prime_import, - .gem_free_object_unlocked = omap_gem_free_object, - .gem_vm_ops = &omap_gem_vm_ops, .dumb_create = omap_gem_dumb_create, .dumb_map_offset = omap_gem_dumb_map_offset, .ioctls = ioctls, diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index d0d12d5dd76c..d8e09792793a 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -487,7 +487,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj, * vma->vm_private_data points to the GEM object that is backing this * mapping. 
*/ -vm_fault_t omap_gem_fault(struct vm_fault *vmf) +static vm_fault_t omap_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; @@ -1089,7 +1089,7 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m) * Constructor & Destructor */ -void omap_gem_free_object(struct drm_gem_object *obj) +static void omap_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct omap_drm_private *priv = dev->dev_private; @@ -1169,6 +1169,18 @@ static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags) return true; } +static const struct vm_operations_struct omap_gem_vm_ops = { + .fault = omap_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs omap_gem_object_funcs = { + .free = omap_gem_free_object, + .export = omap_gem_prime_export, + .vm_ops = &omap_gem_vm_ops, +}; + /* GEM buffer object constructor */ struct drm_gem_object *omap_gem_new(struct drm_device *dev, union omap_gem_size gsize, u32 flags) @@ -1236,6 +1248,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, size = PAGE_ALIGN(gsize.bytes); } + obj->funcs = &omap_gem_object_funcs; + /* Initialize the GEM object. */ if (!(flags & OMAP_BO_MEM_SHMEM)) { drm_gem_private_object_init(dev, obj, size); @@ -1297,10 +1311,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj->dma_addr = sg_dma_address(sgt->sgl); } else { /* Create pages list from sgt */ - struct sg_page_iter iter; struct page **pages; unsigned int npages; - unsigned int i = 0; + unsigned int ret; npages = DIV_ROUND_UP(size, PAGE_SIZE); pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); @@ -1311,14 +1324,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, } omap_obj->pages = pages; - - for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { - pages[i++] = sg_page_iter_page(&iter); - if (i > npages) - break; - } - - if (WARN_ON(i != npages)) { + ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, + npages); + if (ret) { omap_gem_free_object(obj); obj = ERR_PTR(-ENOMEM); goto done; diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h index 729b7812a815..eda9b4839c30 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.h +++ b/drivers/gpu/drm/omapdrm/omap_gem.h @@ -48,7 +48,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, struct sg_table *sgt); int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file, union omap_gem_size gsize, u32 flags, u32 *handle); -void omap_gem_free_object(struct drm_gem_object *obj); void *omap_gem_vaddr(struct drm_gem_object *obj); /* Dumb Buffers Interface */ @@ -69,7 +68,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags); struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, struct dma_buf *buffer); -vm_fault_t omap_gem_fault(struct vm_fault *vmf); int omap_gem_roll(struct drm_gem_object *obj, u32 roll); void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff); void omap_gem_dma_sync_buffer(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 8d97d07c5871..e386524b2d77 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -208,6 +208,16 @@ config DRM_PANEL_NOVATEK_NT35510 around the Novatek NT35510 display controller, such as some Hydis panels. 
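The nouveau prime-import path and the omapdrm constructor above both switch from driver-level GEM callbacks (.gem_free_object_unlocked, .gem_vm_ops, .gem_prime_export in struct drm_driver) to a per-object drm_gem_object_funcs table attached at buffer creation. A minimal sketch of the pattern, assuming a hypothetical mydrv driver; every mydrv_* name is a placeholder and not part of this series:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

static vm_fault_t mydrv_gem_fault(struct vm_fault *vmf)
{
	/* A real driver would insert its backing pages here. */
	return VM_FAULT_SIGBUS;
}

static void mydrv_gem_free_object(struct drm_gem_object *obj)
{
	drm_gem_object_release(obj);
	kfree(obj);
}

static const struct vm_operations_struct mydrv_gem_vm_ops = {
	.fault = mydrv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs mydrv_gem_object_funcs = {
	.free = mydrv_gem_free_object,
	.export = drm_gem_prime_export,
	.vm_ops = &mydrv_gem_vm_ops,
};

struct drm_gem_object *mydrv_gem_new(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* The DRM core now looks up free/export/vm_ops on the object. */
	obj->funcs = &mydrv_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

Because the callbacks live on the object, drivers like omapdrm can also make omap_gem_fault() and omap_gem_free_object() static, as the hunks above do.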
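Relatedly, omap_gem_new_dmabuf() above replaces its open-coded for_each_sg_page() loop with drm_prime_sg_to_page_addr_arrays(). A short sketch of how that helper is used, with my_import_pages() as a hypothetical caller:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <drm/drm_prime.h>

static struct page **my_import_pages(struct sg_table *sgt, size_t size)
{
	unsigned int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct page **pages;
	int ret;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Fills the page array; fails if the sg_table covers more than npages pages. */
	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
	if (ret) {
		kfree(pages);
		return ERR_PTR(ret);
	}

	return pages;
}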
+config DRM_PANEL_NOVATEK_NT36672A + tristate "Novatek NT36672A DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the panels built + around the Novatek NT36672A display controller, such as some + Tianma panels used in a few Xiaomi Poco F1 mobile phones. + config DRM_PANEL_NOVATEK_NT39016 tristate "Novatek NT39016 RGB/SPI panel" depends on OF && SPI @@ -324,13 +334,30 @@ config DRM_PANEL_SAMSUNG_S6E63J0X03 select VIDEOMODE_HELPERS config DRM_PANEL_SAMSUNG_S6E63M0 - tristate "Samsung S6E63M0 RGB/SPI panel" + tristate "Samsung S6E63M0 RGB panel" depends on OF - depends on SPI depends on BACKLIGHT_CLASS_DEVICE help Say Y here if you want to enable support for Samsung S6E63M0 - AMOLED LCD panel. + AMOLED LCD panel. This panel can be accessed using SPI or + DSI. + +config DRM_PANEL_SAMSUNG_S6E63M0_SPI + tristate "Samsung S6E63M0 RGB SPI interface" + depends on SPI + depends on DRM_PANEL_SAMSUNG_S6E63M0 + default DRM_PANEL_SAMSUNG_S6E63M0 + help + Say Y here if you want to be able to access the Samsung + S6E63M0 panel using SPI. + +config DRM_PANEL_SAMSUNG_S6E63M0_DSI + tristate "Samsung S6E63M0 RGB DSI interface" + depends on DRM_MIPI_DSI + depends on DRM_PANEL_SAMSUNG_S6E63M0 + help + Say Y here if you want to be able to access the Samsung + S6E63M0 panel using DSI. config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller" @@ -433,6 +460,17 @@ config DRM_PANEL_SONY_ACX565AKM Say Y here if you want to enable support for the Sony ACX565AKM 800x600 3.5" panel (found on the Nokia N900). +config DRM_PANEL_TDO_TL070WSH30 + tristate "TDO TL070WSH30 DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for TDO TL070WSH30 TFT-LCD + panel module. The panel has a 1024×600 resolution and uses + 24 bit RGB per pixel. It provides a MIPI DSI interface to + the host, a built-in LED backlight and touch controller. 
+ config DRM_PANEL_TPO_TD028TTEC1 tristate "Toppoly (TPO) TD028TTEC1 panel driver" depends on OF && SPI diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 15a4e7752951..d1f8cc572f37 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o +obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o @@ -34,6 +35,8 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o @@ -45,6 +48,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o +obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c index 3482e28e30fc..0c5f22e95c2d 100644 --- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c +++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c @@ -26,7 +26,9 @@ struct mantix { struct device *dev; struct drm_panel panel; + struct gpio_desc *reset_gpio; + struct gpio_desc *tp_rstn_gpio; struct regulator *avdd; struct regulator *avee; @@ -124,6 +126,10 @@ static int mantix_unprepare(struct drm_panel *panel) { struct mantix *ctx = panel_to_mantix(panel); + gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_disable(ctx->avee); regulator_disable(ctx->avdd); /* T11 */ @@ -165,13 +171,10 @@ static int mantix_prepare(struct drm_panel *panel) return ret; } - /* T3+T5 */ - usleep_range(10000, 12000); - - gpiod_set_value_cansleep(ctx->reset_gpio, 1); - usleep_range(5150, 7000); - + /* T3 + T4 + time for voltage to become stable: */ + usleep_range(6000, 7000); gpiod_set_value_cansleep(ctx->reset_gpio, 0); + gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 0); /* T6 */ msleep(50); @@ -204,7 +207,7 @@ static int mantix_get_modes(struct drm_panel *panel, if (!mode) { dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, - drm_mode_vrefresh(mode)); + drm_mode_vrefresh(&default_mode)); return -ENOMEM; } @@ -236,12 +239,18 @@ static int mantix_probe(struct 
mipi_dsi_device *dsi) if (!ctx) return -ENOMEM; - ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ctx->reset_gpio)) { dev_err(dev, "cannot get reset gpio\n"); return PTR_ERR(ctx->reset_gpio); } + ctx->tp_rstn_gpio = devm_gpiod_get(dev, "mantix,tp-rstn", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->tp_rstn_gpio)) { + dev_err(dev, "cannot get tp-rstn gpio\n"); + return PTR_ERR(ctx->tp_rstn_gpio); + } + mipi_dsi_set_drvdata(dsi, ctx); ctx->dev = dev; diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c new file mode 100644 index 000000000000..533cd3934b8b --- /dev/null +++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2020 Linaro Ltd + * Author: Sumit Semwal <sumit.semwal@linaro.org> + * + * This driver is for the DSI interface to panels using the NT36672A display driver IC + * from Novatek. + * Currently supported are the Tianma FHD+ panels found in some Xiaomi phones, including + * some variants of the Poco F1 phone. + * + * Panels using the Novatek NT37762A IC should add appropriate configuration per-panel and + * use this driver. + */ + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> + +#include <linux/gpio/consumer.h> +#include <linux/pinctrl/consumer.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_device.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#include <video/mipi_display.h> + +struct nt36672a_panel_cmd { + const char data[2]; +}; + +static const char * const nt36672a_regulator_names[] = { + "vddio", + "vddpos", + "vddneg", +}; + +static unsigned long const nt36672a_regulator_enable_loads[] = { + 62000, + 100000, + 100000 +}; + +struct nt36672a_panel_desc { + const struct drm_display_mode *display_mode; + const char *panel_name; + + unsigned int width_mm; + unsigned int height_mm; + + unsigned long mode_flags; + enum mipi_dsi_pixel_format format; + unsigned int lanes; + + unsigned int num_on_cmds_1; + const struct nt36672a_panel_cmd *on_cmds_1; + unsigned int num_on_cmds_2; + const struct nt36672a_panel_cmd *on_cmds_2; + + unsigned int num_off_cmds; + const struct nt36672a_panel_cmd *off_cmds; +}; + +struct nt36672a_panel { + struct drm_panel base; + struct mipi_dsi_device *link; + const struct nt36672a_panel_desc *desc; + + struct regulator_bulk_data supplies[ARRAY_SIZE(nt36672a_regulator_names)]; + + struct gpio_desc *reset_gpio; + + bool prepared; +}; + +static inline struct nt36672a_panel *to_nt36672a_panel(struct drm_panel *panel) +{ + return container_of(panel, struct nt36672a_panel, base); +} + +static int nt36672a_send_cmds(struct drm_panel *panel, const struct nt36672a_panel_cmd *cmds, + int num) +{ + struct nt36672a_panel *pinfo = to_nt36672a_panel(panel); + unsigned int i; + int err; + + for (i = 0; i < num; i++) { + const struct nt36672a_panel_cmd *cmd = &cmds[i]; + + err = mipi_dsi_dcs_write(pinfo->link, cmd->data[0], cmd->data + 1, 1); + + if (err < 0) + return err; + } + + return 0; +} + +static int nt36672a_panel_power_off(struct drm_panel *panel) +{ + struct nt36672a_panel *pinfo = to_nt36672a_panel(panel); + int ret = 0; + + gpiod_set_value(pinfo->reset_gpio, 1); + + ret = regulator_bulk_disable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies); + if (ret) + dev_err(panel->dev, "regulator_bulk_disable failed 
%d\n", ret); + + return ret; +} + +static int nt36672a_panel_unprepare(struct drm_panel *panel) +{ + struct nt36672a_panel *pinfo = to_nt36672a_panel(panel); + int ret; + + if (!pinfo->prepared) + return 0; + + /* send off cmds */ + ret = nt36672a_send_cmds(panel, pinfo->desc->off_cmds, + pinfo->desc->num_off_cmds); + + if (ret < 0) + dev_err(panel->dev, "failed to send DCS off cmds: %d\n", ret); + + ret = mipi_dsi_dcs_set_display_off(pinfo->link); + if (ret < 0) + dev_err(panel->dev, "set_display_off cmd failed ret = %d\n", ret); + + /* 120ms delay required here as per DCS spec */ + msleep(120); + + ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->link); + if (ret < 0) + dev_err(panel->dev, "enter_sleep cmd failed ret = %d\n", ret); + + /* 0x3C = 60ms delay */ + msleep(60); + + ret = nt36672a_panel_power_off(panel); + if (ret < 0) + dev_err(panel->dev, "power_off failed ret = %d\n", ret); + + pinfo->prepared = false; + + return ret; +} + +static int nt36672a_panel_power_on(struct nt36672a_panel *pinfo) +{ + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies); + if (ret < 0) + return ret; + + /* + * As per downstream kernel, Reset sequence of Tianma FHD panel requires the panel to + * be out of reset for 10ms, followed by being held in reset for 10ms. But for Android + * AOSP, we needed to bump it upto 200ms otherwise we get white screen sometimes. + * FIXME: Try to reduce this 200ms to a lesser value. + */ + gpiod_set_value(pinfo->reset_gpio, 1); + msleep(200); + gpiod_set_value(pinfo->reset_gpio, 0); + msleep(200); + + return 0; +} + +static int nt36672a_panel_prepare(struct drm_panel *panel) +{ + struct nt36672a_panel *pinfo = to_nt36672a_panel(panel); + int err; + + if (pinfo->prepared) + return 0; + + err = nt36672a_panel_power_on(pinfo); + if (err < 0) + goto poweroff; + + /* send first part of init cmds */ + err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_1, + pinfo->desc->num_on_cmds_1); + + if (err < 0) { + dev_err(panel->dev, "failed to send DCS Init 1st Code: %d\n", err); + goto poweroff; + } + + err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link); + if (err < 0) { + dev_err(panel->dev, "failed to exit sleep mode: %d\n", err); + goto poweroff; + } + + /* 0x46 = 70 ms delay */ + msleep(70); + + err = mipi_dsi_dcs_set_display_on(pinfo->link); + if (err < 0) { + dev_err(panel->dev, "failed to Set Display ON: %d\n", err); + goto poweroff; + } + + /* Send rest of the init cmds */ + err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_2, + pinfo->desc->num_on_cmds_2); + + if (err < 0) { + dev_err(panel->dev, "failed to send DCS Init 2nd Code: %d\n", err); + goto poweroff; + } + + msleep(120); + + pinfo->prepared = true; + + return 0; + +poweroff: + gpiod_set_value(pinfo->reset_gpio, 0); + return err; +} + +static int nt36672a_panel_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct nt36672a_panel *pinfo = to_nt36672a_panel(panel); + const struct drm_display_mode *m = pinfo->desc->display_mode; + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, m); + if (!mode) { + dev_err(panel->dev, "failed to add mode %ux%u@%u\n", m->hdisplay, + m->vdisplay, drm_mode_vrefresh(m)); + return -ENOMEM; + } + + connector->display_info.width_mm = pinfo->desc->width_mm; + connector->display_info.height_mm = pinfo->desc->height_mm; + + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs panel_funcs = { + .unprepare = nt36672a_panel_unprepare, + 
.prepare = nt36672a_panel_prepare, + .get_modes = nt36672a_panel_get_modes, +}; + +static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_1[] = { + /* skin enhancement mode */ + { .data = {0xFF, 0x22} }, + { .data = {0x00, 0x40} }, + { .data = {0x01, 0xC0} }, + { .data = {0x02, 0x40} }, + { .data = {0x03, 0x40} }, + { .data = {0x04, 0x40} }, + { .data = {0x05, 0x40} }, + { .data = {0x06, 0x40} }, + { .data = {0x07, 0x40} }, + { .data = {0x08, 0x40} }, + { .data = {0x09, 0x40} }, + { .data = {0x0A, 0x40} }, + { .data = {0x0B, 0x40} }, + { .data = {0x0C, 0x40} }, + { .data = {0x0D, 0x40} }, + { .data = {0x0E, 0x40} }, + { .data = {0x0F, 0x40} }, + { .data = {0x10, 0x40} }, + { .data = {0x11, 0x50} }, + { .data = {0x12, 0x60} }, + { .data = {0x13, 0x70} }, + { .data = {0x14, 0x58} }, + { .data = {0x15, 0x68} }, + { .data = {0x16, 0x78} }, + { .data = {0x17, 0x77} }, + { .data = {0x18, 0x39} }, + { .data = {0x19, 0x2D} }, + { .data = {0x1A, 0x2E} }, + { .data = {0x1B, 0x32} }, + { .data = {0x1C, 0x37} }, + { .data = {0x1D, 0x3A} }, + { .data = {0x1E, 0x40} }, + { .data = {0x1F, 0x40} }, + { .data = {0x20, 0x40} }, + { .data = {0x21, 0x40} }, + { .data = {0x22, 0x40} }, + { .data = {0x23, 0x40} }, + { .data = {0x24, 0x40} }, + { .data = {0x25, 0x40} }, + { .data = {0x26, 0x40} }, + { .data = {0x27, 0x40} }, + { .data = {0x28, 0x40} }, + { .data = {0x2D, 0x00} }, + { .data = {0x2F, 0x40} }, + { .data = {0x30, 0x40} }, + { .data = {0x31, 0x40} }, + { .data = {0x32, 0x40} }, + { .data = {0x33, 0x40} }, + { .data = {0x34, 0x40} }, + { .data = {0x35, 0x40} }, + { .data = {0x36, 0x40} }, + { .data = {0x37, 0x40} }, + { .data = {0x38, 0x40} }, + { .data = {0x39, 0x40} }, + { .data = {0x3A, 0x40} }, + { .data = {0x3B, 0x40} }, + { .data = {0x3D, 0x40} }, + { .data = {0x3F, 0x40} }, + { .data = {0x40, 0x40} }, + { .data = {0x41, 0x40} }, + { .data = {0x42, 0x40} }, + { .data = {0x43, 0x40} }, + { .data = {0x44, 0x40} }, + { .data = {0x45, 0x40} }, + { .data = {0x46, 0x40} }, + { .data = {0x47, 0x40} }, + { .data = {0x48, 0x40} }, + { .data = {0x49, 0x40} }, + { .data = {0x4A, 0x40} }, + { .data = {0x4B, 0x40} }, + { .data = {0x4C, 0x40} }, + { .data = {0x4D, 0x40} }, + { .data = {0x4E, 0x40} }, + { .data = {0x4F, 0x40} }, + { .data = {0x50, 0x40} }, + { .data = {0x51, 0x40} }, + { .data = {0x52, 0x40} }, + { .data = {0x53, 0x01} }, + { .data = {0x54, 0x01} }, + { .data = {0x55, 0xFE} }, + { .data = {0x56, 0x77} }, + { .data = {0x58, 0xCD} }, + { .data = {0x59, 0xD0} }, + { .data = {0x5A, 0xD0} }, + { .data = {0x5B, 0x50} }, + { .data = {0x5C, 0x50} }, + { .data = {0x5D, 0x50} }, + { .data = {0x5E, 0x50} }, + { .data = {0x5F, 0x50} }, + { .data = {0x60, 0x50} }, + { .data = {0x61, 0x50} }, + { .data = {0x62, 0x50} }, + { .data = {0x63, 0x50} }, + { .data = {0x64, 0x50} }, + { .data = {0x65, 0x50} }, + { .data = {0x66, 0x50} }, + { .data = {0x67, 0x50} }, + { .data = {0x68, 0x50} }, + { .data = {0x69, 0x50} }, + { .data = {0x6A, 0x50} }, + { .data = {0x6B, 0x50} }, + { .data = {0x6C, 0x50} }, + { .data = {0x6D, 0x50} }, + { .data = {0x6E, 0x50} }, + { .data = {0x6F, 0x50} }, + { .data = {0x70, 0x07} }, + { .data = {0x71, 0x00} }, + { .data = {0x72, 0x00} }, + { .data = {0x73, 0x00} }, + { .data = {0x74, 0x06} }, + { .data = {0x75, 0x0C} }, + { .data = {0x76, 0x03} }, + { .data = {0x77, 0x09} }, + { .data = {0x78, 0x0F} }, + { .data = {0x79, 0x68} }, + { .data = {0x7A, 0x88} }, + { .data = {0x7C, 0x80} }, + { .data = {0x7D, 0x80} }, + { .data = {0x7E, 0x80} }, + { .data = {0x7F, 0x00} }, + { .data 
= {0x80, 0x00} }, + { .data = {0x81, 0x00} }, + { .data = {0x83, 0x01} }, + { .data = {0x84, 0x00} }, + { .data = {0x85, 0x80} }, + { .data = {0x86, 0x80} }, + { .data = {0x87, 0x80} }, + { .data = {0x88, 0x40} }, + { .data = {0x89, 0x91} }, + { .data = {0x8A, 0x98} }, + { .data = {0x8B, 0x80} }, + { .data = {0x8C, 0x80} }, + { .data = {0x8D, 0x80} }, + { .data = {0x8E, 0x80} }, + { .data = {0x8F, 0x80} }, + { .data = {0x90, 0x80} }, + { .data = {0x91, 0x80} }, + { .data = {0x92, 0x80} }, + { .data = {0x93, 0x80} }, + { .data = {0x94, 0x80} }, + { .data = {0x95, 0x80} }, + { .data = {0x96, 0x80} }, + { .data = {0x97, 0x80} }, + { .data = {0x98, 0x80} }, + { .data = {0x99, 0x80} }, + { .data = {0x9A, 0x80} }, + { .data = {0x9B, 0x80} }, + { .data = {0x9C, 0x80} }, + { .data = {0x9D, 0x80} }, + { .data = {0x9E, 0x80} }, + { .data = {0x9F, 0x80} }, + { .data = {0xA0, 0x8A} }, + { .data = {0xA2, 0x80} }, + { .data = {0xA6, 0x80} }, + { .data = {0xA7, 0x80} }, + { .data = {0xA9, 0x80} }, + { .data = {0xAA, 0x80} }, + { .data = {0xAB, 0x80} }, + { .data = {0xAC, 0x80} }, + { .data = {0xAD, 0x80} }, + { .data = {0xAE, 0x80} }, + { .data = {0xAF, 0x80} }, + { .data = {0xB7, 0x76} }, + { .data = {0xB8, 0x76} }, + { .data = {0xB9, 0x05} }, + { .data = {0xBA, 0x0D} }, + { .data = {0xBB, 0x14} }, + { .data = {0xBC, 0x0F} }, + { .data = {0xBD, 0x18} }, + { .data = {0xBE, 0x1F} }, + { .data = {0xBF, 0x05} }, + { .data = {0xC0, 0x0D} }, + { .data = {0xC1, 0x14} }, + { .data = {0xC2, 0x03} }, + { .data = {0xC3, 0x07} }, + { .data = {0xC4, 0x0A} }, + { .data = {0xC5, 0xA0} }, + { .data = {0xC6, 0x55} }, + { .data = {0xC7, 0xFF} }, + { .data = {0xC8, 0x39} }, + { .data = {0xC9, 0x44} }, + { .data = {0xCA, 0x12} }, + { .data = {0xCD, 0x80} }, + { .data = {0xDB, 0x80} }, + { .data = {0xDC, 0x80} }, + { .data = {0xDD, 0x80} }, + { .data = {0xE0, 0x80} }, + { .data = {0xE1, 0x80} }, + { .data = {0xE2, 0x80} }, + { .data = {0xE3, 0x80} }, + { .data = {0xE4, 0x80} }, + { .data = {0xE5, 0x40} }, + { .data = {0xE6, 0x40} }, + { .data = {0xE7, 0x40} }, + { .data = {0xE8, 0x40} }, + { .data = {0xE9, 0x40} }, + { .data = {0xEA, 0x40} }, + { .data = {0xEB, 0x40} }, + { .data = {0xEC, 0x40} }, + { .data = {0xED, 0x40} }, + { .data = {0xEE, 0x40} }, + { .data = {0xEF, 0x40} }, + { .data = {0xF0, 0x40} }, + { .data = {0xF1, 0x40} }, + { .data = {0xF2, 0x40} }, + { .data = {0xF3, 0x40} }, + { .data = {0xF4, 0x40} }, + { .data = {0xF5, 0x40} }, + { .data = {0xF6, 0x40} }, + { .data = {0xFB, 0x1} }, + { .data = {0xFF, 0x23} }, + { .data = {0xFB, 0x01} }, + /* dimming enable */ + { .data = {0x01, 0x84} }, + { .data = {0x05, 0x2D} }, + { .data = {0x06, 0x00} }, + /* resolution 1080*2246 */ + { .data = {0x11, 0x01} }, + { .data = {0x12, 0x7B} }, + { .data = {0x15, 0x6F} }, + { .data = {0x16, 0x0B} }, + /* UI mode */ + { .data = {0x29, 0x0A} }, + { .data = {0x30, 0xFF} }, + { .data = {0x31, 0xFF} }, + { .data = {0x32, 0xFF} }, + { .data = {0x33, 0xFF} }, + { .data = {0x34, 0xFF} }, + { .data = {0x35, 0xFF} }, + { .data = {0x36, 0xFF} }, + { .data = {0x37, 0xFF} }, + { .data = {0x38, 0xFC} }, + { .data = {0x39, 0xF8} }, + { .data = {0x3A, 0xF4} }, + { .data = {0x3B, 0xF1} }, + { .data = {0x3D, 0xEE} }, + { .data = {0x3F, 0xEB} }, + { .data = {0x40, 0xE8} }, + { .data = {0x41, 0xE5} }, + /* STILL mode */ + { .data = {0x2A, 0x13} }, + { .data = {0x45, 0xFF} }, + { .data = {0x46, 0xFF} }, + { .data = {0x47, 0xFF} }, + { .data = {0x48, 0xFF} }, + { .data = {0x49, 0xFF} }, + { .data = {0x4A, 0xFF} }, + { .data = {0x4B, 0xFF} }, + { 
.data = {0x4C, 0xFF} }, + { .data = {0x4D, 0xED} }, + { .data = {0x4E, 0xD5} }, + { .data = {0x4F, 0xBF} }, + { .data = {0x50, 0xA6} }, + { .data = {0x51, 0x96} }, + { .data = {0x52, 0x86} }, + { .data = {0x53, 0x76} }, + { .data = {0x54, 0x66} }, + /* MOVING mode */ + { .data = {0x2B, 0x0E} }, + { .data = {0x58, 0xFF} }, + { .data = {0x59, 0xFF} }, + { .data = {0x5A, 0xFF} }, + { .data = {0x5B, 0xFF} }, + { .data = {0x5C, 0xFF} }, + { .data = {0x5D, 0xFF} }, + { .data = {0x5E, 0xFF} }, + { .data = {0x5F, 0xFF} }, + { .data = {0x60, 0xF6} }, + { .data = {0x61, 0xEA} }, + { .data = {0x62, 0xE1} }, + { .data = {0x63, 0xD8} }, + { .data = {0x64, 0xCE} }, + { .data = {0x65, 0xC3} }, + { .data = {0x66, 0xBA} }, + { .data = {0x67, 0xB3} }, + { .data = {0xFF, 0x25} }, + { .data = {0xFB, 0x01} }, + { .data = {0x05, 0x04} }, + { .data = {0xFF, 0x26} }, + { .data = {0xFB, 0x01} }, + { .data = {0x1C, 0xAF} }, + { .data = {0xFF, 0x10} }, + { .data = {0xFB, 0x01} }, + { .data = {0x51, 0xFF} }, + { .data = {0x53, 0x24} }, + { .data = {0x55, 0x00} }, +}; + +static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_2[] = { + { .data = {0xFF, 0x24} }, + { .data = {0xFB, 0x01} }, + { .data = {0xC3, 0x01} }, + { .data = {0xC4, 0x54} }, + { .data = {0xFF, 0x10} }, +}; + +static const struct nt36672a_panel_cmd tianma_fhd_video_off_cmds[] = { + { .data = {0xFF, 0x24} }, + { .data = {0xFB, 0x01} }, + { .data = {0xC3, 0x01} }, + { .data = {0xFF, 0x10} }, +}; + +static const struct drm_display_mode tianma_fhd_video_panel_default_mode = { + .clock = 161331, + + .hdisplay = 1080, + .hsync_start = 1080 + 40, + .hsync_end = 1080 + 40 + 20, + .htotal = 1080 + 40 + 20 + 44, + + .vdisplay = 2246, + .vsync_start = 2246 + 15, + .vsync_end = 2246 + 15 + 2, + .vtotal = 2246 + 15 + 2 + 8, + + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static const struct nt36672a_panel_desc tianma_fhd_video_panel_desc = { + .display_mode = &tianma_fhd_video_panel_default_mode, + + .width_mm = 68, + .height_mm = 136, + + .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO + | MIPI_DSI_MODE_VIDEO_HSE + | MIPI_DSI_CLOCK_NON_CONTINUOUS + | MIPI_DSI_MODE_VIDEO_BURST, + .format = MIPI_DSI_FMT_RGB888, + .lanes = 4, + .on_cmds_1 = tianma_fhd_video_on_cmds_1, + .num_on_cmds_1 = ARRAY_SIZE(tianma_fhd_video_on_cmds_1), + .on_cmds_2 = tianma_fhd_video_on_cmds_2, + .num_on_cmds_2 = ARRAY_SIZE(tianma_fhd_video_on_cmds_2), + .off_cmds = tianma_fhd_video_off_cmds, + .num_off_cmds = ARRAY_SIZE(tianma_fhd_video_off_cmds), +}; + +static int nt36672a_panel_add(struct nt36672a_panel *pinfo) +{ + struct device *dev = &pinfo->link->dev; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) + pinfo->supplies[i].supply = nt36672a_regulator_names[i]; + + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pinfo->supplies), + pinfo->supplies); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) { + ret = regulator_set_load(pinfo->supplies[i].consumer, + nt36672a_regulator_enable_loads[i]); + if (ret) + return dev_err_probe(dev, ret, "failed to set regulator enable loads\n"); + } + + pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(pinfo->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio), + "failed to get reset gpio from DT\n"); + + drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI); + + drm_panel_add(&pinfo->base); + + return 0; +} + +static int nt36672a_panel_probe(struct 
mipi_dsi_device *dsi) +{ + struct nt36672a_panel *pinfo; + const struct nt36672a_panel_desc *desc; + int err; + + pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + desc = of_device_get_match_data(&dsi->dev); + dsi->mode_flags = desc->mode_flags; + dsi->format = desc->format; + dsi->lanes = desc->lanes; + pinfo->desc = desc; + pinfo->link = dsi; + + mipi_dsi_set_drvdata(dsi, pinfo); + + err = nt36672a_panel_add(pinfo); + if (err < 0) + return err; + + return mipi_dsi_attach(dsi); +} + +static int nt36672a_panel_remove(struct mipi_dsi_device *dsi) +{ + struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi); + int err; + + err = drm_panel_unprepare(&pinfo->base); + if (err < 0) + dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err); + + err = drm_panel_disable(&pinfo->base); + if (err < 0) + dev_err(&dsi->dev, "failed to disable panel: %d\n", err); + + err = mipi_dsi_detach(dsi); + if (err < 0) + dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err); + + drm_panel_remove(&pinfo->base); + + return 0; +} + +static void nt36672a_panel_shutdown(struct mipi_dsi_device *dsi) +{ + struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi); + + drm_panel_disable(&pinfo->base); + drm_panel_unprepare(&pinfo->base); +} + +static const struct of_device_id tianma_fhd_video_of_match[] = { + { .compatible = "tianma,fhd-video", .data = &tianma_fhd_video_panel_desc }, + { }, +}; +MODULE_DEVICE_TABLE(of, tianma_fhd_video_of_match); + +static struct mipi_dsi_driver nt36672a_panel_driver = { + .driver = { + .name = "panel-tianma-nt36672a", + .of_match_table = tianma_fhd_video_of_match, + }, + .probe = nt36672a_panel_probe, + .remove = nt36672a_panel_remove, + .shutdown = nt36672a_panel_shutdown, +}; +module_mipi_dsi_driver(nt36672a_panel_driver); + +MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>"); +MODULE_DESCRIPTION("NOVATEK NT36672A based MIPI-DSI LCD panel driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c index b6e377aa1131..f80b44a8a700 100644 --- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c @@ -99,20 +99,6 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data, dev_warn(ctx->dev, "mipi dsi dcs write buffer failed\n"); } -static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data, - size_t len) -{ - struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - - /* data will be sent in dsi hs mode (ie. no lpm) */ - dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; - - otm8009a_dcs_write_buf(ctx, data, len); - - /* restore back the dsi lpm mode */ - dsi->mode_flags |= MIPI_DSI_MODE_LPM; -} - #define dcs_write_seq(ctx, seq...) 
\ ({ \ static const u8 d[] = { seq }; \ @@ -400,7 +386,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd) */ data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS; data[1] = bd->props.brightness; - otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data)); + otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data)); /* set Brightness Control & Backlight on */ data[1] = 0x24; @@ -412,7 +398,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd) /* Update Brightness Control & Backlight */ data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY; - otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data)); + otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data)); return 0; } @@ -452,7 +438,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi) dsi->lanes = 2; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM; + MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS; drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c index f908eeafb1af..412c0dbcb2b6 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c @@ -82,15 +82,15 @@ struct rm68200 { }; static const struct drm_display_mode default_mode = { - .clock = 52582, + .clock = 54000, .hdisplay = 720, - .hsync_start = 720 + 38, - .hsync_end = 720 + 38 + 8, - .htotal = 720 + 38 + 8 + 38, + .hsync_start = 720 + 48, + .hsync_end = 720 + 48 + 9, + .htotal = 720 + 48 + 9 + 48, .vdisplay = 1280, .vsync_start = 1280 + 12, - .vsync_end = 1280 + 12 + 4, - .vtotal = 1280 + 12 + 4 + 12, + .vsync_end = 1280 + 12 + 5, + .vtotal = 1280 + 12 + 5 + 12, .flags = 0, .width_mm = 68, .height_mm = 122, @@ -391,7 +391,7 @@ static int rm68200_probe(struct mipi_dsi_device *dsi) dsi->lanes = 2; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM; + MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS; drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index 535c8d1cca21..a3782830ae3c 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -75,13 +75,8 @@ static int rb070d30_panel_unprepare(struct drm_panel *panel) static int rb070d30_panel_enable(struct drm_panel *panel) { struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); - int ret; - ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi); - if (ret) - return ret; - - return 0; + return mipi_dsi_dcs_exit_sleep_mode(ctx->dsi); } static int rb070d30_panel_disable(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c index 1d1c79a18613..0ab1b7ec84cd 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c @@ -214,7 +214,7 @@ static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = { 0x00, 0x00 } }; -unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = { +static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = { 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21 }; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c new file mode 100644 index 000000000000..eec74c10ddda --- /dev/null 
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DSI interface to the Samsung S6E63M0 panel. + * (C) 2019 Linus Walleij + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/of_device.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> + +#include "panel-samsung-s6e63m0.h" + +#define MCS_GLOBAL_PARAM 0xb0 +#define S6E63M0_DSI_MAX_CHUNK 15 /* CMD + 15 bytes max */ + +static int s6e63m0_dsi_dcs_read(struct device *dev, const u8 cmd, u8 *data) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + int ret; + + ret = mipi_dsi_dcs_read(dsi, cmd, data, 1); + if (ret < 0) { + dev_err(dev, "could not read DCS CMD %02x\n", cmd); + return ret; + } + + dev_info(dev, "DSI read CMD %02x = %02x\n", cmd, *data); + + return 0; +} + +static int s6e63m0_dsi_dcs_write(struct device *dev, const u8 *data, size_t len) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + const u8 *seqp = data; + u8 cmd; + u8 cmdwritten; + int remain; + int chunk; + int ret; + + dev_info(dev, "DSI writing dcs seq: %*ph\n", (int)len, data); + + /* Pick out and skip past the DCS command */ + cmd = *seqp; + seqp++; + cmdwritten = 0; + remain = len - 1; + chunk = remain; + + /* Send max S6E63M0_DSI_MAX_CHUNK bytes at a time */ + if (chunk > S6E63M0_DSI_MAX_CHUNK) + chunk = S6E63M0_DSI_MAX_CHUNK; + ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk); + if (ret < 0) { + dev_err(dev, "error sending DCS command seq cmd %02x\n", cmd); + return ret; + } + cmdwritten += chunk; + seqp += chunk; + + while (cmdwritten < remain) { + chunk = remain - cmdwritten; + if (chunk > S6E63M0_DSI_MAX_CHUNK) + chunk = S6E63M0_DSI_MAX_CHUNK; + ret = mipi_dsi_dcs_write(dsi, MCS_GLOBAL_PARAM, &cmdwritten, 1); + if (ret < 0) { + dev_err(dev, "error sending CMD %02x global param %02x\n", + cmd, cmdwritten); + return ret; + } + ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk); + if (ret < 0) { + dev_err(dev, "error sending CMD %02x chunk\n", cmd); + return ret; + } + cmdwritten += chunk; + seqp += chunk; + } + dev_info(dev, "sent command %02x %02x bytes\n", cmd, cmdwritten); + + usleep_range(8000, 9000); + + return 0; +} + +static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + int ret; + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->hs_rate = 349440000; + dsi->lp_rate = 9600000; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | + MIPI_DSI_MODE_EOT_PACKET | + MIPI_DSI_MODE_VIDEO_BURST; + + ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write, + true); + if (ret) + return ret; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) + s6e63m0_remove(dev); + + return ret; +} + +static int s6e63m0_dsi_remove(struct mipi_dsi_device *dsi) +{ + mipi_dsi_detach(dsi); + return s6e63m0_remove(&dsi->dev); +} + +static const struct of_device_id s6e63m0_dsi_of_match[] = { + { .compatible = "samsung,s6e63m0" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s6e63m0_dsi_of_match); + +static struct mipi_dsi_driver s6e63m0_dsi_driver = { + .probe = s6e63m0_dsi_probe, + .remove = s6e63m0_dsi_remove, + .driver = { + .name = "panel-samsung-s6e63m0", + .of_match_table = s6e63m0_dsi_of_match, + }, +}; +module_mipi_dsi_driver(s6e63m0_dsi_driver); + +MODULE_AUTHOR("Linus Walleij <linusw@kernel.org>"); +MODULE_DESCRIPTION("s6e63m0 LCD DSI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c new file mode 100644 
index 000000000000..d298d780220d --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/delay.h> + +#include <drm/drm_print.h> + +#include "panel-samsung-s6e63m0.h" + +#define DATA_MASK 0x100 + +static int s6e63m0_spi_dcs_read(struct device *dev, const u8 cmd, u8 *data) +{ + /* + * FIXME: implement reading DCS commands over SPI so we can + * properly identify which physical panel is connected. + */ + *data = 0; + + return 0; +} + +static int s6e63m0_spi_write_word(struct device *dev, u16 data) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_transfer xfer = { + .len = 2, + .tx_buf = &data, + }; + struct spi_message msg; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(spi, &msg); +} + +static int s6e63m0_spi_dcs_write(struct device *dev, const u8 *data, size_t len) +{ + int ret = 0; + + dev_dbg(dev, "SPI writing dcs seq: %*ph\n", (int)len, data); + ret = s6e63m0_spi_write_word(dev, *data); + + while (!ret && --len) { + ++data; + ret = s6e63m0_spi_write_word(dev, *data | DATA_MASK); + } + + if (ret) { + dev_err(dev, "SPI error %d writing dcs seq: %*ph\n", ret, + (int)len, data); + } + + usleep_range(300, 310); + + return ret; +} + +static int s6e63m0_spi_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + int ret; + + spi->bits_per_word = 9; + spi->mode = SPI_MODE_3; + ret = spi_setup(spi); + if (ret < 0) { + dev_err(dev, "spi setup failed.\n"); + return ret; + } + return s6e63m0_probe(dev, s6e63m0_spi_dcs_read, s6e63m0_spi_dcs_write, + false); +} + +static int s6e63m0_spi_remove(struct spi_device *spi) +{ + return s6e63m0_remove(&spi->dev); +} + +static const struct of_device_id s6e63m0_spi_of_match[] = { + { .compatible = "samsung,s6e63m0" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s6e63m0_spi_of_match); + +static struct spi_driver s6e63m0_spi_driver = { + .probe = s6e63m0_spi_probe, + .remove = s6e63m0_spi_remove, + .driver = { + .name = "panel-samsung-s6e63m0", + .of_match_table = s6e63m0_spi_of_match, + }, +}; +module_spi_driver(s6e63m0_spi_driver); + +MODULE_AUTHOR("Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>"); +MODULE_DESCRIPTION("s6e63m0 LCD SPI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 2cc772fdc456..3eee67e2d86a 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -16,25 +16,34 @@ #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/regulator/consumer.h> -#include <linux/spi/spi.h> #include <video/mipi_display.h> +#include "panel-samsung-s6e63m0.h" + /* Manufacturer Command Set */ #define MCS_ELVSS_ON 0xb1 #define MCS_MIECTL1 0xc0 #define MCS_BCMODE 0xc1 +#define MCS_ERROR_CHECK 0xd5 +#define MCS_READ_ID1 0xda +#define MCS_READ_ID2 0xdb +#define MCS_READ_ID3 0xdc +#define MCS_LEVEL_2_KEY 0xf0 +#define MCS_MTP_KEY 0xf1 #define MCS_DISCTL 0xf2 #define MCS_SRCCTL 0xf6 #define MCS_IFCTL 0xf7 #define MCS_PANELCTL 0xF8 #define MCS_PGAMMACTL 0xfa +#define S6E63M0_LCD_ID_VALUE_M2 0xA4 +#define S6E63M0_LCD_ID_VALUE_SM2 0xB4 +#define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6 + #define NUM_GAMMA_LEVELS 11 #define GAMMA_TABLE_COUNT 23 -#define DATA_MASK 0x100 - #define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) /* array of gamma tables for gamma value 2.2 */ @@ -87,8 +96,11 
@@ static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = { struct s6e63m0 { struct device *dev; + int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val); + int (*dcs_write)(struct device *dev, const u8 *data, size_t len); struct drm_panel panel; struct backlight_device *bl_dev; + u8 lcd_type; struct regulator_bulk_data supplies[2]; struct gpio_desc *reset_gpio; @@ -134,42 +146,20 @@ static int s6e63m0_clear_error(struct s6e63m0 *ctx) return ret; } -static int s6e63m0_spi_write_word(struct s6e63m0 *ctx, u16 data) +static void s6e63m0_dcs_read(struct s6e63m0 *ctx, const u8 cmd, u8 *data) { - struct spi_device *spi = to_spi_device(ctx->dev); - struct spi_transfer xfer = { - .len = 2, - .tx_buf = &data, - }; - struct spi_message msg; - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); + if (ctx->error < 0) + return; - return spi_sync(spi, &msg); + ctx->error = ctx->dcs_read(ctx->dev, cmd, data); } static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len) { - int ret = 0; - if (ctx->error < 0 || len == 0) return; - dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data); - ret = s6e63m0_spi_write_word(ctx, *data); - - while (!ret && --len) { - ++data; - ret = s6e63m0_spi_write_word(ctx, *data | DATA_MASK); - } - - if (ret) { - dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, (int)len, data); - ctx->error = ret; - } - - usleep_range(300, 310); + ctx->error = ctx->dcs_write(ctx->dev, data, len); } #define s6e63m0_dcs_write_seq_static(ctx, seq ...) \ @@ -178,6 +168,43 @@ static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len) s6e63m0_dcs_write(ctx, d, ARRAY_SIZE(d)); \ }) +static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx) +{ + u8 id1, id2, id3; + int ret; + + s6e63m0_dcs_read(ctx, MCS_READ_ID1, &id1); + s6e63m0_dcs_read(ctx, MCS_READ_ID2, &id2); + s6e63m0_dcs_read(ctx, MCS_READ_ID3, &id3); + + ret = s6e63m0_clear_error(ctx); + if (ret) { + dev_err(ctx->dev, "error checking LCD type (%d)\n", ret); + ctx->lcd_type = 0x00; + return ret; + } + + dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); + + /* We attempt to detect what panel is mounted on the controller */ + switch (id2) { + case S6E63M0_LCD_ID_VALUE_M2: + dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n"); + break; + case S6E63M0_LCD_ID_VALUE_SM2: + case S6E63M0_LCD_ID_VALUE_SM2_1: + dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n"); + break; + default: + dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2); + break; + } + + ctx->lcd_type = id2; + + return 0; +} + static void s6e63m0_init(struct s6e63m0 *ctx) { s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL, @@ -249,8 +276,6 @@ static void s6e63m0_init(struct s6e63m0 *ctx) s6e63m0_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0b); - - s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE); } static int s6e63m0_power_on(struct s6e63m0 *ctx) @@ -263,6 +288,9 @@ static int s6e63m0_power_on(struct s6e63m0 *ctx) msleep(25); + /* Be sure to send a reset pulse */ + gpiod_set_value(ctx->reset_gpio, 1); + msleep(5); gpiod_set_value(ctx->reset_gpio, 0); msleep(120); @@ -292,8 +320,10 @@ static int s6e63m0_disable(struct drm_panel *panel) backlight_disable(ctx->bl_dev); + s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF); + msleep(10); s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE); - msleep(200); + msleep(120); ctx->enabled = false; @@ -331,6 +361,15 @@ static int s6e63m0_prepare(struct drm_panel *panel) if (ret < 0) return ret; + /* Magic to 
unlock level 2 control of the display */ + s6e63m0_dcs_write_seq_static(ctx, MCS_LEVEL_2_KEY, 0x5a, 0x5a); + /* Magic to unlock MTP reading */ + s6e63m0_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0x5a, 0x5a); + + ret = s6e63m0_check_lcd_type(ctx); + if (ret < 0) + return ret; + s6e63m0_init(ctx); ret = s6e63m0_clear_error(ctx); @@ -350,7 +389,15 @@ static int s6e63m0_enable(struct drm_panel *panel) if (ctx->enabled) return 0; + s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(120); s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON); + msleep(10); + + s6e63m0_dcs_write_seq_static(ctx, MCS_ERROR_CHECK, + 0xE7, 0x14, 0x60, 0x17, 0x0A, 0x49, 0xC3, + 0x8F, 0x19, 0x64, 0x91, 0x84, 0x76, 0x20, + 0x0F, 0x00); backlight_enable(ctx->bl_dev); @@ -429,9 +476,11 @@ static int s6e63m0_backlight_register(struct s6e63m0 *ctx) return ret; } -static int s6e63m0_probe(struct spi_device *spi) +int s6e63m0_probe(struct device *dev, + int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val), + int (*dcs_write)(struct device *dev, const u8 *data, size_t len), + bool dsi_mode) { - struct device *dev = &spi->dev; struct s6e63m0 *ctx; int ret; @@ -439,7 +488,9 @@ static int s6e63m0_probe(struct spi_device *spi) if (!ctx) return -ENOMEM; - spi_set_drvdata(spi, ctx); + ctx->dcs_read = dcs_read; + ctx->dcs_write = dcs_write; + dev_set_drvdata(dev, ctx); ctx->dev = dev; ctx->enabled = false; @@ -460,15 +511,8 @@ static int s6e63m0_probe(struct spi_device *spi) return PTR_ERR(ctx->reset_gpio); } - spi->bits_per_word = 9; - spi->mode = SPI_MODE_3; - ret = spi_setup(spi); - if (ret < 0) { - dev_err(dev, "spi setup failed.\n"); - return ret; - } - drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs, + dsi_mode ? DRM_MODE_CONNECTOR_DSI : DRM_MODE_CONNECTOR_DPI); ret = s6e63m0_backlight_register(ctx); @@ -479,31 +523,17 @@ static int s6e63m0_probe(struct spi_device *spi) return 0; } +EXPORT_SYMBOL_GPL(s6e63m0_probe); -static int s6e63m0_remove(struct spi_device *spi) +int s6e63m0_remove(struct device *dev) { - struct s6e63m0 *ctx = spi_get_drvdata(spi); + struct s6e63m0 *ctx = dev_get_drvdata(dev); drm_panel_remove(&ctx->panel); return 0; } - -static const struct of_device_id s6e63m0_of_match[] = { - { .compatible = "samsung,s6e63m0" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, s6e63m0_of_match); - -static struct spi_driver s6e63m0_driver = { - .probe = s6e63m0_probe, - .remove = s6e63m0_remove, - .driver = { - .name = "panel-samsung-s6e63m0", - .of_match_table = s6e63m0_of_match, - }, -}; -module_spi_driver(s6e63m0_driver); +EXPORT_SYMBOL_GPL(s6e63m0_remove); MODULE_AUTHOR("Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>"); MODULE_DESCRIPTION("s6e63m0 LCD Driver"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h new file mode 100644 index 000000000000..c669fec91763 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _PANEL_SAMSUNG_S6E63M0_H +#define _PANEL_SAMSUNG_S6E63M0_H + +int s6e63m0_probe(struct device *dev, + int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val), + int (*dcs_write)(struct device *dev, const u8 *data, + size_t len), + bool dsi_mode); +int s6e63m0_remove(struct device *dev); + +#endif /* _PANEL_SAMSUNG_S6E63M0_H */ diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 2be358fb46f7..8b82ec33f08a 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c 
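The s6e63m0 rework above turns the panel core into a bus-agnostic library: the core keeps dcs_read/dcs_write function pointers handed in by a thin transport probe, and the SPI transport frames every DCS byte as a 9-bit word, with bit 8 (DATA_MASK, 0x100) clear for the leading command byte and set for each parameter byte. The sketch below models only that framing in standalone C, assuming a hypothetical pack_dcs_9bit() helper that is not part of the driver; the driver itself shifts each word out one at a time through spi_sync() in s6e63m0_spi_write_word().

/*
 * Minimal standalone model of the 9-bit SPI framing used by the
 * s6e63m0 transport: word 0 carries the DCS command with bit 8
 * clear, every later word carries a parameter byte with bit 8
 * (DATA_MASK) set. pack_dcs_9bit() is an illustrative helper only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DATA_MASK 0x100 /* bit 8: 0 = command byte, 1 = parameter byte */

static size_t pack_dcs_9bit(const uint8_t *seq, size_t len, uint16_t *out)
{
	size_t i;

	for (i = 0; i < len; i++)
		out[i] = (i == 0) ? seq[i] : (uint16_t)(seq[i] | DATA_MASK);
	return len;
}

int main(void)
{
	/* MCS_LEVEL_2_KEY unlock sequence from the panel driver: f0 5a 5a */
	const uint8_t seq[] = { 0xf0, 0x5a, 0x5a };
	uint16_t words[sizeof(seq)];
	size_t i, n = pack_dcs_9bit(seq, sizeof(seq), words);

	for (i = 0; i < n; i++)
		printf("word %zu: 0x%03x\n", i, (unsigned int)words[i]);
	return 0;
}

Fed the MCS_LEVEL_2_KEY unlock sequence (f0 5a 5a), it prints 0x0f0, 0x15a, 0x15a, matching the word stream s6e63m0_spi_dcs_write() generates before its 300 us settle delay.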
@@ -3873,6 +3873,32 @@ static const struct panel_desc winstar_wf35ltiacd = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode = { + .clock = 51200, + .hdisplay = 1024, + .hsync_start = 1024 + 100, + .hsync_end = 1024 + 100 + 100, + .htotal = 1024 + 100 + 100 + 120, + .vdisplay = 600, + .vsync_start = 600 + 10, + .vsync_end = 600 + 10 + 10, + .vtotal = 600 + 10 + 10 + 15, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, +}; + +static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = { + .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 154, + .height = 90, + }, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode arm_rtsm_mode[] = { { .clock = 65000, @@ -4300,6 +4326,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "winstar,wf35ltiacd", .data = &winstar_wf35ltiacd, }, { + .compatible = "yes-optoelectronics,ytc700tlag-05-201c", + .data = &yes_optoelectronics_ytc700tlag_05_201c, + }, { /* Must be the last entry */ .compatible = "panel-dpi", .data = &panel_dpi, diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c index c22e7c49e077..b30510b1696a 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c @@ -153,7 +153,7 @@ static const struct drm_display_mode jh057n00900_mode = { .height_mm = 130, }; -struct st7703_panel_desc jh057n00900_panel_desc = { +static const struct st7703_panel_desc jh057n00900_panel_desc = { .mode = &jh057n00900_mode, .lanes = 4, .mode_flags = MIPI_DSI_MODE_VIDEO | diff --git a/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c new file mode 100644 index 000000000000..820731be7147 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct tdo_tl070wsh30_panel { + struct drm_panel base; + struct mipi_dsi_device *link; + + struct regulator *supply; + struct gpio_desc *reset_gpio; + + bool prepared; +}; + +static inline +struct tdo_tl070wsh30_panel *to_tdo_tl070wsh30_panel(struct drm_panel *panel) +{ + return container_of(panel, struct tdo_tl070wsh30_panel, base); +} + +static int tdo_tl070wsh30_panel_prepare(struct drm_panel *panel) +{ + struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel); + int err; + + if (tdo_tl070wsh30->prepared) + return 0; + + err = regulator_enable(tdo_tl070wsh30->supply); + if (err < 0) + return err; + + usleep_range(10000, 11000); + + gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 1); + + usleep_range(10000, 11000); + + gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 0); + + msleep(200); + + err = mipi_dsi_dcs_exit_sleep_mode(tdo_tl070wsh30->link); + if (err < 0) { + dev_err(panel->dev, "failed to exit sleep mode: %d\n", err); + regulator_disable(tdo_tl070wsh30->supply); + 
return err; + } + + msleep(200); + + err = mipi_dsi_dcs_set_display_on(tdo_tl070wsh30->link); + if (err < 0) { + dev_err(panel->dev, "failed to set display on: %d\n", err); + regulator_disable(tdo_tl070wsh30->supply); + return err; + } + + msleep(20); + + tdo_tl070wsh30->prepared = true; + + return 0; +} + +static int tdo_tl070wsh30_panel_unprepare(struct drm_panel *panel) +{ + struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel); + int err; + + if (!tdo_tl070wsh30->prepared) + return 0; + + err = mipi_dsi_dcs_set_display_off(tdo_tl070wsh30->link); + if (err < 0) + dev_err(panel->dev, "failed to set display off: %d\n", err); + + usleep_range(10000, 11000); + + err = mipi_dsi_dcs_enter_sleep_mode(tdo_tl070wsh30->link); + if (err < 0) { + dev_err(panel->dev, "failed to enter sleep mode: %d\n", err); + return err; + } + + usleep_range(10000, 11000); + + regulator_disable(tdo_tl070wsh30->supply); + + tdo_tl070wsh30->prepared = false; + + return 0; +} + +static const struct drm_display_mode default_mode = { + .clock = 47250, + .hdisplay = 1024, + .hsync_start = 1024 + 46, + .hsync_end = 1024 + 46 + 80, + .htotal = 1024 + 46 + 80 + 100, + .vdisplay = 600, + .vsync_start = 600 + 5, + .vsync_end = 600 + 5 + 5, + .vtotal = 600 + 5 + 5 + 20, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, +}; + +static int tdo_tl070wsh30_panel_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &default_mode); + if (!mode) { + dev_err(panel->dev, "failed to add mode %ux%u@%u\n", + default_mode.hdisplay, default_mode.vdisplay, + drm_mode_vrefresh(&default_mode)); + return -ENOMEM; + } + + drm_mode_set_name(mode); + + drm_mode_probed_add(connector, mode); + + connector->display_info.width_mm = 154; + connector->display_info.height_mm = 85; + connector->display_info.bpc = 8; + + return 1; +} + +static const struct drm_panel_funcs tdo_tl070wsh30_panel_funcs = { + .unprepare = tdo_tl070wsh30_panel_unprepare, + .prepare = tdo_tl070wsh30_panel_prepare, + .get_modes = tdo_tl070wsh30_panel_get_modes, +}; + +static const struct of_device_id tdo_tl070wsh30_of_match[] = { + { .compatible = "tdo,tl070wsh30", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, tdo_tl070wsh30_of_match); + +static int tdo_tl070wsh30_panel_add(struct tdo_tl070wsh30_panel *tdo_tl070wsh30) +{ + struct device *dev = &tdo_tl070wsh30->link->dev; + int err; + + tdo_tl070wsh30->supply = devm_regulator_get(dev, "power"); + if (IS_ERR(tdo_tl070wsh30->supply)) + return PTR_ERR(tdo_tl070wsh30->supply); + + tdo_tl070wsh30->reset_gpio = devm_gpiod_get(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(tdo_tl070wsh30->reset_gpio)) { + err = PTR_ERR(tdo_tl070wsh30->reset_gpio); + dev_dbg(dev, "failed to get reset gpio: %d\n", err); + return err; + } + + drm_panel_init(&tdo_tl070wsh30->base, &tdo_tl070wsh30->link->dev, + &tdo_tl070wsh30_panel_funcs, DRM_MODE_CONNECTOR_DSI); + + err = drm_panel_of_backlight(&tdo_tl070wsh30->base); + if (err) + return err; + + drm_panel_add(&tdo_tl070wsh30->base); + + return 0; +} + +static int tdo_tl070wsh30_panel_probe(struct mipi_dsi_device *dsi) +{ + struct tdo_tl070wsh30_panel *tdo_tl070wsh30; + int err; + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; + + tdo_tl070wsh30 = devm_kzalloc(&dsi->dev, sizeof(*tdo_tl070wsh30), + GFP_KERNEL); + if (!tdo_tl070wsh30) + return -ENOMEM; + + mipi_dsi_set_drvdata(dsi, 
tdo_tl070wsh30); + tdo_tl070wsh30->link = dsi; + + err = tdo_tl070wsh30_panel_add(tdo_tl070wsh30); + if (err < 0) + return err; + + return mipi_dsi_attach(dsi); +} + +static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi) +{ + struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi); + int err; + + err = mipi_dsi_detach(dsi); + if (err < 0) + dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err); + + drm_panel_remove(&tdo_tl070wsh30->base); + drm_panel_disable(&tdo_tl070wsh30->base); + drm_panel_unprepare(&tdo_tl070wsh30->base); + + return 0; +} + +static void tdo_tl070wsh30_panel_shutdown(struct mipi_dsi_device *dsi) +{ + struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi); + + drm_panel_disable(&tdo_tl070wsh30->base); + drm_panel_unprepare(&tdo_tl070wsh30->base); +} + +static struct mipi_dsi_driver tdo_tl070wsh30_panel_driver = { + .driver = { + .name = "panel-tdo-tl070wsh30", + .of_match_table = tdo_tl070wsh30_of_match, + }, + .probe = tdo_tl070wsh30_panel_probe, + .remove = tdo_tl070wsh30_panel_remove, + .shutdown = tdo_tl070wsh30_panel_shutdown, +}; +module_mipi_dsi_driver(tdo_tl070wsh30_panel_driver); + +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_DESCRIPTION("TDO TL070WSH30 panel driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index 037c14fd6bac..ba0c00d1a001 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -242,13 +242,8 @@ static int td028ttec1_prepare(struct drm_panel *panel) static int td028ttec1_enable(struct drm_panel *panel) { struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); - int ret; - ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL); - if (ret) - return ret; - - return 0; + return jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL); } static int td028ttec1_disable(struct drm_panel *panel) diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 8ab025d0035f..913eaa6d0bc6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -29,18 +29,13 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { struct dev_pm_opp *opp; - int err; opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(opp)) return PTR_ERR(opp); dev_pm_opp_put(opp); - err = dev_pm_opp_set_rate(dev, *freq); - if (err) - return err; - - return 0; + return dev_pm_opp_set_rate(dev, *freq); } static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index e6896733838a..1daf9322954a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -18,19 +18,13 @@ static int panfrost_reset_init(struct panfrost_device *pfdev) { - int err; - - pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true); + pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev); if (IS_ERR(pfdev->rstc)) { dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc)); return PTR_ERR(pfdev->rstc); } - err = reset_control_deassert(pfdev->rstc); - if (err) - return err; - - return 0; + return reset_control_deassert(pfdev->rstc); } static void panfrost_reset_fini(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
b/drivers/gpu/drm/panfrost/panfrost_device.h index 953f7536a773..140e004a3790 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -70,6 +70,9 @@ struct panfrost_compatible { int num_pm_domains; /* Only required if num_pm_domains > 1. */ const char * const *pm_domain_names; + + /* Vendor implementation quirks callback */ + void (*vendor_quirk)(struct panfrost_device *pfdev); }; struct panfrost_device { @@ -85,6 +88,7 @@ struct panfrost_device { /* pm_domains for devices with more than one. */ struct device *pm_domain_devs[MAX_PM_DOMAINS]; struct device_link *pm_domain_links[MAX_PM_DOMAINS]; + bool coherent; struct panfrost_features features; const struct panfrost_compatible *comp; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 36463c89e966..6e5dedacb777 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -587,6 +587,8 @@ static int panfrost_probe(struct platform_device *pdev) if (!pfdev->comp) return -ENODEV; + pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT; + /* Allocate and initialze the DRM device. */ ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev); if (IS_ERR(ddev)) @@ -656,7 +658,18 @@ static const struct panfrost_compatible default_data = { .pm_domain_names = NULL, }; +static const struct panfrost_compatible amlogic_data = { + .num_supplies = ARRAY_SIZE(default_supplies), + .supply_names = default_supplies, + .vendor_quirk = panfrost_gpu_amlogic_quirk, +}; + static const struct of_device_id dt_match[] = { + /* Set first to probe before the generic compatibles */ + { .compatible = "amlogic,meson-gxm-mali", + .data = &amlogic_data, }, + { .compatible = "amlogic,meson-g12a-mali", + .data = &amlogic_data, }, { .compatible = "arm,mali-t604", .data = &default_data, }, { .compatible = "arm,mali-t624", .data = &default_data, }, { .compatible = "arm,mali-t628", .data = &default_data, }, diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 33355dd302f1..fb9f7334ce18 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) for (i = 0; i < n_sgt; i++) { if (bo->sgts[i].sgl) { - dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl, - bo->sgts[i].nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(pfdev->dev, &bo->sgts[i], + DMA_BIDIRECTIONAL, 0); sg_free_table(&bo->sgts[i]); } } @@ -220,6 +220,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { */ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) { + struct panfrost_device *pfdev = dev->dev_private; struct panfrost_gem_object *obj; obj = kzalloc(sizeof(*obj), GFP_KERNEL); @@ -229,6 +230,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t INIT_LIST_HEAD(&obj->mappings.list); mutex_init(&obj->mappings.lock); obj->base.base.funcs = &panfrost_gem_funcs; + obj->base.map_cached = pfdev->coherent; return &obj->base.base; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index e0f190e43813..2aae636f1cf5 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -76,6 +76,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) return 0; } +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev) +{ + /* + * The Amlogic 
integrated Mali-T820, Mali-G31 & Mali-G52 needs + * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order + * to operate correctly. + */ + gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK); + gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16)); +} + static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) { u32 quirks = 0; @@ -136,6 +147,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) if (quirks) gpu_write(pfdev, GPU_JM_CONFIG, quirks); + + /* Here goes platform specific quirks */ + if (pfdev->comp->vendor_quirk) + pfdev->comp->vendor_quirk(pfdev); } #define MAX_HW_REVS 6 @@ -305,16 +320,18 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) int ret; u32 val; + panfrost_gpu_init_quirks(pfdev); + /* Just turn on everything for now */ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO, - val, val == pfdev->features.l2_present, 100, 1000); + val, val == pfdev->features.l2_present, 100, 20000); if (ret) dev_err(pfdev->dev, "error powering up gpu L2"); gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO, - val, val == pfdev->features.shader_present, 100, 1000); + val, val == pfdev->features.shader_present, 100, 20000); if (ret) dev_err(pfdev->dev, "error powering up gpu shader"); @@ -344,6 +361,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) dma_set_mask_and_coherent(pfdev->dev, DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + dma_set_max_seg_size(pfdev->dev, UINT_MAX); irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); if (irq <= 0) @@ -356,7 +374,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) return err; } - panfrost_gpu_init_quirks(pfdev); panfrost_gpu_power_on(pfdev); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h index 4112412087b2..468c51e7e46d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev); void panfrost_gpu_power_on(struct panfrost_device *pfdev); void panfrost_gpu_power_off(struct panfrost_device *pfdev); +void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); + #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 30e7b7196dab..e75b7d2192f7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -25,7 +25,8 @@ struct panfrost_queue_state { struct drm_gpu_scheduler sched; - + bool stopped; + struct mutex lock; u64 fence_context; u64 emit_seqno; }; @@ -369,6 +370,24 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) job_write(pfdev, JOB_INT_MASK, irq_mask); } +static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue, + struct drm_sched_job *bad) +{ + bool stopped = false; + + mutex_lock(&queue->lock); + if (!queue->stopped) { + drm_sched_stop(&queue->sched, bad); + if (bad) + drm_sched_increase_karma(bad); + queue->stopped = true; + stopped = true; + } + mutex_unlock(&queue->lock); + + return stopped; +} + static void panfrost_job_timedout(struct drm_sched_job *sched_job) { struct panfrost_job *job = to_panfrost_job(sched_job); @@ -392,19 +411,39 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) job_read(pfdev, JS_TAIL_LO(js)), sched_job); + /* Scheduler is already stopped, nothing 
to do. */ + if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job)) + return; + if (!mutex_trylock(&pfdev->reset_lock)) return; for (i = 0; i < NUM_JOB_SLOTS; i++) { struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; - drm_sched_stop(sched, sched_job); - if (js != i) - /* Ensure any timeouts on other slots have finished */ + /* + * If the queue is still active, make sure we wait for any + * pending timeouts. + */ + if (!pfdev->js->queue[i].stopped) cancel_delayed_work_sync(&sched->work_tdr); - } - drm_sched_increase_karma(sched_job); + /* + * If the scheduler was not already stopped, there's a tiny + * chance a timeout has expired just before we stopped it, and + * drm_sched_stop() does not flush pending works. Let's flush + * them now so the timeout handler doesn't get called in the + * middle of a reset. + */ + if (panfrost_scheduler_stop(&pfdev->js->queue[i], NULL)) + cancel_delayed_work_sync(&sched->work_tdr); + + /* + * Now that we cancelled the pending timeouts, we can safely + * reset the stopped state. + */ + pfdev->js->queue[i].stopped = false; + } spin_lock_irqsave(&pfdev->js->job_lock, flags); for (i = 0; i < NUM_JOB_SLOTS; i++) { @@ -421,11 +460,11 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); + mutex_unlock(&pfdev->reset_lock); + /* restart scheduler after GPU is usable again */ for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_start(&pfdev->js->queue[i].sched, true); - - mutex_unlock(&pfdev->reset_lock); } static const struct drm_sched_backend_ops panfrost_sched_ops = { @@ -515,6 +554,8 @@ int panfrost_job_init(struct panfrost_device *pfdev) } for (j = 0; j < NUM_JOB_SLOTS; j++) { + mutex_init(&js->queue[j].lock); + js->queue[j].fence_context = dma_fence_context_alloc(1); ret = drm_sched_init(&js->queue[j].sched, @@ -545,8 +586,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev) job_write(pfdev, JOB_INT_MASK, 0); - for (j = 0; j < NUM_JOB_SLOTS; j++) + for (j = 0; j < NUM_JOB_SLOTS; j++) { drm_sched_fini(&js->queue[j].sched); + mutex_destroy(&js->queue[j].lock); + } } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index e8f7b11352d2..be8d68fb0e11 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, struct io_pgtable_ops *ops = mmu->pgtbl_ops; u64 start_iova = iova; - for_each_sg(sgt->sgl, sgl, sgt->nents, count) { + for_each_sgtable_dma_sg(sgt, sgl, count) { unsigned long paddr = sg_dma_address(sgl); size_t len = sg_dma_len(sgl); @@ -371,6 +371,7 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv) .pgsize_bitmap = SZ_4K | SZ_2M, .ias = FIELD_GET(0xff, pfdev->features.mmu_features), .oas = FIELD_GET(0xff00, pfdev->features.mmu_features), + .coherent_walk = pfdev->coherent, .tlb = &mmu_tlb_ops, .iommu_dev = pfdev->dev, }; @@ -517,10 +518,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, if (ret) goto err_pages; - if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) { - ret = -EINVAL; + ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) goto err_map; - } mmu_map_sg(pfdev, bomapping->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index ea38ac60581c..eddaa62ad8b0 
100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -51,6 +51,10 @@ #define GPU_STATUS 0x34 #define GPU_STATUS_PRFCNT_ACTIVE BIT(2) #define GPU_LATEST_FLUSH_ID 0x38 +#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */ +#define GPU_PWR_KEY_UNLOCK 0x2968A819 +#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */ +#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */ #define GPU_FAULT_STATUS 0x3C #define GPU_FAULT_ADDRESS_LO 0x40 #define GPU_FAULT_ADDRESS_HI 0x44 diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 46b0d1c4a16c..ecef8a2383d2 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -224,15 +224,12 @@ static struct drm_driver pl111_drm_driver = { .major = 1, .minor = 0, .patchlevel = 0, + .gem_create_object = drm_gem_cma_create_object_default_funcs, .dumb_create = drm_gem_cma_dumb_create, - .gem_free_object_unlocked = drm_gem_cma_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = pl111_gem_import_sg_table, - .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, .gem_prime_mmap = drm_gem_cma_prime_mmap, - .gem_prime_vmap = drm_gem_cma_prime_vmap, #if defined(CONFIG_DEBUG_FS) .debugfs_init = pl111_debugfs_init, diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 524d35b648d8..183d15e2cf58 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -67,7 +67,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) seq_printf(m, "size %ld, pc %d, num releases %d\n", (unsigned long)bo->tbo.base.size, - bo->pin_count, rel); + bo->tbo.pin_count, rel); } return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 6063f3a15329..07a3e3c23f09 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -372,7 +372,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, } static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { qxl_crtc_update_monitors_config(crtc, "flush"); } @@ -444,13 +444,13 @@ static const struct drm_framebuffer_funcs qxl_fb_funcs = { }; static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { qxl_crtc_update_monitors_config(crtc, "enable"); } static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { qxl_crtc_update_monitors_config(crtc, "disable"); } @@ -768,7 +768,6 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, struct drm_gem_object *obj; struct qxl_bo *user_bo; struct qxl_surface surf; - int ret; if (!new_state->fb) return 0; @@ -804,11 +803,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, } } - ret = qxl_bo_pin(user_bo); - if (ret) - return ret; - - return 0; + return qxl_bo_pin(user_bo); } static void qxl_plane_cleanup_fb(struct drm_plane *plane, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index aae90a9ee1db..3602e8b34189 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -80,7 +80,6 @@ struct qxl_bo { struct ttm_place placements[3]; struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; - unsigned 
int pin_count; void *kptr; unsigned int map_count; int type; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 5cea6eea72ab..0bab9ec6adc1 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -326,8 +326,8 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, if (ret) goto out; - if (!qobj->pin_count) { - qxl_ttm_placement_from_domain(qobj, qobj->type, false); + if (!qobj->tbo.pin_count) { + qxl_ttm_placement_from_domain(qobj, qobj->type); ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); if (unlikely(ret)) goto out; diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index f838b6d689aa..547d46c14d56 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -51,29 +51,35 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) return false; } -void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) +void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) { u32 c = 0; u32 pflag = 0; unsigned int i; - if (pinned) - pflag |= TTM_PL_FLAG_NO_EVICT; if (qbo->tbo.base.size <= PAGE_SIZE) pflag |= TTM_PL_FLAG_TOPDOWN; qbo->placement.placement = qbo->placements; qbo->placement.busy_placement = qbo->placements; - if (domain == QXL_GEM_DOMAIN_VRAM) - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; + if (domain == QXL_GEM_DOMAIN_VRAM) { + qbo->placements[c].mem_type = TTM_PL_VRAM; + qbo->placements[c++].flags = pflag; + } if (domain == QXL_GEM_DOMAIN_SURFACE) { - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag; - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; + qbo->placements[c].mem_type = TTM_PL_PRIV; + qbo->placements[c++].flags = pflag; + qbo->placements[c].mem_type = TTM_PL_VRAM; + qbo->placements[c++].flags = pflag; + } + if (domain == QXL_GEM_DOMAIN_CPU) { + qbo->placements[c].mem_type = TTM_PL_SYSTEM; + qbo->placements[c++].flags = pflag; + } + if (!c) { + qbo->placements[c].mem_type = TTM_PL_SYSTEM; + qbo->placements[c++].flags = 0; } - if (domain == QXL_GEM_DOMAIN_CPU) - qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; - if (!c) - qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; qbo->placement.num_placement = c; qbo->placement.num_busy_placement = c; for (i = 0; i < c; ++i) { @@ -100,6 +106,7 @@ int qxl_bo_create(struct qxl_device *qdev, struct qxl_surface *surf, struct qxl_bo **bo_ptr) { + struct ttm_operation_ctx ctx = { !kernel, false }; struct qxl_bo *bo; enum ttm_bo_type type; int r; @@ -120,18 +127,17 @@ int qxl_bo_create(struct qxl_device *qdev, } bo->tbo.base.funcs = &qxl_object_funcs; bo->type = domain; - bo->pin_count = pinned ? 
1 : 0; bo->surface_id = 0; INIT_LIST_HEAD(&bo->list); if (surf) bo->surf = *surf; - qxl_ttm_placement_from_domain(bo, domain, pinned); + qxl_ttm_placement_from_domain(bo, domain); - r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, 0, !kernel, size, - NULL, NULL, &qxl_ttm_bo_destroy); + r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type, + &bo->placement, 0, &ctx, size, + NULL, NULL, &qxl_ttm_bo_destroy); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(qdev->ddev.dev, @@ -139,6 +145,9 @@ int qxl_bo_create(struct qxl_device *qdev, size, domain); return r; } + if (pinned) + ttm_bo_pin(&bo->tbo); + ttm_bo_unreserve(&bo->tbo); *bo_ptr = bo; return 0; } @@ -167,6 +176,7 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset) { + unsigned long offset; void *rptr; int ret; struct io_mapping *map; @@ -178,9 +188,8 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, else goto fallback; - ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem); - - return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); + offset = bo->tbo.mem.start << PAGE_SHIFT; + return io_mapping_map_atomic_wc(map, offset + page_offset); fallback: if (bo->kptr) { rptr = bo->kptr + (page_offset * PAGE_SIZE); @@ -240,39 +249,22 @@ static int __qxl_bo_pin(struct qxl_bo *bo) struct drm_device *ddev = bo->tbo.base.dev; int r; - if (bo->pin_count) { - bo->pin_count++; + if (bo->tbo.pin_count) { + ttm_bo_pin(&bo->tbo); return 0; } - qxl_ttm_placement_from_domain(bo, bo->type, true); + qxl_ttm_placement_from_domain(bo, bo->type); r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (likely(r == 0)) { - bo->pin_count = 1; - } + if (likely(r == 0)) + ttm_bo_pin(&bo->tbo); if (unlikely(r != 0)) dev_err(ddev->dev, "%p pin failed\n", bo); return r; } -static int __qxl_bo_unpin(struct qxl_bo *bo) +static void __qxl_bo_unpin(struct qxl_bo *bo) { - struct ttm_operation_ctx ctx = { false, false }; - struct drm_device *ddev = bo->tbo.base.dev; - int r, i; - - if (!bo->pin_count) { - dev_warn(ddev->dev, "%p unpin not necessary\n", bo); - return 0; - } - bo->pin_count--; - if (bo->pin_count) - return 0; - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (unlikely(r != 0)) - dev_err(ddev->dev, "%p validate failed for unpin\n", bo); - return r; + ttm_bo_unpin(&bo->tbo); } /* @@ -306,9 +298,9 @@ int qxl_bo_unpin(struct qxl_bo *bo) if (r) return r; - r = __qxl_bo_unpin(bo); + __qxl_bo_unpin(bo); qxl_bo_unreserve(bo); - return r; + return 0; } void qxl_bo_force_delete(struct qxl_device *qdev) @@ -359,10 +351,16 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) int qxl_surf_evict(struct qxl_device *qdev) { - return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV); + return ttm_resource_manager_evict_all(&qdev->mman.bdev, man); } int qxl_vram_evict(struct qxl_device *qdev) { - return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM); + return ttm_resource_manager_evict_all(&qdev->mman.bdev, man); } diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index 6b434e5ef795..09a5c818324d 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ 
b/drivers/gpu/drm/qxl/qxl_object.h @@ -58,29 +58,6 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); } -static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, - bool no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) { - struct drm_device *ddev = bo->tbo.base.dev; - - dev_err(ddev->dev, "%p reserve failed for wait\n", - bo); - } - return r; - } - if (mem_type) - *mem_type = bo->tbo.mem.mem_type; - - r = ttm_bo_wait(&bo->tbo, true, no_wait); - ttm_bo_unreserve(&bo->tbo); - return r; -} - extern int qxl_bo_create(struct qxl_device *qdev, unsigned long size, bool kernel, bool pinned, u32 domain, @@ -94,7 +71,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); extern void qxl_bo_unref(struct qxl_bo **bo); extern int qxl_bo_pin(struct qxl_bo *bo); extern int qxl_bo_unpin(struct qxl_bo *bo); -extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); +extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); #endif diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 4fae3e393da1..e75e364655b8 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -231,8 +231,8 @@ static int qxl_release_validate_bo(struct qxl_bo *bo) struct ttm_operation_ctx ctx = { true, false }; int ret; - if (!bo->pin_count) { - qxl_ttm_placement_from_domain(bo, bo->type, false); + if (!bo->tbo.pin_count) { + qxl_ttm_placement_from_domain(bo, bo->type); ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (ret) return ret; diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7aae0a96f043..a80d59634143 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -32,7 +32,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_placement.h> #include "qxl_drv.h" @@ -55,7 +54,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + .mem_type = TTM_PL_SYSTEM, + .flags = 0 }; if (!qxl_ttm_bo_is_qxl_bo(bo)) { @@ -66,7 +66,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, return; } qbo = to_qxl_bo(bo); - qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); + qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); *placement = qbo->placement; } @@ -81,13 +81,14 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, return 0; case TTM_PL_VRAM: mem->bus.is_iomem = true; - mem->bus.base = qdev->vram_base; - mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base; + mem->bus.caching = ttm_cached; break; case TTM_PL_PRIV: mem->bus.is_iomem = true; - mem->bus.base = qdev->surfaceram_base; - mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.offset = (mem->start << PAGE_SHIFT) + + qdev->surfaceram_base; + mem->bus.caching = ttm_cached; break; default: return -EINVAL; @@ -98,72 +99,43 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, /* * TTM backend functions. 
*/ -struct qxl_ttm_tt { - struct ttm_tt ttm; - struct qxl_device *qdev; - u64 offset; -}; - -static int qxl_ttm_backend_bind(struct ttm_tt *ttm, - struct ttm_resource *bo_mem) +static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - struct qxl_ttm_tt *gtt = (void *)ttm; - - gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); - if (!ttm->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", - ttm->num_pages, bo_mem, ttm); - } - /* Not implemented */ - return -1; -} - -static void qxl_ttm_backend_unbind(struct ttm_tt *ttm) -{ - /* Not implemented */ -} - -static void qxl_ttm_backend_destroy(struct ttm_tt *ttm) -{ - struct qxl_ttm_tt *gtt = (void *)ttm; - - ttm_tt_fini(&gtt->ttm); - kfree(gtt); + ttm_tt_destroy_common(bdev, ttm); + ttm_tt_fini(ttm); + kfree(ttm); } -static struct ttm_backend_func qxl_backend_func = { - .bind = &qxl_ttm_backend_bind, - .unbind = &qxl_ttm_backend_unbind, - .destroy = &qxl_ttm_backend_destroy, -}; - static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { - struct qxl_device *qdev; - struct qxl_ttm_tt *gtt; + struct ttm_tt *ttm; - qdev = qxl_get_qdev(bo->bdev); - gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL); - if (gtt == NULL) + ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); + if (ttm == NULL) return NULL; - gtt->ttm.func = &qxl_backend_func; - gtt->qdev = qdev; - if (ttm_tt_init(&gtt->ttm, bo, page_flags)) { - kfree(gtt); + if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) { + kfree(ttm); return NULL; } - return &gtt->ttm; + return ttm; } -static void qxl_move_null(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) +static void qxl_bo_move_notify(struct ttm_buffer_object *bo, + bool evict, + struct ttm_resource *new_mem) { - struct ttm_resource *old_mem = &bo->mem; + struct qxl_bo *qbo; + struct qxl_device *qdev; + + if (!qxl_ttm_bo_is_qxl_bo(bo)) + return; + qbo = to_qxl_bo(bo); + qdev = to_qxl(qbo->tbo.base.dev); - BUG_ON(old_mem->mm_node != NULL); - *old_mem = *new_mem; - new_mem->mm_node = NULL; + if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id) + qxl_surface_evict(qdev, qbo, new_mem ? true : false); } static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, @@ -173,48 +145,46 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int ret; - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + qxl_bo_move_notify(bo, evict, new_mem); + + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) - return ret; + goto out; if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - qxl_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); return 0; } - return ttm_bo_move_memcpy(bo, ctx, new_mem); + ret = ttm_bo_move_memcpy(bo, ctx, new_mem); +out: + if (ret) { + swap(*new_mem, bo->mem); + qxl_bo_move_notify(bo, false, new_mem); + swap(*new_mem, bo->mem); + } + return ret; } -static void qxl_bo_move_notify(struct ttm_buffer_object *bo, - bool evict, - struct ttm_resource *new_mem) +static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo) { - struct qxl_bo *qbo; - struct qxl_device *qdev; - - if (!qxl_ttm_bo_is_qxl_bo(bo)) - return; - qbo = to_qxl_bo(bo); - qdev = to_qxl(qbo->tbo.base.dev); - - if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id) - qxl_surface_evict(qdev, qbo, new_mem ?
true : false); + qxl_bo_move_notify(bo, false, NULL); } static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, + .ttm_tt_destroy = &qxl_ttm_backend_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, .io_mem_reserve = &qxl_ttm_io_mem_reserve, - .move_notify = &qxl_bo_move_notify, + .delete_mem_notify = &qxl_bo_delete_mem_notify, }; static int qxl_ttm_init_mem_type(struct qxl_device *qdev, unsigned int type, uint64_t size) { - return ttm_range_man_init(&qdev->mman.bdev, type, TTM_PL_MASK_CACHING, - TTM_PL_FLAG_CACHED, false, size); + return ttm_range_man_init(&qdev->mman.bdev, type, false, size); } int qxl_ttm_init(struct qxl_device *qdev) @@ -223,11 +193,10 @@ int qxl_ttm_init(struct qxl_device *qdev) int num_io_pages; /* != rom->num_io_pages, we include surface0 */ /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&qdev->mman.bdev, - &qxl_bo_driver, + r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL, qdev->ddev.anon_inode->i_mapping, qdev->ddev.vma_offset_manager, - false); + false, false); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 85a1cafdf303..e1132d86d250 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -497,7 +497,6 @@ struct radeon_bo { struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; u32 flags; - unsigned pin_count; void *kptr; u32 tiling_flags; u32 pitch; @@ -2816,10 +2815,12 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); -extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, +extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev, + struct ttm_tt *ttm, uint64_t addr, uint32_t flags); -extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm); -extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm); +extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm); +extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm); +bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm); extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 33ae1b883268..21ce2f9502c0 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -160,7 +160,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].allowed_domains = domain; } - if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { + if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) { uint32_t domain = p->relocs[i].preferred_domains; if (!(domain & RADEON_GEM_DOMAIN_GTT)) { DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index e0ae911ef427..b79686cf8bdb 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ 
b/drivers/gpu/drm/radeon/radeon_display.c @@ -273,10 +273,7 @@ static void radeon_unpin_work_func(struct work_struct *__work) /* unpin of the old buffer */ r = radeon_bo_reserve(work->old_rbo, false); if (likely(r == 0)) { - r = radeon_bo_unpin(work->old_rbo); - if (unlikely(r != 0)) { - DRM_ERROR("failed to unpin buffer after flip\n"); - } + radeon_bo_unpin(work->old_rbo); radeon_bo_unreserve(work->old_rbo); } else DRM_ERROR("failed to reserve buffer after flip\n"); @@ -607,9 +604,7 @@ pflip_cleanup: DRM_ERROR("failed to reserve new rbo in error path\n"); goto cleanup; } - if (unlikely(radeon_bo_unpin(new_rbo) != 0)) { - DRM_ERROR("failed to unpin new rbo in error path\n"); - } + radeon_bo_unpin(new_rbo); radeon_bo_unreserve(new_rbo); cleanup: diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4cd30613fa1d..65061c949aee 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -124,13 +124,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev); int radeon_driver_irq_postinstall_kms(struct drm_device *dev); void radeon_driver_irq_uninstall_kms(struct drm_device *dev); irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg); -void radeon_gem_object_free(struct drm_gem_object *obj); -int radeon_gem_object_open(struct drm_gem_object *obj, - struct drm_file *file_priv); -void radeon_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv); -struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, - int flags); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, unsigned int flags, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, @@ -145,14 +138,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, int radeon_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *, struct sg_table *sg); -int radeon_gem_prime_pin(struct drm_gem_object *obj); -void radeon_gem_prime_unpin(struct drm_gem_object *obj); -void *radeon_gem_prime_vmap(struct drm_gem_object *obj); -void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) @@ -550,7 +538,7 @@ long radeon_drm_ioctl(struct file *filp, } ret = drm_ioctl(filp, cmd, arg); - + pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; @@ -609,22 +597,13 @@ static struct drm_driver kms_driver = { .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, .ioctls = radeon_ioctls_kms, - .gem_free_object_unlocked = radeon_gem_object_free, - .gem_open_object = radeon_gem_object_open, - .gem_close_object = radeon_gem_object_close, .dumb_create = radeon_mode_dumb_create, .dumb_map_offset = radeon_mode_dumb_mmap, .fops = &radeon_driver_kms_fops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = radeon_gem_prime_export, - .gem_prime_pin = radeon_gem_prime_pin, - .gem_prime_unpin = radeon_gem_prime_unpin, - .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, - .gem_prime_vmap = radeon_gem_prime_vmap, - .gem_prime_vunmap = radeon_gem_prime_vunmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git 
a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 7f5dfe04789e..0ccd7213e41f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -35,7 +35,17 @@ #include "radeon.h" -void radeon_gem_object_free(struct drm_gem_object *gobj) +struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, + int flags); +struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); +int radeon_gem_prime_pin(struct drm_gem_object *obj); +void radeon_gem_prime_unpin(struct drm_gem_object *obj); +void *radeon_gem_prime_vmap(struct drm_gem_object *obj); +void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + +static const struct drm_gem_object_funcs radeon_gem_object_funcs; + +static void radeon_gem_object_free(struct drm_gem_object *gobj) { struct radeon_bo *robj = gem_to_radeon_bo(gobj); @@ -85,6 +95,7 @@ retry: return r; } *obj = &robj->tbo.base; + (*obj)->funcs = &radeon_gem_object_funcs; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); @@ -146,7 +157,7 @@ void radeon_gem_fini(struct radeon_device *rdev) * Call from drm_gem_handle_create which appear in both new and open ioctl * case. */ -int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) +static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) { struct radeon_bo *rbo = gem_to_radeon_bo(obj); struct radeon_device *rdev = rbo->rdev; @@ -176,8 +187,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri return 0; } -void radeon_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv) +static void radeon_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv) { struct radeon_bo *rbo = gem_to_radeon_bo(obj); struct radeon_device *rdev = rbo->rdev; @@ -216,6 +227,18 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) return r; } +static const struct drm_gem_object_funcs radeon_gem_object_funcs = { + .free = radeon_gem_object_free, + .open = radeon_gem_object_open, + .close = radeon_gem_object_close, + .export = radeon_gem_prime_export, + .pin = radeon_gem_prime_pin, + .unpin = radeon_gem_prime_unpin, + .get_sg_table = radeon_gem_prime_get_sg_table, + .vmap = radeon_gem_prime_vmap, + .vunmap = radeon_gem_prime_vunmap, +}; + /* * GEM ioctls. 
*/ @@ -331,7 +354,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, goto handle_lockup; bo = gem_to_radeon_bo(gobj); - r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); + r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags); if (r) goto release_object; @@ -420,7 +443,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, return -ENOENT; } robj = gem_to_radeon_bo(gobj); - if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { + if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) { drm_gem_object_put(gobj); return -EPERM; } @@ -721,7 +744,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, robj = gem_to_radeon_bo(gobj); r = -EPERM; - if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) + if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) goto out; r = radeon_bo_reserve(robj, false); diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index f93829f08a4d..97b9b6dd6dd3 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -53,7 +53,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn, struct ttm_operation_ctx ctx = { false, false }; long r; - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) + if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm)) return true; if (!mmu_notifier_range_blockable(range)) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index bb7582afd803..ab81e35cb060 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -112,58 +112,30 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) { rbo->placements[c].fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + rbo->placements[c].mem_type = TTM_PL_VRAM; + rbo->placements[c++].flags = 0; } rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + rbo->placements[c].mem_type = TTM_PL_VRAM; + rbo->placements[c++].flags = 0; } if (domain & RADEON_GEM_DOMAIN_GTT) { - if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_TT; - - } else if ((rbo->flags & RADEON_GEM_GTT_WC) || - (rbo->rdev->flags & RADEON_IS_AGP)) { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_TT; - } else { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_TT; - } + rbo->placements[c].fpfn = 0; + rbo->placements[c].mem_type = TTM_PL_TT; + rbo->placements[c++].flags = 0; } if (domain & RADEON_GEM_DOMAIN_CPU) { - if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_SYSTEM; - - } else if ((rbo->flags & RADEON_GEM_GTT_WC) || - rbo->rdev->flags & RADEON_IS_AGP) { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_SYSTEM; - } else { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_SYSTEM; - } + rbo->placements[c].fpfn = 0; + rbo->placements[c].mem_type = TTM_PL_SYSTEM; + rbo->placements[c++].flags = 0; } if (!c) { rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = 
TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + rbo->placements[c].mem_type = TTM_PL_SYSTEM; + rbo->placements[c++].flags = 0; } rbo->placement.num_placement = c; @@ -171,7 +143,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) for (i = 0; i < c; ++i) { if ((rbo->flags & RADEON_GEM_CPU_ACCESS) && - (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && + (rbo->placements[i].mem_type == TTM_PL_VRAM) && !rbo->placements[i].fpfn) rbo->placements[i].lpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; @@ -331,11 +303,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, struct ttm_operation_ctx ctx = { false, false }; int r, i; - if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm)) return -EPERM; - if (bo->pin_count) { - bo->pin_count++; + if (bo->tbo.pin_count) { + ttm_bo_pin(&bo->tbo); if (gpu_addr) *gpu_addr = radeon_bo_gpu_offset(bo); @@ -360,20 +332,18 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, radeon_ttm_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { /* force to pin into visible video ram */ - if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && + if ((bo->placements[i].mem_type == TTM_PL_VRAM) && !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) && (!max_offset || max_offset > bo->rdev->mc.visible_vram_size)) bo->placements[i].lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; else bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; - - bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (likely(r == 0)) { - bo->pin_count = 1; + ttm_bo_pin(&bo->tbo); if (gpu_addr != NULL) *gpu_addr = radeon_bo_gpu_offset(bo); if (domain == RADEON_GEM_DOMAIN_VRAM) @@ -391,36 +361,22 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); } -int radeon_bo_unpin(struct radeon_bo *bo) +void radeon_bo_unpin(struct radeon_bo *bo) { - struct ttm_operation_ctx ctx = { false, false }; - int r, i; - - if (!bo->pin_count) { - dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); - return 0; - } - bo->pin_count--; - if (bo->pin_count) - return 0; - for (i = 0; i < bo->placement.num_placement; i++) { - bo->placements[i].lpfn = 0; - bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; - } - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (likely(r == 0)) { + ttm_bo_unpin(&bo->tbo); + if (!bo->tbo.pin_count) { if (bo->tbo.mem.mem_type == TTM_PL_VRAM) bo->rdev->vram_pin_size -= radeon_bo_size(bo); else bo->rdev->gart_pin_size -= radeon_bo_size(bo); - } else { - dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); } - return r; } int radeon_bo_evict_vram(struct radeon_device *rdev) { + struct ttm_bo_device *bdev = &rdev->mman.bdev; + struct ttm_resource_manager *man; + /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ #ifndef CONFIG_HIBERNATION if (rdev->flags & RADEON_IS_IGP) { @@ -429,7 +385,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev) return 0; } #endif - return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); + man = ttm_manager_type(bdev, TTM_PL_VRAM); + return ttm_resource_manager_evict_all(bdev, man); } void radeon_bo_force_delete(struct radeon_device *rdev) @@ -549,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, list_for_each_entry(lobj, head, tv.head) { struct radeon_bo *bo = lobj->robj; - if (!bo->pin_count) { + if (!bo->tbo.pin_count) { u32 domain 
= lobj->preferred_domains; u32 allowed = lobj->allowed_domains; u32 current_domain = @@ -629,7 +586,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) break; old_object = reg->bo; - if (old_object->pin_count == 0) + if (old_object->tbo.pin_count == 0) steal = i; } @@ -794,7 +751,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo, radeon_update_memory_usage(rbo, new_mem->mem_type, 1); } -int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) +vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { struct ttm_operation_ctx ctx = { false, false }; struct radeon_device *rdev; @@ -816,49 +773,38 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) return 0; /* Can't move a pinned BO to visible VRAM */ - if (rbo->pin_count > 0) - return -EINVAL; + if (rbo->tbo.pin_count > 0) + return VM_FAULT_SIGBUS; /* hurrah the memory is not visible ! */ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; for (i = 0; i < rbo->placement.num_placement; i++) { /* Force into visible VRAM */ - if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && + if ((rbo->placements[i].mem_type == TTM_PL_VRAM) && (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn)) rbo->placements[i].lpfn = lpfn; } r = ttm_bo_validate(bo, &rbo->placement, &ctx); if (unlikely(r == -ENOMEM)) { radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); - return ttm_bo_validate(bo, &rbo->placement, &ctx); - } else if (unlikely(r != 0)) { - return r; + r = ttm_bo_validate(bo, &rbo->placement, &ctx); + } else if (likely(!r)) { + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return VM_FAULT_SIGBUS; } - offset = bo->mem.start << PAGE_SHIFT; - /* this should never happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; + if (unlikely(r == -EBUSY || r == -ERESTARTSYS)) + return VM_FAULT_NOPAGE; + else if (unlikely(r)) + return VM_FAULT_SIGBUS; + ttm_bo_move_to_lru_tail_unlocked(bo); return 0; } -int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) - return r; - if (mem_type) - *mem_type = bo->tbo.mem.mem_type; - - r = ttm_bo_wait(&bo->tbo, true, no_wait); - ttm_bo_unreserve(&bo->tbo); - return r; -} - /** * radeon_bo_fence - add fence to buffer object * diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 44b47241ee42..d606e9a935e3 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -133,9 +133,6 @@ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); } -extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, - bool no_wait); - extern int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, @@ -149,7 +146,7 @@ extern void radeon_bo_unref(struct radeon_bo **bo); extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, u64 *gpu_addr); -extern int radeon_bo_unpin(struct radeon_bo *bo); +extern void radeon_bo_unpin(struct radeon_bo *bo); extern int radeon_bo_evict_vram(struct radeon_device *rdev); extern void radeon_bo_force_delete(struct radeon_device *rdev); extern int radeon_bo_init(struct 
radeon_device *rdev); @@ -166,7 +163,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *new_mem); -extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); +extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, bool shared); diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index b906e8fbd5f3..b9de0e51c0be 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -36,7 +36,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) struct radeon_bo *bo = gem_to_radeon_bo(obj); int npages = bo->tbo.num_pages; - return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); + return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages); } void *radeon_gem_prime_vmap(struct drm_gem_object *obj) @@ -121,7 +121,7 @@ struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, int flags) { struct radeon_bo *bo = gem_to_radeon_bo(gobj); - if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm)) return ERR_PTR(-EPERM); return drm_gem_prime_export(gobj, flags); } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 74ad50c7491c..95038ac3382e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -47,7 +47,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_placement.h> #include "radeon_reg.h" @@ -56,6 +55,12 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev); static void radeon_ttm_debugfs_fini(struct radeon_device *rdev); +static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem); +static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm); + struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) { struct radeon_mman *mman; @@ -69,17 +74,13 @@ struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) static int radeon_ttm_init_vram(struct radeon_device *rdev) { return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM, - TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC, - TTM_PL_FLAG_WC, false, - rdev->mc.real_vram_size >> PAGE_SHIFT); + false, rdev->mc.real_vram_size >> PAGE_SHIFT); } static int radeon_ttm_init_gtt(struct radeon_device *rdev) { return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT, - TTM_PL_MASK_CACHING, - TTM_PL_FLAG_CACHED, true, - rdev->mc.gtt_size >> PAGE_SHIFT); + true, rdev->mc.gtt_size >> PAGE_SHIFT); } static void radeon_evict_flags(struct ttm_buffer_object *bo, @@ -88,7 +89,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + .mem_type = TTM_PL_SYSTEM, + .flags = 0 }; struct radeon_bo *rbo; @@ -119,7 +121,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, RADEON_GEM_DOMAIN_GTT); rbo->placement.num_busy_placement = 0; for (i = 0; i < rbo->placement.num_placement; i++) { - if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { + if (rbo->placements[i].mem_type == TTM_PL_VRAM) { if 
(rbo->placements[i].fpfn < fpfn) rbo->placements[i].fpfn = fpfn; } else { @@ -141,25 +143,16 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); + struct radeon_device *rdev = radeon_get_rdev(bo->bdev); - if (radeon_ttm_tt_has_userptr(bo->ttm)) + if (radeon_ttm_tt_has_userptr(rdev, bo->ttm)) return -EPERM; return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, filp->private_data); } -static void radeon_move_null(struct ttm_buffer_object *bo, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - - BUG_ON(old_mem->mm_node != NULL); - *old_mem = *new_mem; - new_mem->mm_node = NULL; -} - static int radeon_move_blit(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, struct ttm_resource *new_mem, struct ttm_resource *old_mem) { @@ -208,17 +201,16 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, if (IS_ERR(fence)) return PTR_ERR(fence); - r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem); + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem); radeon_fence_unref(&fence); return r; } static int radeon_move_vram_ram(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool evict, + struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) { - struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct ttm_resource *old_mem = &bo->mem; struct ttm_resource tmp_mem; struct ttm_place placements; @@ -233,37 +225,43 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); + placements.mem_type = TTM_PL_TT; + placements.flags = 0; + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { return r; } - r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); + r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); if (unlikely(r)) { goto out_cleanup; } - r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx); + r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } - r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); + r = radeon_move_blit(bo, true, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, &ctx, new_mem); + r = ttm_bo_wait_ctx(bo, ctx); + if (unlikely(r)) + goto out_cleanup; + + radeon_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); + ttm_bo_assign_mem(bo, new_mem); out_cleanup: ttm_resource_free(bo, &tmp_mem); return r; } static int radeon_move_ram_vram(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool evict, + struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) { - struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct ttm_resource *old_mem = &bo->mem; struct ttm_resource tmp_mem; struct ttm_placement placement; @@ -278,16 +276,23 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, placement.busy_placement = &placements; placements.fpfn = 0; placements.lpfn = 0; - placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); + placements.mem_type = TTM_PL_TT; + placements.flags = 0; + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { return r; } 
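/*
 * With ttm_bo_move_ttm() gone, both bounce paths here sequence the GTT hop
 * by hand: find space in a TT placement, populate the backing pages, bind
 * them into the GART, then blit. A condensed sketch of that sequence,
 * assuming a single TT placement; the helper name is invented for
 * illustration and error unwinding beyond the temporary resource is
 * trimmed:
 */
static int radeon_bounce_through_gtt(struct ttm_buffer_object *bo,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_resource *tmp_mem)
{
	struct ttm_place place = {
		.fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_TT, .flags = 0
	};
	struct ttm_placement placement = {
		.num_placement = 1, .placement = &place,
		.num_busy_placement = 1, .busy_placement = &place,
	};
	int r;

	r = ttm_bo_mem_space(bo, &placement, tmp_mem, ctx); /* reserve GTT space */
	if (r)
		return r;

	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); /* allocate backing pages */
	if (r)
		goto err_free;

	r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, tmp_mem); /* map into the GART */
	if (r)
		goto err_free;

	return 0;

err_free:
	ttm_resource_free(bo, tmp_mem);
	return r;
}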
- r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem); - if (unlikely(r)) { + + r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (unlikely(r)) goto out_cleanup; - } - r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); + + r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); + if (unlikely(r)) + goto out_cleanup; + + ttm_bo_assign_mem(bo, &tmp_mem); + r = radeon_move_blit(bo, true, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } @@ -305,26 +310,38 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; - r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + if (new_mem->mem_type == TTM_PL_TT) { + r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem); + if (r) + return r; + } + radeon_bo_move_notify(bo, evict, new_mem); + + r = ttm_bo_wait_ctx(bo, ctx); if (r) - return r; + goto fail; /* Can't move a pinned BO */ rbo = container_of(bo, struct radeon_bo, tbo); - if (WARN_ON_ONCE(rbo->pin_count > 0)) + if (WARN_ON_ONCE(rbo->tbo.pin_count > 0)) return -EINVAL; rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - radeon_move_null(bo, new_mem); + ttm_bo_move_null(bo, new_mem); + return 0; + } + if (old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_TT) { + ttm_bo_move_null(bo, new_mem); return 0; } - if ((old_mem->mem_type == TTM_PL_TT && - new_mem->mem_type == TTM_PL_SYSTEM) || - (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_TT)) { - /* bind is enough */ - radeon_move_null(bo, new_mem); + + if (old_mem->mem_type == TTM_PL_TT && + new_mem->mem_type == TTM_PL_SYSTEM) { + radeon_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); + ttm_bo_assign_mem(bo, new_mem); return 0; } if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || @@ -335,14 +352,12 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { - r = radeon_move_vram_ram(bo, evict, ctx->interruptible, - ctx->no_wait_gpu, new_mem); + r = radeon_move_vram_ram(bo, evict, ctx, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { - r = radeon_move_ram_vram(bo, evict, ctx->interruptible, - ctx->no_wait_gpu, new_mem); + r = radeon_move_ram_vram(bo, evict, ctx, new_mem); } else { - r = radeon_move_blit(bo, evict, ctx->no_wait_gpu, + r = radeon_move_blit(bo, evict, new_mem, old_mem); } @@ -350,13 +365,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, memcpy: r = ttm_bo_move_memcpy(bo, ctx, new_mem); if (r) { - return r; + goto fail; } } /* update statistics */ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); return 0; +fail: + swap(*new_mem, bo->mem); + radeon_bo_move_notify(bo, false, new_mem); + swap(*new_mem, bo->mem); + return r; } static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) @@ -372,9 +392,10 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso #if IS_ENABLED(CONFIG_AGP) if (rdev->flags & RADEON_IS_AGP) { /* RADEON_IS_AGP is set only if AGP is active */ - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = rdev->mc.agp_base; + mem->bus.offset = (mem->start << PAGE_SHIFT) + + rdev->mc.agp_base; mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; + mem->bus.caching = ttm_write_combined; } #endif break; @@ -383,21 +404,15 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct 
ttm_reso /* check if it's visible */ if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size) return -EINVAL; - mem->bus.base = rdev->mc.aper_base; + mem->bus.offset += rdev->mc.aper_base; mem->bus.is_iomem = true; + mem->bus.caching = ttm_write_combined; #ifdef __alpha__ /* * Alpha: use bus.addr to hold the ioremap() return, * so we can modify bus.base below. */ - if (mem->placement & TTM_PL_FLAG_WC) - mem->bus.addr = - ioremap_wc(mem->bus.base + mem->bus.offset, - bus_size); - else - mem->bus.addr = - ioremap(mem->bus.base + mem->bus.offset, - bus_size); + mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size); if (!mem->bus.addr) return -ENOMEM; @@ -407,7 +422,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso * It then can be used to build PTEs for VRAM * access, as done in ttm_bo_vm_fault(). */ - mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + + mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) + rdev->ddev->hose->dense_mem_base; #endif break; @@ -421,18 +436,19 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso * TTM backend functions. */ struct radeon_ttm_tt { - struct ttm_dma_tt ttm; + struct ttm_tt ttm; u64 offset; uint64_t userptr; struct mm_struct *usermm; uint32_t userflags; + bool bound; }; /* prepare the sg table with the user pages */ -static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) +static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); struct radeon_ttm_tt *gtt = (void *)ttm; unsigned pinned = 0; int r; @@ -491,9 +507,9 @@ release_pages: return r; } -static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) +static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); struct radeon_ttm_tt *gtt = (void *)ttm; struct sg_page_iter sg_iter; @@ -520,67 +536,84 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } -static int radeon_ttm_backend_bind(struct ttm_tt *ttm, +static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = (void*)ttm; + + return (gtt->bound); +} + +static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct radeon_ttm_tt *gtt = (void*)ttm; - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ | RADEON_GART_PAGE_WRITE; int r; + if (gtt->bound) + return 0; + if (gtt->userptr) { - radeon_ttm_tt_pin_userptr(ttm); + radeon_ttm_tt_pin_userptr(bdev, ttm); flags &= ~RADEON_GART_PAGE_WRITE; } gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", + WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } - if (ttm->caching_state == tt_cached) + if (ttm->caching == ttm_cached) flags |= RADEON_GART_PAGE_SNOOP; r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08X\n", + DRM_ERROR("failed to bind %u pages at 0x%08X\n", ttm->num_pages, (unsigned)gtt->offset); return r; } + gtt->bound = true; return 0; } -static void 
radeon_ttm_backend_unbind(struct ttm_tt *ttm) +static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; - struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_device *rdev = radeon_get_rdev(bdev); + + if (!gtt->bound) + return; radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages); if (gtt->userptr) - radeon_ttm_tt_unpin_userptr(ttm); + radeon_ttm_tt_unpin_userptr(bdev, ttm); + gtt->bound = false; } -static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) +static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct radeon_ttm_tt *gtt = (void *)ttm; - ttm_dma_tt_fini(&gtt->ttm); + radeon_ttm_backend_unbind(bdev, ttm); + ttm_tt_destroy_common(bdev, ttm); + + ttm_tt_fini(&gtt->ttm); kfree(gtt); } -static struct ttm_backend_func radeon_backend_func = { - .bind = &radeon_ttm_backend_bind, - .unbind = &radeon_ttm_backend_unbind, - .destroy = &radeon_ttm_backend_destroy, -}; - static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { struct radeon_device *rdev; struct radeon_ttm_tt *gtt; + enum ttm_caching caching; + struct radeon_bo *rbo; + + rbo = container_of(bo, struct radeon_bo, tbo); rdev = radeon_get_rdev(bo->bdev); #if IS_ENABLED(CONFIG_AGP) @@ -594,26 +627,40 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, if (gtt == NULL) { return NULL; } - gtt->ttm.ttm.func = &radeon_backend_func; - if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { + + if (rbo->flags & RADEON_GEM_GTT_UC) + caching = ttm_uncached; + else if (rbo->flags & RADEON_GEM_GTT_WC) + caching = ttm_write_combined; + else + caching = ttm_cached; + + if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) { kfree(gtt); return NULL; } - return &gtt->ttm.ttm; + return &gtt->ttm; } -static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm) +static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev, + struct ttm_tt *ttm) { - if (!ttm || ttm->func != &radeon_backend_func) +#if IS_ENABLED(CONFIG_AGP) + if (rdev->flags & RADEON_IS_AGP) + return NULL; +#endif + + if (!ttm) return NULL; - return (struct radeon_ttm_tt *)ttm; + return container_of(ttm, struct radeon_ttm_tt, ttm); } -static int radeon_ttm_tt_populate(struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx) +static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); - struct radeon_device *rdev; + struct radeon_device *rdev = radeon_get_rdev(bdev); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (gtt && gtt->userptr) { @@ -622,37 +669,22 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm, return -ENOMEM; ttm->page_flags |= TTM_PAGE_FLAG_SG; - ttm->state = tt_unbound; return 0; } if (slave && ttm->sg) { drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); - ttm->state = tt_unbound; return 0; } - rdev = radeon_get_rdev(ttm->bdev); -#if IS_ENABLED(CONFIG_AGP) - if (rdev->flags & RADEON_IS_AGP) { - return ttm_agp_tt_populate(ttm, ctx); - } -#endif - -#ifdef CONFIG_SWIOTLB - if (rdev->need_swiotlb && swiotlb_nr_tbl()) { - return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx); - } -#endif - - return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx); + return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx); }
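/*
 * radeon_ttm_tt_populate() above now hands everything that is not a userptr
 * or dma-buf import to the per-device TTM page pool, replacing the AGP,
 * swiotlb and global page-allocator special cases. The pool is embedded in
 * the ttm_bo_device and initialized once (see the ttm_pool_init() call in
 * radeon_ttm_init() further below). A minimal sketch of the resulting
 * callback pair for a driver without such special cases; the mydrv_* names
 * are placeholders, not radeon code:
 */
static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
				 struct ttm_tt *ttm,
				 struct ttm_operation_ctx *ctx)
{
	/* allocate pages and DMA addresses from the per-device pool */
	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void mydrv_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
				    struct ttm_tt *ttm)
{
	/* give the pages back to the pool */
	ttm_pool_free(&bdev->pool, ttm);
}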
-static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) +static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct radeon_device *rdev; - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + struct radeon_device *rdev = radeon_get_rdev(bdev); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); if (gtt && gtt->userptr) { @@ -664,28 +696,14 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) if (slave) return; - rdev = radeon_get_rdev(ttm->bdev); -#if IS_ENABLED(CONFIG_AGP) - if (rdev->flags & RADEON_IS_AGP) { - ttm_agp_tt_unpopulate(ttm); - return; - } -#endif - -#ifdef CONFIG_SWIOTLB - if (rdev->need_swiotlb && swiotlb_nr_tbl()) { - ttm_dma_unpopulate(&gtt->ttm, rdev->dev); - return; - } -#endif - - ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm); + return ttm_pool_free(&rdev->mman.bdev.pool, ttm); } -int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, +int radeon_ttm_tt_set_userptr(struct radeon_device *rdev, + struct ttm_tt *ttm, uint64_t addr, uint32_t flags) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); if (gtt == NULL) return -EINVAL; @@ -696,9 +714,69 @@ int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } -bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) +bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); + if (rdev->flags & RADEON_IS_AGP) + return ttm_agp_is_bound(ttm); +#endif + return radeon_ttm_backend_is_bound(ttm); +} + +static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, + struct ttm_resource *bo_mem) +{ +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); +#endif + + if (!bo_mem) + return -EINVAL; +#if IS_ENABLED(CONFIG_AGP) + if (rdev->flags & RADEON_IS_AGP) + return ttm_agp_bind(ttm, bo_mem); +#endif + + return radeon_ttm_backend_bind(bdev, ttm, bo_mem); +} + +static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); + + if (rdev->flags & RADEON_IS_AGP) { + ttm_agp_unbind(ttm); + return; + } +#endif + radeon_ttm_backend_unbind(bdev, ttm); +} + +static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct radeon_device *rdev = radeon_get_rdev(bdev); + + if (rdev->flags & RADEON_IS_AGP) { + ttm_agp_unbind(ttm); + ttm_tt_destroy_common(bdev, ttm); + ttm_agp_destroy(ttm); + return; + } +#endif + radeon_ttm_backend_destroy(bdev, ttm); +} + +bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, + struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); if (gtt == NULL) return false; @@ -706,9 +784,10 @@ bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) return !!gtt->userptr; } -bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) +bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, + struct ttm_tt *ttm) { - struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); if (gtt == NULL) return false; @@ -716,16 +795,22 @@ bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY); } +static void +radeon_bo_delete_mem_notify(struct
ttm_buffer_object *bo) +{ + radeon_bo_move_notify(bo, false, NULL); +} + static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, + .ttm_tt_destroy = &radeon_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, .verify_access = &radeon_verify_access, - .move_notify = &radeon_bo_move_notify, - .fault_reserve_notify = &radeon_bo_fault_reserve_notify, + .delete_mem_notify = &radeon_bo_delete_mem_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, }; @@ -734,10 +819,10 @@ int radeon_ttm_init(struct radeon_device *rdev) int r; /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&rdev->mman.bdev, - &radeon_bo_driver, + r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev, rdev->ddev->anon_inode->i_mapping, rdev->ddev->vma_offset_manager, + rdev->need_swiotlb, dma_addressing_limited(&rdev->pdev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); @@ -745,6 +830,9 @@ int radeon_ttm_init(struct radeon_device *rdev) } rdev->mman.initialized = true; + ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb, + dma_addressing_limited(&rdev->pdev->dev)); + r = radeon_ttm_init_vram(rdev); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); @@ -826,17 +914,29 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf) { - struct ttm_buffer_object *bo; - struct radeon_device *rdev; + struct ttm_buffer_object *bo = vmf->vma->vm_private_data; + struct radeon_device *rdev = radeon_get_rdev(bo->bdev); vm_fault_t ret; - bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; - if (bo == NULL) - return VM_FAULT_NOPAGE; - - rdev = radeon_get_rdev(bo->bdev); down_read(&rdev->pm.mclk_lock); - ret = ttm_bo_vm_fault(vmf); + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + goto unlock_mclk; + + ret = radeon_bo_fault_reserve_notify(bo); + if (ret) + goto unlock_resv; + + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT, 1); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + goto unlock_mclk; + +unlock_resv: + dma_resv_unlock(bo->base.resv); + +unlock_mclk: up_read(&rdev->pm.mclk_lock); return ret; } @@ -880,6 +980,14 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data) return 0; } +static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + + return ttm_pool_debugfs(&rdev->mman.bdev.pool, m); +} static int ttm_pl_vram = TTM_PL_VRAM; static int ttm_pl_tt = TTM_PL_TT; @@ -887,10 +995,7 @@ static int ttm_pl_tt = TTM_PL_TT; static struct drm_info_list radeon_ttm_debugfs_list[] = { {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram}, {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt}, - {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, -#ifdef CONFIG_SWIOTLB - {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} -#endif + {"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL} }; static int radeon_ttm_vram_open(struct inode *inode, struct file *filep) @@ -1018,11 +1123,6 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) count = ARRAY_SIZE(radeon_ttm_debugfs_list); -#ifdef CONFIG_SWIOTLB - if 
(!(rdev->need_swiotlb && swiotlb_nr_tbl())) - --count; -#endif - return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count); #else diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 010fb3b7a205..27b14eff532c 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -942,7 +942,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; - if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) + if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm)) bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; if (mem) { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index f65d1489dc50..b47e74421e34 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -22,11 +22,11 @@ config DRM_RCAR_CMM Enable support for R-Car Color Management Module (CMM). config DRM_RCAR_DW_HDMI - tristate "R-Car DU Gen3 HDMI Encoder Support" + tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support" depends on DRM && OF select DRM_DW_HDMI help - Enable support for R-Car Gen3 internal HDMI encoder. + Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder. config DRM_RCAR_LVDS tristate "R-Car DU LVDS Encoder Support" @@ -49,3 +49,4 @@ config DRM_RCAR_VSP config DRM_RCAR_WRITEBACK bool default y if ARM64 + depends on DRM_RCAR_DU diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index fe86a3e67757..b5fb941e0f53 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -682,20 +682,23 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) */ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state); struct drm_encoder *encoder; int ret; - ret = rcar_du_cmm_check(crtc, state); + ret = rcar_du_cmm_check(crtc, crtc_state); if (ret) return ret; /* Store the routes from the CRTC output to the DU outputs. */ rstate->outputs = 0; - drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) { + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc_state->encoder_mask) { struct rcar_du_encoder *renc; /* Skip the writeback encoder. 
*/ @@ -710,7 +713,7 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc, } static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state); @@ -748,8 +751,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, } static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state); struct rcar_du_device *rcdu = rcrtc->dev; @@ -780,7 +785,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, } static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); @@ -809,7 +814,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, } static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct drm_device *dev = rcrtc->crtc.dev; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index f53b0ec71085..447be991fa25 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -186,6 +186,35 @@ static const struct rcar_du_device_info rcar_du_r8a774c0_info = { .lvds_clk_mask = BIT(1) | BIT(0), }; +static const struct rcar_du_device_info rcar_du_r8a774e1_info = { + .gen = 3, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, + .channels_mask = BIT(3) | BIT(1) | BIT(0), + .routes = { + /* + * R8A774E1 has one RGB output, one LVDS output and one HDMI + * output. + */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(2), + .port = 0, + }, + [RCAR_DU_OUTPUT_HDMI0] = { + .possible_crtcs = BIT(1), + .port = 1, + }, + [RCAR_DU_OUTPUT_LVDS0] = { + .possible_crtcs = BIT(0), + .port = 2, + }, + }, + .num_lvds = 1, + .dpll_mask = BIT(1), +}; + static const struct rcar_du_device_info rcar_du_r8a7779_info = { .gen = 1, .features = RCAR_DU_FEATURE_INTERLACED @@ -216,8 +245,9 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { .channels_mask = BIT(2) | BIT(1) | BIT(0), .routes = { /* - * R8A7790 has one RGB output, two LVDS outputs and one - * (currently unsupported) TCON output. + * R8A7742 and R8A7790 each have one RGB output and two LVDS + * outputs. Additionally R8A7790 supports one TCON output + * (currently unsupported by the driver). 
*/ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(2) | BIT(1) | BIT(0), @@ -443,6 +473,7 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = { }; static const struct of_device_id rcar_du_of_table[] = { + { .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info }, { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info }, { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info }, { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, @@ -450,6 +481,7 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info }, { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info }, { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info }, + { .compatible = "renesas,du-r8a774e1", .data = &rcar_du_r8a774e1_info }, { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info }, @@ -458,6 +490,7 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info }, { .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info }, { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info }, + { .compatible = "renesas,du-r8a77961", .data = &rcar_du_r8a7796_info }, { .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info }, { .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info }, { .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info }, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 482329102f19..72dda446355f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -40,6 +40,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_RGB565, .bpp = 16, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_NONE, }, { @@ -47,6 +48,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_ARGB555, .bpp = 16, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB, .edf = PnDDCR4_EDF_NONE, }, { @@ -61,6 +63,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_XBGR32, .bpp = 32, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_RGB888, }, { @@ -68,6 +71,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_ABGR32, .bpp = 32, .planes = 1, + .hsub = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_ARGB8888, }, { @@ -75,6 +79,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_UYVY, .bpp = 16, .planes = 1, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -82,6 +87,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_YUYV, .bpp = 16, .planes = 1, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -89,6 +95,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_NV12M, .bpp = 12, .planes = 2, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -96,6 +103,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = 
V4L2_PIX_FMT_NV21M, .bpp = 12, .planes = 2, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { @@ -103,6 +111,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_NV16M, .bpp = 16, .planes = 2, + .hsub = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, @@ -115,156 +124,187 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { .v4l2 = V4L2_PIX_FMT_RGB332, .bpp = 8, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ARGB4444, .v4l2 = V4L2_PIX_FMT_ARGB444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XRGB4444, .v4l2 = V4L2_PIX_FMT_XRGB444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBA4444, .v4l2 = V4L2_PIX_FMT_RGBA444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBX4444, .v4l2 = V4L2_PIX_FMT_RGBX444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ABGR4444, .v4l2 = V4L2_PIX_FMT_ABGR444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XBGR4444, .v4l2 = V4L2_PIX_FMT_XBGR444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRA4444, .v4l2 = V4L2_PIX_FMT_BGRA444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRX4444, .v4l2 = V4L2_PIX_FMT_BGRX444, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBA5551, .v4l2 = V4L2_PIX_FMT_RGBA555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBX5551, .v4l2 = V4L2_PIX_FMT_RGBX555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ABGR1555, .v4l2 = V4L2_PIX_FMT_ABGR555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XBGR1555, .v4l2 = V4L2_PIX_FMT_XBGR555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRA5551, .v4l2 = V4L2_PIX_FMT_BGRA555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRX5551, .v4l2 = V4L2_PIX_FMT_BGRX555, .bpp = 16, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGR888, .v4l2 = V4L2_PIX_FMT_RGB24, .bpp = 24, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGB888, .v4l2 = V4L2_PIX_FMT_BGR24, .bpp = 24, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBA8888, .v4l2 = V4L2_PIX_FMT_BGRA32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_RGBX8888, .v4l2 = V4L2_PIX_FMT_BGRX32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_ABGR8888, .v4l2 = V4L2_PIX_FMT_RGBA32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_XBGR8888, .v4l2 = V4L2_PIX_FMT_RGBX32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRA8888, .v4l2 = V4L2_PIX_FMT_ARGB32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_BGRX8888, .v4l2 = V4L2_PIX_FMT_XRGB32, .bpp = 32, .planes = 1, + .hsub = 1, }, { .fourcc = DRM_FORMAT_YVYU, .v4l2 = V4L2_PIX_FMT_YVYU, .bpp = 16, .planes = 1, + .hsub = 2, }, { .fourcc = DRM_FORMAT_NV61, .v4l2 = V4L2_PIX_FMT_NV61M, .bpp = 16, .planes = 2, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YUV420, .v4l2 = V4L2_PIX_FMT_YUV420M, .bpp = 12, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YVU420, .v4l2 = V4L2_PIX_FMT_YVU420M, .bpp = 12, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YUV422, .v4l2 = V4L2_PIX_FMT_YUV422M, .bpp = 16, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YVU422, .v4l2 = V4L2_PIX_FMT_YVU422M, .bpp = 16, .planes = 3, + .hsub = 2, }, { .fourcc = DRM_FORMAT_YUV444, .v4l2 = V4L2_PIX_FMT_YUV444M, .bpp = 24, .planes = 3, + .hsub = 1, }, { .fourcc = DRM_FORMAT_YVU444, .v4l2 = V4L2_PIX_FMT_YVU444M, .bpp = 24, .planes = 3, + .hsub = 
1, }, }; @@ -311,6 +351,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, { struct rcar_du_device *rcdu = dev->dev_private; const struct rcar_du_format_info *format; + unsigned int chroma_pitch; unsigned int max_pitch; unsigned int align; unsigned int i; @@ -353,10 +394,19 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-EINVAL); } + /* + * Calculate the chroma plane(s) pitch using the horizontal subsampling + * factor. For semi-planar formats, the U and V planes are combined, the + * pitch must thus be doubled. + */ + chroma_pitch = mode_cmd->pitches[0] / format->hsub; + if (format->planes == 2) + chroma_pitch *= 2; + for (i = 1; i < format->planes; ++i) { - if (mode_cmd->pitches[i] != mode_cmd->pitches[0]) { + if (mode_cmd->pitches[i] != chroma_pitch) { dev_dbg(dev->dev, - "luma and chroma pitches do not match\n"); + "luma and chroma pitches are not compatible\n"); return ERR_PTR(-EINVAL); } } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h index 0346504d8c59..8f5fff176754 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h @@ -22,6 +22,7 @@ struct rcar_du_format_info { u32 v4l2; unsigned int bpp; unsigned int planes; + unsigned int hsub; unsigned int pnmr; unsigned int edf; }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index f1a81c9b184d..f6a69aa116e6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -13,6 +13,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> @@ -197,9 +198,8 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, goto fail; ret = vsp1_du_map_sg(vsp->vsp, sgt); - if (!ret) { + if (ret) { sg_free_table(sgt); - ret = -ENOMEM; goto fail; } } @@ -279,7 +279,7 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane, if (plane->state->visible) rcar_du_vsp_plane_setup(rplane); - else + else if (old_state->crtc) vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe, rplane->index, NULL); } @@ -341,6 +341,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = { .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state, }; +static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res) +{ + struct rcar_du_vsp *vsp = res; + + put_device(vsp->vsp); +} + int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, unsigned int crtcs) { @@ -357,6 +364,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, vsp->vsp = &pdev->dev; + ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp); + if (ret < 0) + return ret; + ret = vsp1_du_init(vsp->vsp); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index bced729a96fe..70dbbe44bb23 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -978,11 +978,13 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = { }; static const struct of_device_id rcar_lvds_of_table[] = { + { .compatible = "renesas,r8a7742-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a774a1-lvds", .data = 
&rcar_lvds_gen3_info }, { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info }, + { .compatible = "renesas,r8a774e1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 0f3eb392fe39..b7654f5e4225 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -212,15 +212,10 @@ static const struct file_operations rockchip_drm_driver_fops = { static struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .lastclose = drm_fb_helper_lastclose, - .gem_vm_ops = &drm_gem_cma_vm_ops, - .gem_free_object_unlocked = rockchip_gem_free_object, .dumb_create = rockchip_gem_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table, .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table, - .gem_prime_vmap = rockchip_gem_prime_vmap, - .gem_prime_vunmap = rockchip_gem_prime_vunmap, .gem_prime_mmap = rockchip_gem_mmap_buf, .fops = &rockchip_drm_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index b9275ba7c5a5..7d5ebb10323b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -10,6 +10,7 @@ #include <drm/drm.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_cma_helper.h> #include <drm/drm_prime.h> #include <drm/drm_vma_manager.h> @@ -36,8 +37,8 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) rk_obj->dma_addr = rk_obj->mm.start; - ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl, - rk_obj->sgt->nents, prot); + ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt, + prot); if (ret < rk_obj->base.size) { DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", ret, rk_obj->base.size); @@ -85,7 +86,8 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT; - rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev, + rk_obj->pages, rk_obj->num_pages); if (IS_ERR(rk_obj->sgt)) { ret = PTR_ERR(rk_obj->sgt); goto err_put_pages; @@ -98,11 +100,10 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) * TODO: Replace this by drm_clflush_sg() once it can be implemented * without relying on symbols that are not exported. 
*/ - for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i) + for_each_sgtable_sg(rk_obj->sgt, s, i) sg_dma_address(s) = sg_phys(s); - dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE); return 0; @@ -295,6 +296,14 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) kfree(rk_obj); } +static const struct drm_gem_object_funcs rockchip_gem_object_funcs = { + .free = rockchip_gem_free_object, + .get_sg_table = rockchip_gem_prime_get_sg_table, + .vmap = rockchip_gem_prime_vmap, + .vunmap = rockchip_gem_prime_vunmap, + .vm_ops = &drm_gem_cma_vm_ops, +}; + static struct rockchip_gem_object * rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size) { @@ -309,6 +318,8 @@ static struct rockchip_gem_object * obj = &rk_obj->base; + obj->funcs = &rockchip_gem_object_funcs; + drm_gem_object_init(drm, obj, size); return rk_obj; @@ -337,7 +348,7 @@ err_free_rk_obj: } /* - * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked + * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free * callback function */ void rockchip_gem_free_object(struct drm_gem_object *obj) @@ -350,8 +361,8 @@ void rockchip_gem_free_object(struct drm_gem_object *obj) if (private->domain) { rockchip_gem_iommu_unmap(rk_obj); } else { - dma_unmap_sg(drm->dev, rk_obj->sgt->sgl, - rk_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, rk_obj->sgt, + DMA_BIDIRECTIONAL, 0); } drm_prime_gem_destroy(obj, rk_obj->sgt); } else { @@ -442,7 +453,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) int ret; if (rk_obj->pages) - return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) @@ -460,23 +471,6 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) return sgt; } -static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt, - int count) -{ - struct scatterlist *s; - dma_addr_t expected = sg_dma_address(sgt->sgl); - unsigned int i; - unsigned long size = 0; - - for_each_sg(sgt->sgl, s, count, i) { - if (sg_dma_address(s) != expected) - break; - expected = sg_dma_address(s) + sg_dma_len(s); - size += sg_dma_len(s); - } - return size; -} - static int rockchip_gem_iommu_map_sg(struct drm_device *drm, struct dma_buf_attachment *attach, @@ -493,15 +487,13 @@ rockchip_gem_dma_map_sg(struct drm_device *drm, struct sg_table *sg, struct rockchip_gem_object *rk_obj) { - int count = dma_map_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); - if (!count) - return -EINVAL; + int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); + if (err) + return err; - if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) { + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { DRM_ERROR("failed to map sg_table to contiguous linear address.\n"); - dma_unmap_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); return -EINVAL; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index c80f7d9fd13f..8cd39fca81a3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -693,7 +693,7 @@ static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled) } static void 
vop_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct vop *vop = to_vop(crtc); @@ -1246,8 +1246,10 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, } static void vop_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct vop *vop = to_vop(crtc); /* @@ -1260,8 +1262,10 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, } static void vop_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct vop *vop = to_vop(crtc); const struct vop_data *vop_data = vop->data; struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); @@ -1413,8 +1417,10 @@ static void vop_wait_for_irq_handler(struct vop *vop) } static int vop_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct vop *vop = to_vop(crtc); struct drm_plane *plane; struct drm_plane_state *plane_state; @@ -1458,8 +1464,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc, } static void vop_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct drm_atomic_state *old_state = old_crtc_state->state; struct drm_plane_state *old_plane_state, *new_plane_state; struct vop *vop = to_vop(crtc); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 146380118962..f8ec277a6aa8 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -73,6 +73,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, init_completion(&entity->entity_idle); + /* We start in an idle state. 
*/ + complete(&entity->entity_idle); + spin_lock_init(&entity->rq_lock); spsc_queue_init(&entity->job_queue); diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c index bd990d178765..1d696ec001cf 100644 --- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c +++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c @@ -5,6 +5,8 @@ #define PREFIX_STR "[drm_dp_mst_helper]" +#include <linux/random.h> + #include <drm/drm_dp_mst_helper.h> #include <drm/drm_print.h> @@ -237,6 +239,21 @@ int igt_dp_mst_sideband_msg_req_decode(void *unused) in.u.i2c_write.bytes = data; DO_TEST(); + in.req_type = DP_QUERY_STREAM_ENC_STATUS; + in.u.enc_status.stream_id = 1; + DO_TEST(); + get_random_bytes(in.u.enc_status.client_id, + sizeof(in.u.enc_status.client_id)); + DO_TEST(); + in.u.enc_status.stream_event = 3; + DO_TEST(); + in.u.enc_status.valid_stream_event = 0; + DO_TEST(); + in.u.enc_status.stream_behavior = 3; + DO_TEST(); + in.u.enc_status.valid_stream_behavior = 1; + DO_TEST(); + #undef DO_TEST return 0; } diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 6f37c104c46f..409795786f03 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -23,7 +23,7 @@ #include "sti_vtg.h" static void sti_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct sti_mixer *mixer = to_sti_mixer(crtc); @@ -35,7 +35,7 @@ static void sti_crtc_atomic_enable(struct drm_crtc *crtc, } static void sti_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct sti_mixer *mixer = to_sti_mixer(crtc); @@ -133,7 +133,7 @@ sti_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void sti_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct drm_device *drm_dev = crtc->dev; struct sti_mixer *mixer = to_sti_mixer(crtc); diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 6e28f707092f..3980677435cb 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -420,7 +420,7 @@ static void ltdc_crtc_update_clut(struct drm_crtc *crtc) } static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); struct drm_device *ddev = crtc->dev; @@ -442,7 +442,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc, } static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); struct drm_device *ddev = crtc->dev; @@ -596,7 +596,7 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct ltdc_device *ldev = crtc_to_ltdc(crtc); struct drm_device *ddev = crtc->dev; diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 2f26f85ef538..77497b45f9a2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_graph.h> +#include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/reset.h> @@ -810,8 +811,13 @@ static int 
sun4i_backend_bind(struct device *dev, struct device *master, * because of an old DT, we need to set the DMA offset by hand * on our device since the RAM mapping is at 0 for the DMA bus, * unlike the CPU. + * + * XXX(hch): this has no business in a driver and needs to move + * to the device tree. */ - drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET; + ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G); + if (ret) + return ret; } backend->engine.node = dev->of_node; diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 3a153648b369..45d9eb552d86 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -15,6 +15,7 @@ #include <video/videomode.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_modes.h> @@ -45,21 +46,25 @@ static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc) } static int sun4i_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct sunxi_engine *engine = scrtc->engine; int ret = 0; if (engine && engine->ops && engine->ops->atomic_check) - ret = engine->ops->atomic_check(engine, state); + ret = engine->ops->atomic_check(engine, crtc_state); return ret; } static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct drm_device *dev = crtc->dev; struct sunxi_engine *engine = scrtc->engine; @@ -79,7 +84,7 @@ static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, } static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct drm_pending_vblank_event *event = crtc->state->event; @@ -101,7 +106,7 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, } static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); @@ -122,7 +127,7 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, } static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc); struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c index b51cc685c13a..edb60ae0a9b7 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.c +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c @@ -407,6 +407,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend, struct drm_framebuffer *fb = state->fb; const struct drm_format_info *format = fb->format; uint64_t modifier = fb->modifier; + unsigned int ch1_phase_idx; u32 out_fmt_val; u32 in_fmt_val, in_mod_val, in_ps_val; unsigned int i; @@ -442,18 +443,19 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend, * I have no idea what this does exactly, but it seems to be * related to the scaler FIR filter phase parameters. 
*/ + ch1_phase_idx = (format->num_planes > 1) ? 1 : 0; regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG, - frontend->data->ch_phase[0].horzphase); + frontend->data->ch_phase[0]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG, - frontend->data->ch_phase[1].horzphase); + frontend->data->ch_phase[ch1_phase_idx]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG, - frontend->data->ch_phase[0].vertphase[0]); + frontend->data->ch_phase[0]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG, - frontend->data->ch_phase[1].vertphase[0]); + frontend->data->ch_phase[ch1_phase_idx]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, - frontend->data->ch_phase[0].vertphase[1]); + frontend->data->ch_phase[0]); regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, - frontend->data->ch_phase[1].vertphase[1]); + frontend->data->ch_phase[ch1_phase_idx]); /* * Checking the input format is sufficient since we currently only @@ -687,30 +689,12 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = { }; static const struct sun4i_frontend_data sun4i_a10_frontend = { - .ch_phase = { - { - .horzphase = 0, - .vertphase = { 0, 0 }, - }, - { - .horzphase = 0xfc000, - .vertphase = { 0xfc000, 0xfc000 }, - }, - }, + .ch_phase = { 0x000, 0xfc000 }, .has_coef_rdy = true, }; static const struct sun4i_frontend_data sun8i_a33_frontend = { - .ch_phase = { - { - .horzphase = 0x400, - .vertphase = { 0x400, 0x400 }, - }, - { - .horzphase = 0x400, - .vertphase = { 0x400, 0x400 }, - }, - }, + .ch_phase = { 0x400, 0xfc400 }, .has_coef_access_ctrl = true, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h index 0c382c1ddb0f..2e7b76e50c2b 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.h +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h @@ -115,11 +115,7 @@ struct reset_control; struct sun4i_frontend_data { bool has_coef_access_ctrl; bool has_coef_rdy; - - struct { - u32 horzphase; - u32 vertphase[2]; - } ch_phase[2]; + u32 ch_phase[2]; }; struct sun4i_frontend { diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h index f42441b1b14d..a55a38ad849c 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.h +++ b/drivers/gpu/drm/sun4i/sun8i_csc.h @@ -12,7 +12,7 @@ struct sun8i_mixer; /* VI channel CSC units offsets */ #define CCSC00_OFFSET 0xAA050 -#define CCSC01_OFFSET 0xFA000 +#define CCSC01_OFFSET 0xFA050 #define CCSC10_OFFSET 0xA0000 #define CCSC11_OFFSET 0xF0000 diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 4b59806b30df..5b42cf25cc86 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -307,7 +307,7 @@ static const struct regmap_config sun8i_mixer_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = 0xbfffc, /* guessed */ + .max_register = 0xffffc, /* guessed */ }; static int sun8i_mixer_of_get_id(struct device_node *node) diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 424ad60b4f38..2d86627b0d4e 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1742,7 +1742,7 @@ static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout) } static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; @@ -1799,10 +1799,10 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, } static void 
tegra_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_display_mode *mode = &crtc->state->adjusted_mode; - struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc_state *crtc_state = to_dc_state(crtc->state); struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; int err; @@ -1882,7 +1882,7 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); /* apply PLL and pixel clock changes */ - tegra_dc_commit_state(dc, state); + tegra_dc_commit_state(dc, crtc_state); /* program display mode */ tegra_dc_set_timings(dc, mode); @@ -1918,7 +1918,7 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, } static void tegra_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { unsigned long flags; @@ -1937,17 +1937,17 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc, } static void tegra_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { - struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc_state *crtc_state = to_dc_state(crtc->state); struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; - value = state->planes << 8 | GENERAL_UPDATE; + value = crtc_state->planes << 8 | GENERAL_UPDATE; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); - value = state->planes | GENERAL_ACT_REQ; + value = crtc_state->planes | GENERAL_ACT_REQ; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index ba9d1c3e7cac..f0f581cd345e 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -858,12 +858,8 @@ static struct drm_driver tegra_drm_driver = { .debugfs_init = tegra_debugfs_init, #endif - .gem_free_object_unlocked = tegra_bo_free_object, - .gem_vm_ops = &tegra_bo_vm_ops, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = tegra_gem_prime_export, .gem_prime_import = tegra_gem_prime_import, .dumb_create = tegra_bo_dumb_create, diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index b25443255be6..f38de08e0c95 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -12,6 +12,7 @@ #include <linux/gpio/consumer.h> #include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> @@ -116,6 +117,7 @@ struct tegra_output { struct device_node *of_node; struct device *dev; + struct drm_bridge *bridge; struct drm_panel *panel; struct i2c_adapter *ddc; const struct edid *edid; diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 3387de79718b..5691ef1b0e58 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -694,11 +694,11 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi) DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3); tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3); - err = tegra_mipi_calibrate(dsi->mipi); + err = tegra_mipi_start_calibration(dsi->mipi); if (err < 0) return err; - return tegra_mipi_wait(dsi->mipi); + return tegra_mipi_finish_calibration(dsi->mipi); } static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk, diff --git a/drivers/gpu/drm/tegra/gem.c 
b/drivers/gpu/drm/tegra/gem.c index 723df142a981..26af8daa9a16 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -98,8 +98,8 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, * the SG table needs to be copied to avoid overwriting any * other potential users of the original SG table. */ - err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents, - GFP_KERNEL); + err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, + obj->sgt->orig_nents, GFP_KERNEL); if (err < 0) goto free; } else { @@ -132,24 +132,29 @@ static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt) static void *tegra_bo_mmap(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); + struct dma_buf_map map; + int ret; - if (obj->vaddr) + if (obj->vaddr) { return obj->vaddr; - else if (obj->gem.import_attach) - return dma_buf_vmap(obj->gem.import_attach->dmabuf); - else + } else if (obj->gem.import_attach) { + ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map); + return ret ? NULL : map.vaddr; + } else { return vmap(obj->pages, obj->num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + } } static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr); if (obj->vaddr) return; else if (obj->gem.import_attach) - dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr); + dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map); else vunmap(addr); } @@ -196,8 +201,7 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) bo->iova = bo->mm->start; - bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl, - bo->sgt->nents, prot); + bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot); if (!bo->size) { dev_err(tegra->drm->dev, "failed to map buffer\n"); err = -ENOMEM; @@ -231,6 +235,12 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) return 0; } +static const struct drm_gem_object_funcs tegra_gem_object_funcs = { + .free = tegra_bo_free_object, + .export = tegra_gem_prime_export, + .vm_ops = &tegra_bo_vm_ops, +}; + static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, size_t size) { @@ -241,6 +251,8 @@ static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm, if (!bo) return ERR_PTR(-ENOMEM); + bo->gem.funcs = &tegra_gem_object_funcs; + host1x_bo_init(&bo->base, &tegra_bo_ops); size = round_up(size, PAGE_SIZE); @@ -264,8 +276,7 @@ free: static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) { if (bo->pages) { - dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); + dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0); drm_gem_put_pages(&bo->gem, bo->pages, true, true); sg_free_table(bo->sgt); kfree(bo->sgt); @@ -284,18 +295,15 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) bo->num_pages = bo->gem.size >> PAGE_SHIFT; - bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); + bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages); if (IS_ERR(bo->sgt)) { err = PTR_ERR(bo->sgt); goto put_pages; } - err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); - if (err == 0) { - err = -EFAULT; + err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0); + if (err) goto free_sgt; - } return 0; @@ -571,7 +579,7 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, goto free; } - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, 
dir) == 0) + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto free; return sgt; @@ -590,7 +598,7 @@ static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, struct tegra_bo *bo = to_tegra_bo(gem); if (bo->pages) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); @@ -609,8 +617,7 @@ static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf, struct drm_device *drm = gem->dev; if (bo->pages) - dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); + dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE); return 0; } @@ -623,8 +630,7 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf, struct drm_device *drm = gem->dev; if (bo->pages) - dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE); return 0; } @@ -641,15 +647,17 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) return __tegra_gem_mmap(gem, vma); } -static void *tegra_gem_prime_vmap(struct dma_buf *buf) +static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map) { struct drm_gem_object *gem = buf->priv; struct tegra_bo *bo = to_tegra_bo(gem); - return bo->vaddr; + dma_buf_map_set_vaddr(map, bo->vaddr); + + return 0; } -static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr) +static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map) { } diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index a3adb9e4debf..5a4fd0dbf4cf 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -5,6 +5,7 @@ */ #include <drm/drm_atomic_helper.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_simple_kms_helper.h> @@ -99,27 +100,38 @@ int tegra_output_probe(struct tegra_output *output) if (!output->of_node) output->of_node = output->dev->of_node; + err = drm_of_find_panel_or_bridge(output->of_node, -1, -1, + &output->panel, &output->bridge); + if (err && err != -ENODEV) + return err; + panel = of_parse_phandle(output->of_node, "nvidia,panel", 0); if (panel) { + /* + * Don't mix nvidia,panel phandle with the graph in a + * device-tree. 
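The tegra_bo_mmap()/tegra_bo_munmap() and prime vmap hunks above switch to the struct dma_buf_map interface. A minimal sketch of the new contract, with hypothetical example_* names; dma_buf_vmap() now returns an errno and fills in the map instead of returning a bare pointer:

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>

static void *example_vmap(struct dma_buf *buf)
{
	struct dma_buf_map map;

	if (dma_buf_vmap(buf, &map))
		return NULL;

	/* assumes a kernel-virtual mapping; iomem would need map.is_iomem */
	return map.vaddr;
}

static void example_vunmap(struct dma_buf *buf, void *vaddr)
{
	/* rebuild the map from the pointer to balance example_vmap() */
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(buf, &map);
}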
+ */ + WARN_ON(output->panel || output->bridge); + output->panel = of_drm_find_panel(panel); + of_node_put(panel); + if (IS_ERR(output->panel)) return PTR_ERR(output->panel); - - of_node_put(panel); } output->edid = of_get_property(output->of_node, "nvidia,edid", &size); ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0); if (ddc) { - output->ddc = of_find_i2c_adapter_by_node(ddc); + output->ddc = of_get_i2c_adapter_by_node(ddc); + of_node_put(ddc); + if (!output->ddc) { err = -EPROBE_DEFER; of_node_put(ddc); return err; } - - of_node_put(ddc); } output->hpd_gpio = devm_gpiod_get_from_of_node(output->dev, @@ -173,7 +185,7 @@ void tegra_output_remove(struct tegra_output *output) free_irq(output->hpd_irq, output); if (output->ddc) - put_device(&output->ddc->dev); + i2c_put_adapter(output->ddc); } int tegra_output_init(struct drm_device *drm, struct tegra_output *output) diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 4cd0461cc508..539d14935728 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -131,12 +131,9 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) } if (sgt) { - err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - if (err == 0) { - err = -ENOMEM; + err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); + if (err) goto unpin; - } /* * The display controller needs contiguous memory, so @@ -144,7 +141,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) * map its SG table to a single contiguous chunk of * I/O virtual memory. */ - if (err > 1) { + if (sgt->nents > 1) { err = -EINVAL; goto unpin; } @@ -166,8 +163,7 @@ unpin: struct sg_table *sgt = state->sgt[i]; if (sgt) - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); + dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; @@ -186,8 +182,7 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) struct sg_table *sgt = state->sgt[i]; if (sgt) - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); + dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 0562a7eb793f..4142a56ca764 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -7,7 +7,7 @@ #include <linux/clk.h> #include <drm/drm_atomic_helper.h> -#include <drm/drm_panel.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_simple_kms_helper.h> #include "drm.h" @@ -85,45 +85,13 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, tegra_dc_writel(dc, table[i].value, table[i].offset); } -static const struct drm_connector_funcs tegra_rgb_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .detect = tegra_output_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = tegra_output_connector_destroy, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static enum drm_mode_status -tegra_rgb_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - /* - * FIXME: For now, always assume that the mode is okay. There are - * unresolved issues with clk_round_rate(), which doesn't always - * reliably report whether a frequency can be set or not. 
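The dma_map_sg() to dma_map_sgtable() conversions in gem.c and plane.c above all follow one pattern. A short sketch under hypothetical example_* names, showing the new error contract: 0 or a negative errno, with the mapped entry count stored in sgt->nents and the allocation size kept in sgt->orig_nents:

#include <linux/dma-mapping.h>

static int example_map(struct device *dev, struct sg_table *sgt)
{
	int err;

	err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (err)
		return err;	/* no more inventing -ENOMEM for a 0 return */

	/* e.g. enforce contiguity, as tegra_dc_pin() now does */
	if (sgt->nents > 1) {
		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return -EINVAL;
	}

	return 0;
}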
- */ - return MODE_OK; -} - -static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { - .get_modes = tegra_output_connector_get_modes, - .mode_valid = tegra_rgb_connector_mode_valid, -}; - static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) { struct tegra_output *output = encoder_to_output(encoder); struct tegra_rgb *rgb = to_rgb(output); - if (output->panel) - drm_panel_disable(output->panel); - tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); tegra_dc_commit(rgb->dc); - - if (output->panel) - drm_panel_unprepare(output->panel); } static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) @@ -132,9 +100,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) struct tegra_rgb *rgb = to_rgb(output); u32 value; - if (output->panel) - drm_panel_prepare(output->panel); - tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; @@ -156,9 +121,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); tegra_dc_commit(rgb->dc); - - if (output->panel) - drm_panel_enable(output->panel); } static int @@ -267,24 +229,68 @@ int tegra_dc_rgb_remove(struct tegra_dc *dc) int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) { struct tegra_output *output = dc->rgb; + struct drm_connector *connector; int err; if (!dc->rgb) return -ENODEV; - drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - drm_connector_helper_add(&output->connector, - &tegra_rgb_connector_helper_funcs); - output->connector.dpms = DRM_MODE_DPMS_OFF; - drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(&output->encoder, &tegra_rgb_encoder_helper_funcs); - drm_connector_attach_encoder(&output->connector, - &output->encoder); - drm_connector_register(&output->connector); + /* + * Wrap directly-connected panel into DRM bridge in order to let + * DRM core to handle panel for us. + */ + if (output->panel) { + output->bridge = devm_drm_panel_bridge_add(output->dev, + output->panel); + if (IS_ERR(output->bridge)) { + dev_err(output->dev, + "failed to wrap panel into bridge: %pe\n", + output->bridge); + return PTR_ERR(output->bridge); + } + + output->panel = NULL; + } + + /* + * Tegra devices that have LVDS panel utilize LVDS encoder bridge + * for converting up to 28 LCD LVTTL lanes into 5/4 LVDS lanes that + * go to display panel's receiver. + * + * Encoder usually have a power-down control which needs to be enabled + * in order to transmit data to the panel. Historically devices that + * use an older device-tree version didn't model the bridge, assuming + * that encoder is turned ON by default, while today's DRM allows us + * to model LVDS encoder properly. + * + * Newer device-trees utilize LVDS encoder bridge, which provides + * us with a connector and handles the display panel. + * + * For older device-trees we wrapped panel into the panel-bridge. 
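The rgb.c rework here replaces the hand-rolled LVDS connector with the panel-bridge path. A condensed sketch of that flow (error handling elided; this mirrors the code added in the hunk that follows):

if (output->panel) {
	/* wrap the panel so the DRM core drives prepare/enable for us */
	output->bridge = devm_drm_panel_bridge_add(output->dev,
						   output->panel);
	output->panel = NULL;
}

/* attach without letting the bridge create its own connector... */
err = drm_bridge_attach(&output->encoder, output->bridge, NULL,
			DRM_BRIDGE_ATTACH_NO_CONNECTOR);

/* ...and let the helper build one connector for the whole chain */
connector = drm_bridge_connector_init(drm, &output->encoder);
drm_connector_attach_encoder(connector, &output->encoder);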
+ */ + if (output->bridge) { + err = drm_bridge_attach(&output->encoder, output->bridge, + NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (err) { + dev_err(output->dev, "failed to attach bridge: %d\n", + err); + return err; + } + + connector = drm_bridge_connector_init(drm, &output->encoder); + if (IS_ERR(connector)) { + dev_err(output->dev, + "failed to initialize bridge connector: %pe\n", + connector); + return PTR_ERR(connector); + } + + drm_connector_attach_encoder(connector, &output->encoder); + } err = tegra_output_init(drm, output); if (err < 0) { diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 45b5258c77a2..e88a17c2937f 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3728,7 +3728,12 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->aux) return -EPROBE_DEFER; - sor->output.ddc = &sor->aux->ddc; + if (get_device(&sor->aux->ddc.dev)) { + if (try_module_get(sor->aux->ddc.owner)) + sor->output.ddc = &sor->aux->ddc; + else + put_device(&sor->aux->ddc.dev); + } } if (!sor->aux) { diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 3c5744a91d4a..2218da3b3ca3 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -85,8 +85,10 @@ void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus) /* drm_crtc_helper_funcs */ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); struct dispc_device *dispc = tidss->dispc; @@ -97,10 +99,10 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, dev_dbg(ddev->dev, "%s\n", __func__); - if (!state->enable) + if (!crtc_state->enable) return 0; - mode = &state->adjusted_mode; + mode = &crtc_state->adjusted_mode; ok = dispc_vp_mode_valid(dispc, hw_videoport, mode); if (ok != MODE_OK) { @@ -109,7 +111,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, return -EINVAL; } - return dispc_vp_bus_check(dispc, hw_videoport, state); + return dispc_vp_bus_check(dispc, hw_videoport, crtc_state); } /* @@ -161,8 +163,10 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss, } static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); @@ -212,8 +216,10 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, } static void tidss_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); @@ -255,7 +261,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc, } static void tidss_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index 43e72d0b2d84..35067ae674ea 
100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -10,6 +10,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include "tidss_crtc.h" #include "tidss_dispc.h" @@ -150,6 +151,7 @@ static void drm_plane_destroy(struct drm_plane *plane) } static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = tidss_plane_atomic_check, .atomic_update = tidss_plane_atomic_update, .atomic_disable = tidss_plane_atomic_disable, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 518220bd092a..30213708fc99 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -147,12 +147,9 @@ static void tilcdc_crtc_enable_irqs(struct drm_device *dev) tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA | LCDC_V1_UNDERFLOW_INT_ENA); - tilcdc_set(dev, LCDC_DMA_CTRL_REG, - LCDC_V1_END_OF_FRAME_INT_ENA); } else { tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA | - LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_FRAME_DONE | LCDC_SYNC_LOST); } } @@ -484,7 +481,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) } static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { tilcdc_crtc_enable(crtc); } @@ -532,13 +529,13 @@ static void tilcdc_crtc_disable(struct drm_crtc *crtc) } static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { tilcdc_crtc_disable(crtc); } static void tilcdc_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { if (!crtc->state->event) return; @@ -660,15 +657,17 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc, } static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); /* If we are not active we don't care */ - if (!state->active) + if (!crtc_state->active) return 0; - if (state->state->planes[0].ptr != crtc->primary || - state->state->planes[0].state == NULL || - state->state->planes[0].state->crtc != crtc) { + if (state->planes[0].ptr != crtc->primary || + state->planes[0].state == NULL || + state->planes[0].state->crtc != crtc) { dev_dbg(crtc->dev->dev, "CRTC primary plane must be present"); return -EINVAL; } @@ -678,11 +677,44 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc, static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc) { + struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct tilcdc_drm_private *priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + + tilcdc_clear_irqstatus(dev, LCDC_END_OF_FRAME0); + + if (priv->rev == 1) + tilcdc_set(dev, LCDC_DMA_CTRL_REG, + LCDC_V1_END_OF_FRAME_INT_ENA); + else + tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, + LCDC_V2_END_OF_FRAME0_INT_ENA); + + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); + return 0; } static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc) { + struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct tilcdc_drm_private *priv = dev->dev_private; + unsigned long 
flags; + + spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); + + if (priv->rev == 1) + tilcdc_clear(dev, LCDC_DMA_CTRL_REG, + LCDC_V1_END_OF_FRAME_INT_ENA); + else + tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG, + LCDC_V2_END_OF_FRAME0_INT_ENA); + + spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); } static void tilcdc_crtc_reset(struct drm_crtc *crtc) @@ -724,20 +756,6 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = { .disable_vblank = tilcdc_crtc_disable_vblank, }; -int tilcdc_crtc_max_width(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct tilcdc_drm_private *priv = dev->dev_private; - int max_width = 0; - - if (priv->rev == 1) - max_width = 1024; - else if (priv->rev == 2) - max_width = 2048; - - return max_width; -} - static enum drm_mode_status tilcdc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) @@ -750,7 +768,7 @@ tilcdc_crtc_mode_valid(struct drm_crtc *crtc, * check to see if the width is within the range that * the LCD Controller physically supports */ - if (mode->hdisplay > tilcdc_crtc_max_width(crtc)) + if (mode->hdisplay > priv->max_width) return MODE_VIRTUAL_X; /* width must be multiple of 16 */ diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 4f5fc3e87383..3d7e4db756b7 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -105,7 +105,7 @@ static void modeset_init(struct drm_device *dev) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc); + dev->mode_config.max_width = priv->max_width; dev->mode_config.max_height = 2048; dev->mode_config.funcs = &mode_config_funcs; } @@ -218,22 +218,6 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) goto init_failed; } - if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth)) - priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH; - - DBG("Maximum Bandwidth Value %d", priv->max_bandwidth); - - if (of_property_read_u32(node, "max-width", &priv->max_width)) - priv->max_width = TILCDC_DEFAULT_MAX_WIDTH; - - DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width); - - if (of_property_read_u32(node, "max-pixelclock", - &priv->max_pixelclock)) - priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK; - - DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock); - pm_runtime_enable(dev); /* Determine LCD IP Version */ @@ -287,6 +271,26 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) } } + if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth)) + priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH; + + DBG("Maximum Bandwidth Value %d", priv->max_bandwidth); + + if (of_property_read_u32(node, "max-width", &priv->max_width)) { + if (priv->rev == 1) + priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V1; + else + priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V2; + } + + DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width); + + if (of_property_read_u32(node, "max-pixelclock", + &priv->max_pixelclock)) + priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK; + + DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock); + ret = tilcdc_crtc_create(ddev); if (ret < 0) { dev_err(dev, "failed to create crtc\n"); @@ -428,8 +432,8 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg) } static struct drm_info_list tilcdc_debugfs_list[] = { - { "regs", tilcdc_regs_show, 0 }, - { "mm", tilcdc_mm_show, 0 }, + { "regs", tilcdc_regs_show, 0, 
NULL }, + { "mm", tilcdc_mm_show, 0, NULL }, }; static void tilcdc_debugfs_init(struct drm_minor *minor) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index 18815e75ca4f..d29806ca8817 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -28,8 +28,10 @@ struct drm_plane; /* Defaulting to pixel clock defined on AM335x */ #define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000 -/* Defaulting to max width as defined on AM335x */ -#define TILCDC_DEFAULT_MAX_WIDTH 2048 +/* Maximum display width for LCDC V1 */ +#define TILCDC_DEFAULT_MAX_WIDTH_V1 1024 +/* ... and for LCDC V2 found on AM335x: */ +#define TILCDC_DEFAULT_MAX_WIDTH_V2 2048 /* * This may need some tweaking, but want to allow at least 1280x1024@60 * with optimized DDR & EMIF settings tweaked 1920x1080@24 appears to @@ -158,7 +160,6 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc, const struct tilcdc_panel_info *info); void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc, bool simulate_vesa_sync); -int tilcdc_crtc_max_width(struct drm_crtc *crtc); void tilcdc_crtc_shutdown(struct drm_crtc *crtc); int tilcdc_crtc_update_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb, diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 90c0da88cc98..b6f5f87b270f 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,9 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \ - ttm_resource.o + ttm_execbuf_util.o ttm_range_manager.o \ + ttm_resource.o ttm_pool.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o -ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 09fe80e215c5..03c86628e4ac 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -34,7 +34,6 @@ #include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> #include <drm/ttm/ttm_placement.h> #include <linux/agp_backend.h> #include <linux/module.h> @@ -48,15 +47,18 @@ struct ttm_agp_backend { struct agp_bridge_data *bridge; }; -static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) +int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; struct drm_mm_node *node = bo_mem->mm_node; struct agp_memory *mem; - int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); + int ret, cached = ttm->caching == ttm_cached; unsigned i; + if (agp_be->mem) + return 0; + mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY); if (unlikely(mem == NULL)) return -ENOMEM; @@ -81,8 +83,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) return ret; } +EXPORT_SYMBOL(ttm_agp_bind); -static void ttm_agp_unbind(struct ttm_tt *ttm) +void ttm_agp_unbind(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); @@ -95,8 +98,20 @@ static void ttm_agp_unbind(struct ttm_tt *ttm) agp_be->mem = NULL; } } +EXPORT_SYMBOL(ttm_agp_unbind); + +bool ttm_agp_is_bound(struct ttm_tt *ttm) +{ + struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); + + if (!ttm) + return false; + + return (agp_be->mem != 
NULL); +} +EXPORT_SYMBOL(ttm_agp_is_bound); -static void ttm_agp_destroy(struct ttm_tt *ttm) +void ttm_agp_destroy(struct ttm_tt *ttm) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); @@ -105,12 +120,7 @@ static void ttm_agp_destroy(struct ttm_tt *ttm) ttm_tt_fini(ttm); kfree(agp_be); } - -static struct ttm_backend_func ttm_agp_func = { - .bind = ttm_agp_bind, - .unbind = ttm_agp_unbind, - .destroy = ttm_agp_destroy, -}; +EXPORT_SYMBOL(ttm_agp_destroy); struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, struct agp_bridge_data *bridge, @@ -124,9 +134,8 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, agp_be->mem = NULL; agp_be->bridge = bridge; - agp_be->ttm.func = &ttm_agp_func; - if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) { + if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined)) { kfree(agp_be); return NULL; } @@ -134,18 +143,3 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, return &agp_be->ttm; } EXPORT_SYMBOL(ttm_agp_tt_create); - -int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) -{ - if (ttm->state != tt_unpopulated) - return 0; - - return ttm_pool_populate(ttm, ctx); -} -EXPORT_SYMBOL(ttm_agp_tt_populate); - -void ttm_agp_tt_unpopulate(struct ttm_tt *ttm) -{ - ttm_pool_unpopulate(ttm); -} -EXPORT_SYMBOL(ttm_agp_tt_unpopulate); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e3931e515906..e2a124b3affb 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -64,34 +64,18 @@ static void ttm_bo_default_destroy(struct ttm_buffer_object *bo) kfree(bo); } -static inline int ttm_mem_type_from_place(const struct ttm_place *place, - uint32_t *mem_type) -{ - int pos; - - pos = ffs(place->flags & TTM_PL_MASK_MEM); - if (unlikely(!pos)) - return -EINVAL; - - *mem_type = pos - 1; - return 0; -} - static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct drm_printer p = drm_debug_printer(TTM_PFX); - int i, ret, mem_type; struct ttm_resource_manager *man; + int i, mem_type; drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", bo, bo->mem.num_pages, bo->mem.size >> 10, bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { - ret = ttm_mem_type_from_place(&placement->placement[i], - &mem_type); - if (ret) - return; + mem_type = placement->placement[i].mem_type; drm_printf(&p, " placement[%d]=0x%08X (%d)\n", i, placement->placement[i].flags, mem_type); man = ttm_manager_type(bo->bdev, mem_type); @@ -125,22 +109,13 @@ static struct kobj_type ttm_bo_glob_kobj_type = { .default_attrs = ttm_bo_global_attrs }; - -static inline uint32_t ttm_bo_type_flags(unsigned type) -{ - return 1 << (type); -} - static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, struct ttm_resource *mem) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man; - if (!list_empty(&bo->lru)) - return; - - if (mem->placement & TTM_PL_FLAG_NO_EVICT) + if (!list_empty(&bo->lru) || bo->pin_count) return; man = ttm_manager_type(bdev, mem->mem_type); @@ -187,7 +162,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, ttm_bo_del_from_lru(bo); ttm_bo_add_mem_to_lru(bo, &bo->mem); - if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { + if (bulk && !bo->pin_count) { switch (bo->mem.mem_type) { case TTM_PL_TT: ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo); @@ -263,11 +238,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct 
ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); int ret; - ret = ttm_mem_io_lock(old_man, true); - if (unlikely(ret != 0)) - goto out_err; - ttm_bo_unmap_virtual_locked(bo); - ttm_mem_io_unlock(old_man); + ttm_bo_unmap_virtual(bo); /* * Create and bind a ttm if required. @@ -281,56 +252,24 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (ret) goto out_err; - ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); - if (ret) - goto out_err; - if (mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_bind(bo->ttm, mem, ctx); + ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); if (ret) goto out_err; } - - if (bo->mem.mem_type == TTM_PL_SYSTEM) { - if (bdev->driver->move_notify) - bdev->driver->move_notify(bo, evict, mem); - bo->mem = *mem; - goto moved; - } } - if (bdev->driver->move_notify) - bdev->driver->move_notify(bo, evict, mem); - - if (old_man->use_tt && new_man->use_tt) - ret = ttm_bo_move_ttm(bo, ctx, mem); - else if (bdev->driver->move) - ret = bdev->driver->move(bo, evict, ctx, mem); - else - ret = ttm_bo_move_memcpy(bo, ctx, mem); - - if (ret) { - if (bdev->driver->move_notify) { - swap(*mem, bo->mem); - bdev->driver->move_notify(bo, false, mem); - swap(*mem, bo->mem); - } - + ret = bdev->driver->move(bo, evict, ctx, mem); + if (ret) goto out_err; - } - -moved: - bo->evicted = false; ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; return 0; out_err: new_man = ttm_manager_type(bdev, bo->mem.mem_type); - if (!new_man->use_tt) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } + if (!new_man->use_tt) + ttm_bo_tt_destroy(bo); return ret; } @@ -345,11 +284,10 @@ out_err: static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { - if (bo->bdev->driver->move_notify) - bo->bdev->driver->move_notify(bo, false, NULL); + if (bo->bdev->driver->delete_mem_notify) + bo->bdev->driver->delete_mem_notify(bo); - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; + ttm_bo_tt_destroy(bo); ttm_resource_free(bo, &bo->mem); } @@ -538,7 +476,6 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); size_t acc_size = bo->acc_size; int ret; @@ -556,9 +493,7 @@ static void ttm_bo_release(struct kref *kref) bo->bdev->driver->release_notify(bo); drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); - ttm_mem_io_lock(man, false); - ttm_mem_io_free_vm(bo); - ttm_mem_io_unlock(man); + ttm_mem_io_free(bdev, &bo->mem); } if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || @@ -570,12 +505,12 @@ static void ttm_bo_release(struct kref *kref) spin_lock(&ttm_bo_glob.lru_lock); /* - * Make NO_EVICT bos immediately available to + * Make pinned bos immediately available to * shrinkers, now that they are queued for * destruction. 
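A minimal sketch of the pin semantics that replace TTM_PL_FLAG_NO_EVICT in the hunks around here, using hypothetical example_* helpers; a nonzero bo->pin_count is what ttm_bo_add_mem_to_lru() above checks to keep a BO off the LRU, so eviction and swapout never see it:

static void example_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	++bo->pin_count;	/* pinned BOs are skipped by eviction/swapout */
}

static void example_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	if (!WARN_ON(!bo->pin_count))
		--bo->pin_count;
}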
*/ - if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { - bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; + if (bo->pin_count) { + bo->pin_count = 0; ttm_bo_del_from_lru(bo); ttm_bo_add_mem_to_lru(bo, &bo->mem); } @@ -648,8 +583,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.bus.io_reserved_vm = false; - evict_mem.bus.io_reserved_count = 0; + evict_mem.bus.offset = 0; + evict_mem.bus.addr = NULL; ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx); if (ret) { @@ -666,9 +601,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); ttm_resource_free(bo, &evict_mem); - goto out; } - bo->evicted = true; out: return ret; } @@ -679,7 +612,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, /* Don't evict this BO if it's outside of the * requested placement range */ - if (place->fpfn >= (bo->mem.start + bo->mem.size) || + if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) || (place->lpfn && place->lpfn <= bo->mem.start)) return false; @@ -704,7 +637,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, if (bo->base.resv == ctx->resv) { dma_resv_assert_held(bo->base.resv); - if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT) + if (ctx->allow_res_evict) ret = true; *locked = false; if (busy) @@ -892,56 +825,11 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } -static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, - uint32_t cur_placement, - uint32_t proposed_placement) -{ - uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; - uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; - - /** - * Keep current caching if possible. - */ - - if ((cur_placement & caching) != 0) - result |= (cur_placement & caching); - else if ((man->default_caching & caching) != 0) - result |= man->default_caching; - else if ((TTM_PL_FLAG_CACHED & caching) != 0) - result |= TTM_PL_FLAG_CACHED; - else if ((TTM_PL_FLAG_WC & caching) != 0) - result |= TTM_PL_FLAG_WC; - else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) - result |= TTM_PL_FLAG_UNCACHED; - - return result; -} - -static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, - uint32_t mem_type, - const struct ttm_place *place, - uint32_t *masked_placement) -{ - uint32_t cur_flags = ttm_bo_type_flags(mem_type); - - if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0) - return false; - - if ((place->flags & man->available_caching) == 0) - return false; - - cur_flags |= (place->flags & man->available_caching); - - *masked_placement = cur_flags; - return true; -} - /** * ttm_bo_mem_placement - check if placement is compatible * @bo: BO to find memory for * @place: where to search * @mem: the memory object to fill in - * @ctx: operation context * * Check if placement is compatible and fill in mem structure. * Returns -EBUSY if placement won't work or negative error code. 
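With ttm_bo_select_caching() and ttm_bo_mt_compatible() gone, the caching mode is fixed when the TT is created rather than negotiated from placement flags. A sketch following the ttm_agp_tt_create() hunk above, with hypothetical names; enum ttm_caching provides ttm_cached, ttm_write_combined, and ttm_uncached:

static struct ttm_tt *example_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	/* caching is chosen here, not via TTM_PL_FLAG_CACHED/_WC/_UNCACHED */
	if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined)) {
		kfree(tt);
		return NULL;
	}

	return tt;
}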
@@ -949,35 +837,17 @@ static bool ttm_bo_mt_compatible(struct ttm_resource_manager *man, */ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, const struct ttm_place *place, - struct ttm_resource *mem, - struct ttm_operation_ctx *ctx) + struct ttm_resource *mem) { struct ttm_bo_device *bdev = bo->bdev; - uint32_t mem_type = TTM_PL_SYSTEM; struct ttm_resource_manager *man; - uint32_t cur_flags = 0; - int ret; - ret = ttm_mem_type_from_place(place, &mem_type); - if (ret) - return ret; - - man = ttm_manager_type(bdev, mem_type); + man = ttm_manager_type(bdev, place->mem_type); if (!man || !ttm_resource_manager_used(man)) return -EBUSY; - if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) - return -EBUSY; - - cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags); - /* - * Use the access and other non-mapping-related flag bits from - * the memory placement flags to the current flags - */ - ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); - - mem->mem_type = mem_type; - mem->placement = cur_flags; + mem->mem_type = place->mem_type; + mem->placement = place->flags; spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_del_from_lru(bo); @@ -1012,11 +882,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, const struct ttm_place *place = &placement->placement[i]; struct ttm_resource_manager *man; - ret = ttm_bo_mem_placement(bo, place, mem, ctx); - if (ret == -EBUSY) - continue; + ret = ttm_bo_mem_placement(bo, place, mem); if (ret) - goto error; + continue; type_found = true; ret = ttm_resource_alloc(bo, place, mem); @@ -1040,11 +908,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, for (i = 0; i < placement->num_busy_placement; ++i) { const struct ttm_place *place = &placement->busy_placement[i]; - ret = ttm_bo_mem_placement(bo, place, mem, ctx); - if (ret == -EBUSY) - continue; + ret = ttm_bo_mem_placement(bo, place, mem); if (ret) - goto error; + continue; type_found = true; ret = ttm_bo_mem_force_space(bo, place, mem, ctx); @@ -1082,8 +948,8 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; - mem.bus.io_reserved_vm = false; - mem.bus.io_reserved_count = 0; + mem.bus.offset = 0; + mem.bus.addr = NULL; mem.mm_node = NULL; /* @@ -1114,8 +980,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, continue; *new_flags = heap->flags; - if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && - (*new_flags & mem->placement & TTM_PL_MASK_MEM) && + if ((mem->mem_type == heap->mem_type) && (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) return true; @@ -1169,13 +1034,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, ret = ttm_bo_move_buffer(bo, placement, ctx); if (ret) return ret; - } else { - /* - * Use the access and other non-mapping-related flag bits from - * the compatible memory placement flags to the active flags - */ - ttm_flag_masked(&bo->mem.placement, new_flags, - ~TTM_PL_MASK_MEMTYPE); } /* * We might need to add a TTM. 
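The new placement scheme above carries an explicit mem_type in each ttm_place instead of encoding the memory type as a TTM_PL_FLAG_* bit. A sketch of what a placement entry now looks like (hypothetical VRAM example):

static const struct ttm_place example_vram_place = {
	.fpfn = 0,			/* no page range restriction */
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,	/* explicit, no more ffs(flags) */
	.flags = TTM_PL_FLAG_CONTIGUOUS,
};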
@@ -1232,7 +1090,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); - INIT_LIST_HEAD(&bo->io_reserve_lru); bo->bdev = bdev; bo->type = type; bo->num_pages = num_pages; @@ -1241,11 +1098,12 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; - bo->mem.bus.io_reserved_vm = false; - bo->mem.bus.io_reserved_count = 0; + bo->mem.bus.offset = 0; + bo->mem.bus.addr = NULL; bo->moving = NULL; - bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); + bo->mem.placement = 0; bo->acc_size = acc_size; + bo->pin_count = 0; bo->sg = sg; if (resv) { bo->base.resv = resv; @@ -1325,20 +1183,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_init); -size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, - unsigned long bo_size, - unsigned struct_size) -{ - unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; - size_t size = 0; - - size += ttm_round_pot(struct_size); - size += ttm_round_pot(npages * sizeof(void *)); - size += ttm_round_pot(sizeof(struct ttm_tt)); - return size; -} -EXPORT_SYMBOL(ttm_bo_acc_size); - size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, unsigned long bo_size, unsigned struct_size) @@ -1348,56 +1192,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, size += ttm_round_pot(struct_size); size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t))); - size += ttm_round_pot(sizeof(struct ttm_dma_tt)); + size += ttm_round_pot(sizeof(struct ttm_tt)); return size; } EXPORT_SYMBOL(ttm_bo_dma_acc_size); -int ttm_bo_create(struct ttm_bo_device *bdev, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interruptible, - struct ttm_buffer_object **p_bo) -{ - struct ttm_buffer_object *bo; - size_t acc_size; - int ret; - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (unlikely(bo == NULL)) - return -ENOMEM; - - acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); - ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, - interruptible, acc_size, - NULL, NULL, NULL); - if (likely(ret == 0)) - *p_bo = bo; - - return ret; -} -EXPORT_SYMBOL(ttm_bo_create); - -int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) -{ - struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); - - if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { - pr_err("Illegal memory manager memory type %u\n", mem_type); - return -EINVAL; - } - - if (!man) { - pr_err("Memory type %u has not been initialized\n", mem_type); - return 0; - } - - return ttm_resource_manager_force_list_clean(bdev, man); -} -EXPORT_SYMBOL(ttm_bo_evict_mm); - static void ttm_bo_global_kobj_release(struct kobject *kobj) { struct ttm_bo_global *glob = @@ -1484,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) pr_debug("Swap list %d was clean\n", i); spin_unlock(&glob->lru_lock); + ttm_pool_fini(&bdev->pool); + if (!ret) ttm_bo_global_release(); @@ -1500,8 +1301,6 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) * Other types need to be driver / IOCTL initialized. 
*/ man->use_tt = true; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; ttm_resource_manager_init(man, 0); ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man); @@ -1510,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver, + struct device *dev, struct address_space *mapping, struct drm_vma_offset_manager *vma_manager, - bool need_dma32) + bool use_dma_alloc, bool use_dma32) { struct ttm_bo_global *glob = &ttm_bo_glob; int ret; @@ -1527,12 +1327,12 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->driver = driver; ttm_bo_init_sysman(bdev); + ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32); bdev->vma_manager = vma_manager; INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = mapping; - bdev->need_dma32 = need_dma32; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); mutex_unlock(&ttm_global_mutex); @@ -1545,25 +1345,13 @@ EXPORT_SYMBOL(ttm_bo_device_init); * buffer object vm functions. */ -void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) -{ - struct ttm_bo_device *bdev = bo->bdev; - - drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); - ttm_mem_io_free_vm(bo); -} - void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource_manager *man = ttm_manager_type(bdev, bo->mem.mem_type); - ttm_mem_io_lock(man, false); - ttm_bo_unmap_virtual_locked(bo); - ttm_mem_io_unlock(man); + drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); + ttm_mem_io_free(bdev, &bo->mem); } - - EXPORT_SYMBOL(ttm_bo_unmap_virtual); int ttm_bo_wait(struct ttm_buffer_object *bo, @@ -1595,8 +1383,9 @@ EXPORT_SYMBOL(ttm_bo_wait); * A buffer object shrink method that tries to swap out the first * buffer object on the bo_global::swap_lru list. 
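ttm_bo_device_init() above grows a struct device plus use_dma_alloc/use_dma32 arguments for the new per-device ttm_pool. A sketch of an updated driver call, assuming a hypothetical driver and eliding its ttm_bo_driver definition:

static int example_ttm_init(struct drm_device *drm,
			    struct ttm_bo_device *bdev,
			    struct ttm_bo_driver *driver)
{
	/* use_dma_alloc=true backs the pool with coherent DMA
	 * allocations; use_dma32 would restrict pages to 32-bit
	 * DMA addresses.
	 */
	return ttm_bo_device_init(bdev, driver, drm->dev,
				  drm->anon_inode->i_mapping,
				  drm->vma_offset_manager,
				  true, false);
}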
*/ -int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) +int ttm_bo_swapout(struct ttm_operation_ctx *ctx) { + struct ttm_bo_global *glob = &ttm_bo_glob; struct ttm_buffer_object *bo; int ret = -EBUSY; bool locked; @@ -1640,14 +1429,13 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) * Move to system cached */ - if (bo->mem.mem_type != TTM_PL_SYSTEM || - bo->ttm->caching_state != tt_cached) { + if (bo->mem.mem_type != TTM_PL_SYSTEM) { struct ttm_operation_ctx ctx = { false, false }; struct ttm_resource evict_mem; evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; + evict_mem.placement = 0; evict_mem.mem_type = TTM_PL_SYSTEM; ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); @@ -1673,7 +1461,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx) if (bo->bdev->driver->swap_notify) bo->bdev->driver->swap_notify(bo); - ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); + ret = ttm_tt_swapout(bo->bdev, bo->ttm); out: /** @@ -1688,13 +1476,12 @@ out: } EXPORT_SYMBOL(ttm_bo_swapout); -void ttm_bo_swapout_all(void) +void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) { - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; + if (bo->ttm == NULL) + return; - while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0); + ttm_tt_destroy(bo->bdev, bo->ttm); + bo->ttm = NULL; } -EXPORT_SYMBOL(ttm_bo_swapout_all); + diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ee04716b2603..ecb54415d1ca 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -45,168 +45,41 @@ struct ttm_transfer_obj { struct ttm_buffer_object *bo; }; -void ttm_bo_free_old_node(struct ttm_buffer_object *bo) -{ - ttm_resource_free(bo, &bo->mem); -} - -int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_tt *ttm = bo->ttm; - struct ttm_resource *old_mem = &bo->mem; - int ret; - - if (old_mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); - - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - pr_err("Failed to expire sync object before unbinding TTM\n"); - return ret; - } - - ttm_tt_unbind(ttm); - ttm_bo_free_old_node(bo); - ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, - TTM_PL_MASK_MEM); - old_mem->mem_type = TTM_PL_SYSTEM; - } - - ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); - if (unlikely(ret != 0)) - return ret; - - if (new_mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_tt_bind(ttm, new_mem, ctx); - if (unlikely(ret != 0)) - return ret; - } - - *old_mem = *new_mem; - new_mem->mm_node = NULL; - - return 0; -} -EXPORT_SYMBOL(ttm_bo_move_ttm); - -int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible) -{ - if (likely(!man->use_io_reserve_lru)) - return 0; - - if (interruptible) - return mutex_lock_interruptible(&man->io_reserve_mutex); - - mutex_lock(&man->io_reserve_mutex); - return 0; -} - -void ttm_mem_io_unlock(struct ttm_resource_manager *man) -{ - if (likely(!man->use_io_reserve_lru)) - return; - - mutex_unlock(&man->io_reserve_mutex); -} - -static int ttm_mem_io_evict(struct ttm_resource_manager *man) -{ - struct ttm_buffer_object *bo; - - bo = list_first_entry_or_null(&man->io_reserve_lru, - struct ttm_buffer_object, - io_reserve_lru); - if (!bo) - return -ENOSPC; - - 
list_del_init(&bo->io_reserve_lru); - ttm_bo_unmap_virtual_locked(bo); - return 0; -} - int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { - struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); - int ret; - - if (mem->bus.io_reserved_count++) + if (mem->bus.offset || mem->bus.addr) return 0; + mem->bus.is_iomem = false; if (!bdev->driver->io_mem_reserve) return 0; - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.base = 0; - mem->bus.is_iomem = false; -retry: - ret = bdev->driver->io_mem_reserve(bdev, mem); - if (ret == -ENOSPC) { - ret = ttm_mem_io_evict(man); - if (ret == 0) - goto retry; - } - return ret; + return bdev->driver->io_mem_reserve(bdev, mem); } void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_resource *mem) { - if (--mem->bus.io_reserved_count) - return; - - if (!bdev->driver->io_mem_free) + if (!mem->bus.offset && !mem->bus.addr) return; - bdev->driver->io_mem_free(bdev, mem); -} - -int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) -{ - struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type); - struct ttm_resource *mem = &bo->mem; - int ret; - - if (mem->bus.io_reserved_vm) - return 0; - - ret = ttm_mem_io_reserve(bo->bdev, mem); - if (unlikely(ret != 0)) - return ret; - mem->bus.io_reserved_vm = true; - if (man->use_io_reserve_lru) - list_add_tail(&bo->io_reserve_lru, - &man->io_reserve_lru); - return 0; -} - -void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) -{ - struct ttm_resource *mem = &bo->mem; - - if (!mem->bus.io_reserved_vm) - return; + if (bdev->driver->io_mem_free) + bdev->driver->io_mem_free(bdev, mem); - mem->bus.io_reserved_vm = false; - list_del_init(&bo->io_reserve_lru); - ttm_mem_io_free(bo->bdev, mem); + mem->bus.offset = 0; + mem->bus.addr = NULL; } static int ttm_resource_ioremap(struct ttm_bo_device *bdev, struct ttm_resource *mem, void **virtual) { - struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type); int ret; void *addr; *virtual = NULL; - (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bdev, mem); - ttm_mem_io_unlock(man); if (ret || !mem->bus.is_iomem) return ret; @@ -215,16 +88,12 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev, } else { size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; - if (mem->placement & TTM_PL_FLAG_WC) - addr = ioremap_wc(mem->bus.base + mem->bus.offset, - bus_size); + if (mem->bus.caching == ttm_write_combined) + addr = ioremap_wc(mem->bus.offset, bus_size); else - addr = ioremap(mem->bus.base + mem->bus.offset, - bus_size); + addr = ioremap(mem->bus.offset, bus_size); if (!addr) { - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); - ttm_mem_io_unlock(man); return -ENOMEM; } } @@ -236,15 +105,9 @@ static void ttm_resource_iounmap(struct ttm_bo_device *bdev, struct ttm_resource *mem, void *virtual) { - struct ttm_resource_manager *man; - - man = ttm_manager_type(bdev, mem->mem_type); - if (virtual && mem->bus.addr == NULL) iounmap(virtual); - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); - ttm_mem_io_unlock(man); } static int ttm_copy_io_page(void *dst, void *src, unsigned long page) @@ -317,11 +180,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, void *new_iomap; int ret; unsigned long i; - unsigned long page; - unsigned long add = 0; - int dir; - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) return ret; @@ -342,7 +202,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object 
*bo, * Don't move nonexistent data. Clear destination instead. */ if (old_iomap == NULL && - (ttm == NULL || (ttm->state == tt_unpopulated && + (ttm == NULL || (!ttm_tt_is_populated(ttm) && !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); goto out2; @@ -352,34 +212,22 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * TTM might be null for moves within the same region. */ if (ttm) { - ret = ttm_tt_populate(ttm, ctx); + ret = ttm_tt_populate(bdev, ttm, ctx); if (ret) goto out1; } - add = 0; - dir = 1; - - if ((old_mem->mem_type == new_mem->mem_type) && - (new_mem->start < old_mem->start + old_mem->size)) { - dir = -1; - add = new_mem->num_pages - 1; - } - for (i = 0; i < new_mem->num_pages; ++i) { - page = i * dir + add; if (old_iomap == NULL) { - pgprot_t prot = ttm_io_prot(old_mem->placement, - PAGE_KERNEL); - ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, + pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL); + ret = ttm_copy_ttm_io_page(ttm, new_iomap, i, prot); } else if (new_iomap == NULL) { - pgprot_t prot = ttm_io_prot(new_mem->placement, - PAGE_KERNEL); - ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, + pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL); + ret = ttm_copy_io_ttm_page(ttm, old_iomap, i, prot); } else { - ret = ttm_copy_io_page(new_iomap, old_iomap, page); + ret = ttm_copy_io_page(new_iomap, old_iomap, i); } if (ret) goto out1; @@ -387,13 +235,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, mb(); out2: old_copy = *old_mem; - *old_mem = *new_mem; - new_mem->mm_node = NULL; - if (!man->use_tt) { - ttm_tt_destroy(ttm); - bo->ttm = NULL; - } + ttm_bo_assign_mem(bo, new_mem); + + if (!man->use_tt) + ttm_bo_tt_destroy(bo); out1: ttm_resource_iounmap(bdev, old_mem, new_iomap); @@ -444,7 +290,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, return -ENOMEM; fbo->base = *bo; - fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT; ttm_bo_get(bo); fbo->bo = bo; @@ -458,13 +303,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->base.ddestroy); INIT_LIST_HEAD(&fbo->base.lru); INIT_LIST_HEAD(&fbo->base.swap); - INIT_LIST_HEAD(&fbo->base.io_reserve_lru); fbo->base.moving = NULL; drm_vma_node_reset(&fbo->base.base.vma_node); kref_init(&fbo->base.kref); fbo->base.destroy = &ttm_transfered_destroy; fbo->base.acc_size = 0; + fbo->base.pin_count = 1; if (bo->type != ttm_bo_type_sg) fbo->base.base.resv = &fbo->base.base._resv; @@ -477,21 +322,28 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, return 0; } -pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) +pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, + pgprot_t tmp) { + struct ttm_resource_manager *man; + enum ttm_caching caching; + + man = ttm_manager_type(bo->bdev, res->mem_type); + caching = man->use_tt ? 
bo->ttm->caching : res->bus.caching; + /* Cached mappings need no adjustment */ - if (caching_flags & TTM_PL_FLAG_CACHED) + if (caching == ttm_cached) return tmp; #if defined(__i386__) || defined(__x86_64__) - if (caching_flags & TTM_PL_FLAG_WC) + if (caching == ttm_write_combined) tmp = pgprot_writecombine(tmp); else if (boot_cpu_data.x86 > 3) tmp = pgprot_noncached(tmp); #endif #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ defined(__powerpc__) || defined(__mips__) - if (caching_flags & TTM_PL_FLAG_WC) + if (caching == ttm_write_combined) tmp = pgprot_writecombine(tmp); else tmp = pgprot_noncached(tmp); @@ -515,13 +367,11 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); } else { map->bo_kmap_type = ttm_bo_map_iomap; - if (mem->placement & TTM_PL_FLAG_WC) - map->virtual = ioremap_wc(bo->mem.bus.base + - bo->mem.bus.offset + offset, + if (mem->bus.caching == ttm_write_combined) + map->virtual = ioremap_wc(bo->mem.bus.offset + offset, size); else - map->virtual = ioremap(bo->mem.bus.base + - bo->mem.bus.offset + offset, + map->virtual = ioremap(bo->mem.bus.offset + offset, size); } return (!map->virtual) ? -ENOMEM : 0; @@ -543,11 +393,11 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, BUG_ON(!ttm); - ret = ttm_tt_populate(ttm, &ctx); + ret = ttm_tt_populate(bo->bdev, ttm, &ctx); if (ret) return ret; - if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { + if (num_pages == 1 && ttm->caching == ttm_cached) { /* * We're mapping a single page, and the desired * page protection is consistent with the bo. @@ -561,7 +411,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, * We need to use vmap to get the desired page protection * or to make the buffer object look contiguous. 
*/ - prot = ttm_io_prot(mem->placement, PAGE_KERNEL); + prot = ttm_io_prot(bo, mem, PAGE_KERNEL); map->bo_kmap_type = ttm_bo_map_vmap; map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot); @@ -573,8 +423,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) { - struct ttm_resource_manager *man = - ttm_manager_type(bo->bdev, bo->mem.mem_type); unsigned long offset, size; int ret; @@ -585,9 +433,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, if (start_page > bo->num_pages) return -EINVAL; - (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); - ttm_mem_io_unlock(man); if (ret) return ret; if (!bo->mem.bus.is_iomem) { @@ -602,10 +448,6 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { - struct ttm_buffer_object *bo = map->bo; - struct ttm_resource_manager *man = - ttm_manager_type(bo->bdev, bo->mem.mem_type); - if (!map->virtual) return; switch (map->bo_kmap_type) { @@ -623,167 +465,116 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) default: BUG(); } - (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(map->bo->bdev, &map->bo->mem); - ttm_mem_io_unlock(man); map->virtual = NULL; map->page = NULL; } EXPORT_SYMBOL(ttm_bo_kunmap); -int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct dma_fence *fence, - bool evict, - struct ttm_resource *new_mem) +static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo, + bool dst_use_tt) { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); - struct ttm_resource *old_mem = &bo->mem; int ret; - struct ttm_buffer_object *ghost_obj; - - dma_resv_add_excl_fence(bo->base.resv, fence); - if (evict) { - ret = ttm_bo_wait(bo, false, false); - if (ret) - return ret; - - if (!man->use_tt) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } - ttm_bo_free_old_node(bo); - } else { - /** - * This should help pipeline ordinary buffer moves. - * - * Hang old buffer memory on a new buffer object, - * and leave it to be released when the GPU - * operation has completed. - */ - - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); - - ret = ttm_buffer_object_transfer(bo, &ghost_obj); - if (ret) - return ret; - - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); - - /** - * If we're not moving to fixed memory, the TTM object - * needs to stay alive. Otherwhise hang it on the ghost - * bo to be unbound and destroyed. 
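The ttm_bo_kmap()/ttm_bo_kunmap() hunks above drop the per-manager io_reserve lock and the io_reserved_count refcount: ttm_mem_io_reserve() and ttm_mem_io_free() are now plain idempotent calls keyed on whether mem->bus.offset or mem->bus.addr is already set (see the rewritten helpers at the top of this file). A minimal userspace sketch of that pattern; the struct and the driver hook here are stand-ins, not the TTM API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the bus-placement part of struct ttm_resource. */
struct bus_placement {
	void *addr;           /* kernel virtual address, if premapped */
	unsigned long offset; /* bus offset, 0 while unreserved */
	bool is_iomem;
};

/* Stand-in for bdev->driver->io_mem_reserve(). */
static int driver_io_mem_reserve(struct bus_placement *bus)
{
	bus->offset = 0xd0000000ul; /* pretend BAR offset */
	bus->is_iomem = true;
	return 0;
}

/* Idempotent: a second reserve is a no-op, like the new ttm_mem_io_reserve(). */
static int io_reserve(struct bus_placement *bus)
{
	if (bus->offset || bus->addr)
		return 0; /* already reserved */
	bus->is_iomem = false;
	return driver_io_mem_reserve(bus);
}

/* Likewise safe to call on an unreserved resource. */
static void io_free(struct bus_placement *bus)
{
	if (!bus->offset && !bus->addr)
		return;
	bus->offset = 0;
	bus->addr = NULL;
}

int main(void)
{
	struct bus_placement bus = {0};

	io_reserve(&bus);
	io_reserve(&bus); /* returns immediately */
	io_free(&bus);
	io_free(&bus);    /* also a no-op */
	printf("offset after release: %lu\n", bus.offset);
	return 0;
}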
- */ - - if (man->use_tt) - ghost_obj->ttm = NULL; - else - bo->ttm = NULL; - - dma_resv_unlock(&ghost_obj->base._resv); - ttm_bo_put(ghost_obj); - } - - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ret = ttm_bo_wait(bo, false, false); + if (ret) + return ret; + if (!dst_use_tt) + ttm_bo_tt_destroy(bo); + ttm_resource_free(bo, &bo->mem); return 0; } -EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); -int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - struct ttm_resource *new_mem) +static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo, + struct dma_fence *fence, + bool dst_use_tt) { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_resource *old_mem = &bo->mem; - - struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type); - struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); - + struct ttm_buffer_object *ghost_obj; int ret; - dma_resv_add_excl_fence(bo->base.resv, fence); - - if (!evict) { - struct ttm_buffer_object *ghost_obj; - - /** - * This should help pipeline ordinary buffer moves. - * - * Hang old buffer memory on a new buffer object, - * and leave it to be released when the GPU - * operation has completed. - */ - - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); + /** + * This should help pipeline ordinary buffer moves. + * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the GPU + * operation has completed. + */ - ret = ttm_buffer_object_transfer(bo, &ghost_obj); - if (ret) - return ret; + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); + ret = ttm_buffer_object_transfer(bo, &ghost_obj); + if (ret) + return ret; - /** - * If we're not moving to fixed memory, the TTM object - * needs to stay alive. Otherwhise hang it on the ghost - * bo to be unbound and destroyed. - */ + dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); - if (to->use_tt) - ghost_obj->ttm = NULL; - else - bo->ttm = NULL; + /** + * If we're not moving to fixed memory, the TTM object + * needs to stay alive. Otherwise hang it on the ghost + * bo to be unbound and destroyed. + */ - dma_resv_unlock(&ghost_obj->base._resv); - ttm_bo_put(ghost_obj); + if (dst_use_tt) + ghost_obj->ttm = NULL; + else + bo->ttm = NULL; - } else if (!from->use_tt) { + dma_resv_unlock(&ghost_obj->base._resv); + ttm_bo_put(ghost_obj); + return 0; +} - - /** - * BO doesn't have a TTM we need to bind/unbind. Just remember - * this eviction and free up the allocation - */ +static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, + struct dma_fence *fence) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); - spin_lock(&from->move_lock); - if (!from->move || dma_fence_is_later(fence, from->move)) { - dma_fence_put(from->move); - from->move = dma_fence_get(fence); - } - spin_unlock(&from->move_lock); + /** + * BO doesn't have a TTM we need to bind/unbind.
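ttm_bo_move_to_ghost() above factors out TTM's ghost-object trick: the old backing store is handed to a throw-away BO whose only job is to die once the copy fence signals, so the move itself never blocks. A toy model of the idea, with a plain boolean standing in for the dma_fence and malloc'd memory for the old node (names are illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fence { bool signaled; };

/* Toy "ghost": owns the old pages until the fence says the copy is done. */
struct ghost {
	void *old_pages;
	struct fence *fence;
};

static struct ghost *ghost_create(void *old_pages, struct fence *f)
{
	struct ghost *g = malloc(sizeof(*g));

	g->old_pages = old_pages;
	g->fence = f;
	return g;
}

/* Called when the last reference drops; frees only after the fence signals. */
static void ghost_release(struct ghost *g)
{
	if (!g->fence->signaled)
		return; /* copy still in flight, keep the old pages alive */
	free(g->old_pages);
	free(g);
}

int main(void)
{
	struct fence f = { .signaled = false };
	struct ghost *g = ghost_create(malloc(4096), &f);

	ghost_release(g);  /* no-op, GPU copy not finished */
	f.signaled = true; /* fence signals */
	ghost_release(g);  /* old pages finally released */
	puts("ghost retired");
	return 0;
}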
Just remember + * this eviction and free up the allocation + */ + spin_lock(&from->move_lock); + if (!from->move || dma_fence_is_later(fence, from->move)) { + dma_fence_put(from->move); + from->move = dma_fence_get(fence); + } + spin_unlock(&from->move_lock); - ttm_bo_free_old_node(bo); + ttm_resource_free(bo, &bo->mem); - dma_fence_put(bo->moving); - bo->moving = dma_fence_get(fence); + dma_fence_put(bo->moving); + bo->moving = dma_fence_get(fence); +} - } else { - /** - * Last resort, wait for the move to be completed. - * - * Should never happen in pratice. - */ +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, + bool evict, + bool pipeline, + struct ttm_resource *new_mem) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); + struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); + int ret = 0; - ret = ttm_bo_wait(bo, false, false); - if (ret) - return ret; + dma_resv_add_excl_fence(bo->base.resv, fence); + if (!evict) + ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); + else if (!from->use_tt && pipeline) + ttm_bo_move_pipeline_evict(bo, fence); + else + ret = ttm_bo_wait_free_node(bo, man->use_tt); - if (!to->use_tt) { - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } - ttm_bo_free_old_node(bo); - } + if (ret) + return ret; - *old_mem = *new_mem; - new_mem->mm_node = NULL; + ttm_bo_assign_mem(bo, new_mem); return 0; } -EXPORT_SYMBOL(ttm_bo_pipeline_move); +EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 01693e8f24b7..2944fa0af493 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -101,8 +101,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, if (bdev->driver->io_mem_pfn) return bdev->driver->io_mem_pfn(bo, page_offset); - return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) - + page_offset; + return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset; } /** @@ -158,6 +157,15 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, return VM_FAULT_NOPAGE; } + /* + * Refuse to fault imported pages. This should be handled + * (if at all) by redirecting mmap to the exporter. + */ + if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { + dma_resv_unlock(bo->base.resv); + return VM_FAULT_SIGBUS; + } + return 0; } EXPORT_SYMBOL(ttm_bo_vm_reserve); @@ -281,37 +289,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgoff_t i; vm_fault_t ret = VM_FAULT_NOPAGE; unsigned long address = vmf->address; - struct ttm_resource_manager *man = - ttm_manager_type(bdev, bo->mem.mem_type); - - /* - * Refuse to fault imported pages. This should be handled - * (if at all) by redirecting mmap to the exporter. 
- */ - if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) - return VM_FAULT_SIGBUS; - - if (bdev->driver->fault_reserve_notify) { - struct dma_fence *moving = dma_fence_get(bo->moving); - - err = bdev->driver->fault_reserve_notify(bo); - switch (err) { - case 0: - break; - case -EBUSY: - case -ERESTARTSYS: - dma_fence_put(moving); - return VM_FAULT_NOPAGE; - default: - dma_fence_put(moving); - return VM_FAULT_SIGBUS; - } - - if (bo->moving != moving) { - ttm_bo_move_to_lru_tail_unlocked(bo); - } - dma_fence_put(moving); - } /* * Wait for buffer data in transit, due to a pipelined @@ -321,50 +298,38 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, if (unlikely(ret != 0)) return ret; - err = ttm_mem_io_lock(man, true); + err = ttm_mem_io_reserve(bdev, &bo->mem); if (unlikely(err != 0)) - return VM_FAULT_NOPAGE; - err = ttm_mem_io_reserve_vm(bo); - if (unlikely(err != 0)) { - ret = VM_FAULT_SIGBUS; - goto out_io_unlock; - } + return VM_FAULT_SIGBUS; page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); page_last = vma_pages(vma) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->num_pages)) { - ret = VM_FAULT_SIGBUS; - goto out_io_unlock; - } + if (unlikely(page_offset >= bo->num_pages)) + return VM_FAULT_SIGBUS; - prot = ttm_io_prot(bo->mem.placement, prot); + prot = ttm_io_prot(bo, &bo->mem, prot); if (!bo->mem.bus.is_iomem) { struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false, - .flags = TTM_OPT_FLAG_FORCE_ALLOC - + .force_alloc = true }; ttm = bo->ttm; - if (ttm_tt_populate(bo->ttm, &ctx)) { - ret = VM_FAULT_OOM; - goto out_io_unlock; - } + if (ttm_tt_populate(bdev, bo->ttm, &ctx)) + return VM_FAULT_OOM; } else { /* Iomem should not be marked encrypted */ prot = pgprot_decrypted(prot); } /* We don't prefault on huge faults. Yet. */ - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) { - ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset, - fault_page_size, prot); - goto out_io_unlock; - } + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) + return ttm_bo_vm_insert_huge(vmf, bo, page_offset, + fault_page_size, prot); /* * Speculatively prefault a number of pages. 
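With the io_reserve lock gone, the fault path in this hunk returns directly instead of unwinding through out_io_unlock, but it keeps the asymmetric error policy visible just below: a failure on the page actually faulted on is fatal, while a failure on a speculatively prefaulted page merely ends the loop. A compact sketch of that policy; PREFAULT_COUNT and insert_pte() are stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define PREFAULT_COUNT 16

/* Pretend PTE insert; fails past page 4 for demonstration. */
static bool insert_pte(unsigned long pfn)
{
	return pfn < 4;
}

/* Returns 0 unless the demanded page itself cannot be inserted. */
static int fault_with_prefault(unsigned long first_pfn)
{
	for (unsigned long i = 0; i < PREFAULT_COUNT; ++i) {
		if (!insert_pte(first_pfn + i)) {
			if (i == 0)
				return -1; /* demanded page: hard error */
			break; /* prefault miss: never an error */
		}
	}
	return 0;
}

int main(void)
{
	printf("fault at 0 -> %d\n", fault_with_prefault(0)); /* 0: prefault stops */
	printf("fault at 9 -> %d\n", fault_with_prefault(9)); /* -1: first page failed */
	return 0;
}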
Only error on @@ -376,8 +341,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, } else { page = ttm->pages[page_offset]; if (unlikely(!page && i == 0)) { - ret = VM_FAULT_OOM; - goto out_io_unlock; + return VM_FAULT_OOM; } else if (unlikely(!page)) { break; } @@ -404,7 +368,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, /* Never error on prefaulted PTEs */ if (unlikely((ret & VM_FAULT_ERROR))) { if (i == 0) - goto out_io_unlock; + return VM_FAULT_NOPAGE; else break; } @@ -413,9 +377,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, if (unlikely(++page_offset >= page_last)) break; } - ret = VM_FAULT_NOPAGE; -out_io_unlock: - ttm_mem_io_unlock(man); return ret; } EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index acd63b70d814..5ed1fc8f2ace 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -30,7 +30,6 @@ #include <drm/ttm/ttm_memory.h> #include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/wait.h> @@ -38,6 +37,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/swap.h> +#include <drm/ttm/ttm_pool.h> #define TTM_MEMORY_ALLOC_RETRIES 4 @@ -259,7 +259,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, return false; } -/** +/* * At this point we only support a single shrink callback. * Extend this if needed, perhaps using a linked list of callbacks. * Note that this function is reentrant: @@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, while (ttm_zones_above_swap_target(glob, from_wq, extra)) { spin_unlock(&glob->lock); - ret = ttm_bo_swapout(&ttm_bo_glob, ctx); + ret = ttm_bo_swapout(ctx); spin_lock(&glob->lock); if (unlikely(ret != 0)) break; @@ -451,8 +451,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) pr_info("Zone %7s: Available graphics memory: %llu KiB\n", zone->name, (unsigned long long)zone->max_mem >> 10); } - ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); - ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); + ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE)); return 0; out_no_zone: ttm_mem_global_release(glob); @@ -465,8 +464,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) unsigned int i; /* let the page allocator first stop the shrink work. 
*/ - ttm_page_alloc_fini(); - ttm_dma_page_alloc_fini(); + ttm_pool_mgr_fini(); flush_workqueue(glob->swap_queue); destroy_workqueue(glob->swap_queue); @@ -544,7 +542,8 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob, { int64_t available; - if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC) + /* We allow over commit during suspend */ + if (ctx->force_alloc) return false; available = get_nr_swap_pages() + si_mem_available(); @@ -554,7 +553,6 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob, return false; } -EXPORT_SYMBOL(ttm_check_under_lowerlimit); static int ttm_mem_global_reserve(struct ttm_mem_global *glob, struct ttm_mem_zone *single_zone, @@ -682,9 +680,3 @@ size_t ttm_round_pot(size_t size) return 0; } EXPORT_SYMBOL(ttm_round_pot); - -uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob) -{ - return glob->zone_kernel->max_mem; -} -EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c deleted file mode 100644 index b40a4678c296..000000000000 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ /dev/null @@ -1,1189 +0,0 @@ -/* - * Copyright (c) Red Hat Inc. - - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sub license, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie <airlied@redhat.com> - * Jerome Glisse <jglisse@redhat.com> - * Pauli Nieminen <suokkos@gmail.com> - */ - -/* simple list based uncached page pool - * - Pool collects resently freed pages for reuse - * - Use page->lru to keep a free list - * - doesn't track currently in use pages - */ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/highmem.h> -#include <linux/mm_types.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/seq_file.h> /* for seq_printf */ -#include <linux/slab.h> -#include <linux/dma-mapping.h> - -#include <linux/atomic.h> - -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> -#include <drm/ttm/ttm_set_memory.h> - -#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) -#define SMALL_ALLOCATION 16 -#define FREE_ALL_PAGES (~0U) -/* times are in msecs */ -#define PAGE_FREE_INTERVAL 1000 - -/** - * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. - * - * @lock: Protects the shared pool from concurrnet access. Must be used with - * irqsave/irqrestore variants because pool allocator maybe called from - * delayed work. 
- * @fill_lock: Prevent concurrent calls to fill. - * @list: Pool of free uc/wc pages for fast reuse. - * @gfp_flags: Flags to pass for alloc_page. - * @npages: Number of pages in pool. - */ -struct ttm_page_pool { - spinlock_t lock; - bool fill_lock; - struct list_head list; - gfp_t gfp_flags; - unsigned npages; - char *name; - unsigned long nfrees; - unsigned long nrefills; - unsigned int order; -}; - -/** - * Limits for the pool. They are handled without locks because only place where - * they may change is in sysfs store. They won't have immediate effect anyway - * so forcing serialization to access them is pointless. - */ - -struct ttm_pool_opts { - unsigned alloc_size; - unsigned max_size; - unsigned small; -}; - -#define NUM_POOLS 6 - -/** - * struct ttm_pool_manager - Holds memory pools for fst allocation - * - * Manager is read only object for pool code so it doesn't need locking. - * - * @free_interval: minimum number of jiffies between freeing pages from pool. - * @page_alloc_inited: reference counting for pool allocation. - * @work: Work that is used to shrink the pool. Work is only run when there is - * some pages to free. - * @small_allocation: Limit in number of pages what is small allocation. - * - * @pools: All pool objects in use. - **/ -struct ttm_pool_manager { - struct kobject kobj; - struct shrinker mm_shrink; - struct ttm_pool_opts options; - - union { - struct ttm_page_pool pools[NUM_POOLS]; - struct { - struct ttm_page_pool wc_pool; - struct ttm_page_pool uc_pool; - struct ttm_page_pool wc_pool_dma32; - struct ttm_page_pool uc_pool_dma32; - struct ttm_page_pool wc_pool_huge; - struct ttm_page_pool uc_pool_huge; - } ; - }; -}; - -static struct attribute ttm_page_pool_max = { - .name = "pool_max_size", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_small = { - .name = "pool_small_allocation", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_alloc_size = { - .name = "pool_allocation_size", - .mode = S_IRUGO | S_IWUSR -}; - -static struct attribute *ttm_pool_attrs[] = { - &ttm_page_pool_max, - &ttm_page_pool_small, - &ttm_page_pool_alloc_size, - NULL -}; - -static void ttm_pool_kobj_release(struct kobject *kobj) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - kfree(m); -} - -static ssize_t ttm_pool_store(struct kobject *kobj, - struct attribute *attr, const char *buffer, size_t size) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - int chars; - unsigned val; - chars = sscanf(buffer, "%u", &val); - if (chars == 0) - return size; - - /* Convert kb to number of pages */ - val = val / (PAGE_SIZE >> 10); - - if (attr == &ttm_page_pool_max) - m->options.max_size = val; - else if (attr == &ttm_page_pool_small) - m->options.small = val; - else if (attr == &ttm_page_pool_alloc_size) { - if (val > NUM_PAGES_TO_ALLOC*8) { - pr_err("Setting allocation size to %lu is not allowed. 
Recommended size is %lu\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - return size; - } else if (val > NUM_PAGES_TO_ALLOC) { - pr_warn("Setting allocation size to larger than %lu is not recommended\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - } - m->options.alloc_size = val; - } - - return size; -} - -static ssize_t ttm_pool_show(struct kobject *kobj, - struct attribute *attr, char *buffer) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - unsigned val = 0; - - if (attr == &ttm_page_pool_max) - val = m->options.max_size; - else if (attr == &ttm_page_pool_small) - val = m->options.small; - else if (attr == &ttm_page_pool_alloc_size) - val = m->options.alloc_size; - - val = val * (PAGE_SIZE >> 10); - - return snprintf(buffer, PAGE_SIZE, "%u\n", val); -} - -static const struct sysfs_ops ttm_pool_sysfs_ops = { - .show = &ttm_pool_show, - .store = &ttm_pool_store, -}; - -static struct kobj_type ttm_pool_kobj_type = { - .release = &ttm_pool_kobj_release, - .sysfs_ops = &ttm_pool_sysfs_ops, - .default_attrs = ttm_pool_attrs, -}; - -static struct ttm_pool_manager *_manager; - -/** - * Select the right pool or requested caching state and ttm flags. */ -static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, - enum ttm_caching_state cstate) -{ - int pool_index; - - if (cstate == tt_cached) - return NULL; - - if (cstate == tt_wc) - pool_index = 0x0; - else - pool_index = 0x1; - - if (flags & TTM_PAGE_FLAG_DMA32) { - if (huge) - return NULL; - pool_index |= 0x2; - - } else if (huge) { - pool_index |= 0x4; - } - - return &_manager->pools[pool_index]; -} - -/* set memory back to wb and free the pages. */ -static void ttm_pages_put(struct page *pages[], unsigned npages, - unsigned int order) -{ - unsigned int i, pages_nr = (1 << order); - - if (order == 0) { - if (ttm_set_pages_array_wb(pages, npages)) - pr_err("Failed to set %d pages to wb!\n", npages); - } - - for (i = 0; i < npages; ++i) { - if (order > 0) { - if (ttm_set_pages_wb(pages[i], pages_nr)) - pr_err("Failed to set %d pages to wb!\n", pages_nr); - } - __free_pages(pages[i], order); - } -} - -static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, - unsigned freed_pages) -{ - pool->npages -= freed_pages; - pool->nfrees += freed_pages; -} - -/** - * Free pages from pool. - * - * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC - * number of pages in one go. - * - * @pool: to free the pages from - * @free_all: If set to true will free all pages in pool - * @use_static: Safe to use static buffer - **/ -static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, - bool use_static) -{ - static struct page *static_buf[NUM_PAGES_TO_ALLOC]; - unsigned long irq_flags; - struct page *p; - struct page **pages_to_free; - unsigned freed_pages = 0, - npages_to_free = nr_free; - - if (NUM_PAGES_TO_ALLOC < nr_free) - npages_to_free = NUM_PAGES_TO_ALLOC; - - if (use_static) - pages_to_free = static_buf; - else - pages_to_free = kmalloc_array(npages_to_free, - sizeof(struct page *), - GFP_KERNEL); - if (!pages_to_free) { - pr_debug("Failed to allocate memory for pool free operation\n"); - return 0; - } - -restart: - spin_lock_irqsave(&pool->lock, irq_flags); - - list_for_each_entry_reverse(p, &pool->list, lru) { - if (freed_pages >= npages_to_free) - break; - - pages_to_free[freed_pages++] = p; - /* We can only remove NUM_PAGES_TO_ALLOC at a time. 
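The deleted ttm_page_pool_free() illustrates a pattern worth noting even in removal: pages leave the pool in batches of at most NUM_PAGES_TO_ALLOC, and the spinlock is dropped around the costly caching-attribute change so other CPUs are not stalled behind it. A simplified userspace rendering of that drain loop, with a pthread mutex in place of the irqsave spinlock:

#include <pthread.h>
#include <stdio.h>

#define BATCH 8

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned pool_npages = 20;

/* Expensive work (set_memory_wb() in the original) done unlocked. */
static void expensive_put(unsigned n)
{
	printf("freeing batch of %u pages\n", n);
}

static void pool_drain(void)
{
	for (;;) {
		unsigned batch;

		pthread_mutex_lock(&pool_lock);
		batch = pool_npages < BATCH ? pool_npages : BATCH;
		pool_npages -= batch;
		pthread_mutex_unlock(&pool_lock); /* drop lock around slow path */

		if (!batch)
			break;
		expensive_put(batch);
	}
}

int main(void)
{
	pool_drain();
	return 0;
}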
*/ - if (freed_pages >= NUM_PAGES_TO_ALLOC) { - /* remove range of pages from the pool */ - __list_del(p->lru.prev, &pool->list); - - ttm_pool_update_free_locked(pool, freed_pages); - /** - * Because changing page caching is costly - * we unlock the pool to prevent stalling. - */ - spin_unlock_irqrestore(&pool->lock, irq_flags); - - ttm_pages_put(pages_to_free, freed_pages, pool->order); - if (likely(nr_free != FREE_ALL_PAGES)) - nr_free -= freed_pages; - - if (NUM_PAGES_TO_ALLOC >= nr_free) - npages_to_free = nr_free; - else - npages_to_free = NUM_PAGES_TO_ALLOC; - - freed_pages = 0; - - /* free all so restart the processing */ - if (nr_free) - goto restart; - - /* Not allowed to fall through or break because - * following context is inside spinlock while we are - * outside here. - */ - goto out; - - } - } - - /* remove range of pages from the pool */ - if (freed_pages) { - __list_del(&p->lru, &pool->list); - - ttm_pool_update_free_locked(pool, freed_pages); - nr_free -= freed_pages; - } - - spin_unlock_irqrestore(&pool->lock, irq_flags); - - if (freed_pages) - ttm_pages_put(pages_to_free, freed_pages, pool->order); -out: - if (pages_to_free != static_buf) - kfree(pages_to_free); - return nr_free; -} - -/** - * Callback for mm to request pool to reduce number of page held. - * - * XXX: (dchinner) Deadlock warning! - * - * This code is crying out for a shrinker per pool.... - */ -static unsigned long -ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) -{ - static DEFINE_MUTEX(lock); - static unsigned start_pool; - unsigned i; - unsigned pool_offset; - struct ttm_page_pool *pool; - int shrink_pages = sc->nr_to_scan; - unsigned long freed = 0; - unsigned int nr_free_pool; - - if (!mutex_trylock(&lock)) - return SHRINK_STOP; - pool_offset = ++start_pool % NUM_POOLS; - /* select start pool in round robin fashion */ - for (i = 0; i < NUM_POOLS; ++i) { - unsigned nr_free = shrink_pages; - unsigned page_nr; - - if (shrink_pages == 0) - break; - - pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; - page_nr = (1 << pool->order); - /* OK to use static buffer since global mutex is held. 
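The old shrinker scanned its six pools round-robin, persisting the start index across invocations so the same pool is not always drained first. A minimal sketch of that rotation over fake pool contents:

#include <stdio.h>

#define NUM_POOLS 6

static unsigned pool_pages[NUM_POOLS] = { 4, 0, 7, 2, 0, 5 };
static unsigned start_pool; /* persists across scans, like the original */

static unsigned long shrink_scan(unsigned long nr_to_scan)
{
	unsigned long freed = 0;
	unsigned offset = ++start_pool % NUM_POOLS;

	for (unsigned i = 0; i < NUM_POOLS && freed < nr_to_scan; ++i) {
		unsigned *p = &pool_pages[(i + offset) % NUM_POOLS];
		unsigned take = *p < nr_to_scan - freed ? *p : nr_to_scan - freed;

		*p -= take;
		freed += take;
	}
	return freed;
}

int main(void)
{
	printf("freed %lu\n", shrink_scan(6));
	printf("freed %lu\n", shrink_scan(6)); /* starts one pool further on */
	return 0;
}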
*/ - nr_free_pool = roundup(nr_free, page_nr) >> pool->order; - shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true); - freed += (nr_free_pool - shrink_pages) << pool->order; - if (freed >= sc->nr_to_scan) - break; - shrink_pages <<= pool->order; - } - mutex_unlock(&lock); - return freed; -} - - -static unsigned long -ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -{ - unsigned i; - unsigned long count = 0; - struct ttm_page_pool *pool; - - for (i = 0; i < NUM_POOLS; ++i) { - pool = &_manager->pools[i]; - count += (pool->npages << pool->order); - } - - return count; -} - -static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) -{ - manager->mm_shrink.count_objects = ttm_pool_shrink_count; - manager->mm_shrink.scan_objects = ttm_pool_shrink_scan; - manager->mm_shrink.seeks = 1; - return register_shrinker(&manager->mm_shrink); -} - -static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) -{ - unregister_shrinker(&manager->mm_shrink); -} - -static int ttm_set_pages_caching(struct page **pages, - enum ttm_caching_state cstate, unsigned cpages) -{ - int r = 0; - /* Set page caching */ - switch (cstate) { - case tt_uncached: - r = ttm_set_pages_array_uc(pages, cpages); - if (r) - pr_err("Failed to set %d pages to uc!\n", cpages); - break; - case tt_wc: - r = ttm_set_pages_array_wc(pages, cpages); - if (r) - pr_err("Failed to set %d pages to wc!\n", cpages); - break; - default: - break; - } - return r; -} - -/** - * Free pages the pages that failed to change the caching state. If there is - * any pages that have changed their caching state already put them to the - * pool. - */ -static void ttm_handle_caching_state_failure(struct list_head *pages, - int ttm_flags, enum ttm_caching_state cstate, - struct page **failed_pages, unsigned cpages) -{ - unsigned i; - /* Failed pages have to be freed */ - for (i = 0; i < cpages; ++i) { - list_del(&failed_pages[i]->lru); - __free_page(failed_pages[i]); - } -} - -/** - * Allocate new pages with correct caching. - * - * This function is reentrant if caller updates count depending on number of - * pages returned in pages array. 
- */ -static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, - int ttm_flags, enum ttm_caching_state cstate, - unsigned count, unsigned order) -{ - struct page **caching_array; - struct page *p; - int r = 0; - unsigned i, j, cpages; - unsigned npages = 1 << order; - unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC); - - /* allocate array for page caching change */ - caching_array = kmalloc_array(max_cpages, sizeof(struct page *), - GFP_KERNEL); - - if (!caching_array) { - pr_debug("Unable to allocate table for new pages\n"); - return -ENOMEM; - } - - for (i = 0, cpages = 0; i < count; ++i) { - p = alloc_pages(gfp_flags, order); - - if (!p) { - pr_debug("Unable to get page %u\n", i); - - /* store already allocated pages in the pool after - * setting the caching state */ - if (cpages) { - r = ttm_set_pages_caching(caching_array, - cstate, cpages); - if (r) - ttm_handle_caching_state_failure(pages, - ttm_flags, cstate, - caching_array, cpages); - } - r = -ENOMEM; - goto out; - } - - list_add(&p->lru, pages); - -#ifdef CONFIG_HIGHMEM - /* gfp flags of highmem page should never be dma32 so we - * we should be fine in such case - */ - if (PageHighMem(p)) - continue; - -#endif - for (j = 0; j < npages; ++j) { - caching_array[cpages++] = p++; - if (cpages == max_cpages) { - - r = ttm_set_pages_caching(caching_array, - cstate, cpages); - if (r) { - ttm_handle_caching_state_failure(pages, - ttm_flags, cstate, - caching_array, cpages); - goto out; - } - cpages = 0; - } - } - } - - if (cpages) { - r = ttm_set_pages_caching(caching_array, cstate, cpages); - if (r) - ttm_handle_caching_state_failure(pages, - ttm_flags, cstate, - caching_array, cpages); - } -out: - kfree(caching_array); - - return r; -} - -/** - * Fill the given pool if there aren't enough pages and the requested number of - * pages is small. - */ -static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, - enum ttm_caching_state cstate, - unsigned count, unsigned long *irq_flags) -{ - struct page *p; - int r; - unsigned cpages = 0; - /** - * Only allow one pool fill operation at a time. - * If pool doesn't have enough pages for the allocation new pages are - * allocated from outside of pool. - */ - if (pool->fill_lock) - return; - - pool->fill_lock = true; - - /* If allocation request is small and there are not enough - * pages in a pool we fill the pool up first. */ - if (count < _manager->options.small - && count > pool->npages) { - struct list_head new_pages; - unsigned alloc_size = _manager->options.alloc_size; - - /** - * Can't change page caching if in irqsave context. We have to - * drop the pool->lock. - */ - spin_unlock_irqrestore(&pool->lock, *irq_flags); - - INIT_LIST_HEAD(&new_pages); - r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, - cstate, alloc_size, 0); - spin_lock_irqsave(&pool->lock, *irq_flags); - - if (!r) { - list_splice(&new_pages, &pool->list); - ++pool->nrefills; - pool->npages += alloc_size; - } else { - pr_debug("Failed to fill pool (%p)\n", pool); - /* If we have any pages left put them to the pool. */ - list_for_each_entry(p, &new_pages, lru) { - ++cpages; - } - list_splice(&new_pages, &pool->list); - pool->npages += cpages; - } - - } - pool->fill_lock = false; -} - -/** - * Allocate pages from the pool and put them on the return list. - * - * @return zero for success or negative error code. 
- */ -static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, - struct list_head *pages, - int ttm_flags, - enum ttm_caching_state cstate, - unsigned count, unsigned order) -{ - unsigned long irq_flags; - struct list_head *p; - unsigned i; - int r = 0; - - spin_lock_irqsave(&pool->lock, irq_flags); - if (!order) - ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, - &irq_flags); - - if (count >= pool->npages) { - /* take all pages from the pool */ - list_splice_init(&pool->list, pages); - count -= pool->npages; - pool->npages = 0; - goto out; - } - /* find the last pages to include for requested number of pages. Split - * pool to begin and halve it to reduce search space. */ - if (count <= pool->npages/2) { - i = 0; - list_for_each(p, &pool->list) { - if (++i == count) - break; - } - } else { - i = pool->npages + 1; - list_for_each_prev(p, &pool->list) { - if (--i == count) - break; - } - } - /* Cut 'count' number of pages from the pool */ - list_cut_position(pages, &pool->list, p); - pool->npages -= count; - count = 0; -out: - spin_unlock_irqrestore(&pool->lock, irq_flags); - - /* clear the pages coming from the pool if requested */ - if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) { - struct page *page; - - list_for_each_entry(page, pages, lru) { - if (PageHighMem(page)) - clear_highpage(page); - else - clear_page(page_address(page)); - } - } - - /* If pool didn't have enough pages allocate new one. */ - if (count) { - gfp_t gfp_flags = pool->gfp_flags; - - /* set zero flag for page allocation if required */ - if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - - if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY) - gfp_flags |= __GFP_RETRY_MAYFAIL; - - /* ttm_alloc_new_pages doesn't reference pool so we can run - * multiple requests in parallel. - **/ - r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate, - count, order); - } - - return r; -} - -/* Put all pages in pages list to correct pool to wait for reuse */ -static void ttm_put_pages(struct page **pages, unsigned npages, int flags, - enum ttm_caching_state cstate) -{ - struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); -#endif - unsigned long irq_flags; - unsigned i; - - if (pool == NULL) { - /* No pool for this memory type so free the pages */ - i = 0; - while (i < npages) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct page *p = pages[i]; -#endif - unsigned order = 0, j; - - if (!pages[i]) { - ++i; - continue; - } - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!(flags & TTM_PAGE_FLAG_DMA32) && - (npages - i) >= HPAGE_PMD_NR) { - for (j = 1; j < HPAGE_PMD_NR; ++j) - if (++p != pages[i + j]) - break; - - if (j == HPAGE_PMD_NR) - order = HPAGE_PMD_ORDER; - } -#endif - - if (page_count(pages[i]) != 1) - pr_err("Erroneous page count. 
Leaking pages.\n"); - __free_pages(pages[i], order); - - j = 1 << order; - while (j) { - pages[i++] = NULL; - --j; - } - } - return; - } - - i = 0; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (huge) { - unsigned max_size, n2free; - - spin_lock_irqsave(&huge->lock, irq_flags); - while ((npages - i) >= HPAGE_PMD_NR) { - struct page *p = pages[i]; - unsigned j; - - if (!p) - break; - - for (j = 1; j < HPAGE_PMD_NR; ++j) - if (++p != pages[i + j]) - break; - - if (j != HPAGE_PMD_NR) - break; - - list_add_tail(&pages[i]->lru, &huge->list); - - for (j = 0; j < HPAGE_PMD_NR; ++j) - pages[i++] = NULL; - huge->npages++; - } - - /* Check that we don't go over the pool limit */ - max_size = _manager->options.max_size; - max_size /= HPAGE_PMD_NR; - if (huge->npages > max_size) - n2free = huge->npages - max_size; - else - n2free = 0; - spin_unlock_irqrestore(&huge->lock, irq_flags); - if (n2free) - ttm_page_pool_free(huge, n2free, false); - } -#endif - - spin_lock_irqsave(&pool->lock, irq_flags); - while (i < npages) { - if (pages[i]) { - if (page_count(pages[i]) != 1) - pr_err("Erroneous page count. Leaking pages.\n"); - list_add_tail(&pages[i]->lru, &pool->list); - pages[i] = NULL; - pool->npages++; - } - ++i; - } - /* Check that we don't go over the pool limit */ - npages = 0; - if (pool->npages > _manager->options.max_size) { - npages = pool->npages - _manager->options.max_size; - /* free at least NUM_PAGES_TO_ALLOC number of pages - * to reduce calls to set_memory_wb */ - if (npages < NUM_PAGES_TO_ALLOC) - npages = NUM_PAGES_TO_ALLOC; - } - spin_unlock_irqrestore(&pool->lock, irq_flags); - if (npages) - ttm_page_pool_free(pool, npages, false); -} - -/* - * On success pages list will hold count number of correctly - * cached pages. - */ -static int ttm_get_pages(struct page **pages, unsigned npages, int flags, - enum ttm_caching_state cstate) -{ - struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate); -#endif - struct list_head plist; - struct page *p = NULL; - unsigned count, first; - int r; - - /* No pool for cached pages */ - if (pool == NULL) { - gfp_t gfp_flags = GFP_USER; - unsigned i; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - unsigned j; -#endif - - /* set zero flag for page allocation if required */ - if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - - if (flags & TTM_PAGE_FLAG_NO_RETRY) - gfp_flags |= __GFP_RETRY_MAYFAIL; - - if (flags & TTM_PAGE_FLAG_DMA32) - gfp_flags |= GFP_DMA32; - else - gfp_flags |= GFP_HIGHUSER; - - i = 0; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!(gfp_flags & GFP_DMA32)) { - while (npages >= HPAGE_PMD_NR) { - gfp_t huge_flags = gfp_flags; - - huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM; - huge_flags &= ~__GFP_MOVABLE; - huge_flags &= ~__GFP_COMP; - p = alloc_pages(huge_flags, HPAGE_PMD_ORDER); - if (!p) - break; - - for (j = 0; j < HPAGE_PMD_NR; ++j) - pages[i++] = p++; - - npages -= HPAGE_PMD_NR; - } - } -#endif - - first = i; - while (npages) { - p = alloc_page(gfp_flags); - if (!p) { - pr_debug("Unable to allocate page\n"); - return -ENOMEM; - } - - /* Swap the pages if we detect consecutive order */ - if (i > first && pages[i - 1] == p - 1) - swap(p, pages[i - 1]); - - pages[i++] = p; - --npages; - } - return 0; - } - - count = 0; - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (huge && npages >= HPAGE_PMD_NR) { - INIT_LIST_HEAD(&plist); - ttm_page_pool_get_pages(huge, &plist, flags, cstate, - npages / 
HPAGE_PMD_NR, - HPAGE_PMD_ORDER); - - list_for_each_entry(p, &plist, lru) { - unsigned j; - - for (j = 0; j < HPAGE_PMD_NR; ++j) - pages[count++] = &p[j]; - } - } -#endif - - INIT_LIST_HEAD(&plist); - r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, - npages - count, 0); - - first = count; - list_for_each_entry(p, &plist, lru) { - struct page *tmp = p; - - /* Swap the pages if we detect consecutive order */ - if (count > first && pages[count - 1] == tmp - 1) - swap(tmp, pages[count - 1]); - pages[count++] = tmp; - } - - if (r) { - /* If there is any pages in the list put them back to - * the pool. - */ - pr_debug("Failed to allocate extra pages for large request\n"); - ttm_put_pages(pages, count, flags, cstate); - return r; - } - - return 0; -} - -static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, - char *name, unsigned int order) -{ - spin_lock_init(&pool->lock); - pool->fill_lock = false; - INIT_LIST_HEAD(&pool->list); - pool->npages = pool->nfrees = 0; - pool->gfp_flags = flags; - pool->name = name; - pool->order = order; -} - -int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) -{ - int ret; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - unsigned order = HPAGE_PMD_ORDER; -#else - unsigned order = 0; -#endif - - WARN_ON(_manager); - - pr_info("Initializing pool allocator\n"); - - _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); - if (!_manager) - return -ENOMEM; - - ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0); - - ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0); - - ttm_page_pool_init_locked(&_manager->wc_pool_dma32, - GFP_USER | GFP_DMA32, "wc dma", 0); - - ttm_page_pool_init_locked(&_manager->uc_pool_dma32, - GFP_USER | GFP_DMA32, "uc dma", 0); - - ttm_page_pool_init_locked(&_manager->wc_pool_huge, - (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM) & - ~(__GFP_MOVABLE | __GFP_COMP), - "wc huge", order); - - ttm_page_pool_init_locked(&_manager->uc_pool_huge, - (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM) & - ~(__GFP_MOVABLE | __GFP_COMP) - , "uc huge", order); - - _manager->options.max_size = max_pages; - _manager->options.small = SMALL_ALLOCATION; - _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; - - ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, - &glob->kobj, "pool"); - if (unlikely(ret != 0)) - goto error; - - ret = ttm_pool_mm_shrink_init(_manager); - if (unlikely(ret != 0)) - goto error; - return 0; - -error: - kobject_put(&_manager->kobj); - _manager = NULL; - return ret; -} - -void ttm_page_alloc_fini(void) -{ - int i; - - pr_info("Finalizing pool allocator\n"); - ttm_pool_mm_shrink_fini(_manager); - - /* OK to use static buffer since global mutex is no longer used. 
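Both the "swap the pages if we detect consecutive order" trick in the deleted ttm_get_pages() and ttm_populate_and_map_pages() further down rely on the same idea: keep the page array physically ordered so contiguous runs can be mapped with a single dma_map_page() call. A sketch of that run-length walk, with plain addresses standing in for struct page pointers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096

/* Fake page addresses; three pages contiguous, then a gap, then two more. */
static uintptr_t pages[] = { 0x1000, 0x2000, 0x3000, 0x9000, 0xa000 };
#define NPAGES (sizeof(pages) / sizeof(pages[0]))

int main(void)
{
	for (size_t i = 0; i < NPAGES; ) {
		size_t run = 1;

		/* Extend the run while the next page follows physically. */
		while (i + run < NPAGES &&
		       pages[i + run] == pages[i + run - 1] + PAGE_SZ)
			++run;

		/* One mapping per run, like one dma_map_page() per run. */
		printf("map %zu page(s) at %#lx\n", run,
		       (unsigned long)pages[i]);
		i += run;
	}
	return 0;
}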
*/ - for (i = 0; i < NUM_POOLS; ++i) - ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); - - kobject_put(&_manager->kobj); - _manager = NULL; -} - -static void -ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - unsigned i; - - if (mem_count_update == 0) - goto put_pages; - - for (i = 0; i < mem_count_update; ++i) { - if (!ttm->pages[i]) - continue; - - ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE); - } - -put_pages: - ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching_state); - ttm->state = tt_unpopulated; -} - -int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - unsigned i; - int ret; - - if (ttm->state != tt_unpopulated) - return 0; - - if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx)) - return -ENOMEM; - - ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching_state); - if (unlikely(ret != 0)) { - ttm_pool_unpopulate_helper(ttm, 0); - return ret; - } - - for (i = 0; i < ttm->num_pages; ++i) { - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - PAGE_SIZE, ctx); - if (unlikely(ret != 0)) { - ttm_pool_unpopulate_helper(ttm, i); - return -ENOMEM; - } - } - - if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { - ret = ttm_tt_swapin(ttm); - if (unlikely(ret != 0)) { - ttm_pool_unpopulate(ttm); - return ret; - } - } - - ttm->state = tt_unbound; - return 0; -} -EXPORT_SYMBOL(ttm_pool_populate); - -void ttm_pool_unpopulate(struct ttm_tt *ttm) -{ - ttm_pool_unpopulate_helper(ttm, ttm->num_pages); -} -EXPORT_SYMBOL(ttm_pool_unpopulate); - -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, - struct ttm_operation_ctx *ctx) -{ - unsigned i, j; - int r; - - r = ttm_pool_populate(&tt->ttm, ctx); - if (r) - return r; - - for (i = 0; i < tt->ttm.num_pages; ++i) { - struct page *p = tt->ttm.pages[i]; - size_t num_pages = 1; - - for (j = i + 1; j < tt->ttm.num_pages; ++j) { - if (++p != tt->ttm.pages[j]) - break; - - ++num_pages; - } - - tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], - 0, num_pages * PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, tt->dma_address[i])) { - while (i--) { - dma_unmap_page(dev, tt->dma_address[i], - PAGE_SIZE, DMA_BIDIRECTIONAL); - tt->dma_address[i] = 0; - } - ttm_pool_unpopulate(&tt->ttm); - return -EFAULT; - } - - for (j = 1; j < num_pages; ++j) { - tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE; - ++i; - } - } - return 0; -} -EXPORT_SYMBOL(ttm_populate_and_map_pages); - -void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) -{ - unsigned i, j; - - for (i = 0; i < tt->ttm.num_pages;) { - struct page *p = tt->ttm.pages[i]; - size_t num_pages = 1; - - if (!tt->dma_address[i] || !tt->ttm.pages[i]) { - ++i; - continue; - } - - for (j = i + 1; j < tt->ttm.num_pages; ++j) { - if (++p != tt->ttm.pages[j]) - break; - - ++num_pages; - } - - dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE, - DMA_BIDIRECTIONAL); - - i += num_pages; - } - ttm_pool_unpopulate(&tt->ttm); -} -EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); - -int ttm_page_alloc_debugfs(struct seq_file *m, void *data) -{ - struct ttm_page_pool *p; - unsigned i; - char *h[] = {"pool", "refills", "pages freed", "size"}; - if (!_manager) { - seq_printf(m, "No pool allocator running.\n"); - return 0; - } - seq_printf(m, "%7s %12s %13s %8s\n", - h[0], h[1], h[2], h[3]); - for (i 
= 0; i < NUM_POOLS; ++i) { - p = &_manager->pools[i]; - - seq_printf(m, "%7s %12ld %13ld %8d\n", - p->name, p->nrefills, - p->nfrees, p->npages); - } - return 0; -} -EXPORT_SYMBOL(ttm_page_alloc_debugfs); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c deleted file mode 100644 index faefaaef7909..000000000000 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ /dev/null @@ -1,1239 +0,0 @@ -/* - * Copyright 2011 (c) Oracle Corp. - - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sub license, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> - */ - -/* - * A simple DMA pool losely based on dmapool.c. It has certain advantages - * over the DMA pools: - * - Pool collects resently freed pages for reuse (and hooks up to - * the shrinker). - * - Tracks currently in use pages - * - Tracks whether the page is UC, WB or cached (and reverts to WB - * when freed). - */ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/dma-mapping.h> -#include <linux/list.h> -#include <linux/seq_file.h> /* for seq_printf */ -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/highmem.h> -#include <linux/mm_types.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/atomic.h> -#include <linux/device.h> -#include <linux/kthread.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> -#include <drm/ttm/ttm_set_memory.h> - -#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) -#define SMALL_ALLOCATION 4 -#define FREE_ALL_PAGES (~0U) -#define VADDR_FLAG_HUGE_POOL 1UL -#define VADDR_FLAG_UPDATED_COUNT 2UL - -enum pool_type { - IS_UNDEFINED = 0, - IS_WC = 1 << 1, - IS_UC = 1 << 2, - IS_CACHED = 1 << 3, - IS_DMA32 = 1 << 4, - IS_HUGE = 1 << 5 -}; - -/* - * The pool structure. There are up to nine pools: - * - generic (not restricted to DMA32): - * - write combined, uncached, cached. - * - dma32 (up to 2^32 - so up 4GB): - * - write combined, uncached, cached. - * - huge (not restricted to DMA32): - * - write combined, uncached, cached. - * for each 'struct device'. The 'cached' is for pages that are actively used. - * The other ones can be shrunk by the shrinker API if neccessary. - * @pools: The 'struct device->dma_pools' link. - * @type: Type of the pool - * @lock: Protects the free_list from concurrnet access. Must be - * used with irqsave/irqrestore variants because pool allocator maybe called - * from delayed work. 
- * @free_list: Pool of pages that are free to be used. No order requirements. - * @dev: The device that is associated with these pools. - * @size: Size used during DMA allocation. - * @npages_free: Count of available pages for re-use. - * @npages_in_use: Count of pages that are in use. - * @nfrees: Stats when pool is shrinking. - * @nrefills: Stats when the pool is grown. - * @gfp_flags: Flags to pass for alloc_page. - * @name: Name of the pool. - * @dev_name: Name derieved from dev - similar to how dev_info works. - * Used during shutdown as the dev_info during release is unavailable. - */ -struct dma_pool { - struct list_head pools; /* The 'struct device->dma_pools link */ - enum pool_type type; - spinlock_t lock; - struct list_head free_list; - struct device *dev; - unsigned size; - unsigned npages_free; - unsigned npages_in_use; - unsigned long nfrees; /* Stats when shrunk. */ - unsigned long nrefills; /* Stats when grown. */ - gfp_t gfp_flags; - char name[13]; /* "cached dma32" */ - char dev_name[64]; /* Constructed from dev */ -}; - -/* - * The accounting page keeping track of the allocated page along with - * the DMA address. - * @page_list: The link to the 'page_list' in 'struct dma_pool'. - * @vaddr: The virtual address of the page and a flag if the page belongs to a - * huge pool - * @dma: The bus address of the page. If the page is not allocated - * via the DMA API, it will be -1. - */ -struct dma_page { - struct list_head page_list; - unsigned long vaddr; - struct page *p; - dma_addr_t dma; -}; - -/* - * Limits for the pool. They are handled without locks because only place where - * they may change is in sysfs store. They won't have immediate effect anyway - * so forcing serialization to access them is pointless. - */ - -struct ttm_pool_opts { - unsigned alloc_size; - unsigned max_size; - unsigned small; -}; - -/* - * Contains the list of all of the 'struct device' and their corresponding - * DMA pools. Guarded by _mutex->lock. - * @pools: The link to 'struct ttm_pool_manager->pools' - * @dev: The 'struct device' associated with the 'pool' - * @pool: The 'struct dma_pool' associated with the 'dev' - */ -struct device_pools { - struct list_head pools; - struct device *dev; - struct dma_pool *pool; -}; - -/* - * struct ttm_pool_manager - Holds memory pools for fast allocation - * - * @lock: Lock used when adding/removing from pools - * @pools: List of 'struct device' and 'struct dma_pool' tuples. - * @options: Limits for the pool. - * @npools: Total amount of pools in existence. 
- * @shrinker: The structure used by [un|]register_shrinker - */ -struct ttm_pool_manager { - struct mutex lock; - struct list_head pools; - struct ttm_pool_opts options; - unsigned npools; - struct shrinker mm_shrink; - struct kobject kobj; -}; - -static struct ttm_pool_manager *_manager; - -static struct attribute ttm_page_pool_max = { - .name = "pool_max_size", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_small = { - .name = "pool_small_allocation", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_page_pool_alloc_size = { - .name = "pool_allocation_size", - .mode = S_IRUGO | S_IWUSR -}; - -static struct attribute *ttm_pool_attrs[] = { - &ttm_page_pool_max, - &ttm_page_pool_small, - &ttm_page_pool_alloc_size, - NULL -}; - -static void ttm_pool_kobj_release(struct kobject *kobj) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - kfree(m); -} - -static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t size) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - int chars; - unsigned val; - - chars = sscanf(buffer, "%u", &val); - if (chars == 0) - return size; - - /* Convert kb to number of pages */ - val = val / (PAGE_SIZE >> 10); - - if (attr == &ttm_page_pool_max) { - m->options.max_size = val; - } else if (attr == &ttm_page_pool_small) { - m->options.small = val; - } else if (attr == &ttm_page_pool_alloc_size) { - if (val > NUM_PAGES_TO_ALLOC*8) { - pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - return size; - } else if (val > NUM_PAGES_TO_ALLOC) { - pr_warn("Setting allocation size to larger than %lu is not recommended\n", - NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); - } - m->options.alloc_size = val; - } - - return size; -} - -static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr, - char *buffer) -{ - struct ttm_pool_manager *m = - container_of(kobj, struct ttm_pool_manager, kobj); - unsigned val = 0; - - if (attr == &ttm_page_pool_max) - val = m->options.max_size; - else if (attr == &ttm_page_pool_small) - val = m->options.small; - else if (attr == &ttm_page_pool_alloc_size) - val = m->options.alloc_size; - - val = val * (PAGE_SIZE >> 10); - - return snprintf(buffer, PAGE_SIZE, "%u\n", val); -} - -static const struct sysfs_ops ttm_pool_sysfs_ops = { - .show = &ttm_pool_show, - .store = &ttm_pool_store, -}; - -static struct kobj_type ttm_pool_kobj_type = { - .release = &ttm_pool_kobj_release, - .sysfs_ops = &ttm_pool_sysfs_ops, - .default_attrs = ttm_pool_attrs, -}; - -static int ttm_set_pages_caching(struct dma_pool *pool, - struct page **pages, unsigned cpages) -{ - int r = 0; - /* Set page caching */ - if (pool->type & IS_UC) { - r = ttm_set_pages_array_uc(pages, cpages); - if (r) - pr_err("%s: Failed to set %d pages to uc!\n", - pool->dev_name, cpages); - } - if (pool->type & IS_WC) { - r = ttm_set_pages_array_wc(pages, cpages); - if (r) - pr_err("%s: Failed to set %d pages to wc!\n", - pool->dev_name, cpages); - } - return r; -} - -static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) -{ - unsigned long attrs = 0; - dma_addr_t dma = d_page->dma; - d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; - if (pool->type & IS_HUGE) - attrs = DMA_ATTR_NO_WARN; - - dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); - - kfree(d_page); - d_page = NULL; -} -static 
struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool) -{ - struct dma_page *d_page; - unsigned long attrs = 0; - void *vaddr; - - d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL); - if (!d_page) - return NULL; - - if (pool->type & IS_HUGE) - attrs = DMA_ATTR_NO_WARN; - - vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma, - pool->gfp_flags, attrs); - if (vaddr) { - if (is_vmalloc_addr(vaddr)) - d_page->p = vmalloc_to_page(vaddr); - else - d_page->p = virt_to_page(vaddr); - d_page->vaddr = (unsigned long)vaddr; - if (pool->type & IS_HUGE) - d_page->vaddr |= VADDR_FLAG_HUGE_POOL; - } else { - kfree(d_page); - d_page = NULL; - } - return d_page; -} -static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate) -{ - enum pool_type type = IS_UNDEFINED; - - if (flags & TTM_PAGE_FLAG_DMA32) - type |= IS_DMA32; - if (cstate == tt_cached) - type |= IS_CACHED; - else if (cstate == tt_uncached) - type |= IS_UC; - else - type |= IS_WC; - - return type; -} - -static void ttm_pool_update_free_locked(struct dma_pool *pool, - unsigned freed_pages) -{ - pool->npages_free -= freed_pages; - pool->nfrees += freed_pages; - -} - -/* set memory back to wb and free the pages. */ -static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) -{ - struct page *page = d_page->p; - unsigned num_pages; - - /* Don't set WB on WB page pool. */ - if (!(pool->type & IS_CACHED)) { - num_pages = pool->size / PAGE_SIZE; - if (ttm_set_pages_wb(page, num_pages)) - pr_err("%s: Failed to set %d pages to wb!\n", - pool->dev_name, num_pages); - } - - list_del(&d_page->page_list); - __ttm_dma_free_page(pool, d_page); -} - -static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, - struct page *pages[], unsigned npages) -{ - struct dma_page *d_page, *tmp; - - if (pool->type & IS_HUGE) { - list_for_each_entry_safe(d_page, tmp, d_pages, page_list) - ttm_dma_page_put(pool, d_page); - - return; - } - - /* Don't set WB on WB page pool. */ - if (npages && !(pool->type & IS_CACHED) && - ttm_set_pages_array_wb(pages, npages)) - pr_err("%s: Failed to set %d pages to wb!\n", - pool->dev_name, npages); - - list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { - list_del(&d_page->page_list); - __ttm_dma_free_page(pool, d_page); - } -} - -/* - * Free pages from pool. - * - * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC - * number of pages in one go. 
- * - * @pool: to free the pages from - * @nr_free: If set to true will free all pages in pool - * @use_static: Safe to use static buffer - **/ -static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, - bool use_static) -{ - static struct page *static_buf[NUM_PAGES_TO_ALLOC]; - unsigned long irq_flags; - struct dma_page *dma_p, *tmp; - struct page **pages_to_free; - struct list_head d_pages; - unsigned freed_pages = 0, - npages_to_free = nr_free; - - if (NUM_PAGES_TO_ALLOC < nr_free) - npages_to_free = NUM_PAGES_TO_ALLOC; - - if (use_static) - pages_to_free = static_buf; - else - pages_to_free = kmalloc_array(npages_to_free, - sizeof(struct page *), - GFP_KERNEL); - - if (!pages_to_free) { - pr_debug("%s: Failed to allocate memory for pool free operation\n", - pool->dev_name); - return 0; - } - INIT_LIST_HEAD(&d_pages); -restart: - spin_lock_irqsave(&pool->lock, irq_flags); - - /* We picking the oldest ones off the list */ - list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, - page_list) { - if (freed_pages >= npages_to_free) - break; - - /* Move the dma_page from one list to another. */ - list_move(&dma_p->page_list, &d_pages); - - pages_to_free[freed_pages++] = dma_p->p; - /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ - if (freed_pages >= NUM_PAGES_TO_ALLOC) { - - ttm_pool_update_free_locked(pool, freed_pages); - /** - * Because changing page caching is costly - * we unlock the pool to prevent stalling. - */ - spin_unlock_irqrestore(&pool->lock, irq_flags); - - ttm_dma_pages_put(pool, &d_pages, pages_to_free, - freed_pages); - - INIT_LIST_HEAD(&d_pages); - - if (likely(nr_free != FREE_ALL_PAGES)) - nr_free -= freed_pages; - - if (NUM_PAGES_TO_ALLOC >= nr_free) - npages_to_free = nr_free; - else - npages_to_free = NUM_PAGES_TO_ALLOC; - - freed_pages = 0; - - /* free all so restart the processing */ - if (nr_free) - goto restart; - - /* Not allowed to fall through or break because - * following context is inside spinlock while we are - * outside here. - */ - goto out; - - } - } - - /* remove range of pages from the pool */ - if (freed_pages) { - ttm_pool_update_free_locked(pool, freed_pages); - nr_free -= freed_pages; - } - - spin_unlock_irqrestore(&pool->lock, irq_flags); - - if (freed_pages) - ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); -out: - if (pages_to_free != static_buf) - kfree(pages_to_free); - return nr_free; -} - -static void ttm_dma_free_pool(struct device *dev, enum pool_type type) -{ - struct device_pools *p; - struct dma_pool *pool; - - if (!dev) - return; - - mutex_lock(&_manager->lock); - list_for_each_entry_reverse(p, &_manager->pools, pools) { - if (p->dev != dev) - continue; - pool = p->pool; - if (pool->type != type) - continue; - - list_del(&p->pools); - kfree(p); - _manager->npools--; - break; - } - list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { - if (pool->type != type) - continue; - /* Takes a spinlock.. */ - /* OK to use static buffer since global mutex is held. */ - ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true); - WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); - /* This code path is called after _all_ references to the - * struct device has been dropped - so nobody should be - * touching it. In case somebody is trying to _add_ we are - * guarded by the mutex. */ - list_del(&pool->pools); - kfree(pool); - break; - } - mutex_unlock(&_manager->lock); -} - -/* - * On free-ing of the 'struct device' this deconstructor is run. 
- * Albeit the pool might have already been freed earlier. - */ -static void ttm_dma_pool_release(struct device *dev, void *res) -{ - struct dma_pool *pool = *(struct dma_pool **)res; - - if (pool) - ttm_dma_free_pool(dev, pool->type); -} - -static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data) -{ - return *(struct dma_pool **)res == match_data; -} - -static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags, - enum pool_type type) -{ - const char *n[] = {"wc", "uc", "cached", " dma32", "huge"}; - enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE}; - struct device_pools *sec_pool = NULL; - struct dma_pool *pool = NULL, **ptr; - unsigned i; - int ret = -ENODEV; - char *p; - - if (!dev) - return NULL; - - ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - ret = -ENOMEM; - - pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL, - dev_to_node(dev)); - if (!pool) - goto err_mem; - - sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL, - dev_to_node(dev)); - if (!sec_pool) - goto err_mem; - - INIT_LIST_HEAD(&sec_pool->pools); - sec_pool->dev = dev; - sec_pool->pool = pool; - - INIT_LIST_HEAD(&pool->free_list); - INIT_LIST_HEAD(&pool->pools); - spin_lock_init(&pool->lock); - pool->dev = dev; - pool->npages_free = pool->npages_in_use = 0; - pool->nfrees = 0; - pool->gfp_flags = flags; - if (type & IS_HUGE) -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - pool->size = HPAGE_PMD_SIZE; -#else - BUG(); -#endif - else - pool->size = PAGE_SIZE; - pool->type = type; - pool->nrefills = 0; - p = pool->name; - for (i = 0; i < ARRAY_SIZE(t); i++) { - if (type & t[i]) { - p += scnprintf(p, sizeof(pool->name) - (p - pool->name), - "%s", n[i]); - } - } - *p = 0; - /* We copy the name for pr_ calls b/c when dma_pool_destroy is called - * - the kobj->name has already been deallocated.*/ - snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s", - dev_driver_string(dev), dev_name(dev)); - mutex_lock(&_manager->lock); - /* You can get the dma_pool from either the global: */ - list_add(&sec_pool->pools, &_manager->pools); - _manager->npools++; - /* or from 'struct device': */ - list_add(&pool->pools, &dev->dma_pools); - mutex_unlock(&_manager->lock); - - *ptr = pool; - devres_add(dev, ptr); - - return pool; -err_mem: - devres_free(ptr); - kfree(sec_pool); - kfree(pool); - return ERR_PTR(ret); -} - -static struct dma_pool *ttm_dma_find_pool(struct device *dev, - enum pool_type type) -{ - struct dma_pool *pool, *tmp; - - if (type == IS_UNDEFINED) - return NULL; - - /* NB: We iterate on the 'struct dev' which has no spinlock, but - * it does have a kref which we have taken. The kref is taken during - * graphic driver loading - in the drm_pci_init it calls either - * pci_dev_get or pci_register_driver which both end up taking a kref - * on 'struct device'. - * - * On teardown, the graphic drivers end up quiescing the TTM (put_pages) - * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice - * thing is at that point of time there are no pages associated with the - * driver so this function will not be called. - */ - list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) - if (pool->type == type) - return pool; - return NULL; -} - -/* - * Free pages the pages that failed to change the caching state. If there - * are pages that have changed their caching state already put them to the - * pool. 
- */ -static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool, - struct list_head *d_pages, - struct page **failed_pages, - unsigned cpages) -{ - struct dma_page *d_page, *tmp; - struct page *p; - unsigned i = 0; - - p = failed_pages[0]; - if (!p) - return; - /* Find the failed page. */ - list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { - if (d_page->p != p) - continue; - /* .. and then progress over the full list. */ - list_del(&d_page->page_list); - __ttm_dma_free_page(pool, d_page); - if (++i < cpages) - p = failed_pages[i]; - else - break; - } - -} - -/* - * Allocate 'count' pages, and put 'need' number of them on the - * 'pages' and as well on the 'dma_address' starting at 'dma_offset' offset. - * The full list of pages should also be on 'd_pages'. - * We return zero for success, and negative numbers as errors. - */ -static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, - struct list_head *d_pages, - unsigned count) -{ - struct page **caching_array; - struct dma_page *dma_p; - struct page *p; - int r = 0; - unsigned i, j, npages, cpages; - unsigned max_cpages = min(count, - (unsigned)(PAGE_SIZE/sizeof(struct page *))); - - /* allocate array for page caching change */ - caching_array = kmalloc_array(max_cpages, sizeof(struct page *), - GFP_KERNEL); - - if (!caching_array) { - pr_debug("%s: Unable to allocate table for new pages\n", - pool->dev_name); - return -ENOMEM; - } - - if (count > 1) - pr_debug("%s: (%s:%d) Getting %d pages\n", - pool->dev_name, pool->name, current->pid, count); - - for (i = 0, cpages = 0; i < count; ++i) { - dma_p = __ttm_dma_alloc_page(pool); - if (!dma_p) { - pr_debug("%s: Unable to get page %u\n", - pool->dev_name, i); - - /* store already allocated pages in the pool after - * setting the caching state */ - if (cpages) { - r = ttm_set_pages_caching(pool, caching_array, - cpages); - if (r) - ttm_dma_handle_caching_state_failure( - pool, d_pages, caching_array, - cpages); - } - r = -ENOMEM; - goto out; - } - p = dma_p->p; - list_add(&dma_p->page_list, d_pages); - -#ifdef CONFIG_HIGHMEM - /* gfp flags of highmem page should never be dma32 so we - * we should be fine in such case - */ - if (PageHighMem(p)) - continue; -#endif - - npages = pool->size / PAGE_SIZE; - for (j = 0; j < npages; ++j) { - caching_array[cpages++] = p + j; - if (cpages == max_cpages) { - /* Note: Cannot hold the spinlock */ - r = ttm_set_pages_caching(pool, caching_array, - cpages); - if (r) { - ttm_dma_handle_caching_state_failure( - pool, d_pages, caching_array, - cpages); - goto out; - } - cpages = 0; - } - } - } - - if (cpages) { - r = ttm_set_pages_caching(pool, caching_array, cpages); - if (r) - ttm_dma_handle_caching_state_failure(pool, d_pages, - caching_array, cpages); - } -out: - kfree(caching_array); - return r; -} - -/* - * @return count of pages still required to fulfill the request. - */ -static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, - unsigned long *irq_flags) -{ - unsigned count = _manager->options.small; - int r = pool->npages_free; - - if (count > pool->npages_free) { - struct list_head d_pages; - - INIT_LIST_HEAD(&d_pages); - - spin_unlock_irqrestore(&pool->lock, *irq_flags); - - /* Returns how many more are neccessary to fulfill the - * request. */ - r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count); - - spin_lock_irqsave(&pool->lock, *irq_flags); - if (!r) { - /* Add the fresh to the end.. 
*/ - list_splice(&d_pages, &pool->free_list); - ++pool->nrefills; - pool->npages_free += count; - r = count; - } else { - struct dma_page *d_page; - unsigned cpages = 0; - - pr_debug("%s: Failed to fill %s pool (r:%d)!\n", - pool->dev_name, pool->name, r); - - list_for_each_entry(d_page, &d_pages, page_list) { - cpages++; - } - list_splice_tail(&d_pages, &pool->free_list); - pool->npages_free += cpages; - r = cpages; - } - } - return r; -} - -/* - * The populate list is actually a stack (not that is matters as TTM - * allocates one page at a time. - * return dma_page pointer if success, otherwise NULL. - */ -static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool, - struct ttm_dma_tt *ttm_dma, - unsigned index) -{ - struct dma_page *d_page = NULL; - struct ttm_tt *ttm = &ttm_dma->ttm; - unsigned long irq_flags; - int count; - - spin_lock_irqsave(&pool->lock, irq_flags); - count = ttm_dma_page_pool_fill_locked(pool, &irq_flags); - if (count) { - d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); - ttm->pages[index] = d_page->p; - ttm_dma->dma_address[index] = d_page->dma; - list_move_tail(&d_page->page_list, &ttm_dma->pages_list); - pool->npages_in_use += 1; - pool->npages_free -= 1; - } - spin_unlock_irqrestore(&pool->lock, irq_flags); - return d_page; -} - -static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge) -{ - struct ttm_tt *ttm = &ttm_dma->ttm; - gfp_t gfp_flags; - - if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) - gfp_flags = GFP_USER | GFP_DMA32; - else - gfp_flags = GFP_HIGHUSER; - if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - gfp_flags |= __GFP_ZERO; - - if (huge) { - gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | - __GFP_KSWAPD_RECLAIM; - gfp_flags &= ~__GFP_MOVABLE; - gfp_flags &= ~__GFP_COMP; - } - - if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY) - gfp_flags |= __GFP_RETRY_MAYFAIL; - - return gfp_flags; -} - -/* - * On success pages list will hold count number of correctly - * cached pages. On failure will hold the negative return value (-ENOMEM, etc). 
- */ -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, - struct ttm_operation_ctx *ctx) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - struct ttm_tt *ttm = &ttm_dma->ttm; - unsigned long num_pages = ttm->num_pages; - struct dma_pool *pool; - struct dma_page *d_page; - enum pool_type type; - unsigned i; - int ret; - - if (ttm->state != tt_unpopulated) - return 0; - - if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx)) - return -ENOMEM; - - INIT_LIST_HEAD(&ttm_dma->pages_list); - i = 0; - - type = ttm_to_type(ttm->page_flags, ttm->caching_state); - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) - goto skip_huge; - - pool = ttm_dma_find_pool(dev, type | IS_HUGE); - if (!pool) { - gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true); - - pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE); - if (IS_ERR_OR_NULL(pool)) - goto skip_huge; - } - - while (num_pages >= HPAGE_PMD_NR) { - unsigned j; - - d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i); - if (!d_page) - break; - - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - pool->size, ctx); - if (unlikely(ret != 0)) { - ttm_dma_unpopulate(ttm_dma, dev); - return -ENOMEM; - } - - d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT; - for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) { - ttm->pages[j] = ttm->pages[j - 1] + 1; - ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] + - PAGE_SIZE; - } - - i += HPAGE_PMD_NR; - num_pages -= HPAGE_PMD_NR; - } - -skip_huge: -#endif - - pool = ttm_dma_find_pool(dev, type); - if (!pool) { - gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false); - - pool = ttm_dma_pool_init(dev, gfp_flags, type); - if (IS_ERR_OR_NULL(pool)) - return -ENOMEM; - } - - while (num_pages) { - d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i); - if (!d_page) { - ttm_dma_unpopulate(ttm_dma, dev); - return -ENOMEM; - } - - ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], - pool->size, ctx); - if (unlikely(ret != 0)) { - ttm_dma_unpopulate(ttm_dma, dev); - return -ENOMEM; - } - - d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT; - ++i; - --num_pages; - } - - if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { - ret = ttm_tt_swapin(ttm); - if (unlikely(ret != 0)) { - ttm_dma_unpopulate(ttm_dma, dev); - return ret; - } - } - - ttm->state = tt_unbound; - return 0; -} -EXPORT_SYMBOL_GPL(ttm_dma_populate); - -/* Put all pages in pages list to correct pool to wait for reuse */ -void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) -{ - struct ttm_mem_global *mem_glob = &ttm_mem_glob; - struct ttm_tt *ttm = &ttm_dma->ttm; - struct dma_pool *pool; - struct dma_page *d_page, *next; - enum pool_type type; - bool is_cached = false; - unsigned count, i, npages = 0; - unsigned long irq_flags; - - type = ttm_to_type(ttm->page_flags, ttm->caching_state); - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - pool = ttm_dma_find_pool(dev, type | IS_HUGE); - if (pool) { - count = 0; - list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, - page_list) { - if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL)) - continue; - - count++; - if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) { - ttm_mem_global_free_page(mem_glob, d_page->p, - pool->size); - d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT; - } - ttm_dma_page_put(pool, d_page); - } - - spin_lock_irqsave(&pool->lock, irq_flags); - pool->npages_in_use -= count; - pool->nfrees += count; - spin_unlock_irqrestore(&pool->lock, irq_flags); - } -#endif - - pool = ttm_dma_find_pool(dev, type); - if (!pool) - return; - - is_cached 
= (ttm_dma_find_pool(pool->dev, - ttm_to_type(ttm->page_flags, tt_cached)) == pool); - - /* make sure pages array match list and count number of pages */ - count = 0; - list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, - page_list) { - ttm->pages[count] = d_page->p; - count++; - - if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) { - ttm_mem_global_free_page(mem_glob, d_page->p, - pool->size); - d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT; - } - - if (is_cached) - ttm_dma_page_put(pool, d_page); - } - - spin_lock_irqsave(&pool->lock, irq_flags); - pool->npages_in_use -= count; - if (is_cached) { - pool->nfrees += count; - } else { - pool->npages_free += count; - list_splice(&ttm_dma->pages_list, &pool->free_list); - /* - * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages - * to free in order to minimize calls to set_memory_wb(). - */ - if (pool->npages_free >= (_manager->options.max_size + - NUM_PAGES_TO_ALLOC)) - npages = pool->npages_free - _manager->options.max_size; - } - spin_unlock_irqrestore(&pool->lock, irq_flags); - - INIT_LIST_HEAD(&ttm_dma->pages_list); - for (i = 0; i < ttm->num_pages; i++) { - ttm->pages[i] = NULL; - ttm_dma->dma_address[i] = 0; - } - - /* shrink pool if necessary (only on !is_cached pools)*/ - if (npages) - ttm_dma_page_pool_free(pool, npages, false); - ttm->state = tt_unpopulated; -} -EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); - -/** - * Callback for mm to request pool to reduce number of page held. - * - * XXX: (dchinner) Deadlock warning! - * - * I'm getting sadder as I hear more pathetical whimpers about needing per-pool - * shrinkers - */ -static unsigned long -ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) -{ - static unsigned start_pool; - unsigned idx = 0; - unsigned pool_offset; - unsigned shrink_pages = sc->nr_to_scan; - struct device_pools *p; - unsigned long freed = 0; - - if (list_empty(&_manager->pools)) - return SHRINK_STOP; - - if (!mutex_trylock(&_manager->lock)) - return SHRINK_STOP; - if (!_manager->npools) - goto out; - pool_offset = ++start_pool % _manager->npools; - list_for_each_entry(p, &_manager->pools, pools) { - unsigned nr_free; - - if (!p->dev) - continue; - if (shrink_pages == 0) - break; - /* Do it in round-robin fashion. */ - if (++idx < pool_offset) - continue; - nr_free = shrink_pages; - /* OK to use static buffer since global mutex is held. 
*/ - shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); - freed += nr_free - shrink_pages; - - pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", - p->pool->dev_name, p->pool->name, current->pid, - nr_free, shrink_pages); - } -out: - mutex_unlock(&_manager->lock); - return freed; -} - -static unsigned long -ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -{ - struct device_pools *p; - unsigned long count = 0; - - if (!mutex_trylock(&_manager->lock)) - return 0; - list_for_each_entry(p, &_manager->pools, pools) - count += p->pool->npages_free; - mutex_unlock(&_manager->lock); - return count; -} - -static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager) -{ - manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count; - manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan; - manager->mm_shrink.seeks = 1; - return register_shrinker(&manager->mm_shrink); -} - -static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager) -{ - unregister_shrinker(&manager->mm_shrink); -} - -int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) -{ - int ret; - - WARN_ON(_manager); - - pr_info("Initializing DMA pool allocator\n"); - - _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); - if (!_manager) - return -ENOMEM; - - mutex_init(&_manager->lock); - INIT_LIST_HEAD(&_manager->pools); - - _manager->options.max_size = max_pages; - _manager->options.small = SMALL_ALLOCATION; - _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; - - /* This takes care of auto-freeing the _manager */ - ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, - &glob->kobj, "dma_pool"); - if (unlikely(ret != 0)) - goto error; - - ret = ttm_dma_pool_mm_shrink_init(_manager); - if (unlikely(ret != 0)) - goto error; - return 0; - -error: - kobject_put(&_manager->kobj); - _manager = NULL; - return ret; -} - -void ttm_dma_page_alloc_fini(void) -{ - struct device_pools *p, *t; - - pr_info("Finalizing DMA pool allocator\n"); - ttm_dma_pool_mm_shrink_fini(_manager); - - list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) { - dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name, - current->pid); - WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release, - ttm_dma_pool_match, p->pool)); - ttm_dma_free_pool(p->dev, p->pool->type); - } - kobject_put(&_manager->kobj); - _manager = NULL; -} - -int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) -{ - struct device_pools *p; - struct dma_pool *pool = NULL; - - if (!_manager) { - seq_printf(m, "No pool allocator running.\n"); - return 0; - } - seq_printf(m, " pool refills pages freed inuse available name\n"); - mutex_lock(&_manager->lock); - list_for_each_entry(p, &_manager->pools, pools) { - struct device *dev = p->dev; - if (!dev) - continue; - pool = p->pool; - seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n", - pool->name, pool->nrefills, - pool->nfrees, pool->npages_in_use, - pool->npages_free, - pool->dev_name); - } - mutex_unlock(&_manager->lock); - return 0; -} -EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c new file mode 100644 index 000000000000..44ec41aa78d6 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+/* Pooling of allocated pages is necessary because changing the caching
+ * attributes on x86 of the linear mapping requires a costly cross CPU TLB
+ * invalidate for those addresses.
+ *
+ * In addition to that, allocations from the DMA coherent API are pooled as
+ * well because they are rather slow compared to alloc_pages+map.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include <drm/ttm/ttm_pool.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
+
+/**
+ * struct ttm_pool_dma - Helper object for coherent DMA mappings
+ *
+ * @addr: original DMA address returned for the mapping
+ * @vaddr: original vaddr returned for the mapping, with the allocation
+ * order stored in the lower bits
+ */
+struct ttm_pool_dma {
+	dma_addr_t addr;
+	unsigned long vaddr;
+};
+
+static unsigned long page_pool_size;
+
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+module_param(page_pool_size, ulong, 0644);
+
+static atomic_long_t allocated_pages;
+
+static struct ttm_pool_type global_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_uncached[MAX_ORDER];
+
+static spinlock_t shrinker_lock;
+static struct list_head shrinker_list;
+static struct shrinker mm_shrinker;
+
+/* Allocate pages of size 1 << order with the given gfp_flags */
+static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
+					unsigned int order)
+{
+	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+	struct ttm_pool_dma *dma;
+	struct page *p;
+	void *vaddr;
+
+	if (order) {
+		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+			__GFP_KSWAPD_RECLAIM;
+		gfp_flags &= ~__GFP_MOVABLE;
+		gfp_flags &= ~__GFP_COMP;
+	}
+
+	if (!pool->use_dma_alloc) {
+		p = alloc_pages(gfp_flags, order);
+		if (p)
+			p->private = order;
+		return p;
+	}
+
+	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return NULL;
+
+	if (order)
+		attr |= DMA_ATTR_NO_WARN;
+
+	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
+				&dma->addr, gfp_flags, attr);
+	if (!vaddr)
+		goto error_free;
+
+	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
+	 * TTM page fault handling and extend the DMA API to clean this up.
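+	 *
+	 * A quick sketch of the bookkeeping that follows, mirrored by
+	 * ttm_pool_page_order() and ttm_pool_free_page() below: vaddr is
+	 * page aligned, so its PAGE_SHIFT low bits are zero and can hold
+	 * the order, which is always smaller than MAX_ORDER:
+	 *
+	 *	dma->vaddr = (unsigned long)vaddr | order;
+	 *	order = dma->vaddr & ~PAGE_MASK;
+	 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);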
+	 */
+	if (is_vmalloc_addr(vaddr))
+		p = vmalloc_to_page(vaddr);
+	else
+		p = virt_to_page(vaddr);
+
+	dma->vaddr = (unsigned long)vaddr | order;
+	p->private = (unsigned long)dma;
+	return p;
+
+error_free:
+	kfree(dma);
+	return NULL;
+}
+
+/* Reset the caching and pages of size 1 << order */
+static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
+			       unsigned int order, struct page *p)
+{
+	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+	struct ttm_pool_dma *dma;
+	void *vaddr;
+
+#ifdef CONFIG_X86
+	/* We don't care that set_pages_wb is inefficient here. This is only
+	 * used when we have to shrink and CPU overhead is irrelevant then.
+	 */
+	if (caching != ttm_cached && !PageHighMem(p))
+		set_pages_wb(p, 1 << order);
+#endif
+
+	if (!pool->use_dma_alloc) {
+		__free_pages(p, order);
+		return;
+	}
+
+	if (order)
+		attr |= DMA_ATTR_NO_WARN;
+
+	dma = (void *)p->private;
+	vaddr = (void *)(dma->vaddr & PAGE_MASK);
+	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
+		       attr);
+	kfree(dma);
+}
+
+/* Apply a new caching to an array of pages */
+static int ttm_pool_apply_caching(struct page **first, struct page **last,
+				  enum ttm_caching caching)
+{
+#ifdef CONFIG_X86
+	unsigned int num_pages = last - first;
+
+	if (!num_pages)
+		return 0;
+
+	switch (caching) {
+	case ttm_cached:
+		break;
+	case ttm_write_combined:
+		return set_pages_array_wc(first, num_pages);
+	case ttm_uncached:
+		return set_pages_array_uc(first, num_pages);
+	}
+#endif
+	return 0;
+}
+
+/* Map pages of 1 << order size and fill the DMA address array */
+static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
+			struct page *p, dma_addr_t **dma_addr)
+{
+	dma_addr_t addr;
+	unsigned int i;
+
+	if (pool->use_dma_alloc) {
+		struct ttm_pool_dma *dma = (void *)p->private;
+
+		addr = dma->addr;
+	} else {
+		size_t size = (1ULL << order) * PAGE_SIZE;
+
+		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
+		/* Check the mapping we just created, not the still unwritten
+		 * output slot behind *dma_addr.
+		 */
+		if (dma_mapping_error(pool->dev, addr))
+			return -EFAULT;
+	}
+
+	for (i = 1 << order; i ; --i) {
+		*(*dma_addr)++ = addr;
+		addr += PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/* Unmap pages of 1 << order size */
+static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
+			   unsigned int num_pages)
+{
+	/* Unmapped while freeing the page */
+	if (pool->use_dma_alloc)
+		return;
+
+	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
+		       DMA_BIDIRECTIONAL);
+}
+
+/* Give pages into a specific pool_type */
+static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
+{
+	spin_lock(&pt->lock);
+	list_add(&p->lru, &pt->pages);
+	spin_unlock(&pt->lock);
+	atomic_long_add(1 << pt->order, &allocated_pages);
+}
+
+/* Take pages from a specific pool_type, return NULL when nothing available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+{
+	struct page *p;
+
+	spin_lock(&pt->lock);
+	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
+	if (p) {
+		atomic_long_sub(1 << pt->order, &allocated_pages);
+		list_del(&p->lru);
+	}
+	spin_unlock(&pt->lock);
+
+	return p;
+}
+
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+	unsigned int count = 0;
+	struct page *p;
+
+	spin_lock(&pt->lock);
+	/* Only used for debugfs, the overhead doesn't matter */
+	list_for_each_entry(p, &pt->pages, lru)
+		++count;
+	spin_unlock(&pt->lock);
+
+	return count;
+}
+
+/* Initialize and add a pool type to the global shrinker list */
+static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+			       enum ttm_caching caching, unsigned int order)
+{
+	pt->pool = pool;
+	pt->caching = caching;
+	pt->order = order;
+	spin_lock_init(&pt->lock);
+	INIT_LIST_HEAD(&pt->pages);
+
+	spin_lock(&shrinker_lock);
+	list_add_tail(&pt->shrinker_list, &shrinker_list);
+	spin_unlock(&shrinker_lock);
+}
+
+/* Remove a pool_type from the global shrinker list and free all pages */
+static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+{
+	struct page *p, *tmp;
+
+	spin_lock(&shrinker_lock);
+	list_del(&pt->shrinker_list);
+	spin_unlock(&shrinker_lock);
+
+	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
+		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+}
+
+/* Return the pool_type to use for the given caching and order */
+static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
+						  enum ttm_caching caching,
+						  unsigned int order)
+{
+	if (pool->use_dma_alloc)
+		return &pool->caching[caching].orders[order];
+
+#ifdef CONFIG_X86
+	switch (caching) {
+	case ttm_write_combined:
+		return &global_write_combined[order];
+	case ttm_uncached:
+		return &global_uncached[order];
+	default:
+		break;
+	}
+#endif
+
+	return NULL;
+}
+
+/* Free pages using the global shrinker list */
+static unsigned int ttm_pool_shrink(void)
+{
+	struct ttm_pool_type *pt;
+	unsigned int num_freed;
+	struct page *p;
+
+	spin_lock(&shrinker_lock);
+	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+
+	p = ttm_pool_type_take(pt);
+	if (p) {
+		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+		num_freed = 1 << pt->order;
+	} else {
+		num_freed = 0;
+	}
+
+	list_move_tail(&pt->shrinker_list, &shrinker_list);
+	spin_unlock(&shrinker_lock);
+
+	return num_freed;
+}
+
+/* Return the allocation order of a page */
+static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+{
+	if (pool->use_dma_alloc) {
+		struct ttm_pool_dma *dma = (void *)p->private;
+
+		return dma->vaddr & ~PAGE_MASK;
+	}
+
+	return p->private;
+}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+		   struct ttm_operation_ctx *ctx)
+{
+	unsigned long num_pages = tt->num_pages;
+	dma_addr_t *dma_addr = tt->dma_address;
+	struct page **caching = tt->pages;
+	struct page **pages = tt->pages;
+	gfp_t gfp_flags = GFP_USER;
+	unsigned int i, order;
+	struct page *p;
+	int r;
+
+	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
+	WARN_ON(dma_addr && !pool->dev);
+
+	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	if (ctx->gfp_retry_mayfail)
+		gfp_flags |= __GFP_RETRY_MAYFAIL;
+
+	if (pool->use_dma32)
+		gfp_flags |= GFP_DMA32;
+	else
+		gfp_flags |= GFP_HIGHUSER;
+
+	/* Begin with the largest order that could still satisfy what is left
+	 * of the request and fall back to smaller orders when an allocation
+	 * fails.
+	 */
+	for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+	     order = min_t(unsigned int, order, __fls(num_pages))) {
+		bool apply_caching = false;
+		struct ttm_pool_type *pt;
+
+		pt = ttm_pool_select_type(pool, tt->caching, order);
+		p = pt ? ttm_pool_type_take(pt) : NULL;
+		if (p) {
+			apply_caching = true;
+		} else {
+			p = ttm_pool_alloc_page(pool, gfp_flags, order);
+			if (p && PageHighMem(p))
+				apply_caching = true;
+		}
+
+		if (!p) {
+			if (order) {
+				--order;
+				continue;
+			}
+			r = -ENOMEM;
+			goto error_free_all;
+		}
+
+		if (apply_caching) {
+			r = ttm_pool_apply_caching(caching, pages,
+						   tt->caching);
+			if (r)
+				goto error_free_page;
+			caching = pages + (1 << order);
+		}
+
+		r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
+					      (1 << order) * PAGE_SIZE,
+					      ctx);
+		if (r)
+			goto error_free_page;
+
+		if (dma_addr) {
+			r = ttm_pool_map(pool, order, p, &dma_addr);
+			if (r)
+				goto error_global_free;
+		}
+
+		num_pages -= 1 << order;
+		for (i = 1 << order; i; --i)
+			*(pages++) = p++;
+	}
+
+	r = ttm_pool_apply_caching(caching, pages, tt->caching);
+	if (r)
+		goto error_free_all;
+
+	return 0;
+
+error_global_free:
+	ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);
+
+error_free_page:
+	ttm_pool_free_page(pool, tt->caching, order, p);
+
+error_free_all:
+	num_pages = tt->num_pages - num_pages;
+	for (i = 0; i < num_pages; ) {
+		order = ttm_pool_page_order(pool, tt->pages[i]);
+		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
+		i += 1 << order;
+	}
+
+	return r;
+}
+EXPORT_SYMBOL(ttm_pool_alloc);
+
+/**
+ * ttm_pool_free - Free the backing pages from a ttm_tt object
+ *
+ * @pool: Pool to give pages back to.
+ * @tt: ttm_tt object to unpopulate
+ *
+ * Give the backing pages back to a pool or free them
+ */
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+{
+	unsigned int i;
+
+	for (i = 0; i < tt->num_pages; ) {
+		struct page *p = tt->pages[i];
+		unsigned int order, num_pages;
+		struct ttm_pool_type *pt;
+
+		order = ttm_pool_page_order(pool, p);
+		num_pages = 1ULL << order;
+		ttm_mem_global_free_page(&ttm_mem_glob, p,
+					 num_pages * PAGE_SIZE);
+		if (tt->dma_address)
+			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+
+		pt = ttm_pool_select_type(pool, tt->caching, order);
+		if (pt)
+			ttm_pool_type_give(pt, tt->pages[i]);
+		else
+			ttm_pool_free_page(pool, tt->caching, order,
+					   tt->pages[i]);
+
+		i += num_pages;
+	}
+
+	while (atomic_long_read(&allocated_pages) > page_pool_size)
+		ttm_pool_shrink();
+}
+EXPORT_SYMBOL(ttm_pool_free);
+
+/**
+ * ttm_pool_init - Initialize a pool
+ *
+ * @pool: the pool to initialize
+ * @dev: device for DMA allocations and mappings
+ * @use_dma_alloc: true if coherent DMA alloc should be used
+ * @use_dma32: true if GFP_DMA32 should be used
+ *
+ * Initialize the pool and its pool types.
+ */
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+		   bool use_dma_alloc, bool use_dma32)
+{
+	unsigned int i, j;
+
+	WARN_ON(!dev && use_dma_alloc);
+
+	pool->dev = dev;
+	pool->use_dma_alloc = use_dma_alloc;
+	pool->use_dma32 = use_dma32;
+
+	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+		for (j = 0; j < MAX_ORDER; ++j)
+			ttm_pool_type_init(&pool->caching[i].orders[j],
+					   pool, i, j);
+}
+EXPORT_SYMBOL(ttm_pool_init);
+
+/**
+ * ttm_pool_fini - Cleanup a pool
+ *
+ * @pool: the pool to clean up
+ *
+ * Free all pages in the pool and unregister the types from the global
+ * shrinker.
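+ *
+ * A rough lifecycle sketch ending in ttm_pool_fini(); "my_dev" is a
+ * made-up driver structure embedding the pool, everything else is
+ * taken from the functions above:
+ *
+ *	ttm_pool_init(&my_dev->pool, my_dev->dev, true, false);
+ *	...
+ *	r = ttm_pool_alloc(&my_dev->pool, tt, &ctx);
+ *	...
+ *	ttm_pool_free(&my_dev->pool, tt);
+ *	...
+ *	ttm_pool_fini(&my_dev->pool);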
+ */ +void ttm_pool_fini(struct ttm_pool *pool) +{ + unsigned int i, j; + + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) + for (j = 0; j < MAX_ORDER; ++j) + ttm_pool_type_fini(&pool->caching[i].orders[j]); +} +EXPORT_SYMBOL(ttm_pool_fini); + +#ifdef CONFIG_DEBUG_FS + +/* Dump information about the different pool types */ +static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt, + struct seq_file *m) +{ + unsigned int i; + + for (i = 0; i < MAX_ORDER; ++i) + seq_printf(m, " %8u", ttm_pool_type_count(&pt[i])); + seq_puts(m, "\n"); +} + +/** + * ttm_pool_debugfs - Debugfs dump function for a pool + * + * @pool: the pool to dump the information for + * @m: seq_file to dump to + * + * Make a debugfs dump with the per pool and global information. + */ +int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) +{ + unsigned int i; + + spin_lock(&shrinker_lock); + + seq_puts(m, "\t "); + for (i = 0; i < MAX_ORDER; ++i) + seq_printf(m, " ---%2u---", i); + seq_puts(m, "\n"); + + seq_puts(m, "wc\t:"); + ttm_pool_debugfs_orders(global_write_combined, m); + seq_puts(m, "uc\t:"); + ttm_pool_debugfs_orders(global_uncached, m); + + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { + seq_puts(m, "DMA "); + switch (i) { + case ttm_cached: + seq_puts(m, "\t:"); + break; + case ttm_write_combined: + seq_puts(m, "wc\t:"); + break; + case ttm_uncached: + seq_puts(m, "uc\t:"); + break; + } + ttm_pool_debugfs_orders(pool->caching[i].orders, m); + } + + seq_printf(m, "\ntotal\t: %8lu of %8lu\n", + atomic_long_read(&allocated_pages), page_pool_size); + + spin_unlock(&shrinker_lock); + + return 0; +} +EXPORT_SYMBOL(ttm_pool_debugfs); + +#endif + +/* As long as pages are available make sure to release at least one */ +static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + unsigned long num_freed = 0; + + do + num_freed += ttm_pool_shrink(); + while (!num_freed && atomic_long_read(&allocated_pages)); + + return num_freed; +} + +/* Return the number of pages available or SHRINK_EMPTY if we have none */ +static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + unsigned long num_pages = atomic_long_read(&allocated_pages); + + return num_pages ? num_pages : SHRINK_EMPTY; +} + +/** + * ttm_pool_mgr_init - Initialize globals + * + * @num_pages: default number of pages + * + * Initialize the global locks and lists for the MM shrinker. + */ +int ttm_pool_mgr_init(unsigned long num_pages) +{ + unsigned int i; + + if (!page_pool_size) + page_pool_size = num_pages; + + spin_lock_init(&shrinker_lock); + INIT_LIST_HEAD(&shrinker_list); + + for (i = 0; i < MAX_ORDER; ++i) { + ttm_pool_type_init(&global_write_combined[i], NULL, + ttm_write_combined, i); + ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i); + } + + mm_shrinker.count_objects = ttm_pool_shrinker_count; + mm_shrinker.scan_objects = ttm_pool_shrinker_scan; + mm_shrinker.seeks = 1; + return register_shrinker(&mm_shrinker); +} + +/** + * ttm_pool_mgr_fini - Finalize globals + * + * Cleanup the global pools and unregister the MM shrinker. 
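+ *
+ * Pairs with ttm_pool_mgr_init(); the presumed call order is (sketch,
+ * once when the TTM core comes up and once when it goes away):
+ *
+ *	ttm_pool_mgr_init(num_pages);
+ *	...
+ *	ttm_pool_mgr_fini();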
+ */ +void ttm_pool_mgr_fini(void) +{ + unsigned int i; + + for (i = 0; i < MAX_ORDER; ++i) { + ttm_pool_type_fini(&global_write_combined[i]); + ttm_pool_type_fini(&global_uncached[i]); + } + + unregister_shrinker(&mm_shrinker); + WARN_ON(!list_empty(&shrinker_list)); +} diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 770c8988c139..ea77919569a2 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -113,10 +113,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man, static const struct ttm_resource_manager_func ttm_range_manager_func; int ttm_range_man_init(struct ttm_bo_device *bdev, - unsigned type, - uint32_t available_caching, - uint32_t default_caching, - bool use_tt, + unsigned type, bool use_tt, unsigned long p_size) { struct ttm_resource_manager *man; @@ -127,8 +124,6 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, return -ENOMEM; man = &rman->manager; - man->available_caching = available_caching; - man->default_caching = default_caching; man->use_tt = use_tt; man->func = &ttm_range_manager_func; @@ -154,7 +149,7 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, ttm_resource_manager_set_used(man, false); - ret = ttm_resource_manager_force_list_clean(bdev, man); + ret = ttm_resource_manager_evict_all(bdev, man); if (ret) return ret; diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 33b642532e5c..b60699bf4816 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -65,10 +65,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man, { unsigned i; - man->use_io_reserve_lru = false; - mutex_init(&man->io_reserve_mutex); spin_lock_init(&man->move_lock); - INIT_LIST_HEAD(&man->io_reserve_lru); man->size = p_size; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) @@ -78,21 +75,21 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man, EXPORT_SYMBOL(ttm_resource_manager_init); /* - * ttm_resource_manager_force_list_clean + * ttm_resource_manager_evict_all * * @bdev - device to use * @man - manager to use * - * Force all the objects out of a memory manager until clean. + * Evict all the objects out of a memory manager until it is empty. * Part of memory manager cleanup sequence. 
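 *
 * Sketch of the expected use when tearing down a manager, taken from
 * the ttm_range_man_fini() change in this patch (error handling and
 * the final cleanup of the manager itself omitted):
 *
 *	ttm_resource_manager_set_used(man, false);
 *	ret = ttm_resource_manager_evict_all(bdev, man);
 *	if (ret)
 *		return ret;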
*/ -int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man) +int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man) { struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false, - .flags = TTM_OPT_FLAG_FORCE_ALLOC + .force_alloc = true }; struct ttm_bo_global *glob = &ttm_bo_glob; struct dma_fence *fence; @@ -129,7 +126,7 @@ int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, return 0; } -EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); +EXPORT_SYMBOL(ttm_resource_manager_evict_all); /** * ttm_resource_manager_debug @@ -143,8 +140,6 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man, drm_printf(p, " use_type: %d\n", man->use_type); drm_printf(p, " use_tt: %d\n", man->use_tt); drm_printf(p, " size: %llu\n", man->size); - drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); - drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); if (man->func && man->func->debug) (*man->func->debug)(man, p); } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 1ccf1ef050d6..cfd633c7e764 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -37,8 +37,6 @@ #include <linux/file.h> #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_page_alloc.h> -#include <drm/ttm/ttm_set_memory.h> /** * Allocates a ttm structure for the given BO. @@ -53,12 +51,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) if (bo->ttm) return 0; - if (bdev->need_dma32) - page_flags |= TTM_PAGE_FLAG_DMA32; - - if (bdev->no_retry) - page_flags |= TTM_PAGE_FLAG_NO_RETRY; - switch (bo->type) { case ttm_bo_type_device: if (zero_alloc) @@ -93,21 +85,22 @@ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm) return 0; } -static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) +static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm) { - ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages, - sizeof(*ttm->ttm.pages) + - sizeof(*ttm->dma_address), - GFP_KERNEL | __GFP_ZERO); - if (!ttm->ttm.pages) + ttm->pages = kvmalloc_array(ttm->num_pages, + sizeof(*ttm->pages) + + sizeof(*ttm->dma_address), + GFP_KERNEL | __GFP_ZERO); + if (!ttm->pages) return -ENOMEM; - ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); + + ttm->dma_address = (void *)(ttm->pages + ttm->num_pages); return 0; } -static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm) +static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm) { - ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages, + ttm->dma_address = kvmalloc_array(ttm->num_pages, sizeof(*ttm->dma_address), GFP_KERNEL | __GFP_ZERO); if (!ttm->dma_address) @@ -115,133 +108,40 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm) return 0; } -static int ttm_tt_set_page_caching(struct page *p, - enum ttm_caching_state c_old, - enum ttm_caching_state c_new) +void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - int ret = 0; - - if (PageHighMem(p)) - return 0; - - if (c_old != tt_cached) { - /* p isn't in the default caching state, set it to - * writeback first to free its current memtype. 
*/ - - ret = ttm_set_pages_wb(p, 1); - if (ret) - return ret; - } - - if (c_new == tt_wc) - ret = ttm_set_pages_wc(p, 1); - else if (c_new == tt_uncached) - ret = ttm_set_pages_uc(p, 1); - - return ret; -} - -/* - * Change caching policy for the linear kernel map - * for range of pages in a ttm. - */ - -static int ttm_tt_set_caching(struct ttm_tt *ttm, - enum ttm_caching_state c_state) -{ - int i, j; - struct page *cur_page; - int ret; - - if (ttm->caching_state == c_state) - return 0; - - if (ttm->state == tt_unpopulated) { - /* Change caching but don't populate */ - ttm->caching_state = c_state; - return 0; - } + ttm_tt_unpopulate(bdev, ttm); - if (ttm->caching_state == tt_cached) - drm_clflush_pages(ttm->pages, ttm->num_pages); - - for (i = 0; i < ttm->num_pages; ++i) { - cur_page = ttm->pages[i]; - if (likely(cur_page != NULL)) { - ret = ttm_tt_set_page_caching(cur_page, - ttm->caching_state, - c_state); - if (unlikely(ret != 0)) - goto out_err; - } - } - - ttm->caching_state = c_state; - - return 0; - -out_err: - for (j = 0; j < i; ++j) { - cur_page = ttm->pages[j]; - if (likely(cur_page != NULL)) { - (void)ttm_tt_set_page_caching(cur_page, c_state, - ttm->caching_state); - } - } - - return ret; -} - -int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) -{ - enum ttm_caching_state state; - - if (placement & TTM_PL_FLAG_WC) - state = tt_wc; - else if (placement & TTM_PL_FLAG_UNCACHED) - state = tt_uncached; - else - state = tt_cached; + if (ttm->swap_storage) + fput(ttm->swap_storage); - return ttm_tt_set_caching(ttm, state); + ttm->swap_storage = NULL; } -EXPORT_SYMBOL(ttm_tt_set_placement_caching); +EXPORT_SYMBOL(ttm_tt_destroy_common); -void ttm_tt_destroy(struct ttm_tt *ttm) +void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - if (ttm == NULL) - return; - - ttm_tt_unbind(ttm); - - if (ttm->state == tt_unbound) - ttm_tt_unpopulate(ttm); - - if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && - ttm->swap_storage) - fput(ttm->swap_storage); - - ttm->swap_storage = NULL; - ttm->func->destroy(ttm); + bdev->driver->ttm_tt_destroy(bdev, ttm); } static void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo, - uint32_t page_flags) + uint32_t page_flags, + enum ttm_caching caching) { - ttm->bdev = bo->bdev; ttm->num_pages = bo->num_pages; - ttm->caching_state = tt_cached; + ttm->caching = ttm_cached; ttm->page_flags = page_flags; - ttm->state = tt_unpopulated; + ttm->dma_address = NULL; ttm->swap_storage = NULL; ttm->sg = bo->sg; + ttm->caching = caching; } int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, - uint32_t page_flags) + uint32_t page_flags, enum ttm_caching caching) { - ttm_tt_init_fields(ttm, bo, page_flags); + ttm_tt_init_fields(ttm, bo, page_flags, caching); if (ttm_tt_alloc_page_directory(ttm)) { pr_err("Failed allocating page table\n"); @@ -253,20 +153,21 @@ EXPORT_SYMBOL(ttm_tt_init); void ttm_tt_fini(struct ttm_tt *ttm) { - kvfree(ttm->pages); + if (ttm->pages) + kvfree(ttm->pages); + else + kvfree(ttm->dma_address); ttm->pages = NULL; + ttm->dma_address = NULL; } EXPORT_SYMBOL(ttm_tt_fini); -int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, - uint32_t page_flags) +int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, + uint32_t page_flags, enum ttm_caching caching) { - struct ttm_tt *ttm = &ttm_dma->ttm; + ttm_tt_init_fields(ttm, bo, page_flags, caching); - ttm_tt_init_fields(ttm, bo, page_flags); - - INIT_LIST_HEAD(&ttm_dma->pages_list); - if 
(ttm_dma_tt_alloc_page_directory(ttm_dma)) { + if (ttm_dma_tt_alloc_page_directory(ttm)) { pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -274,19 +175,17 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_dma_tt_init); -int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, - uint32_t page_flags) +int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, + uint32_t page_flags, enum ttm_caching caching) { - struct ttm_tt *ttm = &ttm_dma->ttm; int ret; - ttm_tt_init_fields(ttm, bo, page_flags); + ttm_tt_init_fields(ttm, bo, page_flags, caching); - INIT_LIST_HEAD(&ttm_dma->pages_list); if (page_flags & TTM_PAGE_FLAG_SG) - ret = ttm_sg_tt_alloc_page_directory(ttm_dma); + ret = ttm_sg_tt_alloc_page_directory(ttm); else - ret = ttm_dma_tt_alloc_page_directory(ttm_dma); + ret = ttm_dma_tt_alloc_page_directory(ttm); if (ret) { pr_err("Failed allocating page table\n"); return -ENOMEM; @@ -295,125 +194,69 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_sg_tt_init); -void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) -{ - struct ttm_tt *ttm = &ttm_dma->ttm; - - if (ttm->pages) - kvfree(ttm->pages); - else - kvfree(ttm_dma->dma_address); - ttm->pages = NULL; - ttm_dma->dma_address = NULL; -} -EXPORT_SYMBOL(ttm_dma_tt_fini); - -void ttm_tt_unbind(struct ttm_tt *ttm) -{ - if (ttm->state == tt_bound) { - ttm->func->unbind(ttm); - ttm->state = tt_unbound; - } -} - -int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem, - struct ttm_operation_ctx *ctx) -{ - int ret = 0; - - if (!ttm) - return -EINVAL; - - if (ttm->state == tt_bound) - return 0; - - ret = ttm_tt_populate(ttm, ctx); - if (ret) - return ret; - - ret = ttm->func->bind(ttm, bo_mem); - if (unlikely(ret != 0)) - return ret; - - ttm->state = tt_bound; - - return 0; -} -EXPORT_SYMBOL(ttm_tt_bind); - int ttm_tt_swapin(struct ttm_tt *ttm) { struct address_space *swap_space; struct file *swap_storage; struct page *from_page; struct page *to_page; - int i; - int ret = -ENOMEM; + gfp_t gfp_mask; + int i, ret; swap_storage = ttm->swap_storage; BUG_ON(swap_storage == NULL); swap_space = swap_storage->f_mapping; + gfp_mask = mapping_gfp_mask(swap_space); for (i = 0; i < ttm->num_pages; ++i) { - gfp_t gfp_mask = mapping_gfp_mask(swap_space); - - gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? 
__GFP_RETRY_MAYFAIL : 0); - from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask); - + from_page = shmem_read_mapping_page_gfp(swap_space, i, + gfp_mask); if (IS_ERR(from_page)) { ret = PTR_ERR(from_page); goto out_err; } to_page = ttm->pages[i]; - if (unlikely(to_page == NULL)) + if (unlikely(to_page == NULL)) { + ret = -ENOMEM; goto out_err; + } copy_highpage(to_page, from_page); put_page(from_page); } - if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) - fput(swap_storage); + fput(swap_storage); ttm->swap_storage = NULL; ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; return 0; + out_err: return ret; } -int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) +int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct address_space *swap_space; struct file *swap_storage; struct page *from_page; struct page *to_page; - int i; - int ret = -ENOMEM; - - BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); - BUG_ON(ttm->caching_state != tt_cached); - - if (!persistent_swap_storage) { - swap_storage = shmem_file_setup("ttm swap", - ttm->num_pages << PAGE_SHIFT, - 0); - if (IS_ERR(swap_storage)) { - pr_err("Failed allocating swap storage\n"); - return PTR_ERR(swap_storage); - } - } else { - swap_storage = persistent_swap_storage; + gfp_t gfp_mask; + int i, ret; + + swap_storage = shmem_file_setup("ttm swap", + ttm->num_pages << PAGE_SHIFT, + 0); + if (IS_ERR(swap_storage)) { + pr_err("Failed allocating swap storage\n"); + return PTR_ERR(swap_storage); } swap_space = swap_storage->f_mapping; + gfp_mask = mapping_gfp_mask(swap_space); for (i = 0; i < ttm->num_pages; ++i) { - gfp_t gfp_mask = mapping_gfp_mask(swap_space); - - gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0); - from_page = ttm->pages[i]; if (unlikely(from_page == NULL)) continue; @@ -429,21 +272,19 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) put_page(to_page); } - ttm_tt_unpopulate(ttm); + ttm_tt_unpopulate(bdev, ttm); ttm->swap_storage = swap_storage; ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; - if (persistent_swap_storage) - ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP; return 0; + out_err: - if (!persistent_swap_storage) - fput(swap_storage); + fput(swap_storage); return ret; } -static void ttm_tt_add_mapping(struct ttm_tt *ttm) +static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { pgoff_t i; @@ -451,24 +292,40 @@ static void ttm_tt_add_mapping(struct ttm_tt *ttm) return; for (i = 0; i < ttm->num_pages; ++i) - ttm->pages[i]->mapping = ttm->bdev->dev_mapping; + ttm->pages[i]->mapping = bdev->dev_mapping; } -int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) +int ttm_tt_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { int ret; - if (ttm->state != tt_unpopulated) + if (!ttm) + return -EINVAL; + + if (ttm_tt_is_populated(ttm)) return 0; - if (ttm->bdev->driver->ttm_tt_populate) - ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx); + if (bdev->driver->ttm_tt_populate) + ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx); else - ret = ttm_pool_populate(ttm, ctx); - if (!ret) - ttm_tt_add_mapping(ttm); - return ret; + ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); + if (ret) + return ret; + + ttm_tt_add_mapping(bdev, ttm); + ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED; + if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { + ret = ttm_tt_swapin(ttm); + if (unlikely(ret != 0)) { + ttm_tt_unpopulate(bdev, 
ttm); + return ret; + } + } + + return 0; } +EXPORT_SYMBOL(ttm_tt_populate); static void ttm_tt_clear_mapping(struct ttm_tt *ttm) { @@ -484,14 +341,16 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) } } -void ttm_tt_unpopulate(struct ttm_tt *ttm) +void ttm_tt_unpopulate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { - if (ttm->state == tt_unpopulated) + if (!ttm_tt_is_populated(ttm)) return; ttm_tt_clear_mapping(ttm); - if (ttm->bdev->driver->ttm_tt_unpopulate) - ttm->bdev->driver->ttm_tt_unpopulate(ttm); + if (bdev->driver->ttm_tt_unpopulate) + bdev->driver->ttm_tt_unpopulate(bdev, ttm); else - ttm_pool_unpopulate(ttm); + ttm_pool_free(&bdev->pool, ttm); + ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; } diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index c3aa39bd38ec..b5259cb1383f 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -200,8 +200,8 @@ static int tve200_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { - ret = -EINVAL; + if (irq < 0) { + ret = irq; goto clk_disable; } diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 915f8bfdb58c..182c586525eb 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -568,7 +568,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, ret = v3d_job_init(v3d, file_priv, &bin->base, v3d_job_free, args->in_sync_bcl); if (ret) { - kfree(bin); v3d_job_put(&render->base); kfree(bin); return ret; diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 3b81ea28c0bb..5a453532901f 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -90,18 +90,17 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; - unsigned int count; - struct scatterlist *sgl; + struct sg_dma_page_iter dma_iter; - for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) { - u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; + for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { + dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); + u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; u32 pte = page_prot | page_address; u32 i; - BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >= + BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= BIT(24)); - - for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++) + for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++) v3d->pt[page++] = pte + i; } diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index d9a5af62af89..322bf7133ba1 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -213,17 +213,17 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc, } static void vbox_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } static void vbox_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } static void vbox_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } @@ -397,11 +397,13 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, vbox_crtc->cursor_enabled = true; - /* pinning is done in prepare/cleanup framebuffer 
*/ - src = drm_gem_vram_kmap(gbo, true, NULL); + src = drm_gem_vram_vmap(gbo); if (IS_ERR(src)) { + /* + * BUG: we should have pinned the BO in prepare_fb(). + */ mutex_unlock(&vbox->hw_mutex); - DRM_WARN("Could not kmap cursor bo, skipping update\n"); + DRM_WARN("Could not map cursor bo, skipping update\n"); return; } @@ -414,7 +416,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, data_size = width * height * 4 + mask_size; copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); - drm_gem_vram_kunmap(gbo); + drm_gem_vram_vunmap(gbo, src); flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile index b303703bc7f3..d0163e18e9ca 100644 --- a/drivers/gpu/drm/vc4/Makefile +++ b/drivers/gpu/drm/vc4/Makefile @@ -12,6 +12,7 @@ vc4-y := \ vc4_kms.o \ vc4_gem.o \ vc4_hdmi.o \ + vc4_hdmi_phy.o \ vc4_vec.o \ vc4_hvs.o \ vc4_irq.o \ diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 74ceebd62fbc..f432278173cd 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -21,6 +21,8 @@ #include "vc4_drv.h" #include "uapi/drm/vc4_drm.h" +static vm_fault_t vc4_fault(struct vm_fault *vmf); + static const char * const bo_type_names[] = { "kernel", "V3D", @@ -374,6 +376,21 @@ out: return bo; } +static const struct vm_operations_struct vc4_vm_ops = { + .fault = vc4_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs vc4_gem_object_funcs = { + .free = vc4_free_object, + .export = vc4_prime_export, + .get_sg_table = drm_gem_cma_prime_get_sg_table, + .vmap = vc4_prime_vmap, + .vunmap = drm_gem_cma_prime_vunmap, + .vm_ops = &vc4_vm_ops, +}; + /** * vc4_gem_create_object - Implementation of driver->gem_create_object. * @dev: DRM device @@ -400,6 +417,8 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size; mutex_unlock(&vc4->bo_lock); + bo->base.base.funcs = &vc4_gem_object_funcs; + return &bo->base.base; } @@ -684,7 +703,7 @@ struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags) return dmabuf; } -vm_fault_t vc4_fault(struct vm_fault *vmf) +static vm_fault_t vc4_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 6d8fa6118fc1..ea710beb8e00 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -65,6 +65,20 @@ static const struct debugfs_reg32 crtc_regs[] = { VC4_REG32(PV_HACT_ACT), }; +static unsigned int +vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel) +{ + u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel)); + /* Top/base are supposed to be 4-pixel aligned, but the + * Raspberry Pi firmware fills the low bits (which are + * presumably ignored). 
+ */ + u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3; + u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3; + + return top - base + 4; +} + static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, int *vpos, int *hpos, @@ -74,6 +88,8 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); + unsigned int cob_size; u32 val; int fifo_lines; int vblank_lines; @@ -89,7 +105,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, * Read vertical scanline which is currently composed for our * pixelvalve by the HVS, and also the scaler status. */ - val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel)); + val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel)); /* Get optional system timestamp after query. */ if (etime) @@ -109,8 +125,9 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, *hpos += mode->crtc_htotal / 2; } + cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel); /* This is the offset we need for translating hvs -> pv scanout pos. */ - fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay; + fifo_lines = cob_size / mode->crtc_hdisplay; if (fifo_lines > 0) ret = true; @@ -189,10 +206,22 @@ void vc4_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); } -static u32 vc4_get_fifo_full_level(u32 format) +static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) { - static const u32 fifo_len_bytes = 64; + const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc); + const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); + u32 fifo_len_bytes = pv_data->fifo_depth; + /* + * Pixels are pulled from the HVS if the number of bytes is + * lower than the FIFO full level. + * + * The latency of the pixel fetch mechanism is 6 pixels, so we + * need to convert those 6 pixels in bytes, depending on the + * format, and then subtract that from the length of the FIFO + * to make sure we never end up in a situation where the FIFO + * is full. + */ switch (format) { case PV_CONTROL_FORMAT_DSIV_16: case PV_CONTROL_FORMAT_DSIC_16: @@ -202,10 +231,30 @@ static u32 vc4_get_fifo_full_level(u32 format) case PV_CONTROL_FORMAT_24: case PV_CONTROL_FORMAT_DSIV_24: default: + /* + * For some reason, the pixelvalve4 doesn't work with + * the usual formula and will only work with 32. + */ + if (crtc_data->hvs_output == 5) + return 32; + return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; } } +static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc, + u32 format) +{ + u32 level = vc4_get_fifo_full_level(vc4_crtc, format); + u32 ret = 0; + + ret |= VC4_SET_FIELD((level >> 6), + PV5_CONTROL_FIFO_LEVEL_HIGH); + + return ret | VC4_SET_FIELD(level & 0x3f, + PV_CONTROL_FIFO_LEVEL); +} + /* * Returns the encoder attached to the CRTC. 
* @@ -230,11 +279,23 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc) return NULL; } +static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc) +{ + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + + /* The PV needs to be disabled before it can be flushed */ + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN); + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR); +} + static void vc4_crtc_config_pv(struct drm_crtc *crtc) { + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); struct drm_crtc_state *state = crtc->state; struct drm_display_mode *mode = &state->adjusted_mode; bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; @@ -242,24 +303,29 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 || vc4_encoder->type == VC4_ENCODER_TYPE_DSI1); u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24; + u8 ppc = pv_data->pixels_per_clock; + bool debug_dump_regs = false; - /* Reset the PV fifo. */ - CRTC_WRITE(PV_CONTROL, 0); - CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | PV_CONTROL_EN); - CRTC_WRITE(PV_CONTROL, 0); + if (debug_dump_regs) { + struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); + dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n", + drm_crtc_index(crtc)); + drm_print_regset32(&p, &vc4_crtc->regset); + } + + vc4_crtc_pixelvalve_reset(crtc); CRTC_WRITE(PV_HORZA, - VC4_SET_FIELD((mode->htotal - - mode->hsync_end) * pixel_rep, + VC4_SET_FIELD((mode->htotal - mode->hsync_end) * pixel_rep / ppc, PV_HORZA_HBP) | - VC4_SET_FIELD((mode->hsync_end - - mode->hsync_start) * pixel_rep, + VC4_SET_FIELD((mode->hsync_end - mode->hsync_start) * pixel_rep / ppc, PV_HORZA_HSYNC)); + CRTC_WRITE(PV_HORZB, - VC4_SET_FIELD((mode->hsync_start - - mode->hdisplay) * pixel_rep, + VC4_SET_FIELD((mode->hsync_start - mode->hdisplay) * pixel_rep / ppc, PV_HORZB_HFP) | - VC4_SET_FIELD(mode->hdisplay * pixel_rep, PV_HORZB_HACTIVE)); + VC4_SET_FIELD(mode->hdisplay * pixel_rep / ppc, + PV_HORZB_HACTIVE)); CRTC_WRITE(PV_VERTA, VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, @@ -306,35 +372,20 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc) if (is_dsi) CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); - CRTC_WRITE(PV_CONTROL, + if (vc4->hvs->hvs5) + CRTC_WRITE(PV_MUX_CFG, + VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP, + PV_MUX_CFG_RGB_PIXEL_MUX_MODE)); + + CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | + vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) | VC4_SET_FIELD(format, PV_CONTROL_FORMAT) | - VC4_SET_FIELD(vc4_get_fifo_full_level(format), - PV_CONTROL_FIFO_LEVEL) | VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) | PV_CONTROL_CLR_AT_START | PV_CONTROL_TRIGGER_UNDERFLOW | PV_CONTROL_WAIT_HSTART | VC4_SET_FIELD(vc4_encoder->clock_select, - PV_CONTROL_CLK_SELECT) | - PV_CONTROL_FIFO_CLR | - PV_CONTROL_EN); -} - -static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) -{ - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - bool debug_dump_regs = false; - - if (debug_dump_regs) { - struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); - dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n", - drm_crtc_index(crtc)); - drm_print_regset32(&p, &vc4_crtc->regset); - } 
- - vc4_crtc_config_pv(crtc); - - vc4_hvs_mode_set_nofb(crtc); + PV_CONTROL_CLK_SELECT)); if (debug_dump_regs) { struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); @@ -352,24 +403,88 @@ static void require_hvs_enabled(struct drm_device *dev) SCALER_DISPCTRL_ENABLE); } -static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) +static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel) { - struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct drm_device *dev = crtc->dev; int ret; - require_hvs_enabled(dev); - - /* Disable vblank irq handling before crtc is disabled. */ - drm_crtc_vblank_off(crtc); - CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN); ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1); WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n"); - vc4_hvs_atomic_disable(crtc, old_state); + /* + * This delay is needed to avoid to get a pixel stuck in an + * unflushable FIFO between the pixelvalve and the HDMI + * controllers on the BCM2711. + * + * Timing is fairly sensitive here, so mdelay is the safest + * approach. + * + * If it was to be reworked, the stuck pixel happens on a + * BCM2711 when changing mode with a good probability, so a + * script that changes mode on a regular basis should trigger + * the bug after less than 10 attempts. It manifests itself with + * every pixels being shifted by one to the right, and thus the + * last pixel of a line actually being displayed as the first + * pixel on the next line. + */ + mdelay(20); + + if (vc4_encoder && vc4_encoder->post_crtc_disable) + vc4_encoder->post_crtc_disable(encoder); + + vc4_crtc_pixelvalve_reset(crtc); + vc4_hvs_stop_channel(dev, channel); + + if (vc4_encoder && vc4_encoder->post_crtc_powerdown) + vc4_encoder->post_crtc_powerdown(encoder); + + return 0; +} + +int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) +{ + struct drm_device *drm = crtc->dev; + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + int channel; + + if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node, + "brcm,bcm2711-pixelvalve2") || + of_device_is_compatible(vc4_crtc->pdev->dev.of_node, + "brcm,bcm2711-pixelvalve4"))) + return 0; + + if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN)) + return 0; + + if (!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN)) + return 0; + + channel = vc4_hvs_get_fifo_from_output(drm, vc4_crtc->data->hvs_output); + if (channel < 0) + return 0; + + return vc4_crtc_disable(crtc, channel); +} + +static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); + struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state); + struct drm_device *dev = crtc->dev; + + require_hvs_enabled(dev); + + /* Disable vblank irq handling before crtc is disabled. 
*/ + drm_crtc_vblank_off(crtc); + + vc4_crtc_disable(crtc, old_vc4_state->assigned_channel); /* * Make sure we issue a vblank event after disabling the CRTC if @@ -386,10 +501,14 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, } static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); require_hvs_enabled(dev); @@ -400,11 +519,24 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, vc4_hvs_atomic_enable(crtc, old_state); + if (vc4_encoder->pre_crtc_configure) + vc4_encoder->pre_crtc_configure(encoder); + + vc4_crtc_config_pv(crtc); + + CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN); + + if (vc4_encoder->pre_crtc_enable) + vc4_encoder->pre_crtc_enable(encoder); + /* When feeding the transposer block the pixelvalve is unneeded and * should not be enabled. */ CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); + + if (vc4_encoder->post_crtc_enable) + vc4_encoder->post_crtc_enable(encoder); } static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc, @@ -452,18 +584,21 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state, } static int vc4_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct drm_connector *conn; struct drm_connector_state *conn_state; int ret, i; - ret = vc4_hvs_atomic_check(crtc, state); + ret = vc4_hvs_atomic_check(crtc, crtc_state); if (ret) return ret; - for_each_new_connector_in_state(state->state, conn, conn_state, i) { + for_each_new_connector_in_state(state, conn, conn_state, + i) { if (conn_state->crtc != crtc) continue; @@ -499,7 +634,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); - u32 chan = vc4_crtc->channel; + u32 chan = vc4_state->assigned_channel; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); @@ -516,7 +651,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) * the CRTC and encoder already reconfigured, leading to * underruns. This can be seen when reconfiguring the CRTC. 
*/ - vc4_hvs_unmask_underrun(dev, vc4_crtc->channel); + vc4_hvs_unmask_underrun(dev, chan); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -698,6 +833,7 @@ struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) old_vc4_state = to_vc4_crtc_state(crtc->state); vc4_state->feed_txp = old_vc4_state->feed_txp; vc4_state->margins = old_vc4_state->margins; + vc4_state->assigned_channel = old_vc4_state->assigned_channel; __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base); return &vc4_state->base; @@ -723,11 +859,19 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc, void vc4_crtc_reset(struct drm_crtc *crtc) { + struct vc4_crtc_state *vc4_crtc_state; + if (crtc->state) vc4_crtc_destroy_state(crtc, crtc->state); - crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); - if (crtc->state) - __drm_atomic_helper_crtc_reset(crtc, crtc->state); + + vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL); + if (!vc4_crtc_state) { + crtc->state = NULL; + return; + } + + vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; + __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base); } static const struct drm_crtc_funcs vc4_crtc_funcs = { @@ -747,7 +891,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { }; static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { - .mode_set_nofb = vc4_crtc_mode_set_nofb, .mode_valid = vc4_crtc_mode_valid, .atomic_check = vc4_crtc_atomic_check, .atomic_flush = vc4_hvs_atomic_flush, @@ -758,9 +901,12 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { static const struct vc4_pv_data bcm2835_pv0_data = { .base = { - .hvs_channel = 0, + .hvs_available_channels = BIT(0), + .hvs_output = 0, }, .debugfs_name = "crtc0_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, .encoder_types = { [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0, [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI, @@ -769,9 +915,12 @@ static const struct vc4_pv_data bcm2835_pv0_data = { static const struct vc4_pv_data bcm2835_pv1_data = { .base = { - .hvs_channel = 2, + .hvs_available_channels = BIT(2), + .hvs_output = 2, }, .debugfs_name = "crtc1_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, .encoder_types = { [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1, [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI, @@ -780,19 +929,94 @@ static const struct vc4_pv_data bcm2835_pv1_data = { static const struct vc4_pv_data bcm2835_pv2_data = { .base = { - .hvs_channel = 1, + .hvs_available_channels = BIT(1), + .hvs_output = 1, }, .debugfs_name = "crtc2_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, .encoder_types = { - [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI, + [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI0, [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC, }, }; +static const struct vc4_pv_data bcm2711_pv0_data = { + .base = { + .hvs_available_channels = BIT(0), + .hvs_output = 0, + }, + .debugfs_name = "crtc0_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_DSI0, + [1] = VC4_ENCODER_TYPE_DPI, + }, +}; + +static const struct vc4_pv_data bcm2711_pv1_data = { + .base = { + .hvs_available_channels = BIT(0) | BIT(1) | BIT(2), + .hvs_output = 3, + }, + .debugfs_name = "crtc1_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_DSI1, + [1] = VC4_ENCODER_TYPE_SMI, + }, +}; + +static const struct vc4_pv_data bcm2711_pv2_data = { + .base = { + 
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2), + .hvs_output = 4, + }, + .debugfs_name = "crtc2_regs", + .fifo_depth = 256, + .pixels_per_clock = 2, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_HDMI0, + }, +}; + +static const struct vc4_pv_data bcm2711_pv3_data = { + .base = { + .hvs_available_channels = BIT(1), + .hvs_output = 1, + }, + .debugfs_name = "crtc3_regs", + .fifo_depth = 64, + .pixels_per_clock = 1, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_VEC, + }, +}; + +static const struct vc4_pv_data bcm2711_pv4_data = { + .base = { + .hvs_available_channels = BIT(0) | BIT(1) | BIT(2), + .hvs_output = 5, + }, + .debugfs_name = "crtc4_regs", + .fifo_depth = 64, + .pixels_per_clock = 2, + .encoder_types = { + [0] = VC4_ENCODER_TYPE_HDMI1, + }, +}; + static const struct of_device_id vc4_crtc_dt_match[] = { { .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data }, { .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data }, { .compatible = "brcm,bcm2835-pixelvalve2", .data = &bcm2835_pv2_data }, + { .compatible = "brcm,bcm2711-pixelvalve0", .data = &bcm2711_pv0_data }, + { .compatible = "brcm,bcm2711-pixelvalve1", .data = &bcm2711_pv1_data }, + { .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data }, + { .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data }, + { .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data }, {} }; @@ -819,26 +1043,11 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm, } } -static void -vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc) -{ - struct drm_device *drm = vc4_crtc->base.dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); - u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel)); - /* Top/base are supposed to be 4-pixel aligned, but the - * Raspberry Pi firmware fills the low bits (which are - * presumably ignored). - */ - u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3; - u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3; - - vc4_crtc->cob_size = top - base + 4; -} - int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, const struct drm_crtc_funcs *crtc_funcs, const struct drm_crtc_helper_funcs *crtc_helper_funcs) { + struct vc4_dev *vc4 = to_vc4_dev(drm); struct drm_crtc *crtc = &vc4_crtc->base; struct drm_plane *primary_plane; unsigned int i; @@ -858,15 +1067,17 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, crtc_funcs, NULL); drm_crtc_helper_add(crtc, crtc_helper_funcs); - vc4_crtc->channel = vc4_crtc->data->hvs_channel; - drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); - drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); - /* We support CTM, but only for one CRTC at a time. It's therefore - * implemented as private driver state in vc4_kms, not here. - */ - drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size); - vc4_crtc_get_cob_allocation(vc4_crtc); + if (!vc4->hvs->hvs5) { + drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); + + drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); + + /* We support CTM, but only for one CRTC at a time. It's therefore + * implemented as private driver state in vc4_kms, not here. 
+ */ + drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size); + } for (i = 0; i < crtc->gamma_size; i++) { vc4_crtc->lut_r[i] = i; @@ -915,7 +1126,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) CRTC_WRITE(PV_INTEN, 0); CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); ret = devm_request_irq(dev, platform_get_irq(pdev, 0), - vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc); + vc4_crtc_irq_handler, + IRQF_SHARED, + "vc4 crtc", vc4_crtc); if (ret) goto err_destroy_planes; diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 38343d2fb4fb..8f10f609e4f8 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -140,12 +140,6 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) kfree(vc4file); } -static const struct vm_operations_struct vc4_vm_ops = { - .fault = vc4_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct file_operations vc4_drm_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -195,16 +189,10 @@ static struct drm_driver vc4_drm_driver = { #endif .gem_create_object = vc4_create_object, - .gem_free_object_unlocked = vc4_free_object, - .gem_vm_ops = &vc4_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = vc4_prime_export, - .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, .gem_prime_import_sg_table = vc4_prime_import_sg_table, - .gem_prime_vmap = vc4_prime_vmap, - .gem_prime_vunmap = drm_gem_cma_prime_vunmap, .gem_prime_mmap = vc4_prime_mmap, .dumb_create = vc4_dumb_create, @@ -252,6 +240,7 @@ static int vc4_drm_bind(struct device *dev) struct drm_device *drm; struct vc4_dev *vc4; struct device_node *node; + struct drm_crtc *crtc; int ret = 0; dev->coherent_dma_mask = DMA_BIT_MASK(32); @@ -298,6 +287,9 @@ static int vc4_drm_bind(struct device *dev) if (ret < 0) goto unbind_all; + drm_for_each_crtc(crtc, drm) + vc4_crtc_disable_at_boot(crtc); + ret = drm_dev_register(drm, 0); if (ret < 0) goto unbind_all; @@ -310,6 +302,7 @@ unbind_all: component_unbind_all(dev, drm); gem_destroy: vc4_gem_destroy(drm); + drm_mode_config_cleanup(drm); vc4_bo_cache_destroy(drm); dev_put: drm_dev_put(drm); @@ -368,6 +361,7 @@ static int vc4_platform_drm_remove(struct platform_device *pdev) } static const struct of_device_id vc4_of_match[] = { + { .compatible = "brcm,bcm2711-vc5", }, { .compatible = "brcm,bcm2835-vc4", }, { .compatible = "brcm,cygnus-vc4", }, {}, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index fa19160c801f..5d3d8ed0b775 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -73,7 +73,6 @@ struct vc4_perfmon { struct vc4_dev { struct drm_device *dev; - struct vc4_hdmi *hdmi; struct vc4_hvs *hvs; struct vc4_v3d *v3d; struct vc4_dpi *dpi; @@ -201,6 +200,9 @@ struct vc4_dev { int power_refcount; + /* Set to true when the load tracker is supported. */ + bool load_tracker_available; + /* Set to true when the load tracker is active. 
*/ bool load_tracker_enabled; @@ -285,7 +287,7 @@ struct vc4_bo { static inline struct vc4_bo * to_vc4_bo(struct drm_gem_object *bo) { - return (struct vc4_bo *)bo; + return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base); } struct vc4_fence { @@ -298,7 +300,7 @@ struct vc4_fence { static inline struct vc4_fence * to_vc4_fence(struct dma_fence *fence) { - return (struct vc4_fence *)fence; + return container_of(fence, struct vc4_fence, base); } struct vc4_seqno_cb { @@ -320,6 +322,8 @@ struct vc4_hvs { void __iomem *regs; u32 __iomem *dlist; + struct clk *core_clk; + /* Memory manager for CRTCs to allocate space in the display * list. Units are dwords. */ @@ -329,7 +333,11 @@ struct vc4_hvs { spinlock_t mm_lock; struct drm_mm_node mitchell_netravali_filter; + struct debugfs_regset32 regset; + + /* HVS version 5 flag, therefore requires updated dlist structures */ + bool hvs5; }; struct vc4_plane { @@ -339,7 +347,7 @@ struct vc4_plane { static inline struct vc4_plane * to_vc4_plane(struct drm_plane *plane) { - return (struct vc4_plane *)plane; + return container_of(plane, struct vc4_plane, base); } enum vc4_scaling_mode { @@ -415,12 +423,13 @@ struct vc4_plane_state { static inline struct vc4_plane_state * to_vc4_plane_state(struct drm_plane_state *state) { - return (struct vc4_plane_state *)state; + return container_of(state, struct vc4_plane_state, base); } enum vc4_encoder_type { VC4_ENCODER_TYPE_NONE, - VC4_ENCODER_TYPE_HDMI, + VC4_ENCODER_TYPE_HDMI0, + VC4_ENCODER_TYPE_HDMI1, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_DSI1, @@ -432,6 +441,13 @@ struct vc4_encoder { struct drm_encoder base; enum vc4_encoder_type type; u32 clock_select; + + void (*pre_crtc_configure)(struct drm_encoder *encoder); + void (*pre_crtc_enable)(struct drm_encoder *encoder); + void (*post_crtc_enable)(struct drm_encoder *encoder); + + void (*post_crtc_disable)(struct drm_encoder *encoder); + void (*post_crtc_powerdown)(struct drm_encoder *encoder); }; static inline struct vc4_encoder * @@ -441,13 +457,22 @@ to_vc4_encoder(struct drm_encoder *encoder) } struct vc4_crtc_data { - /* Which channel of the HVS this pixelvalve sources from. */ - int hvs_channel; + /* Bitmask of channels (FIFOs) of the HVS that the output can source from */ + unsigned int hvs_available_channels; + + /* Which output of the HVS this pixelvalve sources from. */ + int hvs_output; }; struct vc4_pv_data { struct vc4_crtc_data base; + /* Depth of the PixelValve FIFO in bytes */ + unsigned int fifo_depth; + + /* Number of pixels output per clock period */ + u8 pixels_per_clock; + enum vc4_encoder_type encoder_types[4]; const char *debugfs_name; @@ -462,14 +487,9 @@ struct vc4_crtc { /* Timestamp at start of vblank irq - unaffected by lock delays. */ ktime_t t_vblank; - /* Which HVS channel we're using for our CRTC. */ - int channel; - u8 lut_r[256]; u8 lut_g[256]; u8 lut_b[256]; - /* Size in pixels of the COB memory allocated to this CRTC. 
*/ - u32 cob_size; struct drm_pending_vblank_event *event; @@ -479,7 +499,7 @@ struct vc4_crtc { static inline struct vc4_crtc * to_vc4_crtc(struct drm_crtc *crtc) { - return (struct vc4_crtc *)crtc; + return container_of(crtc, struct vc4_crtc, base); } static inline const struct vc4_crtc_data * @@ -502,6 +522,7 @@ struct vc4_crtc_state { struct drm_mm_node mm; bool feed_txp; bool txp_armed; + unsigned int assigned_channel; struct { unsigned int left; @@ -511,10 +532,12 @@ struct vc4_crtc_state { } margins; }; +#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1) + static inline struct vc4_crtc_state * to_vc4_crtc_state(struct drm_crtc_state *crtc_state) { - return (struct vc4_crtc_state *)crtc_state; + return container_of(crtc_state, struct vc4_crtc_state, base); } #define V3D_READ(offset) readl(vc4->v3d->regs + offset) @@ -778,7 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -vm_fault_t vc4_fault(struct vm_fault *vmf); int vc4_mmap(struct file *filp, struct vm_area_struct *vma); int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, @@ -794,6 +816,7 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo); /* vc4_crtc.c */ extern struct platform_driver vc4_crtc_driver; +int vc4_crtc_disable_at_boot(struct drm_crtc *crtc); int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, const struct drm_crtc_funcs *crtc_funcs, const struct drm_crtc_helper_funcs *crtc_helper_funcs); @@ -888,11 +911,13 @@ void vc4_irq_reset(struct drm_device *dev); /* vc4_hvs.c */ extern struct platform_driver vc4_hvs_driver; +void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output); +int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output); int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state); void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); -void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state); -void vc4_hvs_mode_set_nofb(struct drm_crtc *crtc); +void vc4_hvs_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state); void vc4_hvs_dump_state(struct drm_device *dev); void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel); void vc4_hvs_mask_underrun(struct drm_device *dev, int channel); diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index eaf276978ee7..19aab4e7e209 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1246,10 +1246,12 @@ reset_fifo_and_return: return ret; } +static const struct component_ops vc4_dsi_ops; static int vc4_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct vc4_dsi *dsi = host_to_dsi(host); + int ret; dsi->lanes = device->lanes; dsi->channel = device->channel; @@ -1284,6 +1286,12 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host, return 0; } + ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops); + if (ret) { + mipi_dsi_host_unregister(&dsi->dsi_host); + return ret; + } + return 0; } @@ -1662,7 +1670,6 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct vc4_dsi *dsi; - int ret; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) @@ -1670,26 +1677,10 @@ 
static int vc4_dsi_dev_probe(struct platform_device *pdev) dev_set_drvdata(dev, dsi); dsi->pdev = pdev; - - /* Note, the initialization sequence for DSI and panels is - * tricky. The component bind above won't get past its - * -EPROBE_DEFER until the panel/bridge probes. The - * panel/bridge will return -EPROBE_DEFER until it has a - * mipi_dsi_host to register its device to. So, we register - * the host during pdev probe time, so vc4 as a whole can then - * -EPROBE_DEFER its component bind process until the panel - * successfully attaches. - */ dsi->dsi_host.ops = &vc4_dsi_host_ops; dsi->dsi_host.dev = dev; mipi_dsi_host_register(&dsi->dsi_host); - ret = component_add(&pdev->dev, &vc4_dsi_ops); - if (ret) { - mipi_dsi_host_unregister(&dsi->dsi_host); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 15a11cd4de25..95779d50cca0 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -43,177 +43,101 @@ #include <linux/of_platform.h> #include <linux/pm_runtime.h> #include <linux/rational.h> +#include <linux/reset.h> #include <sound/dmaengine_pcm.h> #include <sound/pcm_drm_eld.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "media/cec.h" #include "vc4_drv.h" +#include "vc4_hdmi.h" +#include "vc4_hdmi_regs.h" #include "vc4_regs.h" -#define HSM_CLOCK_FREQ 163682864 -#define CEC_CLOCK_FREQ 40000 -#define CEC_CLOCK_DIV (HSM_CLOCK_FREQ / CEC_CLOCK_FREQ) - -/* HDMI audio information */ -struct vc4_hdmi_audio { - struct snd_soc_card card; - struct snd_soc_dai_link link; - struct snd_soc_dai_link_component cpu; - struct snd_soc_dai_link_component codec; - struct snd_soc_dai_link_component platform; - int samplerate; - int channels; - struct snd_dmaengine_dai_dma_data dma_data; - struct snd_pcm_substream *substream; -}; +#define VC5_HDMI_HORZA_HFP_SHIFT 16 +#define VC5_HDMI_HORZA_HFP_MASK VC4_MASK(28, 16) +#define VC5_HDMI_HORZA_VPOS BIT(15) +#define VC5_HDMI_HORZA_HPOS BIT(14) +#define VC5_HDMI_HORZA_HAP_SHIFT 0 +#define VC5_HDMI_HORZA_HAP_MASK VC4_MASK(13, 0) -/* General HDMI hardware state. 
*/ -struct vc4_hdmi { - struct platform_device *pdev; - - struct drm_encoder *encoder; - struct drm_connector *connector; +#define VC5_HDMI_HORZB_HBP_SHIFT 16 +#define VC5_HDMI_HORZB_HBP_MASK VC4_MASK(26, 16) +#define VC5_HDMI_HORZB_HSP_SHIFT 0 +#define VC5_HDMI_HORZB_HSP_MASK VC4_MASK(10, 0) - struct vc4_hdmi_audio audio; +#define VC5_HDMI_VERTA_VSP_SHIFT 24 +#define VC5_HDMI_VERTA_VSP_MASK VC4_MASK(28, 24) +#define VC5_HDMI_VERTA_VFP_SHIFT 16 +#define VC5_HDMI_VERTA_VFP_MASK VC4_MASK(22, 16) +#define VC5_HDMI_VERTA_VAL_SHIFT 0 +#define VC5_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0) - struct i2c_adapter *ddc; - void __iomem *hdmicore_regs; - void __iomem *hd_regs; - int hpd_gpio; - bool hpd_active_low; +#define VC5_HDMI_VERTB_VSPO_SHIFT 16 +#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16) - struct cec_adapter *cec_adap; - struct cec_msg cec_rx_msg; - bool cec_tx_ok; - bool cec_irq_was_rx; +# define VC4_HD_M_SW_RST BIT(2) +# define VC4_HD_M_ENABLE BIT(0) - struct clk *pixel_clock; - struct clk *hsm_clock; +#define CEC_CLOCK_FREQ 40000 +#define VC4_HSM_MID_CLOCK 149985000 - struct debugfs_regset32 hdmi_regset; - struct debugfs_regset32 hd_regset; -}; +static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct vc4_hdmi *vc4_hdmi = node->info_ent->data; + struct drm_printer p = drm_seq_file_printer(m); -#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset) -#define HDMI_WRITE(offset, val) writel(val, vc4->hdmi->hdmicore_regs + offset) -#define HD_READ(offset) readl(vc4->hdmi->hd_regs + offset) -#define HD_WRITE(offset, val) writel(val, vc4->hdmi->hd_regs + offset) + drm_print_regset32(&p, &vc4_hdmi->hdmi_regset); + drm_print_regset32(&p, &vc4_hdmi->hd_regset); -/* VC4 HDMI encoder KMS struct */ -struct vc4_hdmi_encoder { - struct vc4_encoder base; - bool hdmi_monitor; - bool limited_rgb_range; -}; + return 0; +} -static inline struct vc4_hdmi_encoder * -to_vc4_hdmi_encoder(struct drm_encoder *encoder) +static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { - return container_of(encoder, struct vc4_hdmi_encoder, base.base); -} + HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_SW_RST); + udelay(1); + HDMI_WRITE(HDMI_M_CTL, 0); -/* VC4 HDMI connector KMS struct */ -struct vc4_hdmi_connector { - struct drm_connector base; + HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_ENABLE); - /* Since the connector is attached to just the one encoder, - * this is the reference to it so we can do the best_encoder() - * hook. 
- */ - struct drm_encoder *encoder; -}; + HDMI_WRITE(HDMI_SW_RESET_CONTROL, + VC4_HDMI_SW_RESET_HDMI | + VC4_HDMI_SW_RESET_FORMAT_DETECT); -static inline struct vc4_hdmi_connector * -to_vc4_hdmi_connector(struct drm_connector *connector) -{ - return container_of(connector, struct vc4_hdmi_connector, base); + HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0); } -static const struct debugfs_reg32 hdmi_regs[] = { - VC4_REG32(VC4_HDMI_CORE_REV), - VC4_REG32(VC4_HDMI_SW_RESET_CONTROL), - VC4_REG32(VC4_HDMI_HOTPLUG_INT), - VC4_REG32(VC4_HDMI_HOTPLUG), - VC4_REG32(VC4_HDMI_MAI_CHANNEL_MAP), - VC4_REG32(VC4_HDMI_MAI_CONFIG), - VC4_REG32(VC4_HDMI_MAI_FORMAT), - VC4_REG32(VC4_HDMI_AUDIO_PACKET_CONFIG), - VC4_REG32(VC4_HDMI_RAM_PACKET_CONFIG), - VC4_REG32(VC4_HDMI_HORZA), - VC4_REG32(VC4_HDMI_HORZB), - VC4_REG32(VC4_HDMI_FIFO_CTL), - VC4_REG32(VC4_HDMI_SCHEDULER_CONTROL), - VC4_REG32(VC4_HDMI_VERTA0), - VC4_REG32(VC4_HDMI_VERTA1), - VC4_REG32(VC4_HDMI_VERTB0), - VC4_REG32(VC4_HDMI_VERTB1), - VC4_REG32(VC4_HDMI_TX_PHY_RESET_CTL), - VC4_REG32(VC4_HDMI_TX_PHY_CTL0), - - VC4_REG32(VC4_HDMI_CEC_CNTRL_1), - VC4_REG32(VC4_HDMI_CEC_CNTRL_2), - VC4_REG32(VC4_HDMI_CEC_CNTRL_3), - VC4_REG32(VC4_HDMI_CEC_CNTRL_4), - VC4_REG32(VC4_HDMI_CEC_CNTRL_5), - VC4_REG32(VC4_HDMI_CPU_STATUS), - VC4_REG32(VC4_HDMI_CPU_MASK_STATUS), - - VC4_REG32(VC4_HDMI_CEC_RX_DATA_1), - VC4_REG32(VC4_HDMI_CEC_RX_DATA_2), - VC4_REG32(VC4_HDMI_CEC_RX_DATA_3), - VC4_REG32(VC4_HDMI_CEC_RX_DATA_4), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_1), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_2), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_3), - VC4_REG32(VC4_HDMI_CEC_TX_DATA_4), -}; - -static const struct debugfs_reg32 hd_regs[] = { - VC4_REG32(VC4_HD_M_CTL), - VC4_REG32(VC4_HD_MAI_CTL), - VC4_REG32(VC4_HD_MAI_THR), - VC4_REG32(VC4_HD_MAI_FMT), - VC4_REG32(VC4_HD_MAI_SMP), - VC4_REG32(VC4_HD_VID_CTL), - VC4_REG32(VC4_HD_CSC_CTL), - VC4_REG32(VC4_HD_FRAME_COUNT), -}; - -static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) +static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_hdmi *hdmi = vc4->hdmi; - struct drm_printer p = drm_seq_file_printer(m); + reset_control_reset(vc4_hdmi->reset); - drm_print_regset32(&p, &hdmi->hdmi_regset); - drm_print_regset32(&p, &hdmi->hd_regset); + HDMI_WRITE(HDMI_DVP_CTL, 0); - return 0; + HDMI_WRITE(HDMI_CLOCK_STOP, + HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL); } static enum drm_connector_status vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) { - struct drm_device *dev = connector->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); - if (vc4->hdmi->hpd_gpio) { - if (gpio_get_value_cansleep(vc4->hdmi->hpd_gpio) ^ - vc4->hdmi->hpd_active_low) + if (vc4_hdmi->hpd_gpio) { + if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^ + vc4_hdmi->hpd_active_low) return connector_status_connected; - cec_phys_addr_invalidate(vc4->hdmi->cec_adap); + cec_phys_addr_invalidate(vc4_hdmi->cec_adap); return connector_status_disconnected; } - if (drm_probe_ddc(vc4->hdmi->ddc)) + if (drm_probe_ddc(vc4_hdmi->ddc)) return connector_status_connected; - if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) + if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) return connector_status_connected; - cec_phys_addr_invalidate(vc4->hdmi->cec_adap); + cec_phys_addr_invalidate(vc4_hdmi->cec_adap); return 
connector_status_disconnected; } @@ -225,17 +149,13 @@ static void vc4_hdmi_connector_destroy(struct drm_connector *connector) static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) { - struct vc4_hdmi_connector *vc4_connector = - to_vc4_hdmi_connector(connector); - struct drm_encoder *encoder = vc4_connector->encoder; - struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); - struct drm_device *dev = connector->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); + struct vc4_hdmi_encoder *vc4_encoder = &vc4_hdmi->encoder; int ret = 0; struct edid *edid; - edid = drm_get_edid(connector, vc4->hdmi->ddc); - cec_s_phys_addr_from_edid(vc4->hdmi->cec_adap, edid); + edid = drm_get_edid(connector, vc4_hdmi->ddc); + cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); if (!edid) return -ENODEV; @@ -267,32 +187,23 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = .get_modes = vc4_hdmi_connector_get_modes, }; -static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, - struct drm_encoder *encoder, - struct i2c_adapter *ddc) +static int vc4_hdmi_connector_init(struct drm_device *dev, + struct vc4_hdmi *vc4_hdmi) { - struct drm_connector *connector; - struct vc4_hdmi_connector *hdmi_connector; + struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; int ret; - hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector), - GFP_KERNEL); - if (!hdmi_connector) - return ERR_PTR(-ENOMEM); - connector = &hdmi_connector->base; - - hdmi_connector->encoder = encoder; - drm_connector_init_with_ddc(dev, connector, &vc4_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA, - ddc); + vc4_hdmi->ddc); drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs); /* Create and attach TV margin props to this connector. 
*/ ret = drm_mode_create_tv_margin_properties(dev); if (ret) - return ERR_PTR(ret); + return ret; drm_connector_attach_tv_margin_properties(connector); @@ -304,35 +215,37 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, drm_connector_attach_encoder(connector, encoder); - return connector; + return 0; } static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, enum hdmi_infoframe_type type) { - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); u32 packet_id = type - 0x80; - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, - HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id)); + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, + HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id)); - return wait_for(!(HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) & + return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) & BIT(packet_id)), 100); } static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, union hdmi_infoframe *frame) { - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); u32 packet_id = frame->any.type - 0x80; - u32 packet_reg = VC4_HDMI_RAM_PACKET(packet_id); + const struct vc4_hdmi_register *ram_packet_start = + &vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START]; + u32 packet_reg = ram_packet_start->offset + VC4_HDMI_PACKET_STRIDE * packet_id; + void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi, + ram_packet_start->reg); uint8_t buffer[VC4_HDMI_PACKET_STRIDE]; ssize_t len, i; int ret; - WARN_ONCE(!(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & + WARN_ONCE(!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE), "Packet RAM has to be on to store the packet."); @@ -347,23 +260,23 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, } for (i = 0; i < len; i += 7) { - HDMI_WRITE(packet_reg, - buffer[i + 0] << 0 | - buffer[i + 1] << 8 | - buffer[i + 2] << 16); + writel(buffer[i + 0] << 0 | + buffer[i + 1] << 8 | + buffer[i + 2] << 16, + base + packet_reg); packet_reg += 4; - HDMI_WRITE(packet_reg, - buffer[i + 3] << 0 | - buffer[i + 4] << 8 | - buffer[i + 5] << 16 | - buffer[i + 6] << 24); + writel(buffer[i + 3] << 0 | + buffer[i + 4] << 8 | + buffer[i + 5] << 16 | + buffer[i + 6] << 24, + base + packet_reg); packet_reg += 4; } - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, - HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) | BIT(packet_id)); - ret = wait_for((HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) & + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, + HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id)); + ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) & BIT(packet_id)), 100); if (ret) DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret); @@ -371,24 +284,24 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); - struct vc4_dev *vc4 = encoder->dev->dev_private; - struct vc4_hdmi *hdmi = vc4->hdmi; - struct drm_connector_state *cstate = hdmi->connector->state; + struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_connector_state *cstate = connector->state; struct drm_crtc *crtc = encoder->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; union hdmi_infoframe frame; int ret; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - hdmi->connector, 
mode); + connector, mode); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; } drm_hdmi_avi_infoframe_quant_range(&frame.avi, - hdmi->connector, mode, + connector, mode, vc4_encoder->limited_rgb_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); @@ -416,9 +329,7 @@ static void vc4_hdmi_set_spd_infoframe(struct drm_encoder *encoder) static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder) { - struct drm_device *drm = encoder->dev; - struct vc4_dev *vc4 = drm->dev_private; - struct vc4_hdmi *hdmi = vc4->hdmi; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); union hdmi_infoframe frame; int ret; @@ -427,45 +338,139 @@ static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder) frame.audio.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM; frame.audio.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM; frame.audio.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM; - frame.audio.channels = hdmi->audio.channels; + frame.audio.channels = vc4_hdmi->audio.channels; vc4_hdmi_write_infoframe(encoder, &frame); } static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + vc4_hdmi_set_avi_infoframe(encoder); vc4_hdmi_set_spd_infoframe(encoder); + /* + * If audio was streaming, then we need to reenabled the audio + * infoframe here during encoder_enable. + */ + if (vc4_hdmi->audio.streaming) + vc4_hdmi_set_audio_infoframe(encoder); } -static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_hdmi *hdmi = vc4->hdmi; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0); + + HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | + VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_CLRSYNC); + + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX); +} + +static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder) +{ + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); int ret; - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, 0); + if (vc4_hdmi->variant->phy_disable) + vc4_hdmi->variant->phy_disable(vc4_hdmi); - HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16); - HD_WRITE(VC4_HD_VID_CTL, - HD_READ(VC4_HD_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); - clk_disable_unprepare(hdmi->pixel_clock); + clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock); + clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->pixel_clock); - ret = pm_runtime_put(&hdmi->pdev->dev); + ret = pm_runtime_put(&vc4_hdmi->pdev->dev); if (ret < 0) DRM_ERROR("Failed to release power domain: %d\n", ret); } -static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder) +{ +} + +static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) +{ + u32 csc_ctl; + + csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR, + VC4_HD_CSC_CTL_ORDER); + + if (enable) { + /* CEA VICs other than #1 requre limited range RGB + * output unless overridden by an AVI infoframe. + * Apply a colorspace conversion to squash 0-255 down + * to 16-235. 
The matrix here is: + * + * [ 0 0 0.8594 16] + * [ 0 0.8594 0 16] + * [ 0.8594 0 0 16] + * [ 0 0 0 1] + */ + csc_ctl |= VC4_HD_CSC_CTL_ENABLE; + csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC; + csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM, + VC4_HD_CSC_CTL_MODE); + + HDMI_WRITE(HDMI_CSC_12_11, (0x000 << 16) | 0x000); + HDMI_WRITE(HDMI_CSC_14_13, (0x100 << 16) | 0x6e0); + HDMI_WRITE(HDMI_CSC_22_21, (0x6e0 << 16) | 0x000); + HDMI_WRITE(HDMI_CSC_24_23, (0x100 << 16) | 0x000); + HDMI_WRITE(HDMI_CSC_32_31, (0x000 << 16) | 0x6e0); + HDMI_WRITE(HDMI_CSC_34_33, (0x100 << 16) | 0x000); + } + + /* The RGB order applies even when CSC is disabled. */ + HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); +} + +static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) +{ + u32 csc_ctl; + + csc_ctl = 0x07; /* RGB_CONVERT_MODE = custom matrix, || USE_RGB_TO_YCBCR */ + + if (enable) { + /* CEA VICs other than #1 requre limited range RGB + * output unless overridden by an AVI infoframe. + * Apply a colorspace conversion to squash 0-255 down + * to 16-235. The matrix here is: + * + * [ 0.8594 0 0 16] + * [ 0 0.8594 0 16] + * [ 0 0 0.8594 16] + * [ 0 0 0 1] + * Matrix is signed 2p13 fixed point, with signed 9p6 offsets + */ + HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x1b80); + HDMI_WRITE(HDMI_CSC_14_13, (0x0400 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_22_21, (0x1b80 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_24_23, (0x0400 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_34_33, (0x0400 << 16) | 0x1b80); + } else { + /* Still use the matrix for full range, but make it unity. + * Matrix is signed 2p13 fixed point, with signed 9p6 offsets + */ + HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x2000); + HDMI_WRITE(HDMI_CSC_14_13, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_22_21, (0x2000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_24_23, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000); + HDMI_WRITE(HDMI_CSC_34_33, (0x0000 << 16) | 0x2000); + } + + HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); +} + +static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode) { - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; - struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); - struct drm_device *dev = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_hdmi *hdmi = vc4->hdmi; - bool debug_dump_regs = false; bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; @@ -483,213 +488,285 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) mode->crtc_vsync_end - interlaced, VC4_HDMI_VERTB_VBP)); - u32 csc_ctl; + + HDMI_WRITE(HDMI_HORZA, + (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) | + (hsync_pos ? 
VC4_HDMI_HORZA_HPOS : 0) | + VC4_SET_FIELD(mode->hdisplay * pixel_rep, + VC4_HDMI_HORZA_HAP)); + + HDMI_WRITE(HDMI_HORZB, + VC4_SET_FIELD((mode->htotal - + mode->hsync_end) * pixel_rep, + VC4_HDMI_HORZB_HBP) | + VC4_SET_FIELD((mode->hsync_end - + mode->hsync_start) * pixel_rep, + VC4_HDMI_HORZB_HSP) | + VC4_SET_FIELD((mode->hsync_start - + mode->hdisplay) * pixel_rep, + VC4_HDMI_HORZB_HFP)); + + HDMI_WRITE(HDMI_VERTA0, verta); + HDMI_WRITE(HDMI_VERTA1, verta); + + HDMI_WRITE(HDMI_VERTB0, vertb_even); + HDMI_WRITE(HDMI_VERTB1, vertb); +} +static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode) +{ + bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; + bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; + bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1; + u32 verta = (VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start, + VC5_HDMI_VERTA_VSP) | + VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay, + VC5_HDMI_VERTA_VFP) | + VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL)); + u32 vertb = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) | + VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, + VC4_HDMI_VERTB_VBP)); + u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) | + VC4_SET_FIELD(mode->crtc_vtotal - + mode->crtc_vsync_end - + interlaced, + VC4_HDMI_VERTB_VBP)); + + HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021); + HDMI_WRITE(HDMI_HORZA, + (vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) | + (hsync_pos ? VC5_HDMI_HORZA_HPOS : 0) | + VC4_SET_FIELD(mode->hdisplay * pixel_rep, + VC5_HDMI_HORZA_HAP) | + VC4_SET_FIELD((mode->hsync_start - + mode->hdisplay) * pixel_rep, + VC5_HDMI_HORZA_HFP)); + + HDMI_WRITE(HDMI_HORZB, + VC4_SET_FIELD((mode->htotal - + mode->hsync_end) * pixel_rep, + VC5_HDMI_HORZB_HBP) | + VC4_SET_FIELD((mode->hsync_end - + mode->hsync_start) * pixel_rep, + VC5_HDMI_HORZB_HSP)); + + HDMI_WRITE(HDMI_VERTA0, verta); + HDMI_WRITE(HDMI_VERTA1, verta); + + HDMI_WRITE(HDMI_VERTB0, vertb_even); + HDMI_WRITE(HDMI_VERTB1, vertb); + + HDMI_WRITE(HDMI_CLOCK_STOP, 0); +} + +static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) +{ + u32 drift; + int ret; + + drift = HDMI_READ(HDMI_FIFO_CTL); + drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK; + + HDMI_WRITE(HDMI_FIFO_CTL, + drift & ~VC4_HDMI_FIFO_CTL_RECENTER); + HDMI_WRITE(HDMI_FIFO_CTL, + drift | VC4_HDMI_FIFO_CTL_RECENTER); + usleep_range(1000, 1100); + HDMI_WRITE(HDMI_FIFO_CTL, + drift & ~VC4_HDMI_FIFO_CTL_RECENTER); + HDMI_WRITE(HDMI_FIFO_CTL, + drift | VC4_HDMI_FIFO_CTL_RECENTER); + + ret = wait_for(HDMI_READ(HDMI_FIFO_CTL) & + VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1); + WARN_ONCE(ret, "Timeout waiting for " + "VC4_HDMI_FIFO_CTL_RECENTER_DONE"); +} + +static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder) +{ + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + unsigned long pixel_rate, hsm_rate; int ret; - ret = pm_runtime_get_sync(&hdmi->pdev->dev); + ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev); if (ret < 0) { DRM_ERROR("Failed to retain power domain: %d\n", ret); return; } - ret = clk_set_rate(hdmi->pixel_clock, - mode->clock * 1000 * - ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1)); + pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 
2 : 1); + ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate); if (ret) { DRM_ERROR("Failed to set pixel clock rate: %d\n", ret); return; } - ret = clk_prepare_enable(hdmi->pixel_clock); + ret = clk_prepare_enable(vc4_hdmi->pixel_clock); if (ret) { DRM_ERROR("Failed to turn on pixel clock: %d\n", ret); return; } - HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL, - VC4_HDMI_SW_RESET_HDMI | - VC4_HDMI_SW_RESET_FORMAT_DETECT); - - HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL, 0); - - /* PHY should be in reset, like - * vc4_hdmi_encoder_disable() does. + /* + * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must + * be faster than pixel clock, infinitesimally faster, tested in + * simulation. Otherwise, exact value is unimportant for HDMI + * operation." This conflicts with bcm2835's vc4 documentation, which + * states HSM's clock has to be at least 108% of the pixel clock. + * + * Real life tests reveal that vc4's firmware statement holds up, and + * users are able to use pixel clocks closer to HSM's, namely for + * 1920x1200@60Hz. So it was decided to have leave a 1% margin between + * both clocks. Which, for RPi0-3 implies a maximum pixel clock of + * 162MHz. + * + * Additionally, the AXI clock needs to be at least 25% of + * pixel clock, but HSM ends up being the limiting factor. */ - HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16); + hsm_rate = max_t(unsigned long, 120000000, (pixel_rate / 100) * 101); + ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate); + if (ret) { + DRM_ERROR("Failed to set HSM clock rate: %d\n", ret); + return; + } - HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0); + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + if (ret) { + DRM_ERROR("Failed to turn on HSM clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; + } - if (debug_dump_regs) { - struct drm_printer p = drm_info_printer(&hdmi->pdev->dev); + /* + * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup + * at 300MHz. + */ + ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, + (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000)); + if (ret) { + DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; + } - dev_info(&hdmi->pdev->dev, "HDMI regs before:\n"); - drm_print_regset32(&p, &hdmi->hdmi_regset); - drm_print_regset32(&p, &hdmi->hd_regset); + ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); + if (ret) { + DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; } - HD_WRITE(VC4_HD_VID_CTL, 0); + if (vc4_hdmi->variant->reset) + vc4_hdmi->variant->reset(vc4_hdmi); - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) | + if (vc4_hdmi->variant->phy_init) + vc4_hdmi->variant->phy_init(vc4_hdmi, mode); + + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT | VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS); - HDMI_WRITE(VC4_HDMI_HORZA, - (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) | - (hsync_pos ? 
VC4_HDMI_HORZA_HPOS : 0) | - VC4_SET_FIELD(mode->hdisplay * pixel_rep, - VC4_HDMI_HORZA_HAP)); - - HDMI_WRITE(VC4_HDMI_HORZB, - VC4_SET_FIELD((mode->htotal - - mode->hsync_end) * pixel_rep, - VC4_HDMI_HORZB_HBP) | - VC4_SET_FIELD((mode->hsync_end - - mode->hsync_start) * pixel_rep, - VC4_HDMI_HORZB_HSP) | - VC4_SET_FIELD((mode->hsync_start - - mode->hdisplay) * pixel_rep, - VC4_HDMI_HORZB_HFP)); - - HDMI_WRITE(VC4_HDMI_VERTA0, verta); - HDMI_WRITE(VC4_HDMI_VERTA1, verta); - - HDMI_WRITE(VC4_HDMI_VERTB0, vertb_even); - HDMI_WRITE(VC4_HDMI_VERTB1, vertb); - - HD_WRITE(VC4_HD_VID_CTL, - (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | - (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW)); + if (vc4_hdmi->variant->set_timings) + vc4_hdmi->variant->set_timings(vc4_hdmi, mode); +} - csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR, - VC4_HD_CSC_CTL_ORDER); +static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder) +{ + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); if (vc4_encoder->hdmi_monitor && - drm_default_rgb_quant_range(mode) == - HDMI_QUANTIZATION_RANGE_LIMITED) { - /* CEA VICs other than #1 requre limited range RGB - * output unless overridden by an AVI infoframe. - * Apply a colorspace conversion to squash 0-255 down - * to 16-235. The matrix here is: - * - * [ 0 0 0.8594 16] - * [ 0 0.8594 0 16] - * [ 0.8594 0 0 16] - * [ 0 0 0 1] - */ - csc_ctl |= VC4_HD_CSC_CTL_ENABLE; - csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC; - csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM, - VC4_HD_CSC_CTL_MODE); + drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED) { + if (vc4_hdmi->variant->csc_setup) + vc4_hdmi->variant->csc_setup(vc4_hdmi, true); - HD_WRITE(VC4_HD_CSC_12_11, (0x000 << 16) | 0x000); - HD_WRITE(VC4_HD_CSC_14_13, (0x100 << 16) | 0x6e0); - HD_WRITE(VC4_HD_CSC_22_21, (0x6e0 << 16) | 0x000); - HD_WRITE(VC4_HD_CSC_24_23, (0x100 << 16) | 0x000); - HD_WRITE(VC4_HD_CSC_32_31, (0x000 << 16) | 0x6e0); - HD_WRITE(VC4_HD_CSC_34_33, (0x100 << 16) | 0x000); vc4_encoder->limited_rgb_range = true; } else { + if (vc4_hdmi->variant->csc_setup) + vc4_hdmi->variant->csc_setup(vc4_hdmi, false); + vc4_encoder->limited_rgb_range = false; } - /* The RGB order applies even when CSC is disabled. */ - HD_WRITE(VC4_HD_CSC_CTL, csc_ctl); - - HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); + HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); +} - if (debug_dump_regs) { - struct drm_printer p = drm_info_printer(&hdmi->pdev->dev); +static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder) +{ + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); + bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; + bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; + int ret; - dev_info(&hdmi->pdev->dev, "HDMI regs after:\n"); - drm_print_regset32(&p, &hdmi->hdmi_regset); - drm_print_regset32(&p, &hdmi->hd_regset); - } + HDMI_WRITE(HDMI_VID_CTL, + VC4_HD_VID_CTL_ENABLE | + VC4_HD_VID_CTL_UNDERFLOW_ENABLE | + VC4_HD_VID_CTL_FRAME_COUNTER_RESET | + (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | + (hsync_pos ? 
0 : VC4_HD_VID_CTL_HSYNC_LOW)); - HD_WRITE(VC4_HD_VID_CTL, - HD_READ(VC4_HD_VID_CTL) | - VC4_HD_VID_CTL_ENABLE | - VC4_HD_VID_CTL_UNDERFLOW_ENABLE | - VC4_HD_VID_CTL_FRAME_COUNTER_RESET); + HDMI_WRITE(HDMI_VID_CTL, + HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_BLANKPIX); if (vc4_encoder->hdmi_monitor) { - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) | + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); - ret = wait_for(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + ret = wait_for(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000); WARN_ONCE(ret, "Timeout waiting for " "VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n"); } else { - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, - HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, + HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~(VC4_HDMI_RAM_PACKET_ENABLE)); - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) & ~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); - ret = wait_for(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + ret = wait_for(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000); WARN_ONCE(ret, "Timeout waiting for " "!VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n"); } if (vc4_encoder->hdmi_monitor) { - u32 drift; - - WARN_ON(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) & + WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE)); - HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL, - HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) | + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, + HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT); - HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, + HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, VC4_HDMI_RAM_PACKET_ENABLE); vc4_hdmi_set_infoframes(encoder); - - drift = HDMI_READ(VC4_HDMI_FIFO_CTL); - drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK; - - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift & ~VC4_HDMI_FIFO_CTL_RECENTER); - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift | VC4_HDMI_FIFO_CTL_RECENTER); - usleep_range(1000, 1100); - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift & ~VC4_HDMI_FIFO_CTL_RECENTER); - HDMI_WRITE(VC4_HDMI_FIFO_CTL, - drift | VC4_HDMI_FIFO_CTL_RECENTER); - - ret = wait_for(HDMI_READ(VC4_HDMI_FIFO_CTL) & - VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1); - WARN_ONCE(ret, "Timeout waiting for " - "VC4_HDMI_FIFO_CTL_RECENTER_DONE"); } + + vc4_hdmi_recenter_fifo(vc4_hdmi); +} + +static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) +{ } static enum drm_mode_status -vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc, +vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode) { - /* - * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must - * be faster than pixel clock, infinitesimally faster, tested in - * simulation. Otherwise, exact value is unimportant for HDMI - * operation." This conflicts with bcm2835's vc4 documentation, which - * states HSM's clock has to be at least 108% of the pixel clock. - * - * Real life tests reveal that vc4's firmware statement holds up, and - * users are able to use pixel clocks closer to HSM's, namely for - * 1920x1200@60Hz. So it was decided to have leave a 1% margin between - * both clocks. Which, for RPi0-3 implies a maximum pixel clock of - * 162MHz. - * - * Additionally, the AXI clock needs to be at least 25% of - * pixel clock, but HSM ends up being the limiting factor. 
- */ - if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock) return MODE_CLOCK_HIGH; return MODE_OK; @@ -701,34 +778,54 @@ static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = { .enable = vc4_hdmi_encoder_enable, }; +static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask) +{ + int i; + u32 channel_map = 0; + + for (i = 0; i < 8; i++) { + if (channel_mask & BIT(i)) + channel_map |= i << (3 * i); + } + return channel_map; +} + +static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask) +{ + int i; + u32 channel_map = 0; + + for (i = 0; i < 8; i++) { + if (channel_mask & BIT(i)) + channel_map |= i << (4 * i); + } + return channel_map; +} + /* HDMI audio codec callbacks */ -static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *hdmi) +static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi) { - struct drm_device *drm = hdmi->encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); - u32 hsm_clock = clk_get_rate(hdmi->hsm_clock); + u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock); unsigned long n, m; - rational_best_approximation(hsm_clock, hdmi->audio.samplerate, + rational_best_approximation(hsm_clock, vc4_hdmi->audio.samplerate, VC4_HD_MAI_SMP_N_MASK >> VC4_HD_MAI_SMP_N_SHIFT, (VC4_HD_MAI_SMP_M_MASK >> VC4_HD_MAI_SMP_M_SHIFT) + 1, &n, &m); - HD_WRITE(VC4_HD_MAI_SMP, - VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) | - VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M)); + HDMI_WRITE(HDMI_MAI_SMP, + VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) | + VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M)); } -static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi) +static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi) { - struct drm_encoder *encoder = hdmi->encoder; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; struct drm_crtc *crtc = encoder->crtc; - struct drm_device *drm = encoder->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); const struct drm_display_mode *mode = &crtc->state->adjusted_mode; - u32 samplerate = hdmi->audio.samplerate; + u32 samplerate = vc4_hdmi->audio.samplerate; u32 n, cts; u64 tmp; @@ -737,7 +834,7 @@ static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi) do_div(tmp, 128 * samplerate); cts = tmp; - HDMI_WRITE(VC4_HDMI_CRP_CFG, + HDMI_WRITE(HDMI_CRP_CFG, VC4_HDMI_CRP_CFG_EXTERNAL_CTS_EN | VC4_SET_FIELD(n, VC4_HDMI_CRP_CFG_N)); @@ -746,8 +843,8 @@ static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi) * providing a CTS_1 value. 
The two CTS values are alternated * between based on the period fields */ - HDMI_WRITE(VC4_HDMI_CTS_0, cts); - HDMI_WRITE(VC4_HDMI_CTS_1, cts); + HDMI_WRITE(HDMI_CTS_0, cts); + HDMI_WRITE(HDMI_CTS_1, cts); } static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) @@ -760,26 +857,25 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) static int vc4_hdmi_audio_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); - struct drm_encoder *encoder = hdmi->encoder; - struct vc4_dev *vc4 = to_vc4_dev(encoder->dev); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct drm_connector *connector = &vc4_hdmi->connector; int ret; - if (hdmi->audio.substream && hdmi->audio.substream != substream) + if (vc4_hdmi->audio.substream && vc4_hdmi->audio.substream != substream) return -EINVAL; - hdmi->audio.substream = substream; + vc4_hdmi->audio.substream = substream; /* * If the HDMI encoder hasn't probed, or the encoder is * currently in DVI mode, treat the codec dai as missing. */ - if (!encoder->crtc || !(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & + if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE)) return -ENODEV; - ret = snd_pcm_hw_constraint_eld(substream->runtime, - hdmi->connector->eld); + ret = snd_pcm_hw_constraint_eld(substream->runtime, connector->eld); if (ret) return ret; @@ -791,34 +887,33 @@ static int vc4_hdmi_audio_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return 0; } -static void vc4_hdmi_audio_reset(struct vc4_hdmi *hdmi) +static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi) { - struct drm_encoder *encoder = hdmi->encoder; - struct drm_device *drm = encoder->dev; - struct device *dev = &hdmi->pdev->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct device *dev = &vc4_hdmi->pdev->dev; int ret; + vc4_hdmi->audio.streaming = false; ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO); if (ret) dev_err(dev, "Failed to stop audio infoframe: %d\n", ret); - HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_RESET); - HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_ERRORF); - HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_FLUSH); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_ERRORF); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH); } static void vc4_hdmi_audio_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); - if (substream != hdmi->audio.substream) + if (substream != vc4_hdmi->audio.substream) return; - vc4_hdmi_audio_reset(hdmi); + vc4_hdmi_audio_reset(vc4_hdmi); - hdmi->audio.substream = NULL; + vc4_hdmi->audio.substream = NULL; } /* HDMI audio codec callbacks */ @@ -826,72 +921,68 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); - struct drm_encoder *encoder = hdmi->encoder; - struct drm_device *drm = encoder->dev; - struct device *dev = &hdmi->pdev->dev; - struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct device *dev = &vc4_hdmi->pdev->dev; u32 audio_packet_config, channel_mask; - u32 channel_map, i; + u32 channel_map; - if (substream != 
hdmi->audio.substream) + if (substream != vc4_hdmi->audio.substream) return -EINVAL; dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__, params_rate(params), params_width(params), params_channels(params)); - hdmi->audio.channels = params_channels(params); - hdmi->audio.samplerate = params_rate(params); + vc4_hdmi->audio.channels = params_channels(params); + vc4_hdmi->audio.samplerate = params_rate(params); - HD_WRITE(VC4_HD_MAI_CTL, - VC4_HD_MAI_CTL_RESET | - VC4_HD_MAI_CTL_FLUSH | - VC4_HD_MAI_CTL_DLATE | - VC4_HD_MAI_CTL_ERRORE | - VC4_HD_MAI_CTL_ERRORF); + HDMI_WRITE(HDMI_MAI_CTL, + VC4_HD_MAI_CTL_RESET | + VC4_HD_MAI_CTL_FLUSH | + VC4_HD_MAI_CTL_DLATE | + VC4_HD_MAI_CTL_ERRORE | + VC4_HD_MAI_CTL_ERRORF); - vc4_hdmi_audio_set_mai_clock(hdmi); + vc4_hdmi_audio_set_mai_clock(vc4_hdmi); + /* The B frame identifier should match the value used by alsa-lib (8) */ audio_packet_config = VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_SAMPLE_FLAT | VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS | - VC4_SET_FIELD(0xf, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER); + VC4_SET_FIELD(0x8, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER); - channel_mask = GENMASK(hdmi->audio.channels - 1, 0); + channel_mask = GENMASK(vc4_hdmi->audio.channels - 1, 0); audio_packet_config |= VC4_SET_FIELD(channel_mask, VC4_HDMI_AUDIO_PACKET_CEA_MASK); /* Set the MAI threshold. This logic mimics the firmware's. */ - if (hdmi->audio.samplerate > 96000) { - HD_WRITE(VC4_HD_MAI_THR, - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); - } else if (hdmi->audio.samplerate > 48000) { - HD_WRITE(VC4_HD_MAI_THR, - VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); + if (vc4_hdmi->audio.samplerate > 96000) { + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); + } else if (vc4_hdmi->audio.samplerate > 48000) { + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW)); } else { - HD_WRITE(VC4_HD_MAI_THR, - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW)); + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW)); } - HDMI_WRITE(VC4_HDMI_MAI_CONFIG, + HDMI_WRITE(HDMI_MAI_CONFIG, VC4_HDMI_MAI_CONFIG_BIT_REVERSE | VC4_SET_FIELD(channel_mask, VC4_HDMI_MAI_CHANNEL_MASK)); - channel_map = 0; - for (i = 0; i < 8; i++) { - if (channel_mask & BIT(i)) - channel_map |= i << (3 * i); - } + channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask); + HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map); + HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config); + vc4_hdmi_set_n_cts(vc4_hdmi); - HDMI_WRITE(VC4_HDMI_MAI_CHANNEL_MAP, channel_map); - HDMI_WRITE(VC4_HDMI_AUDIO_PACKET_CONFIG, audio_packet_config); - vc4_hdmi_set_n_cts(hdmi); + vc4_hdmi_set_audio_infoframe(encoder); return 0; } @@ -899,30 +990,31 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream, static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); - struct drm_encoder *encoder = hdmi->encoder; - struct drm_device *drm = encoder->dev; - struct 
vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); switch (cmd) { case SNDRV_PCM_TRIGGER_START: - vc4_hdmi_set_audio_infoframe(encoder); - HDMI_WRITE(VC4_HDMI_TX_PHY_CTL0, - HDMI_READ(VC4_HDMI_TX_PHY_CTL0) & - ~VC4_HDMI_TX_PHY_RNG_PWRDN); - HD_WRITE(VC4_HD_MAI_CTL, - VC4_SET_FIELD(hdmi->audio.channels, - VC4_HD_MAI_CTL_CHNUM) | - VC4_HD_MAI_CTL_ENABLE); + vc4_hdmi->audio.streaming = true; + + if (vc4_hdmi->variant->phy_rng_enable) + vc4_hdmi->variant->phy_rng_enable(vc4_hdmi); + + HDMI_WRITE(HDMI_MAI_CTL, + VC4_SET_FIELD(vc4_hdmi->audio.channels, + VC4_HD_MAI_CTL_CHNUM) | + VC4_HD_MAI_CTL_ENABLE); break; case SNDRV_PCM_TRIGGER_STOP: - HD_WRITE(VC4_HD_MAI_CTL, - VC4_HD_MAI_CTL_DLATE | - VC4_HD_MAI_CTL_ERRORE | - VC4_HD_MAI_CTL_ERRORF); - HDMI_WRITE(VC4_HDMI_TX_PHY_CTL0, - HDMI_READ(VC4_HDMI_TX_PHY_CTL0) | - VC4_HDMI_TX_PHY_RNG_PWRDN); + HDMI_WRITE(HDMI_MAI_CTL, + VC4_HD_MAI_CTL_DLATE | + VC4_HD_MAI_CTL_ERRORE | + VC4_HD_MAI_CTL_ERRORF); + + if (vc4_hdmi->variant->phy_rng_disable) + vc4_hdmi->variant->phy_rng_disable(vc4_hdmi); + + vc4_hdmi->audio.streaming = false; + break; default: break; @@ -943,10 +1035,11 @@ static int vc4_hdmi_audio_eld_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct vc4_hdmi *hdmi = snd_component_to_hdmi(component); + struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component); + struct drm_connector *connector = &vc4_hdmi->connector; uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; - uinfo->count = sizeof(hdmi->connector->eld); + uinfo->count = sizeof(connector->eld); return 0; } @@ -955,10 +1048,11 @@ static int vc4_hdmi_audio_eld_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); - struct vc4_hdmi *hdmi = snd_component_to_hdmi(component); + struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component); + struct drm_connector *connector = &vc4_hdmi->connector; - memcpy(ucontrol->value.bytes.data, hdmi->connector->eld, - sizeof(hdmi->connector->eld)); + memcpy(ucontrol->value.bytes.data, connector->eld, + sizeof(connector->eld)); return 0; } @@ -983,6 +1077,7 @@ static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = { }; static const struct snd_soc_component_driver vc4_hdmi_audio_component_drv = { + .name = "vc4-hdmi-codec-dai-component", .controls = vc4_hdmi_audio_controls, .num_controls = ARRAY_SIZE(vc4_hdmi_audio_controls), .dapm_widgets = vc4_hdmi_audio_widgets, @@ -1023,9 +1118,9 @@ static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = { static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai) { - struct vc4_hdmi *hdmi = dai_to_hdmi(dai); + struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai); - snd_soc_dai_init_dma_data(dai, &hdmi->audio.dma_data, NULL); + snd_soc_dai_init_dma_data(dai, &vc4_hdmi->audio.dma_data, NULL); return 0; } @@ -1051,12 +1146,15 @@ static const struct snd_dmaengine_pcm_config pcm_conf = { .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, }; -static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) +static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) { - struct snd_soc_dai_link *dai_link = &hdmi->audio.link; - struct snd_soc_card *card = &hdmi->audio.card; - struct device *dev = &hdmi->pdev->dev; + const struct vc4_hdmi_register *mai_data = + &vc4_hdmi->variant->registers[HDMI_MAI_DATA]; + struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link; + struct snd_soc_card *card = 
&vc4_hdmi->audio.card; + struct device *dev = &vc4_hdmi->pdev->dev; const __be32 *addr; + int index; int ret; if (!of_find_property(dev->of_node, "dmas", NULL)) { @@ -1065,6 +1163,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) return 0; } + if (mai_data->reg != VC4_HD) { + WARN_ONCE(true, "MAI isn't in the HD block\n"); + return -EINVAL; + } + /* * Get the physical address of VC4_HD_MAI_DATA. We need to retrieve * the bus address specified in the DT, because the physical address @@ -1072,10 +1175,16 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) * for DMA transfers. * This VC/MMU should probably be exposed to avoid this kind of hacks. */ - addr = of_get_address(dev->of_node, 1, NULL, NULL); - hdmi->audio.dma_data.addr = be32_to_cpup(addr) + VC4_HD_MAI_DATA; - hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - hdmi->audio.dma_data.maxburst = 2; + index = of_property_match_string(dev->of_node, "reg-names", "hd"); + /* Before BCM2711, we don't have a named register range */ + if (index < 0) + index = 1; + + addr = of_get_address(dev->of_node, index, NULL, NULL); + + vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset; + vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + vc4_hdmi->audio.dma_data.maxburst = 2; ret = devm_snd_dmaengine_pcm_register(dev, &pcm_conf, 0); if (ret) { @@ -1098,9 +1207,9 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) return ret; } - dai_link->cpus = &hdmi->audio.cpu; - dai_link->codecs = &hdmi->audio.codec; - dai_link->platforms = &hdmi->audio.platform; + dai_link->cpus = &vc4_hdmi->audio.cpu; + dai_link->codecs = &vc4_hdmi->audio.codec; + dai_link->platforms = &vc4_hdmi->audio.platform; dai_link->num_cpus = 1; dai_link->num_codecs = 1; @@ -1115,8 +1224,9 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) card->dai_link = dai_link; card->num_links = 1; - card->name = "vc4-hdmi"; + card->name = vc4_hdmi->variant->card_name; card->dev = dev; + card->owner = THIS_MODULE; /* * Be careful, snd_soc_register_card() calls dev_set_drvdata() and @@ -1125,7 +1235,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) * now stored in card->drvdata and should be retrieved with * snd_soc_card_get_drvdata() if needed. */ - snd_soc_card_set_drvdata(card, hdmi); + snd_soc_card_set_drvdata(card, vc4_hdmi); ret = devm_snd_soc_register_card(dev, card); if (ret) dev_err(dev, "Could not register sound card: %d\n", ret); @@ -1137,35 +1247,35 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) #ifdef CONFIG_DRM_VC4_HDMI_CEC static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv) { - struct vc4_dev *vc4 = priv; - struct vc4_hdmi *hdmi = vc4->hdmi; - - if (hdmi->cec_irq_was_rx) { - if (hdmi->cec_rx_msg.len) - cec_received_msg(hdmi->cec_adap, &hdmi->cec_rx_msg); - } else if (hdmi->cec_tx_ok) { - cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_OK, + struct vc4_hdmi *vc4_hdmi = priv; + + if (vc4_hdmi->cec_irq_was_rx) { + if (vc4_hdmi->cec_rx_msg.len) + cec_received_msg(vc4_hdmi->cec_adap, + &vc4_hdmi->cec_rx_msg); + } else if (vc4_hdmi->cec_tx_ok) { + cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); } else { /* * This CEC implementation makes 1 retry, so if we * get a NACK, then that means it made 2 attempts. 
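* Hence the NACK count of 2 handed to cec_transmit_done() below.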
*/ - cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_NACK, + cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK, 0, 2, 0, 0); } return IRQ_HANDLED; } -static void vc4_cec_read_msg(struct vc4_dev *vc4, u32 cntrl1) +static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) { - struct cec_msg *msg = &vc4->hdmi->cec_rx_msg; + struct cec_msg *msg = &vc4_hdmi->cec_rx_msg; unsigned int i; msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >> VC4_HDMI_CEC_REC_WRD_CNT_SHIFT); for (i = 0; i < msg->len; i += 4) { - u32 val = HDMI_READ(VC4_HDMI_CEC_RX_DATA_1 + i); + u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i); msg->msg[i] = val & 0xff; msg->msg[i + 1] = (val >> 8) & 0xff; @@ -1176,38 +1286,37 @@ static void vc4_cec_read_msg(struct vc4_dev *vc4, u32 cntrl1) static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) { - struct vc4_dev *vc4 = priv; - struct vc4_hdmi *hdmi = vc4->hdmi; - u32 stat = HDMI_READ(VC4_HDMI_CPU_STATUS); + struct vc4_hdmi *vc4_hdmi = priv; + u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS); u32 cntrl1, cntrl5; if (!(stat & VC4_HDMI_CPU_CEC)) return IRQ_NONE; - hdmi->cec_rx_msg.len = 0; - cntrl1 = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); - cntrl5 = HDMI_READ(VC4_HDMI_CEC_CNTRL_5); - hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT; - if (hdmi->cec_irq_was_rx) { - vc4_cec_read_msg(vc4, cntrl1); + vc4_hdmi->cec_rx_msg.len = 0; + cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); + cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5); + vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT; + if (vc4_hdmi->cec_irq_was_rx) { + vc4_cec_read_msg(vc4_hdmi, cntrl1); cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1); + HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF; } else { - hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; + vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; } - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1); - HDMI_WRITE(VC4_HDMI_CPU_CLEAR, VC4_HDMI_CPU_CEC); + HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1); + HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC); return IRQ_WAKE_THREAD; } static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) { - struct vc4_dev *vc4 = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); /* clock period in microseconds */ const u32 usecs = 1000000 / CEC_CLOCK_FREQ; - u32 val = HDMI_READ(VC4_HDMI_CEC_CNTRL_5); + u32 val = HDMI_READ(HDMI_CEC_CNTRL_5); val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET | VC4_HDMI_CEC_CNT_TO_4700_US_MASK | @@ -1216,30 +1325,30 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT); if (enable) { - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val | + HDMI_WRITE(HDMI_CEC_CNTRL_5, val | VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_2, - ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | - ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | - ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | - ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | - ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_3, - ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | - ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | - ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | - ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_4, - ((4300 / 
usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | - ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | - ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | - ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); - - HDMI_WRITE(VC4_HDMI_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); + HDMI_WRITE(HDMI_CEC_CNTRL_5, val); + HDMI_WRITE(HDMI_CEC_CNTRL_2, + ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | + ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | + ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | + ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | + ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_3, + ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | + ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | + ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | + ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_4, + ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | + ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | + ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | + ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); + + HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); } else { - HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, VC4_HDMI_CPU_CEC); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val | + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); + HDMI_WRITE(HDMI_CEC_CNTRL_5, val | VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); } return 0; @@ -1247,10 +1356,10 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) { - struct vc4_dev *vc4 = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, - (HDMI_READ(VC4_HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) | + HDMI_WRITE(HDMI_CEC_CNTRL_1, + (HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) | (log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT); return 0; } @@ -1258,25 +1367,25 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg) { - struct vc4_dev *vc4 = cec_get_drvdata(adap); + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); u32 val; unsigned int i; for (i = 0; i < msg->len; i += 4) - HDMI_WRITE(VC4_HDMI_CEC_TX_DATA_1 + i, + HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i, (msg->msg[i]) | (msg->msg[i + 1] << 8) | (msg->msg[i + 2] << 16) | (msg->msg[i + 3] << 24)); - val = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); + val = HDMI_READ(HDMI_CEC_CNTRL_1); val &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val); + HDMI_WRITE(HDMI_CEC_CNTRL_1, val); val &= ~VC4_HDMI_CEC_MESSAGE_LENGTH_MASK; val |= (msg->len - 1) << VC4_HDMI_CEC_MESSAGE_LENGTH_SHIFT; val |= VC4_HDMI_CEC_START_XMIT_BEGIN; - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val); + HDMI_WRITE(HDMI_CEC_CNTRL_1, val); return 0; } @@ -1285,61 +1394,275 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { .adap_log_addr = vc4_hdmi_cec_adap_log_addr, .adap_transmit = vc4_hdmi_cec_adap_transmit, }; -#endif -static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) +static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) { -#ifdef CONFIG_DRM_VC4_HDMI_CEC struct cec_connector_info conn_info; -#endif - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = drm->dev_private; - struct 
vc4_hdmi *hdmi; - struct vc4_hdmi_encoder *vc4_hdmi_encoder; - struct device_node *ddc_node; + struct platform_device *pdev = vc4_hdmi->pdev; u32 value; int ret; - hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) + if (!vc4_hdmi->variant->cec_available) + return 0; + + vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, + vc4_hdmi, "vc4", + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, 1); + ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap); + if (ret < 0) + return ret; + + cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); + cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); + + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff); + value = HDMI_READ(HDMI_CEC_CNTRL_1); + value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; + /* + * Set the logical address to Unregistered and set the clock + * divider: the hsm_clock rate and this divider setting will + * give a 40 kHz CEC clock. + */ + value |= VC4_HDMI_CEC_ADDR_MASK | + (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT); + HDMI_WRITE(HDMI_CEC_CNTRL_1, value); + ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0), + vc4_cec_irq_handler, + vc4_cec_irq_handler_thread, 0, + "vc4 hdmi cec", vc4_hdmi); + if (ret) + goto err_delete_cec_adap; + + ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); + if (ret < 0) + goto err_delete_cec_adap; + + return 0; + +err_delete_cec_adap: + cec_delete_adapter(vc4_hdmi->cec_adap); + + return ret; +} + +static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) +{ + cec_unregister_adapter(vc4_hdmi->cec_adap); +} +#else +static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) +{ + return 0; +} + +static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) {}; + +#endif + +static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi, + struct debugfs_regset32 *regset, + enum vc4_hdmi_regs reg) +{ + const struct vc4_hdmi_variant *variant = vc4_hdmi->variant; + struct debugfs_reg32 *regs, *new_regs; + unsigned int count = 0; + unsigned int i; + + regs = kcalloc(variant->num_registers, sizeof(*regs), + GFP_KERNEL); + if (!regs) return -ENOMEM; - vc4_hdmi_encoder = devm_kzalloc(dev, sizeof(*vc4_hdmi_encoder), - GFP_KERNEL); - if (!vc4_hdmi_encoder) + for (i = 0; i < variant->num_registers; i++) { + const struct vc4_hdmi_register *field = &variant->registers[i]; + + if (field->reg != reg) + continue; + + regs[count].name = field->name; + regs[count].offset = field->offset; + count++; + } + + new_regs = krealloc(regs, count * sizeof(*regs), GFP_KERNEL); + if (!new_regs) return -ENOMEM; - vc4_hdmi_encoder->base.type = VC4_ENCODER_TYPE_HDMI; - hdmi->encoder = &vc4_hdmi_encoder->base.base; - - hdmi->pdev = pdev; - hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0); - if (IS_ERR(hdmi->hdmicore_regs)) - return PTR_ERR(hdmi->hdmicore_regs); - - hdmi->hd_regs = vc4_ioremap_regs(pdev, 1); - if (IS_ERR(hdmi->hd_regs)) - return PTR_ERR(hdmi->hd_regs); - - hdmi->hdmi_regset.base = hdmi->hdmicore_regs; - hdmi->hdmi_regset.regs = hdmi_regs; - hdmi->hdmi_regset.nregs = ARRAY_SIZE(hdmi_regs); - hdmi->hd_regset.base = hdmi->hd_regs; - hdmi->hd_regset.regs = hd_regs; - hdmi->hd_regset.nregs = ARRAY_SIZE(hd_regs); - - hdmi->pixel_clock = devm_clk_get(dev, "pixel"); - if (IS_ERR(hdmi->pixel_clock)) { - ret = PTR_ERR(hdmi->pixel_clock); + + regset->base = __vc4_hdmi_get_field_base(vc4_hdmi, reg); + regset->regs = new_regs; + regset->nregs = count; + + return 0; +} + +static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) +{ + struct platform_device *pdev = vc4_hdmi->pdev; + struct device *dev = &pdev->dev; + 
int ret; + + vc4_hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0); + if (IS_ERR(vc4_hdmi->hdmicore_regs)) + return PTR_ERR(vc4_hdmi->hdmicore_regs); + + vc4_hdmi->hd_regs = vc4_ioremap_regs(pdev, 1); + if (IS_ERR(vc4_hdmi->hd_regs)) + return PTR_ERR(vc4_hdmi->hd_regs); + + ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD); + if (ret) + return ret; + + ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI); + if (ret) + return ret; + + vc4_hdmi->pixel_clock = devm_clk_get(dev, "pixel"); + if (IS_ERR(vc4_hdmi->pixel_clock)) { + ret = PTR_ERR(vc4_hdmi->pixel_clock); if (ret != -EPROBE_DEFER) DRM_ERROR("Failed to get pixel clock\n"); return ret; } - hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); - if (IS_ERR(hdmi->hsm_clock)) { + + vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_clock)) { DRM_ERROR("Failed to get HDMI state machine clock\n"); - return PTR_ERR(hdmi->hsm_clock); + return PTR_ERR(vc4_hdmi->hsm_clock); } + vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock; + + return 0; +} + +static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) +{ + struct platform_device *pdev = vc4_hdmi->pdev; + struct device *dev = &pdev->dev; + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi"); + if (!res) + return -ENODEV; + + vc4_hdmi->hdmicore_regs = devm_ioremap(dev, res->start, + resource_size(res)); + if (!vc4_hdmi->hdmicore_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hd"); + if (!res) + return -ENODEV; + + vc4_hdmi->hd_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->hd_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cec"); + if (!res) + return -ENODEV; + + vc4_hdmi->cec_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->cec_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csc"); + if (!res) + return -ENODEV; + + vc4_hdmi->csc_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->csc_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvp"); + if (!res) + return -ENODEV; + + vc4_hdmi->dvp_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->dvp_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + if (!res) + return -ENODEV; + + vc4_hdmi->phy_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->phy_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "packet"); + if (!res) + return -ENODEV; + + vc4_hdmi->ram_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->ram_regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rm"); + if (!res) + return -ENODEV; + + vc4_hdmi->rm_regs = devm_ioremap(dev, res->start, resource_size(res)); + if (!vc4_hdmi->rm_regs) + return -ENOMEM; + + vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_clock)) { + DRM_ERROR("Failed to get HDMI state machine clock\n"); + return PTR_ERR(vc4_hdmi->hsm_clock); + } + + vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb"); + if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) { + DRM_ERROR("Failed to get pixel bvb clock\n"); + return PTR_ERR(vc4_hdmi->pixel_bvb_clock); + } + + vc4_hdmi->audio_clock = devm_clk_get(dev, "audio"); + if (IS_ERR(vc4_hdmi->audio_clock)) { + DRM_ERROR("Failed to get audio clock\n"); + return 
PTR_ERR(vc4_hdmi->audio_clock); + } + + vc4_hdmi->reset = devm_reset_control_get(dev, NULL); + if (IS_ERR(vc4_hdmi->reset)) { + DRM_ERROR("Failed to get HDMI reset line\n"); + return PTR_ERR(vc4_hdmi->reset); + } + + return 0; +} + +static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) +{ + const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev); + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_hdmi *vc4_hdmi; + struct drm_encoder *encoder; + struct device_node *ddc_node; + u32 value; + int ret; + + vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL); + if (!vc4_hdmi) + return -ENOMEM; + + dev_set_drvdata(dev, vc4_hdmi); + encoder = &vc4_hdmi->encoder.base.base; + vc4_hdmi->encoder.base.type = variant->encoder_type; + vc4_hdmi->encoder.base.pre_crtc_configure = vc4_hdmi_encoder_pre_crtc_configure; + vc4_hdmi->encoder.base.pre_crtc_enable = vc4_hdmi_encoder_pre_crtc_enable; + vc4_hdmi->encoder.base.post_crtc_enable = vc4_hdmi_encoder_post_crtc_enable; + vc4_hdmi->encoder.base.post_crtc_disable = vc4_hdmi_encoder_post_crtc_disable; + vc4_hdmi->encoder.base.post_crtc_powerdown = vc4_hdmi_encoder_post_crtc_powerdown; + vc4_hdmi->pdev = pdev; + vc4_hdmi->variant = variant; + + ret = variant->init_resources(vc4_hdmi); + if (ret) + return ret; ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); if (!ddc_node) { @@ -1347,123 +1670,62 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) return -ENODEV; } - hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); + vc4_hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); - if (!hdmi->ddc) { + if (!vc4_hdmi->ddc) { DRM_DEBUG("Failed to get ddc i2c adapter by node\n"); return -EPROBE_DEFER; } - /* This is the rate that is set by the firmware. The number - * needs to be a bit higher than the pixel clock rate - * (generally 148.5Mhz). - */ - ret = clk_set_rate(hdmi->hsm_clock, HSM_CLOCK_FREQ); - if (ret) { - DRM_ERROR("Failed to set HSM clock rate: %d\n", ret); - goto err_put_i2c; - } - - ret = clk_prepare_enable(hdmi->hsm_clock); - if (ret) { - DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n", - ret); - goto err_put_i2c; - } - /* Only use the GPIO HPD pin if present in the DT, otherwise * we'll use the HDMI core's register. */ if (of_find_property(dev->of_node, "hpd-gpios", &value)) { enum of_gpio_flags hpd_gpio_flags; - hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node, - "hpd-gpios", 0, - &hpd_gpio_flags); - if (hdmi->hpd_gpio < 0) { - ret = hdmi->hpd_gpio; + vc4_hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node, + "hpd-gpios", 0, + &hpd_gpio_flags); + if (vc4_hdmi->hpd_gpio < 0) { + ret = vc4_hdmi->hpd_gpio; goto err_unprepare_hsm; } - hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW; + vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW; } - vc4->hdmi = hdmi; - - /* HDMI core must be enabled. 
*/ - if (!(HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE)) { - HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST); - udelay(1); - HD_WRITE(VC4_HD_M_CTL, 0); - - HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_ENABLE); - } pm_runtime_enable(dev); - drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs); + drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs); - hdmi->connector = - vc4_hdmi_connector_init(drm, hdmi->encoder, hdmi->ddc); - if (IS_ERR(hdmi->connector)) { - ret = PTR_ERR(hdmi->connector); + ret = vc4_hdmi_connector_init(drm, vc4_hdmi); + if (ret) goto err_destroy_encoder; - } -#ifdef CONFIG_DRM_VC4_HDMI_CEC - hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, - vc4, "vc4", - CEC_CAP_DEFAULTS | - CEC_CAP_CONNECTOR_INFO, 1); - ret = PTR_ERR_OR_ZERO(hdmi->cec_adap); - if (ret < 0) - goto err_destroy_conn; - cec_fill_conn_info_from_drm(&conn_info, hdmi->connector); - cec_s_conn_info(hdmi->cec_adap, &conn_info); - - HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff); - value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); - value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; - /* - * Set the logical address to Unregistered and set the clock - * divider: the hsm_clock rate and this divider setting will - * give a 40 kHz CEC clock. - */ - value |= VC4_HDMI_CEC_ADDR_MASK | - (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT); - HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, value); - ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), - vc4_cec_irq_handler, - vc4_cec_irq_handler_thread, 0, - "vc4 hdmi cec", vc4); + ret = vc4_hdmi_cec_init(vc4_hdmi); if (ret) - goto err_delete_cec_adap; - ret = cec_register_adapter(hdmi->cec_adap, dev); - if (ret < 0) - goto err_delete_cec_adap; -#endif + goto err_destroy_conn; - ret = vc4_hdmi_audio_init(hdmi); + ret = vc4_hdmi_audio_init(vc4_hdmi); if (ret) - goto err_destroy_encoder; + goto err_free_cec; - vc4_debugfs_add_file(drm, "hdmi_regs", vc4_hdmi_debugfs_regs, hdmi); + vc4_debugfs_add_file(drm, variant->debugfs_name, + vc4_hdmi_debugfs_regs, + vc4_hdmi); return 0; -#ifdef CONFIG_DRM_VC4_HDMI_CEC -err_delete_cec_adap: - cec_delete_adapter(hdmi->cec_adap); +err_free_cec: + vc4_hdmi_cec_exit(vc4_hdmi); err_destroy_conn: - vc4_hdmi_connector_destroy(hdmi->connector); -#endif + vc4_hdmi_connector_destroy(&vc4_hdmi->connector); err_destroy_encoder: - drm_encoder_cleanup(hdmi->encoder); + drm_encoder_cleanup(encoder); err_unprepare_hsm: - clk_disable_unprepare(hdmi->hsm_clock); pm_runtime_disable(dev); -err_put_i2c: - put_device(&hdmi->ddc->dev); + put_device(&vc4_hdmi->ddc->dev); return ret; } @@ -1471,20 +1733,39 @@ err_put_i2c: static void vc4_hdmi_unbind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = drm->dev_private; - struct vc4_hdmi *hdmi = vc4->hdmi; + struct vc4_hdmi *vc4_hdmi; - cec_unregister_adapter(hdmi->cec_adap); - vc4_hdmi_connector_destroy(hdmi->connector); - drm_encoder_cleanup(hdmi->encoder); + /* + * ASoC makes it a bit hard to retrieve a pointer to the + * vc4_hdmi structure. Registering the card will overwrite our + * device drvdata with a pointer to the snd_soc_card structure, + * which can then be used to retrieve whatever drvdata we want + * to associate. 
+ * + * However, that doesn't fly when we don't register an ASoC + * card (because of an old DT that is missing the dmas + * properties, for example): the card isn't registered, so the + * device drvdata is never overwritten. + * + * We can deal with both cases by making sure a snd_soc_card + * pointer and a vc4_hdmi structure are pointing to the same + * memory address, so we can treat them interchangeably without + * any issue. + */ + BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0); + BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0); + vc4_hdmi = dev_get_drvdata(dev); - clk_disable_unprepare(hdmi->hsm_clock); - pm_runtime_disable(dev); + kfree(vc4_hdmi->hdmi_regset.regs); + kfree(vc4_hdmi->hd_regset.regs); - put_device(&hdmi->ddc->dev); + vc4_hdmi_cec_exit(vc4_hdmi); + vc4_hdmi_connector_destroy(&vc4_hdmi->connector); + drm_encoder_cleanup(&vc4_hdmi->encoder.base.base); + + pm_runtime_disable(dev); - vc4->hdmi = NULL; + put_device(&vc4_hdmi->ddc->dev); } static const struct component_ops vc4_hdmi_ops = { @@ -1503,8 +1784,80 @@ static int vc4_hdmi_dev_remove(struct platform_device *pdev) return 0; } +static const struct vc4_hdmi_variant bcm2835_variant = { + .encoder_type = VC4_ENCODER_TYPE_HDMI0, + .debugfs_name = "hdmi_regs", + .card_name = "vc4-hdmi", + .max_pixel_clock = 162000000, + .cec_available = true, + .registers = vc4_hdmi_fields, + .num_registers = ARRAY_SIZE(vc4_hdmi_fields), + + .init_resources = vc4_hdmi_init_resources, + .csc_setup = vc4_hdmi_csc_setup, + .reset = vc4_hdmi_reset, + .set_timings = vc4_hdmi_set_timings, + .phy_init = vc4_hdmi_phy_init, + .phy_disable = vc4_hdmi_phy_disable, + .phy_rng_enable = vc4_hdmi_phy_rng_enable, + .phy_rng_disable = vc4_hdmi_phy_rng_disable, + .channel_map = vc4_hdmi_channel_map, +}; + +static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { + .encoder_type = VC4_ENCODER_TYPE_HDMI0, + .debugfs_name = "hdmi0_regs", + .card_name = "vc4-hdmi-0", + .max_pixel_clock = 297000000, + .registers = vc5_hdmi_hdmi0_fields, + .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields), + .phy_lane_mapping = { + PHY_LANE_0, + PHY_LANE_1, + PHY_LANE_2, + PHY_LANE_CK, + }, + + .init_resources = vc5_hdmi_init_resources, + .csc_setup = vc5_hdmi_csc_setup, + .reset = vc5_hdmi_reset, + .set_timings = vc5_hdmi_set_timings, + .phy_init = vc5_hdmi_phy_init, + .phy_disable = vc5_hdmi_phy_disable, + .phy_rng_enable = vc5_hdmi_phy_rng_enable, + .phy_rng_disable = vc5_hdmi_phy_rng_disable, + .channel_map = vc5_hdmi_channel_map, +}; + +static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { + .encoder_type = VC4_ENCODER_TYPE_HDMI1, + .debugfs_name = "hdmi1_regs", + .card_name = "vc4-hdmi-1", + .max_pixel_clock = 297000000, + .registers = vc5_hdmi_hdmi1_fields, + .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields), + .phy_lane_mapping = { + PHY_LANE_1, + PHY_LANE_0, + PHY_LANE_CK, + PHY_LANE_2, + }, + + .init_resources = vc5_hdmi_init_resources, + .csc_setup = vc5_hdmi_csc_setup, + .reset = vc5_hdmi_reset, + .set_timings = vc5_hdmi_set_timings, + .phy_init = vc5_hdmi_phy_init, + .phy_disable = vc5_hdmi_phy_disable, + .phy_rng_enable = vc5_hdmi_phy_rng_enable, + .phy_rng_disable = vc5_hdmi_phy_rng_disable, + .channel_map = vc5_hdmi_channel_map, +}; + static const struct of_device_id vc4_hdmi_dt_match[] = { - { .compatible = "brcm,bcm2835-hdmi" }, + { .compatible = "brcm,bcm2835-hdmi", .data = &bcm2835_variant }, + { .compatible = "brcm,bcm2711-hdmi0", .data = &bcm2711_hdmi0_variant }, + { .compatible = "brcm,bcm2711-hdmi1", .data =
&bcm2711_hdmi1_variant }, {} }; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h new file mode 100644 index 000000000000..63c6f8bddf1d --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -0,0 +1,184 @@ +#ifndef _VC4_HDMI_H_ +#define _VC4_HDMI_H_ + +#include <drm/drm_connector.h> +#include <media/cec.h> +#include <sound/dmaengine_pcm.h> +#include <sound/soc.h> + +#include "vc4_drv.h" + +/* VC4 HDMI encoder KMS struct */ +struct vc4_hdmi_encoder { + struct vc4_encoder base; + bool hdmi_monitor; + bool limited_rgb_range; +}; + +static inline struct vc4_hdmi_encoder * +to_vc4_hdmi_encoder(struct drm_encoder *encoder) +{ + return container_of(encoder, struct vc4_hdmi_encoder, base.base); +} + +struct drm_display_mode; + +struct vc4_hdmi; +struct vc4_hdmi_register; + +enum vc4_hdmi_phy_channel { + PHY_LANE_0 = 0, + PHY_LANE_1, + PHY_LANE_2, + PHY_LANE_CK, +}; + +struct vc4_hdmi_variant { + /* Encoder Type for that controller */ + enum vc4_encoder_type encoder_type; + + /* ALSA card name */ + const char *card_name; + + /* Filename to expose the registers in debugfs */ + const char *debugfs_name; + + /* Set to true when the CEC support is available */ + bool cec_available; + + /* Maximum pixel clock supported by the controller (in Hz) */ + unsigned long long max_pixel_clock; + + /* List of the registers available on that variant */ + const struct vc4_hdmi_register *registers; + + /* Number of registers on that variant */ + unsigned int num_registers; + + /* BCM2711 Only. + * The variants don't map the lane in the same order in the + * PHY, so this is an array mapping the HDMI channel (index) + * to the PHY lane (value). + */ + enum vc4_hdmi_phy_channel phy_lane_mapping[4]; + + /* Callback to get the resources (memory region, interrupts, + * clocks, etc) for that variant. + */ + int (*init_resources)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to reset the HDMI block */ + void (*reset)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to enable / disable the CSC */ + void (*csc_setup)(struct vc4_hdmi *vc4_hdmi, bool enable); + + /* Callback to configure the video timings in the HDMI block */ + void (*set_timings)(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); + + /* Callback to initialize the PHY according to the mode */ + void (*phy_init)(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); + + /* Callback to disable the PHY */ + void (*phy_disable)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to enable the RNG in the PHY */ + void (*phy_rng_enable)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to disable the RNG in the PHY */ + void (*phy_rng_disable)(struct vc4_hdmi *vc4_hdmi); + + /* Callback to get channel map */ + u32 (*channel_map)(struct vc4_hdmi *vc4_hdmi, u32 channel_mask); +}; + +/* HDMI audio information */ +struct vc4_hdmi_audio { + struct snd_soc_card card; + struct snd_soc_dai_link link; + struct snd_soc_dai_link_component cpu; + struct snd_soc_dai_link_component codec; + struct snd_soc_dai_link_component platform; + int samplerate; + int channels; + struct snd_dmaengine_dai_dma_data dma_data; + struct snd_pcm_substream *substream; + + bool streaming; +}; + +/* General HDMI hardware state. 
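+ * One instance exists per HDMI controller (BCM2711 has two).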
*/ +struct vc4_hdmi { + struct vc4_hdmi_audio audio; + + struct platform_device *pdev; + const struct vc4_hdmi_variant *variant; + + struct vc4_hdmi_encoder encoder; + struct drm_connector connector; + + struct i2c_adapter *ddc; + void __iomem *hdmicore_regs; + void __iomem *hd_regs; + + /* VC5 Only */ + void __iomem *cec_regs; + /* VC5 Only */ + void __iomem *csc_regs; + /* VC5 Only */ + void __iomem *dvp_regs; + /* VC5 Only */ + void __iomem *phy_regs; + /* VC5 Only */ + void __iomem *ram_regs; + /* VC5 Only */ + void __iomem *rm_regs; + + int hpd_gpio; + bool hpd_active_low; + + struct cec_adapter *cec_adap; + struct cec_msg cec_rx_msg; + bool cec_tx_ok; + bool cec_irq_was_rx; + + struct clk *pixel_clock; + struct clk *hsm_clock; + struct clk *audio_clock; + struct clk *pixel_bvb_clock; + + struct reset_control *reset; + + struct debugfs_regset32 hdmi_regset; + struct debugfs_regset32 hd_regset; +}; + +static inline struct vc4_hdmi * +connector_to_vc4_hdmi(struct drm_connector *connector) +{ + return container_of(connector, struct vc4_hdmi, connector); +} + +static inline struct vc4_hdmi * +encoder_to_vc4_hdmi(struct drm_encoder *encoder) +{ + struct vc4_hdmi_encoder *_encoder = to_vc4_hdmi_encoder(encoder); + + return container_of(_encoder, struct vc4_hdmi, encoder); +} + +void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); +void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi); +void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi); +void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi); + +void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, + struct drm_display_mode *mode); +void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi); +void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi); +void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi); + +#endif /* _VC4_HDMI_H_ */ diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c new file mode 100644 index 000000000000..057796b54c51 --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2015 Broadcom + * Copyright (c) 2014 The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#include "vc4_hdmi.h" +#include "vc4_regs.h" +#include "vc4_hdmi_regs.h" + +#define VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB BIT(5) +#define VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB BIT(4) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET BIT(3) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET BIT(2) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET BIT(1) +#define VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET BIT(0) + +#define VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN BIT(4) + +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_SHIFT 29 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_MASK VC4_MASK(31, 29) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_SHIFT 24 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_MASK VC4_MASK(28, 24) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_SHIFT 21 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_MASK VC4_MASK(23, 21) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_SHIFT 16 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_MASK VC4_MASK(20, 16) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_SHIFT 13 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_MASK VC4_MASK(15, 13) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_SHIFT 8 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_MASK VC4_MASK(12, 8) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_SHIFT 5 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_MASK VC4_MASK(7, 5) +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_MASK VC4_MASK(4, 0) + +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_SHIFT 15 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_MASK VC4_MASK(19, 15) +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_SHIFT 10 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_MASK VC4_MASK(14, 10) +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_SHIFT 5 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_MASK VC4_MASK(9, 5) +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_MASK VC4_MASK(4, 0) + +#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_SHIFT 16 +#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_MASK VC4_MASK(19, 16) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_SHIFT 12 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_MASK VC4_MASK(15, 12) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_SHIFT 8 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_MASK VC4_MASK(11, 8) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_SHIFT 4 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_MASK VC4_MASK(7, 4) +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_MASK VC4_MASK(3, 0) + +#define VC4_HDMI_TX_PHY_CTL_3_RP_SHIFT 17 +#define VC4_HDMI_TX_PHY_CTL_3_RP_MASK VC4_MASK(19, 17) +#define VC4_HDMI_TX_PHY_CTL_3_RZ_SHIFT 12 +#define VC4_HDMI_TX_PHY_CTL_3_RZ_MASK VC4_MASK(16, 12) +#define VC4_HDMI_TX_PHY_CTL_3_CP1_SHIFT 10 +#define VC4_HDMI_TX_PHY_CTL_3_CP1_MASK VC4_MASK(11, 10) +#define VC4_HDMI_TX_PHY_CTL_3_CP_SHIFT 8 +#define VC4_HDMI_TX_PHY_CTL_3_CP_MASK VC4_MASK(9, 8) +#define VC4_HDMI_TX_PHY_CTL_3_CZ_SHIFT 6 +#define VC4_HDMI_TX_PHY_CTL_3_CZ_MASK VC4_MASK(7, 6) +#define VC4_HDMI_TX_PHY_CTL_3_ICP_SHIFT 0 +#define VC4_HDMI_TX_PHY_CTL_3_ICP_MASK VC4_MASK(5, 0) + +#define VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE BIT(13) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VC_RANGE_EN BIT(12) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_LOW BIT(11) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_HIGH BIT(10) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_SHIFT 9 +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_MASK 
VC4_MASK(9, 9) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_FB_DIV2 BIT(8) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_POST_DIV2 BIT(7) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN BIT(6) +#define VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK BIT(5) + +#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_SHIFT 16 +#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_MASK VC4_MASK(27, 16) +#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_SHIFT 14 +#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_MASK VC4_MASK(15, 14) +#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE BIT(13) +#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_SHIFT 11 +#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_MASK VC4_MASK(12, 11) + +#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_SHIFT 8 +#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_MASK VC4_MASK(15, 8) + +#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_SHIFT 0 +#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_MASK VC4_MASK(3, 0) + +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_MASK VC4_MASK(13, 12) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_SHIFT 12 +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_MASK VC4_MASK(9, 8) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_SHIFT 8 +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_MASK VC4_MASK(5, 4) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_SHIFT 4 +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_MASK VC4_MASK(1, 0) +#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_SHIFT 0 + +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK VC4_MASK(27, 0) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_SHIFT 0 + +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK VC4_MASK(27, 0) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_SHIFT 0 + +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_MASK VC4_MASK(31, 16) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_SHIFT 16 +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_MASK VC4_MASK(15, 0) +#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_SHIFT 0 + +#define VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS BIT(19) +#define VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR BIT(17) +#define VC4_HDMI_RM_CONTROL_FREE_RUN BIT(4) + +#define VC4_HDMI_RM_OFFSET_ONLY BIT(31) +#define VC4_HDMI_RM_OFFSET_OFFSET_SHIFT 0 +#define VC4_HDMI_RM_OFFSET_OFFSET_MASK VC4_MASK(30, 0) + +#define VC4_HDMI_RM_FORMAT_SHIFT_SHIFT 24 +#define VC4_HDMI_RM_FORMAT_SHIFT_MASK VC4_MASK(25, 24) + +#define OSCILLATOR_FREQUENCY 54000000 + +void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode) +{ + /* PHY should be in reset, like + * vc4_hdmi_encoder_disable() does. 
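+ * Writing 0xf << 16 presumably asserts one reset bit per TMDS lane; + * writing 0 afterwards releases them.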
+ */ + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0); +} + +void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); +} + +void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_CTL_0, + HDMI_READ(HDMI_TX_PHY_CTL_0) & + ~VC4_HDMI_TX_PHY_RNG_PWRDN); +} + +void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_CTL_0, + HDMI_READ(HDMI_TX_PHY_CTL_0) | + VC4_HDMI_TX_PHY_RNG_PWRDN); +} + +static unsigned long long +phy_get_vco_freq(unsigned long long clock, u8 *vco_sel, u8 *vco_div) +{ + unsigned long long vco_freq = clock; + unsigned int _vco_div = 0; + unsigned int _vco_sel = 0; + + while (vco_freq < 3000000000ULL) { + _vco_div++; + vco_freq = clock * _vco_div * 10; + } + + if (vco_freq > 4500000000ULL) + _vco_sel = 1; + + *vco_sel = _vco_sel; + *vco_div = _vco_div; + + return vco_freq; +} + +static u8 phy_get_cp_current(unsigned long vco_freq) +{ + if (vco_freq < 3700000000ULL) + return 0x1c; + + return 0x18; +} + +static u32 phy_get_rm_offset(unsigned long long vco_freq) +{ + unsigned long long fref = OSCILLATOR_FREQUENCY; + u64 offset = 0; + + /* RM offset is stored as 9.22 format */ + offset = vco_freq * 2; + offset = offset << 22; + do_div(offset, fref); + offset >>= 2; + + return offset; +} + +static u8 phy_get_vco_gain(unsigned long long vco_freq) +{ + if (vco_freq < 3350000000ULL) + return 0xf; + + if (vco_freq < 3700000000ULL) + return 0xc; + + if (vco_freq < 4050000000ULL) + return 0x6; + + if (vco_freq < 4800000000ULL) + return 0x5; + + if (vco_freq < 5200000000ULL) + return 0x7; + + return 0x2; +} + +struct phy_lane_settings { + struct { + u8 preemphasis; + u8 main_driver; + } amplitude; + + u8 res_sel_data; + u8 term_res_sel_data; +}; + +struct phy_settings { + unsigned long long min_rate; + unsigned long long max_rate; + struct phy_lane_settings channel[3]; + struct phy_lane_settings clock; +}; + +static const struct phy_settings vc5_hdmi_phy_settings[] = { + { + 0, 50000000, + { + {{0x0, 0x0A}, 0x12, 0x0}, + {{0x0, 0x0A}, 0x12, 0x0}, + {{0x0, 0x0A}, 0x12, 0x0} + }, + {{0x0, 0x0A}, 0x18, 0x0}, + }, + { + 50000001, 75000000, + { + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0} + }, + {{0x0, 0x0C}, 0x18, 0x3}, + }, + { + 75000001, 165000000, + { + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0}, + {{0x0, 0x09}, 0x12, 0x0} + }, + {{0x0, 0x0C}, 0x18, 0x3}, + }, + { + 165000001, 250000000, + { + {{0x0, 0x0F}, 0x12, 0x1}, + {{0x0, 0x0F}, 0x12, 0x1}, + {{0x0, 0x0F}, 0x12, 0x1} + }, + {{0x0, 0x0C}, 0x18, 0x3}, + }, + { + 250000001, 340000000, + { + {{0x2, 0x0D}, 0x12, 0x1}, + {{0x2, 0x0D}, 0x12, 0x1}, + {{0x2, 0x0D}, 0x12, 0x1} + }, + {{0x0, 0x0C}, 0x18, 0xF}, + }, + { + 340000001, 450000000, + { + {{0x0, 0x1B}, 0x12, 0xF}, + {{0x0, 0x1B}, 0x12, 0xF}, + {{0x0, 0x1B}, 0x12, 0xF} + }, + {{0x0, 0x0A}, 0x12, 0xF}, + }, + { + 450000001, 600000000, + { + {{0x0, 0x1C}, 0x12, 0xF}, + {{0x0, 0x1C}, 0x12, 0xF}, + {{0x0, 0x1C}, 0x12, 0xF} + }, + {{0x0, 0x0B}, 0x13, 0xF}, + }, +}; + +static const struct phy_settings *phy_get_settings(unsigned long long tmds_rate) +{ + unsigned int count = ARRAY_SIZE(vc5_hdmi_phy_settings); + unsigned int i; + + for (i = 0; i < count; i++) { + const struct phy_settings *s = &vc5_hdmi_phy_settings[i]; + + if (tmds_rate >= s->min_rate && tmds_rate <= s->max_rate) + return s; + } + + /* + * If the pixel clock exceeds our max setting, try the max + * setting anyway. 
+ */ + return &vc5_hdmi_phy_settings[count - 1]; +} + +static const struct phy_lane_settings * +phy_get_channel_settings(enum vc4_hdmi_phy_channel chan, + unsigned long long tmds_rate) +{ + const struct phy_settings *settings = phy_get_settings(tmds_rate); + + if (chan == PHY_LANE_CK) + return &settings->clock; + + return &settings->channel[chan]; +} + +static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f); + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10)); +} + +void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode) +{ + const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings; + const struct vc4_hdmi_variant *variant = vc4_hdmi->variant; + unsigned long long pixel_freq = mode->clock * 1000; + unsigned long long vco_freq; + unsigned char word_sel; + u8 vco_sel, vco_div; + + vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div); + + vc5_hdmi_reset_phy(vc4_hdmi); + + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, + VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, + HDMI_READ(HDMI_TX_PHY_RESET_CTL) & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET & + ~VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET); + + HDMI_WRITE(HDMI_RM_CONTROL, + HDMI_READ(HDMI_RM_CONTROL) | + VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS | + VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR | + VC4_HDMI_RM_CONTROL_FREE_RUN); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, + (HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1) & + ~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK) | + VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, + (HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2) & + ~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK) | + VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT)); + + HDMI_WRITE(HDMI_RM_OFFSET, + VC4_SET_FIELD(phy_get_rm_offset(vco_freq), + VC4_HDMI_RM_OFFSET_OFFSET) | + VC4_HDMI_RM_OFFSET_ONLY); + + HDMI_WRITE(HDMI_TX_PHY_CLK_DIV, + VC4_SET_FIELD(vco_div, VC4_HDMI_TX_PHY_CLK_DIV_VCO)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, + VC4_SET_FIELD(0xe147, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD) | + VC4_SET_FIELD(0xe14, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_0, + VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK | + VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN | + VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE | + VC4_SET_FIELD(vco_sel, VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_1, + HDMI_READ(HDMI_TX_PHY_PLL_CTL_1) | + VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE | + VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY) | + VC4_SET_FIELD(0x8a, VC4_HDMI_TX_PHY_PLL_CTL_1_CPP)); + + HDMI_WRITE(HDMI_RM_FORMAT, + HDMI_READ(HDMI_RM_FORMAT) | + VC4_SET_FIELD(2, VC4_HDMI_RM_FORMAT_SHIFT)); + + HDMI_WRITE(HDMI_TX_PHY_PLL_CFG, + HDMI_READ(HDMI_TX_PHY_PLL_CFG) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CFG_PDIV)); + + if (pixel_freq >= 340000000) + word_sel = 3; + else + word_sel = 0; + HDMI_WRITE(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, word_sel); + + HDMI_WRITE(HDMI_TX_PHY_CTL_3, + VC4_SET_FIELD(phy_get_cp_current(vco_freq), + VC4_HDMI_TX_PHY_CTL_3_ICP) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP) | + VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP1) | + 
VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_CTL_3_CZ) | + VC4_SET_FIELD(4, VC4_HDMI_TX_PHY_CTL_3_RP) | + VC4_SET_FIELD(6, VC4_HDMI_TX_PHY_CTL_3_RZ)); + + chan0_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_0], + pixel_freq); + chan1_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_1], + pixel_freq); + chan2_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_2], + pixel_freq); + clock_settings = + phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_CK], + pixel_freq); + + HDMI_WRITE(HDMI_TX_PHY_CTL_0, + VC4_SET_FIELD(chan0_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP) | + VC4_SET_FIELD(chan0_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV) | + VC4_SET_FIELD(chan1_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP) | + VC4_SET_FIELD(chan1_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV) | + VC4_SET_FIELD(chan2_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP) | + VC4_SET_FIELD(chan2_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV) | + VC4_SET_FIELD(clock_settings->amplitude.preemphasis, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP) | + VC4_SET_FIELD(clock_settings->amplitude.main_driver, + VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV)); + + HDMI_WRITE(HDMI_TX_PHY_CTL_1, + HDMI_READ(HDMI_TX_PHY_CTL_1) | + VC4_SET_FIELD(chan0_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0) | + VC4_SET_FIELD(chan1_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1) | + VC4_SET_FIELD(chan2_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2) | + VC4_SET_FIELD(clock_settings->res_sel_data, + VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK)); + + HDMI_WRITE(HDMI_TX_PHY_CTL_2, + VC4_SET_FIELD(chan0_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0) | + VC4_SET_FIELD(chan1_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1) | + VC4_SET_FIELD(chan2_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2) | + VC4_SET_FIELD(clock_settings->term_res_sel_data, + VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK) | + VC4_SET_FIELD(phy_get_vco_gain(vco_freq), + VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN)); + + HDMI_WRITE(HDMI_TX_PHY_CHANNEL_SWAP, + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_0], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL) | + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_1], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL) | + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_2], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL) | + VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_CK], + VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL)); + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, + HDMI_READ(HDMI_TX_PHY_RESET_CTL) & + ~(VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB | + VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB)); + + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, + HDMI_READ(HDMI_TX_PHY_RESET_CTL) | + VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB | + VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB); +} + +void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) +{ + vc5_hdmi_reset_phy(vc4_hdmi); +} + +void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, + HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) & + ~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); +} + +void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) +{ + HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, + HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) | + VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); +}
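For readers tracing the PLL programming above, a worked example helps. The numbers below are derived from phy_get_vco_freq(), phy_get_rm_offset() and OSCILLATOR_FREQUENCY as shown in this patch, not stated anywhere in it, so treat them strictly as an illustration:

/*
 * Illustration only: 1080p60 has a 148.5 MHz pixel clock.
 * phy_get_vco_freq() multiplies the clock by vco_div * 10 until the
 * VCO reaches 3 GHz:
 *
 *   vco_div = 1:  1.485 GHz  (< 3 GHz, keep going)
 *   vco_div = 2:  2.970 GHz  (< 3 GHz, keep going)
 *   vco_div = 3:  4.455 GHz  (>= 3 GHz, stop)
 *
 * 4.455 GHz is not above 4.5 GHz, so vco_sel stays 0. With the
 * 54 MHz oscillator, phy_get_rm_offset() then builds the 9.22
 * fixed-point ratio:
 *
 *   2 * 4455000000 / 54000000 = 165 exactly, so
 *   offset = (165 << 22) >> 2 = 165 << 20 = 0x0a500000.
 */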
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h new file mode 100644 index 000000000000..7c6b4818f245 --- /dev/null +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -0,0 +1,442 @@ +#ifndef _VC4_HDMI_REGS_H_ +#define _VC4_HDMI_REGS_H_ + +#include "vc4_hdmi.h" + +#define VC4_HDMI_PACKET_STRIDE 0x24 + +enum vc4_hdmi_regs { + VC4_INVALID = 0, + VC4_HDMI, + VC4_HD, + VC5_CEC, + VC5_CSC, + VC5_DVP, + VC5_PHY, + VC5_RAM, + VC5_RM, +}; + +enum vc4_hdmi_field { + HDMI_AUDIO_PACKET_CONFIG, + HDMI_CEC_CNTRL_1, + HDMI_CEC_CNTRL_2, + HDMI_CEC_CNTRL_3, + HDMI_CEC_CNTRL_4, + HDMI_CEC_CNTRL_5, + HDMI_CEC_CPU_CLEAR, + HDMI_CEC_CPU_MASK_CLEAR, + HDMI_CEC_CPU_MASK_SET, + HDMI_CEC_CPU_MASK_STATUS, + HDMI_CEC_CPU_STATUS, + + /* + * Transmit data, first byte is low byte of the 32-bit reg. + * MSB of each byte transmitted first. + */ + HDMI_CEC_RX_DATA_1, + HDMI_CEC_RX_DATA_2, + HDMI_CEC_RX_DATA_3, + HDMI_CEC_RX_DATA_4, + HDMI_CEC_TX_DATA_1, + HDMI_CEC_TX_DATA_2, + HDMI_CEC_TX_DATA_3, + HDMI_CEC_TX_DATA_4, + HDMI_CLOCK_STOP, + HDMI_CORE_REV, + HDMI_CRP_CFG, + HDMI_CSC_12_11, + HDMI_CSC_14_13, + HDMI_CSC_22_21, + HDMI_CSC_24_23, + HDMI_CSC_32_31, + HDMI_CSC_34_33, + HDMI_CSC_CTL, + + /* + * 20-bit fields containing CTS values to be transmitted if + * !EXTERNAL_CTS_EN + */ + HDMI_CTS_0, + HDMI_CTS_1, + HDMI_DVP_CTL, + HDMI_FIFO_CTL, + HDMI_FRAME_COUNT, + HDMI_HORZA, + HDMI_HORZB, + HDMI_HOTPLUG, + HDMI_HOTPLUG_INT, + + /* + * 3 bits per field, where each field maps from that + * corresponding MAI bus channel to the given HDMI channel. + */ + HDMI_MAI_CHANNEL_MAP, + HDMI_MAI_CONFIG, + HDMI_MAI_CTL, + + /* + * Register for DMAing in audio data to be transported over + * the MAI bus to the Falcon core. + */ + HDMI_MAI_DATA, + + /* Format header to be placed on the MAI data. Unused. */ + HDMI_MAI_FMT, + + /* Last received format word on the MAI bus.
*/ + HDMI_MAI_FORMAT, + HDMI_MAI_SMP, + HDMI_MAI_THR, + HDMI_M_CTL, + HDMI_RAM_PACKET_CONFIG, + HDMI_RAM_PACKET_START, + HDMI_RAM_PACKET_STATUS, + HDMI_RM_CONTROL, + HDMI_RM_FORMAT, + HDMI_RM_OFFSET, + HDMI_SCHEDULER_CONTROL, + HDMI_SW_RESET_CONTROL, + HDMI_TX_PHY_CHANNEL_SWAP, + HDMI_TX_PHY_CLK_DIV, + HDMI_TX_PHY_CTL_0, + HDMI_TX_PHY_CTL_1, + HDMI_TX_PHY_CTL_2, + HDMI_TX_PHY_CTL_3, + HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, + HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, + HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, + HDMI_TX_PHY_PLL_CFG, + HDMI_TX_PHY_PLL_CTL_0, + HDMI_TX_PHY_PLL_CTL_1, + HDMI_TX_PHY_POWERDOWN_CTL, + HDMI_TX_PHY_RESET_CTL, + HDMI_TX_PHY_TMDS_CLK_WORD_SEL, + HDMI_VEC_INTERFACE_XBAR, + HDMI_VERTA0, + HDMI_VERTA1, + HDMI_VERTB0, + HDMI_VERTB1, + HDMI_VID_CTL, +}; + +struct vc4_hdmi_register { + char *name; + enum vc4_hdmi_regs reg; + unsigned int offset; +}; + +#define _VC4_REG(_base, _reg, _offset) \ + [_reg] = { \ + .name = #_reg, \ + .reg = _base, \ + .offset = _offset, \ + } + +#define VC4_HD_REG(reg, offset) _VC4_REG(VC4_HD, reg, offset) +#define VC4_HDMI_REG(reg, offset) _VC4_REG(VC4_HDMI, reg, offset) +#define VC5_CEC_REG(reg, offset) _VC4_REG(VC5_CEC, reg, offset) +#define VC5_CSC_REG(reg, offset) _VC4_REG(VC5_CSC, reg, offset) +#define VC5_DVP_REG(reg, offset) _VC4_REG(VC5_DVP, reg, offset) +#define VC5_PHY_REG(reg, offset) _VC4_REG(VC5_PHY, reg, offset) +#define VC5_RAM_REG(reg, offset) _VC4_REG(VC5_RAM, reg, offset) +#define VC5_RM_REG(reg, offset) _VC4_REG(VC5_RM, reg, offset) + +static const struct vc4_hdmi_register vc4_hdmi_fields[] = { + VC4_HD_REG(HDMI_M_CTL, 0x000c), + VC4_HD_REG(HDMI_MAI_CTL, 0x0014), + VC4_HD_REG(HDMI_MAI_THR, 0x0018), + VC4_HD_REG(HDMI_MAI_FMT, 0x001c), + VC4_HD_REG(HDMI_MAI_DATA, 0x0020), + VC4_HD_REG(HDMI_MAI_SMP, 0x002c), + VC4_HD_REG(HDMI_VID_CTL, 0x0038), + VC4_HD_REG(HDMI_CSC_CTL, 0x0040), + VC4_HD_REG(HDMI_CSC_12_11, 0x0044), + VC4_HD_REG(HDMI_CSC_14_13, 0x0048), + VC4_HD_REG(HDMI_CSC_22_21, 0x004c), + VC4_HD_REG(HDMI_CSC_24_23, 0x0050), + VC4_HD_REG(HDMI_CSC_32_31, 0x0054), + VC4_HD_REG(HDMI_CSC_34_33, 0x0058), + VC4_HD_REG(HDMI_FRAME_COUNT, 0x0068), + + VC4_HDMI_REG(HDMI_CORE_REV, 0x0000), + VC4_HDMI_REG(HDMI_SW_RESET_CONTROL, 0x0004), + VC4_HDMI_REG(HDMI_HOTPLUG_INT, 0x0008), + VC4_HDMI_REG(HDMI_HOTPLUG, 0x000c), + VC4_HDMI_REG(HDMI_FIFO_CTL, 0x005c), + VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0090), + VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0094), + VC4_HDMI_REG(HDMI_MAI_FORMAT, 0x0098), + VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x009c), + VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x00a0), + VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x00a4), + VC4_HDMI_REG(HDMI_CRP_CFG, 0x00a8), + VC4_HDMI_REG(HDMI_CTS_0, 0x00ac), + VC4_HDMI_REG(HDMI_CTS_1, 0x00b0), + VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x00c0), + VC4_HDMI_REG(HDMI_HORZA, 0x00c4), + VC4_HDMI_REG(HDMI_HORZB, 0x00c8), + VC4_HDMI_REG(HDMI_VERTA0, 0x00cc), + VC4_HDMI_REG(HDMI_VERTB0, 0x00d0), + VC4_HDMI_REG(HDMI_VERTA1, 0x00d4), + VC4_HDMI_REG(HDMI_VERTB1, 0x00d8), + VC4_HDMI_REG(HDMI_CEC_CNTRL_1, 0x00e8), + VC4_HDMI_REG(HDMI_CEC_CNTRL_2, 0x00ec), + VC4_HDMI_REG(HDMI_CEC_CNTRL_3, 0x00f0), + VC4_HDMI_REG(HDMI_CEC_CNTRL_4, 0x00f4), + VC4_HDMI_REG(HDMI_CEC_CNTRL_5, 0x00f8), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_1, 0x00fc), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_2, 0x0100), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_3, 0x0104), + VC4_HDMI_REG(HDMI_CEC_TX_DATA_4, 0x0108), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_1, 0x010c), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_2, 0x0110), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_3, 0x0114), + VC4_HDMI_REG(HDMI_CEC_RX_DATA_4, 0x0118), + 
VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0), + VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4), + VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340), + VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348), + VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c), + VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350), + VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354), + VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400), +}; + +static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = { + VC4_HD_REG(HDMI_DVP_CTL, 0x0000), + VC4_HD_REG(HDMI_MAI_CTL, 0x0010), + VC4_HD_REG(HDMI_MAI_THR, 0x0014), + VC4_HD_REG(HDMI_MAI_FMT, 0x0018), + VC4_HD_REG(HDMI_MAI_DATA, 0x001c), + VC4_HD_REG(HDMI_MAI_SMP, 0x0020), + VC4_HD_REG(HDMI_VID_CTL, 0x0044), + VC4_HD_REG(HDMI_FRAME_COUNT, 0x0060), + + VC4_HDMI_REG(HDMI_FIFO_CTL, 0x074), + VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0b8), + VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0bc), + VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0c4), + VC4_HDMI_REG(HDMI_CRP_CFG, 0x0c8), + VC4_HDMI_REG(HDMI_CTS_0, 0x0cc), + VC4_HDMI_REG(HDMI_CTS_1, 0x0d0), + VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e0), + VC4_HDMI_REG(HDMI_HORZA, 0x0e4), + VC4_HDMI_REG(HDMI_HORZB, 0x0e8), + VC4_HDMI_REG(HDMI_VERTA0, 0x0ec), + VC4_HDMI_REG(HDMI_VERTB0, 0x0f0), + VC4_HDMI_REG(HDMI_VERTA1, 0x0f4), + VC4_HDMI_REG(HDMI_VERTB1, 0x0f8), + VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c), + VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0), + VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8), + + VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc), + VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0), + + VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000), + VC5_PHY_REG(HDMI_TX_PHY_POWERDOWN_CTL, 0x004), + VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008), + VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c), + VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010), + VC5_PHY_REG(HDMI_TX_PHY_CTL_3, 0x014), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_0, 0x01c), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_1, 0x020), + VC5_PHY_REG(HDMI_TX_PHY_CLK_DIV, 0x028), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x034), + VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x044), + VC5_PHY_REG(HDMI_TX_PHY_CHANNEL_SWAP, 0x04c), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, 0x050), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, 0x054), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, 0x05c), + + VC5_RM_REG(HDMI_RM_CONTROL, 0x000), + VC5_RM_REG(HDMI_RM_OFFSET, 0x018), + VC5_RM_REG(HDMI_RM_FORMAT, 0x01c), + + VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000), + + VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010), + VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014), + VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018), + VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c), + VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020), + VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028), + VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c), + VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030), + VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034), + VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038), + VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c), + VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040), + VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044), + + VC5_CSC_REG(HDMI_CSC_CTL, 0x000), + VC5_CSC_REG(HDMI_CSC_12_11, 0x004), + VC5_CSC_REG(HDMI_CSC_14_13, 0x008), + VC5_CSC_REG(HDMI_CSC_22_21, 0x00c), + VC5_CSC_REG(HDMI_CSC_24_23, 0x010), + VC5_CSC_REG(HDMI_CSC_32_31, 0x014), + VC5_CSC_REG(HDMI_CSC_34_33, 0x018), +}; + +static const struct vc4_hdmi_register vc5_hdmi_hdmi1_fields[] = { + VC4_HD_REG(HDMI_DVP_CTL, 0x0000), + VC4_HD_REG(HDMI_MAI_CTL, 0x0030), + VC4_HD_REG(HDMI_MAI_THR, 0x0034), + VC4_HD_REG(HDMI_MAI_FMT, 0x0038), + VC4_HD_REG(HDMI_MAI_DATA, 0x003c), + VC4_HD_REG(HDMI_MAI_SMP, 0x0040), + VC4_HD_REG(HDMI_VID_CTL, 0x0048), + VC4_HD_REG(HDMI_FRAME_COUNT, 0x0064), + +
VC4_HDMI_REG(HDMI_FIFO_CTL, 0x074), + VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0b8), + VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0bc), + VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0c4), + VC4_HDMI_REG(HDMI_CRP_CFG, 0x0c8), + VC4_HDMI_REG(HDMI_CTS_0, 0x0cc), + VC4_HDMI_REG(HDMI_CTS_1, 0x0d0), + VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e0), + VC4_HDMI_REG(HDMI_HORZA, 0x0e4), + VC4_HDMI_REG(HDMI_HORZB, 0x0e8), + VC4_HDMI_REG(HDMI_VERTA0, 0x0ec), + VC4_HDMI_REG(HDMI_VERTB0, 0x0f0), + VC4_HDMI_REG(HDMI_VERTA1, 0x0f4), + VC4_HDMI_REG(HDMI_VERTB1, 0x0f8), + VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c), + VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0), + VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8), + + VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc), + VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0), + + VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000), + VC5_PHY_REG(HDMI_TX_PHY_POWERDOWN_CTL, 0x004), + VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008), + VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c), + VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010), + VC5_PHY_REG(HDMI_TX_PHY_CTL_3, 0x014), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_0, 0x01c), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_1, 0x020), + VC5_PHY_REG(HDMI_TX_PHY_CLK_DIV, 0x028), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x034), + VC5_PHY_REG(HDMI_TX_PHY_CHANNEL_SWAP, 0x04c), + VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x044), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, 0x050), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, 0x054), + VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, 0x05c), + + VC5_RM_REG(HDMI_RM_CONTROL, 0x000), + VC5_RM_REG(HDMI_RM_OFFSET, 0x018), + VC5_RM_REG(HDMI_RM_FORMAT, 0x01c), + + VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000), + + VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010), + VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014), + VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018), + VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c), + VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020), + VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028), + VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c), + VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030), + VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034), + VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038), + VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c), + VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040), + VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044), + + VC5_CSC_REG(HDMI_CSC_CTL, 0x000), + VC5_CSC_REG(HDMI_CSC_12_11, 0x004), + VC5_CSC_REG(HDMI_CSC_14_13, 0x008), + VC5_CSC_REG(HDMI_CSC_22_21, 0x00c), + VC5_CSC_REG(HDMI_CSC_24_23, 0x010), + VC5_CSC_REG(HDMI_CSC_32_31, 0x014), + VC5_CSC_REG(HDMI_CSC_34_33, 0x018), +}; + +static inline +void __iomem *__vc4_hdmi_get_field_base(struct vc4_hdmi *hdmi, + enum vc4_hdmi_regs reg) +{ + switch (reg) { + case VC4_HD: + return hdmi->hd_regs; + + case VC4_HDMI: + return hdmi->hdmicore_regs; + + case VC5_CSC: + return hdmi->csc_regs; + + case VC5_CEC: + return hdmi->cec_regs; + + case VC5_DVP: + return hdmi->dvp_regs; + + case VC5_PHY: + return hdmi->phy_regs; + + case VC5_RAM: + return hdmi->ram_regs; + + case VC5_RM: + return hdmi->rm_regs; + + default: + return NULL; + } + + return NULL; +} + +static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi, + enum vc4_hdmi_field reg) +{ + const struct vc4_hdmi_register *field; + const struct vc4_hdmi_variant *variant = hdmi->variant; + void __iomem *base; + + if (reg >= variant->num_registers) { + dev_warn(&hdmi->pdev->dev, + "Invalid register ID %u\n", reg); + return 0; + } + + field = &variant->registers[reg]; + base = __vc4_hdmi_get_field_base(hdmi, field->reg); + if (!base) { + dev_warn(&hdmi->pdev->dev, + "Unknown register ID %u\n", reg); + return 0; + } + + return readl(base + field->offset); +} +#define HDMI_READ(reg) 
vc4_hdmi_read(vc4_hdmi, reg) + +static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi, + enum vc4_hdmi_field reg, + u32 value) +{ + const struct vc4_hdmi_register *field; + const struct vc4_hdmi_variant *variant = hdmi->variant; + void __iomem *base; + + if (reg >= variant->num_registers) { + dev_warn(&hdmi->pdev->dev, + "Invalid register ID %u\n", reg); + return; + } + + field = &variant->registers[reg]; + base = __vc4_hdmi_get_field_base(hdmi, field->reg); + if (!base) + return; + + writel(value, base + field->offset); +} +#define HDMI_WRITE(reg, val) vc4_hdmi_write(vc4_hdmi, reg, val) + +#endif /* _VC4_HDMI_REGS_H_ */ diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2d2bf59c0503..0bd5ea435120 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -19,6 +19,8 @@ * each CRTC. */ +#include <linux/bitfield.h> +#include <linux/clk.h> #include <linux/component.h> #include <linux/platform_device.h> @@ -160,6 +162,7 @@ static void vc4_hvs_lut_load(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); u32 i; /* The LUT memory is laid out with each HVS channel in order, @@ -168,7 +171,7 @@ static void vc4_hvs_lut_load(struct drm_crtc *crtc) */ HVS_WRITE(SCALER_GAMADDR, SCALER_GAMADDR_AUTOINC | - (vc4_crtc->channel * 3 * crtc->gamma_size)); + (vc4_state->assigned_channel * 3 * crtc->gamma_size)); for (i = 0; i < crtc->gamma_size; i++) HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]); @@ -194,6 +197,135 @@ static void vc4_hvs_update_gamma_lut(struct drm_crtc *crtc) vc4_hvs_lut_load(crtc); } +int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + u32 reg; + int ret; + + if (!vc4->hvs->hvs5) + return output; + + switch (output) { + case 0: + return 0; + + case 1: + return 1; + + case 2: + reg = HVS_READ(SCALER_DISPECTRL); + ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); + if (ret == 0) + return 2; + + return 0; + + case 3: + reg = HVS_READ(SCALER_DISPCTRL); + ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; + + return ret; + + case 4: + reg = HVS_READ(SCALER_DISPEOLN); + ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; + + return ret; + + case 5: + reg = HVS_READ(SCALER_DISPDITHER); + ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; + + return ret; + + default: + return -EPIPE; + } +} + +static int vc4_hvs_init_channel(struct vc4_dev *vc4, struct drm_crtc *crtc, + struct drm_display_mode *mode, bool oneshot) +{ + struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); + unsigned int chan = vc4_crtc_state->assigned_channel; + bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; + u32 dispbkgndx; + u32 dispctrl; + + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); + HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); + + /* Turn on the scaler, which will wait for vstart to start + * compositing. + * When feeding the transposer, we should operate in oneshot + * mode. + */ + dispctrl = SCALER_DISPCTRLX_ENABLE; + + if (!vc4->hvs->hvs5) + dispctrl |= VC4_SET_FIELD(mode->hdisplay, + SCALER_DISPCTRLX_WIDTH) | + VC4_SET_FIELD(mode->vdisplay, + SCALER_DISPCTRLX_HEIGHT) | + (oneshot ? 
SCALER_DISPCTRLX_ONESHOT : 0); + else + dispctrl |= VC4_SET_FIELD(mode->hdisplay, + SCALER5_DISPCTRLX_WIDTH) | + VC4_SET_FIELD(mode->vdisplay, + SCALER5_DISPCTRLX_HEIGHT) | + (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0); + + HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl); + + dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan)); + dispbkgndx &= ~SCALER_DISPBKGND_GAMMA; + dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE; + + HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | + SCALER_DISPBKGND_AUTOHS | + ((!vc4->hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) | + (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); + + /* Reload the LUT, since the SRAMs would have been disabled if + * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once. + */ + vc4_hvs_lut_load(crtc); + + return 0; +} + +void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)) + return; + + HVS_WRITE(SCALER_DISPCTRLX(chan), + HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), + HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE); + + /* Once we leave, the scaler should be disabled and its fifo empty. */ + WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); + + WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)), + SCALER_DISPSTATX_MODE) != + SCALER_DISPSTATX_MODE_DISABLED); + + WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & + (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != + SCALER_DISPSTATX_EMPTY); +} + int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -248,12 +380,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc) crtc->state->event = NULL; } - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel), vc4_state->mm.start); spin_unlock_irqrestore(&dev->event_lock, flags); } else { - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel), vc4_state->mm.start); } } @@ -263,67 +395,31 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); struct drm_display_mode *mode = &crtc->state->adjusted_mode; bool oneshot = vc4_state->feed_txp; - u32 dispctrl; vc4_hvs_update_dlist(crtc); - - /* Turn on the scaler, which will wait for vstart to start - * compositing. - * When feeding the transposer, we should operate in oneshot - * mode. - */ - dispctrl = SCALER_DISPCTRLX_ENABLE; - dispctrl |= VC4_SET_FIELD(mode->hdisplay, - SCALER_DISPCTRLX_WIDTH) | - VC4_SET_FIELD(mode->vdisplay, - SCALER_DISPCTRLX_HEIGHT) | - (oneshot ?
SCALER_DISPCTRLX_ONESHOT : 0); - - HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel), dispctrl); + vc4_hvs_init_channel(vc4, crtc, mode, oneshot); } void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct drm_device *dev = crtc->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - u32 chan = vc4_crtc->channel; + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state); + unsigned int chan = vc4_state->assigned_channel; - if (HVS_READ(SCALER_DISPCTRLX(chan)) & - SCALER_DISPCTRLX_ENABLE) { - HVS_WRITE(SCALER_DISPCTRLX(chan), - SCALER_DISPCTRLX_RESET); - - /* While the docs say that reset is self-clearing, it - * seems it doesn't actually. - */ - HVS_WRITE(SCALER_DISPCTRLX(chan), 0); - } - - /* Once we leave, the scaler should be disabled and its fifo empty. */ - - WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); - - WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)), - SCALER_DISPSTATX_MODE) != - SCALER_DISPSTATX_MODE_DISABLED); - - WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & - (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != - SCALER_DISPSTATX_EMPTY); + vc4_hvs_stop_channel(dev, chan); } void vc4_hvs_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); struct drm_plane *plane; struct vc4_plane_state *vc4_plane_state; @@ -365,8 +461,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, /* This sets a black background color fill, as is the case * with other DRM drivers. */ - HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), - HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)) | + HVS_WRITE(SCALER_DISPBKGNDX(vc4_state->assigned_channel), + HVS_READ(SCALER_DISPBKGNDX(vc4_state->assigned_channel)) | SCALER_DISPBKGND_FILL); /* Only update DISPLIST if the CRTC was already running and is not @@ -380,7 +476,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, vc4_hvs_update_dlist(crtc); if (crtc->state->color_mgmt_changed) { - u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)); + u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_state->assigned_channel)); if (crtc->state->gamma_lut) { vc4_hvs_update_gamma_lut(crtc); @@ -392,7 +488,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, */ dispbkgndx &= ~SCALER_DISPBKGND_GAMMA; } - HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx); + HVS_WRITE(SCALER_DISPBKGNDX(vc4_state->assigned_channel), dispbkgndx); } if (debug_dump_regs) { @@ -401,50 +497,6 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, } } -void vc4_hvs_mode_set_nofb(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); - struct drm_display_mode *mode = &crtc->state->adjusted_mode; - bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; - - if (vc4_crtc->data->hvs_channel == 2) { - u32 dispctrl; - u32 dsp3_mux; - - /* - * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to - * FIFO X'. - * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. - * - * DSP3 is connected to FIFO2 unless the transposer is - * enabled. 
In this case, FIFO 2 is directly accessed by the - * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 - * route. - */ - if (vc4_state->feed_txp) - dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); - else - dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); - - dispctrl = HVS_READ(SCALER_DISPCTRL) & - ~SCALER_DISPCTRL_DSP3_MUX_MASK; - HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); - } - - HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), - SCALER_DISPBKGND_AUTOHS | - SCALER_DISPBKGND_GAMMA | - (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); - - /* Reload the LUT, since the SRAMs would have been disabled if - * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once. - */ - vc4_hvs_lut_load(crtc); -} - void vc4_hvs_mask_underrun(struct drm_device *dev, int channel) { struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -521,6 +573,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) hvs->pdev = pdev; + if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs")) + hvs->hvs5 = true; + hvs->regs = vc4_ioremap_regs(pdev, 0); if (IS_ERR(hvs->regs)) return PTR_ERR(hvs->regs); @@ -529,7 +584,24 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) hvs->regset.regs = hvs_regs; hvs->regset.nregs = ARRAY_SIZE(hvs_regs); - hvs->dlist = hvs->regs + SCALER_DLIST_START; + if (hvs->hvs5) { + hvs->core_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hvs->core_clk)) { + dev_err(&pdev->dev, "Couldn't get core clock\n"); + return PTR_ERR(hvs->core_clk); + } + + ret = clk_prepare_enable(hvs->core_clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the core clock\n"); + return ret; + } + } + + if (!hvs->hvs5) + hvs->dlist = hvs->regs + SCALER_DLIST_START; + else + hvs->dlist = hvs->regs + SCALER5_DLIST_START; spin_lock_init(&hvs->mm_lock); @@ -547,7 +619,12 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * between planes when they don't overlap on the screen, but * for now we just allocate globally. */ - drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + if (!hvs->hvs5) + /* 96kB */ + drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + else + /* 70k words */ + drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024); /* Upload filter kernels. We only have the one for now, so we * keep it around for the lifetime of the driver. @@ -605,6 +682,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master, { struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = drm->dev_private; + struct vc4_hvs *hvs = vc4->hvs; if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter)) drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter); @@ -612,6 +690,8 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master, drm_mm_takedown(&vc4->hvs->dlist_mm); drm_mm_takedown(&vc4->hvs->lbm_mm); + clk_disable_unprepare(hvs->core_clk); + vc4->hvs = NULL; } @@ -632,6 +712,7 @@ static int vc4_hvs_dev_remove(struct platform_device *pdev) } static const struct of_device_id vc4_hvs_dt_match[] = { + { .compatible = "brcm,bcm2711-hvs" }, { .compatible = "brcm,bcm2835-hvs" }, {} }; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 08318e69061b..149825ff5df8 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -11,6 +11,8 @@ * crtc, HDMI encoder). 
*/ +#include <linux/clk.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> @@ -144,22 +146,130 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state) VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO)); } +static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned int i; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); + u32 dispctrl; + u32 dsp3_mux; + + if (!crtc_state->active) + continue; + + if (vc4_state->assigned_channel != 2) + continue; + + /* + * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to + * FIFO X'. + * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. + * + * DSP3 is connected to FIFO2 unless the transposer is + * enabled. In this case, FIFO 2 is directly accessed by the + * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 + * route. + */ + if (vc4_state->feed_txp) + dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); + else + dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); + + dispctrl = HVS_READ(SCALER_DISPCTRL) & + ~SCALER_DISPCTRL_DSP3_MUX_MASK; + HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); + } +} + +static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned char dsp2_mux = 0; + unsigned char dsp3_mux = 3; + unsigned char dsp4_mux = 3; + unsigned char dsp5_mux = 3; + unsigned int i; + u32 reg; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + + if (!crtc_state->active) + continue; + + switch (vc4_crtc->data->hvs_output) { + case 2: + dsp2_mux = (vc4_state->assigned_channel == 2) ? 
0 : 1; + break; + + case 3: + dsp3_mux = vc4_state->assigned_channel; + break; + + case 4: + dsp4_mux = vc4_state->assigned_channel; + break; + + case 5: + dsp5_mux = vc4_state->assigned_channel; + break; + + default: + break; + } + } + + reg = HVS_READ(SCALER_DISPECTRL); + HVS_WRITE(SCALER_DISPECTRL, + (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | + VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX)); + + reg = HVS_READ(SCALER_DISPCTRL); + HVS_WRITE(SCALER_DISPCTRL, + (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) | + VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX)); + + reg = HVS_READ(SCALER_DISPEOLN); + HVS_WRITE(SCALER_DISPEOLN, + (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) | + VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX)); + + reg = HVS_READ(SCALER_DISPDITHER); + HVS_WRITE(SCALER_DISPDITHER, + (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) | + VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX)); +} + static void vc4_atomic_complete_commit(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc; + struct vc4_hvs *hvs = vc4->hvs; + struct drm_crtc_state *new_crtc_state; + struct drm_crtc *crtc; int i; - for (i = 0; i < dev->mode_config.num_crtc; i++) { - if (!state->crtcs[i].ptr || !state->crtcs[i].commit) + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + struct vc4_crtc_state *vc4_crtc_state; + + if (!new_crtc_state->commit) continue; - vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr); - vc4_hvs_mask_underrun(dev, vc4_crtc->channel); + vc4_crtc_state = to_vc4_crtc_state(new_crtc_state); + vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); } + if (vc4->hvs->hvs5) + clk_set_min_rate(hvs->core_clk, 500000000); + drm_atomic_helper_wait_for_fences(dev, state, false); drm_atomic_helper_wait_for_dependencies(state); @@ -168,6 +278,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); + if (vc4->hvs->hvs5) + vc5_hvs_pv_muxing_commit(vc4, state); + else + vc4_hvs_pv_muxing_commit(vc4, state); + drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_modeset_enables(dev, state); @@ -182,6 +297,9 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) drm_atomic_helper_commit_cleanup_done(state); + if (vc4->hvs->hvs5) + clk_set_min_rate(hvs->core_clk, 0); + drm_atomic_state_put(state); up(&vc4->async_modeset); @@ -374,8 +492,11 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) /* CTM is being enabled or the matrix changed. */ if (new_crtc_state->ctm) { + struct vc4_crtc_state *vc4_crtc_state = + to_vc4_crtc_state(new_crtc_state); + /* fifo is 1-based since 0 disables CTM. */ - int fifo = to_vc4_crtc(crtc)->channel + 1; + int fifo = vc4_crtc_state->assigned_channel + 1; /* Check userland isn't trying to turn on CTM for more * than one CRTC at a time. 
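The four SCALER_DISP* updates in vc5_hvs_pv_muxing_commit() above all share one read-modify-write shape: fetch the register, clear the mux field for that output, then install the newly assigned FIFO. The sketch below makes the pattern explicit; vc5_hvs_write_mux() is a hypothetical helper written for this note, not a function in the driver:

/* Illustrative only: captures the read-modify-write pattern used for
 * SCALER_DISPECTRL/DISPCTRL/DISPEOLN/DISPDITHER above. HVS_READ() and
 * HVS_WRITE() expect a local "vc4" pointer, as in the driver.
 */
static void vc5_hvs_write_mux(struct vc4_dev *vc4, u32 offset,
			      u32 mask, unsigned int shift, u8 mux)
{
	u32 reg = HVS_READ(offset);

	reg &= ~mask;			/* drop the old routing */
	reg |= (mux << shift) & mask;	/* install the new channel */
	HVS_WRITE(offset, reg);
}

/* The DSP4 update above would then read:
 * vc5_hvs_write_mux(vc4, SCALER_DISPEOLN,
 *		     SCALER_DISPEOLN_DSP4_MUX_MASK,
 *		     SCALER_DISPEOLN_DSP4_MUX_SHIFT, dsp4_mux);
 */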
@@ -415,6 +536,9 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) struct drm_plane *plane; int i; + if (!vc4->load_tracker_available) + return 0; + priv_state = drm_atomic_get_private_obj_state(state, &vc4->load_tracker); if (IS_ERR(priv_state)) @@ -485,10 +609,87 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { .atomic_destroy_state = vc4_load_tracker_destroy_state, }; +#define NUM_OUTPUTS 6 +#define NUM_CHANNELS 3 + static int vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { - int ret; + unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0); + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc *crtc; + int i, ret; + + /* + * Since the HVS FIFOs are shared across all the pixelvalves and + * the TXP (and thus all the CRTCs), we need to pull the current + * state of all the enabled CRTCs so that an update to a single + * CRTC still keeps the previous FIFOs enabled and assigned to + * the same CRTCs, instead of evaluating only the CRTC being + * modified. + */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_state *crtc_state; + + if (!crtc->state->enable) + continue; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct vc4_crtc_state *new_vc4_crtc_state = + to_vc4_crtc_state(new_crtc_state); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + unsigned int matching_channels; + + if (old_crtc_state->enable && !new_crtc_state->enable) + new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; + + if (!new_crtc_state->enable) + continue; + + if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) { + unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel); + continue; + } + + /* + * The problem we have to solve here is that we have + * up to 7 encoders, connected to up to 6 CRTCs. + * + * Those CRTCs, depending on the instance, can be + * routed to 1, 2 or 3 HVS FIFOs, and we need to + * change the muxing between FIFOs and outputs in + * the HVS accordingly. + * + * It would be pretty hard to come up with an + * algorithm that would generically solve + * this. However, the current routing trees we support + * allow us to simplify the problem a bit. + * + * Indeed, with the current supported layouts, if we + * try to assign the FIFOs in ascending CRTC index + * order, we can't fall into the situation where an + * earlier CRTC that had multiple routes is assigned + * one that was the only option for a later CRTC. + * + * If the layout changes and doesn't give us that in + * the future, we will need to have something smarter, + * but it works so far. + */ + matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels; + if (matching_channels) { + unsigned int channel = ffs(matching_channels) - 1; + + new_vc4_crtc_state->assigned_channel = channel; + unassigned_channels &= ~BIT(channel); + } else { + return -EINVAL; + } + } ret = vc4_ctm_atomic_check(dev, state); if (ret < 0) @@ -512,12 +713,18 @@ int vc4_kms_load(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_ctm_state *ctm_state; struct vc4_load_tracker_state *load_state; + bool is_vc5 = of_device_is_compatible(dev->dev->of_node, + "brcm,bcm2711-vc5"); int ret; - /* Start with the load tracker enabled.
Can be disabled through the - * debugfs load_tracker file. - */ - vc4->load_tracker_enabled = true; + if (!is_vc5) { + vc4->load_tracker_available = true; + + /* Start with the load tracker enabled. Can be + * disabled through the debugfs load_tracker file. + */ + vc4->load_tracker_enabled = true; + } sema_init(&vc4->async_modeset, 1); @@ -531,8 +738,14 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + if (is_vc5) { + dev->mode_config.max_width = 7680; + dev->mode_config.max_height = 7680; + } else { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } + dev->mode_config.funcs = &vc4_mode_funcs; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; @@ -547,14 +760,17 @@ int vc4_kms_load(struct drm_device *dev) drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, &vc4_ctm_state_funcs); - load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); - if (!load_state) { - drm_atomic_private_obj_fini(&vc4->ctm_manager); - return -ENOMEM; - } + if (vc4->load_tracker_available) { + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); + if (!load_state) { + drm_atomic_private_obj_fini(&vc4->ctm_manager); + return -ENOMEM; + } - drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base, - &vc4_load_tracker_state_funcs); + drm_atomic_private_obj_init(dev, &vc4->load_tracker, + &load_state->base, + &vc4_load_tracker_state_funcs); + } drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index d040d9f12c6d..6b39cc2ca18d 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -32,45 +32,60 @@ static const struct hvs_format { u32 drm; /* DRM_FORMAT_* */ u32 hvs; /* HVS_FORMAT_* */ u32 pixel_order; + u32 pixel_order_hvs5; } hvs_formats[] = { { - .drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_XRGB8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ABGR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB, }, { - .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_ARGB8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ABGR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB, }, { - .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_ABGR8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ARGB, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888, + .drm = DRM_FORMAT_XBGR8888, + .hvs = HVS_PIXEL_FORMAT_RGBA8888, .pixel_order = HVS_PIXEL_ORDER_ARGB, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565, + .drm = DRM_FORMAT_RGB565, + .hvs = HVS_PIXEL_FORMAT_RGB565, .pixel_order = HVS_PIXEL_ORDER_XRGB, }, { - .drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565, + .drm = DRM_FORMAT_BGR565, + .hvs = HVS_PIXEL_FORMAT_RGB565, .pixel_order = HVS_PIXEL_ORDER_XBGR, }, { - .drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, + .drm = DRM_FORMAT_ARGB1555, + .hvs = HVS_PIXEL_FORMAT_RGBA5551, .pixel_order = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551, + .drm = DRM_FORMAT_XRGB1555, + .hvs = HVS_PIXEL_FORMAT_RGBA5551, .pixel_order = HVS_PIXEL_ORDER_ABGR, }, { - .drm = DRM_FORMAT_RGB888, .hvs = HVS_PIXEL_FORMAT_RGB888, + .drm = DRM_FORMAT_RGB888, + .hvs = 
HVS_PIXEL_FORMAT_RGB888, .pixel_order = HVS_PIXEL_ORDER_XRGB, }, { - .drm = DRM_FORMAT_BGR888, .hvs = HVS_PIXEL_FORMAT_RGB888, + .drm = DRM_FORMAT_BGR888, + .hvs = HVS_PIXEL_FORMAT_RGB888, .pixel_order = HVS_PIXEL_ORDER_XBGR, }, { @@ -422,10 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) static u32 vc4_lbm_size(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); - /* This is the worst case number. One of the two sizes will - * be used depending on the scaling configuration. - */ - u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w); + u32 pix_per_line; u32 lbm; /* LBM is not needed when there's no vertical scaling. */ @@ -433,6 +445,18 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) vc4_state->y_scaling[1] == VC4_SCALING_NONE) return 0; + /* + * This can be further optimized in the RGB/YUV444 case if the PPF + * decimation factor is between 0.5 and 1.0 by using crtc_w. + * + * It's not an issue though, since in that case src_w[0] is going + * to be greater than or equal to crtc_w. + */ + if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ) + pix_per_line = vc4_state->crtc_w; + else + pix_per_line = vc4_state->src_w[0]; + if (!vc4_state->is_yuv) { if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) lbm = pix_per_line * 8; @@ -492,6 +516,11 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) struct vc4_plane_state *vc4_state; struct drm_crtc_state *crtc_state; unsigned int vscale_factor; + struct vc4_dev *vc4; + + vc4 = to_vc4_dev(state->plane->dev); + if (!vc4->load_tracker_available) + return; vc4_state = to_vc4_plane_state(state); crtc_state = drm_atomic_get_existing_crtc_state(state->state, @@ -563,7 +592,9 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, &vc4_state->lbm, - lbm_size, 32, 0, 0); + lbm_size, + vc4->hvs->hvs5 ? 64 : 32, + 0, 0); spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); if (ret) @@ -776,35 +807,6 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } - /* Control word */ - vc4_dlist_write(vc4_state, - SCALER_CTL0_VALID | - (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) | - (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) | - VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) | - (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) | - (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | - VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) | - (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) | - VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) | - VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1)); - - /* Position Word 0: Image Positions and Alpha Value */ - vc4_state->pos0_offset = vc4_state->dlist_count; - vc4_dlist_write(vc4_state, - VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) | - VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) | - VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y)); - - /* Position Word 1: Scaled Image Dimensions. */ - if (!vc4_state->is_unity) { - vc4_dlist_write(vc4_state, - VC4_SET_FIELD(vc4_state->crtc_w, - SCALER_POS1_SCL_WIDTH) | - VC4_SET_FIELD(vc4_state->crtc_h, - SCALER_POS1_SCL_HEIGHT)); - } - /* Don't waste cycles mixing with plane alpha if the set alpha * is opaque or there is no per-pixel alpha information. * In any case we use the alpha property value as the fixed alpha.
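As a quick sanity check on the pix_per_line hunk above, here is a worked case with illustrative numbers (they are not taken from the patch; only the two branches are):

/*
 * Illustration only: a plane with src_w[0] = 1920 displayed at
 * crtc_w = 1280, with vertical scaling active.
 *
 *   - TPZ horizontal scaling: pix_per_line = crtc_w = 1280; the code
 *     sizes the line buffer on the decimated width.
 *   - PPF or no horizontal scaling: pix_per_line = src_w[0] = 1920;
 *     every source pixel still crosses the line buffer.
 *
 * Per the comment above, in the PPF 0.5..1.0 decimation window this
 * over-allocates slightly, which is harmless since src_w[0] >= crtc_w
 * there.
 */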
@@ -812,20 +814,120 @@ static int vc4_plane_mode_set(struct drm_plane *plane, mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE && fb->format->has_alpha; - /* Position Word 2: Source Image Size, Alpha */ - vc4_state->pos2_offset = vc4_state->dlist_count; - vc4_dlist_write(vc4_state, - VC4_SET_FIELD(fb->format->has_alpha ? - SCALER_POS2_ALPHA_MODE_PIPELINE : - SCALER_POS2_ALPHA_MODE_FIXED, - SCALER_POS2_ALPHA_MODE) | - (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) | - (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) | - VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT)); + if (!vc4->hvs->hvs5) { + /* Control word */ + vc4_dlist_write(vc4_state, + SCALER_CTL0_VALID | + (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) | + (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) | + VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) | + (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) | + (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | + VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) | + (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) | + VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) | + VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1)); + + /* Position Word 0: Image Positions and Alpha Value */ + vc4_state->pos0_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) | + VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) | + VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y)); + + /* Position Word 1: Scaled Image Dimensions. */ + if (!vc4_state->is_unity) { + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(vc4_state->crtc_w, + SCALER_POS1_SCL_WIDTH) | + VC4_SET_FIELD(vc4_state->crtc_h, + SCALER_POS1_SCL_HEIGHT)); + } + + /* Position Word 2: Source Image Size, Alpha */ + vc4_state->pos2_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(fb->format->has_alpha ? + SCALER_POS2_ALPHA_MODE_PIPELINE : + SCALER_POS2_ALPHA_MODE_FIXED, + SCALER_POS2_ALPHA_MODE) | + (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) | + (fb->format->has_alpha ? + SCALER_POS2_ALPHA_PREMULT : 0) | + VC4_SET_FIELD(vc4_state->src_w[0], + SCALER_POS2_WIDTH) | + VC4_SET_FIELD(vc4_state->src_h[0], + SCALER_POS2_HEIGHT)); + + /* Position Word 3: Context. Written by the HVS. */ + vc4_dlist_write(vc4_state, 0xc0c0c0c0); - /* Position Word 3: Context. Written by the HVS. */ - vc4_dlist_write(vc4_state, 0xc0c0c0c0); + } else { + u32 hvs_pixel_order = format->pixel_order; + + if (format->pixel_order_hvs5) + hvs_pixel_order = format->pixel_order_hvs5; + + /* Control word */ + vc4_dlist_write(vc4_state, + SCALER_CTL0_VALID | + (hvs_pixel_order << SCALER_CTL0_ORDER_SHIFT) | + (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) | + VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) | + (vc4_state->is_unity ? + SCALER5_CTL0_UNITY : 0) | + VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) | + VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1) | + SCALER5_CTL0_ALPHA_EXPAND | + SCALER5_CTL0_RGB_EXPAND); + + /* Position Word 0: Image Positions and Alpha Value */ + vc4_state->pos0_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + (rotation & DRM_MODE_REFLECT_Y ? + SCALER5_POS0_VFLIP : 0) | + VC4_SET_FIELD(vc4_state->crtc_x, + SCALER_POS0_START_X) | + (rotation & DRM_MODE_REFLECT_X ? 
+ SCALER5_POS0_HFLIP : 0) | + VC4_SET_FIELD(vc4_state->crtc_y, + SCALER5_POS0_START_Y) + ); + + /* Control Word 2 */ + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(state->alpha >> 4, + SCALER5_CTL2_ALPHA) | + (fb->format->has_alpha ? + SCALER5_CTL2_ALPHA_PREMULT : 0) | + (mix_plane_alpha ? + SCALER5_CTL2_ALPHA_MIX : 0) | + VC4_SET_FIELD(fb->format->has_alpha ? + SCALER5_CTL2_ALPHA_MODE_PIPELINE : + SCALER5_CTL2_ALPHA_MODE_FIXED, + SCALER5_CTL2_ALPHA_MODE) + ); + + /* Position Word 1: Scaled Image Dimensions. */ + if (!vc4_state->is_unity) { + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(vc4_state->crtc_w, + SCALER_POS1_SCL_WIDTH) | + VC4_SET_FIELD(vc4_state->crtc_h, + SCALER_POS1_SCL_HEIGHT)); + } + + /* Position Word 2: Source Image Size */ + vc4_state->pos2_offset = vc4_state->dlist_count; + vc4_dlist_write(vc4_state, + VC4_SET_FIELD(vc4_state->src_w[0], + SCALER5_POS2_WIDTH) | + VC4_SET_FIELD(vc4_state->src_h[0], + SCALER5_POS2_HEIGHT)); + + /* Position Word 3: Context. Written by the HVS. */ + vc4_dlist_write(vc4_state, 0xc0c0c0c0); + } /* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers @@ -1203,6 +1305,10 @@ static bool vc4_format_mod_supported(struct drm_plane *plane, default: return false; } + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV420: @@ -1255,6 +1361,8 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, &vc4_plane_funcs, formats, ARRAY_SIZE(formats), modifiers, type, NULL); + if (ret) + return ERR_PTR(ret); drm_plane_helper_add(plane, &vc4_plane_helper_funcs); @@ -1283,7 +1391,7 @@ int vc4_plane_create_additional_planes(struct drm_device *drm) * modest number of planes to expose, that should hopefully * still cover any sane usecase. 
*/ - for (i = 0; i < 8; i++) { + for (i = 0; i < 16; i++) { struct drm_plane *plane = vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 324462cc9cd4..be2c32a519b3 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -129,6 +129,8 @@ #define V3D_ERRSTAT 0x00f20 #define PV_CONTROL 0x00 +# define PV5_CONTROL_FIFO_LEVEL_HIGH_MASK VC4_MASK(26, 25) +# define PV5_CONTROL_FIFO_LEVEL_HIGH_SHIFT 25 # define PV_CONTROL_FORMAT_MASK VC4_MASK(23, 21) # define PV_CONTROL_FORMAT_SHIFT 21 # define PV_CONTROL_FORMAT_24 0 @@ -208,6 +210,11 @@ #define PV_HACT_ACT 0x30 +#define PV_MUX_CFG 0x34 +# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_MASK VC4_MASK(5, 2) +# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_SHIFT 2 +# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP 8 + #define SCALER_CHANNELS_COUNT 3 #define SCALER_DISPCTRL 0x00000000 @@ -286,9 +293,19 @@ #define SCALER_DISPID 0x00000008 #define SCALER_DISPECTRL 0x0000000c +# define SCALER_DISPECTRL_DSP2_MUX_SHIFT 31 +# define SCALER_DISPECTRL_DSP2_MUX_MASK VC4_MASK(31, 31) + #define SCALER_DISPPROF 0x00000010 + #define SCALER_DISPDITHER 0x00000014 +# define SCALER_DISPDITHER_DSP5_MUX_SHIFT 30 +# define SCALER_DISPDITHER_DSP5_MUX_MASK VC4_MASK(31, 30) + #define SCALER_DISPEOLN 0x00000018 +# define SCALER_DISPEOLN_DSP4_MUX_SHIFT 30 +# define SCALER_DISPEOLN_DSP4_MUX_MASK VC4_MASK(31, 30) + #define SCALER_DISPLIST0 0x00000020 #define SCALER_DISPLIST1 0x00000024 #define SCALER_DISPLIST2 0x00000028 @@ -327,6 +344,20 @@ # define SCALER_DISPCTRLX_HEIGHT_MASK VC4_MASK(11, 0) # define SCALER_DISPCTRLX_HEIGHT_SHIFT 0 +# define SCALER5_DISPCTRLX_WIDTH_MASK VC4_MASK(28, 16) +# define SCALER5_DISPCTRLX_WIDTH_SHIFT 16 +/* Generates a single frame when VSTART is seen and stops at the last + * pixel read from the FIFO. + */ +# define SCALER5_DISPCTRLX_ONESHOT BIT(15) +/* Processes a single context in the dlist and then task switch, + * instead of an entire line. + */ +# define SCALER5_DISPCTRLX_ONECTX_MASK VC4_MASK(14, 13) +# define SCALER5_DISPCTRLX_ONECTX_SHIFT 13 +# define SCALER5_DISPCTRLX_HEIGHT_MASK VC4_MASK(12, 0) +# define SCALER5_DISPCTRLX_HEIGHT_SHIFT 0 + #define SCALER_DISPBKGND0 0x00000044 # define SCALER_DISPBKGND_AUTOHS BIT(31) # define SCALER_DISPBKGND_INTERLACE BIT(30) @@ -460,32 +491,18 @@ #define SCALER_DLIST_START 0x00002000 #define SCALER_DLIST_SIZE 0x00004000 -#define VC4_HDMI_CORE_REV 0x000 +#define SCALER5_DLIST_START 0x00004000 -#define VC4_HDMI_SW_RESET_CONTROL 0x004 # define VC4_HDMI_SW_RESET_FORMAT_DETECT BIT(1) # define VC4_HDMI_SW_RESET_HDMI BIT(0) -#define VC4_HDMI_HOTPLUG_INT 0x008 - -#define VC4_HDMI_HOTPLUG 0x00c # define VC4_HDMI_HOTPLUG_CONNECTED BIT(0) -/* 3 bits per field, where each field maps from that corresponding MAI - * bus channel to the given HDMI channel. - */ -#define VC4_HDMI_MAI_CHANNEL_MAP 0x090 - -#define VC4_HDMI_MAI_CONFIG 0x094 # define VC4_HDMI_MAI_CONFIG_FORMAT_REVERSE BIT(27) # define VC4_HDMI_MAI_CONFIG_BIT_REVERSE BIT(26) # define VC4_HDMI_MAI_CHANNEL_MASK_MASK VC4_MASK(15, 0) # define VC4_HDMI_MAI_CHANNEL_MASK_SHIFT 0 -/* Last received format word on the MAI bus. 
*/ -#define VC4_HDMI_MAI_FORMAT 0x098 - -#define VC4_HDMI_AUDIO_PACKET_CONFIG 0x09c # define VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_SAMPLE_FLAT BIT(29) # define VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS BIT(24) # define VC4_HDMI_AUDIO_PACKET_FORCE_SAMPLE_PRESENT BIT(19) @@ -499,12 +516,8 @@ # define VC4_HDMI_AUDIO_PACKET_CEA_MASK_MASK VC4_MASK(7, 0) # define VC4_HDMI_AUDIO_PACKET_CEA_MASK_SHIFT 0 -#define VC4_HDMI_RAM_PACKET_CONFIG 0x0a0 # define VC4_HDMI_RAM_PACKET_ENABLE BIT(16) -#define VC4_HDMI_RAM_PACKET_STATUS 0x0a4 - -#define VC4_HDMI_CRP_CFG 0x0a8 /* When set, the CTS_PERIOD counts based on MAI bus sync pulse instead * of pixel clock. */ @@ -518,23 +531,12 @@ # define VC4_HDMI_CRP_CFG_N_MASK VC4_MASK(19, 0) # define VC4_HDMI_CRP_CFG_N_SHIFT 0 -/* 20-bit fields containing CTS values to be transmitted if !EXTERNAL_CTS_EN */ -#define VC4_HDMI_CTS_0 0x0ac -#define VC4_HDMI_CTS_1 0x0b0 -/* 20-bit fields containing number of clocks to send CTS0/1 before - * switching to the other one. - */ -#define VC4_HDMI_CTS_PERIOD_0 0x0b4 -#define VC4_HDMI_CTS_PERIOD_1 0x0b8 - -#define VC4_HDMI_HORZA 0x0c4 # define VC4_HDMI_HORZA_VPOS BIT(14) # define VC4_HDMI_HORZA_HPOS BIT(13) /* Horizontal active pixels (hdisplay). */ # define VC4_HDMI_HORZA_HAP_MASK VC4_MASK(12, 0) # define VC4_HDMI_HORZA_HAP_SHIFT 0 -#define VC4_HDMI_HORZB 0x0c8 /* Horizontal pack porch (htotal - hsync_end). */ # define VC4_HDMI_HORZB_HBP_MASK VC4_MASK(29, 20) # define VC4_HDMI_HORZB_HBP_SHIFT 20 @@ -545,7 +547,6 @@ # define VC4_HDMI_HORZB_HFP_MASK VC4_MASK(9, 0) # define VC4_HDMI_HORZB_HFP_SHIFT 0 -#define VC4_HDMI_FIFO_CTL 0x05c # define VC4_HDMI_FIFO_CTL_RECENTER_DONE BIT(14) # define VC4_HDMI_FIFO_CTL_USE_EMPTY BIT(13) # define VC4_HDMI_FIFO_CTL_ON_VB BIT(7) @@ -558,15 +559,12 @@ # define VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N BIT(0) # define VC4_HDMI_FIFO_VALID_WRITE_MASK 0xefff -#define VC4_HDMI_SCHEDULER_CONTROL 0x0c0 # define VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT BIT(15) # define VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS BIT(5) # define VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT BIT(3) # define VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE BIT(1) # define VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI BIT(0) -#define VC4_HDMI_VERTA0 0x0cc -#define VC4_HDMI_VERTA1 0x0d4 /* Vertical sync pulse (vsync_end - vsync_start). */ # define VC4_HDMI_VERTA_VSP_MASK VC4_MASK(24, 20) # define VC4_HDMI_VERTA_VSP_SHIFT 20 @@ -577,8 +575,6 @@ # define VC4_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0) # define VC4_HDMI_VERTA_VAL_SHIFT 0 -#define VC4_HDMI_VERTB0 0x0d0 -#define VC4_HDMI_VERTB1 0x0d8 /* Vertical sync pulse offset (for interlaced) */ # define VC4_HDMI_VERTB_VSPO_MASK VC4_MASK(21, 9) # define VC4_HDMI_VERTB_VSPO_SHIFT 9 @@ -586,7 +582,6 @@ # define VC4_HDMI_VERTB_VBP_MASK VC4_MASK(8, 0) # define VC4_HDMI_VERTB_VBP_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_1 0x0e8 /* Set when the transmission has ended. */ # define VC4_HDMI_CEC_TX_EOM BIT(31) /* If set, transmission was acked on the 1st or 2nd attempt (only one @@ -627,7 +622,6 @@ /* Set these fields to how many bit clock cycles get to that many * microseconds. 
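 * Illustratively, with the CEC divider yielding a 40 kHz bit clock (25 us per cycle), the CNT_TO_1500_US field would be programmed to 1500 / 25 = 60.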
*/ -#define VC4_HDMI_CEC_CNTRL_2 0x0ec # define VC4_HDMI_CEC_CNT_TO_1500_US_MASK VC4_MASK(30, 24) # define VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT 24 # define VC4_HDMI_CEC_CNT_TO_1300_US_MASK VC4_MASK(23, 17) @@ -639,7 +633,6 @@ # define VC4_HDMI_CEC_CNT_TO_400_US_MASK VC4_MASK(4, 0) # define VC4_HDMI_CEC_CNT_TO_400_US_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_3 0x0f0 # define VC4_HDMI_CEC_CNT_TO_2750_US_MASK VC4_MASK(31, 24) # define VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT 24 # define VC4_HDMI_CEC_CNT_TO_2400_US_MASK VC4_MASK(23, 16) @@ -649,7 +642,6 @@ # define VC4_HDMI_CEC_CNT_TO_1700_US_MASK VC4_MASK(7, 0) # define VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_4 0x0f4 # define VC4_HDMI_CEC_CNT_TO_4300_US_MASK VC4_MASK(31, 24) # define VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT 24 # define VC4_HDMI_CEC_CNT_TO_3900_US_MASK VC4_MASK(23, 16) @@ -659,7 +651,6 @@ # define VC4_HDMI_CEC_CNT_TO_3500_US_MASK VC4_MASK(7, 0) # define VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT 0 -#define VC4_HDMI_CEC_CNTRL_5 0x0f8 # define VC4_HDMI_CEC_TX_SW_RESET BIT(27) # define VC4_HDMI_CEC_RX_SW_RESET BIT(26) # define VC4_HDMI_CEC_PAD_SW_RESET BIT(25) @@ -672,39 +663,11 @@ # define VC4_HDMI_CEC_CNT_TO_4500_US_MASK VC4_MASK(7, 0) # define VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT 0 -/* Transmit data, first byte is low byte of the 32-bit reg. MSB of - * each byte transmitted first. - */ -#define VC4_HDMI_CEC_TX_DATA_1 0x0fc -#define VC4_HDMI_CEC_TX_DATA_2 0x100 -#define VC4_HDMI_CEC_TX_DATA_3 0x104 -#define VC4_HDMI_CEC_TX_DATA_4 0x108 -#define VC4_HDMI_CEC_RX_DATA_1 0x10c -#define VC4_HDMI_CEC_RX_DATA_2 0x110 -#define VC4_HDMI_CEC_RX_DATA_3 0x114 -#define VC4_HDMI_CEC_RX_DATA_4 0x118 - -#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0 - -#define VC4_HDMI_TX_PHY_CTL0 0x2c4 # define VC4_HDMI_TX_PHY_RNG_PWRDN BIT(25) -/* Interrupt status bits */ -#define VC4_HDMI_CPU_STATUS 0x340 -#define VC4_HDMI_CPU_SET 0x344 -#define VC4_HDMI_CPU_CLEAR 0x348 # define VC4_HDMI_CPU_CEC BIT(6) # define VC4_HDMI_CPU_HOTPLUG BIT(0) -#define VC4_HDMI_CPU_MASK_STATUS 0x34c -#define VC4_HDMI_CPU_MASK_SET 0x350 -#define VC4_HDMI_CPU_MASK_CLEAR 0x354 - -#define VC4_HDMI_GCP(x) (0x400 + ((x) * 0x4)) -#define VC4_HDMI_RAM_PACKET(x) (0x400 + ((x) * 0x24)) -#define VC4_HDMI_PACKET_STRIDE 0x24 - -#define VC4_HD_M_CTL 0x00c /* Debug: Current receive value on the CEC pad. */ # define VC4_HD_CECRXD BIT(9) /* Debug: Override CEC output to 0. */ @@ -714,7 +677,6 @@ # define VC4_HD_M_SW_RST BIT(2) # define VC4_HD_M_ENABLE BIT(0) -#define VC4_HD_MAI_CTL 0x014 /* Set when audio stream is received at a slower rate than the * sampling period, so MAI fifo goes empty. Write 1 to clear. */ @@ -739,7 +701,6 @@ /* Single-shot reset bit. Read value is undefined. */ # define VC4_HD_MAI_CTL_RESET BIT(0) -#define VC4_HD_MAI_THR 0x018 # define VC4_HD_MAI_THR_PANICHIGH_MASK VC4_MASK(29, 24) # define VC4_HD_MAI_THR_PANICHIGH_SHIFT 24 # define VC4_HD_MAI_THR_PANICLOW_MASK VC4_MASK(21, 16) @@ -749,31 +710,23 @@ # define VC4_HD_MAI_THR_DREQLOW_MASK VC4_MASK(5, 0) # define VC4_HD_MAI_THR_DREQLOW_SHIFT 0 -/* Format header to be placed on the MAI data. Unused. */ -#define VC4_HD_MAI_FMT 0x01c - -/* Register for DMAing in audio data to be transported over the MAI - * bus to the Falcon core. - */ -#define VC4_HD_MAI_DATA 0x020 - /* Divider from HDMI HSM clock to MAI serial clock. Sampling period * converges to N / (M + 1) cycles. 
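 * Illustratively, deriving a 1.536 MHz MAI bit clock (48 kHz x 32 bits per frame) from a 108 MHz HSM clock needs N / (M + 1) = 70.3125, e.g. N = 2250 with M = 31.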
*/ -#define VC4_HD_MAI_SMP 0x02c # define VC4_HD_MAI_SMP_N_MASK VC4_MASK(31, 8) # define VC4_HD_MAI_SMP_N_SHIFT 8 # define VC4_HD_MAI_SMP_M_MASK VC4_MASK(7, 0) # define VC4_HD_MAI_SMP_M_SHIFT 0 -#define VC4_HD_VID_CTL 0x038 # define VC4_HD_VID_CTL_ENABLE BIT(31) # define VC4_HD_VID_CTL_UNDERFLOW_ENABLE BIT(30) # define VC4_HD_VID_CTL_FRAME_COUNTER_RESET BIT(29) # define VC4_HD_VID_CTL_VSYNC_LOW BIT(28) # define VC4_HD_VID_CTL_HSYNC_LOW BIT(27) +# define VC4_HD_VID_CTL_CLRSYNC BIT(24) +# define VC4_HD_VID_CTL_CLRRGB BIT(23) +# define VC4_HD_VID_CTL_BLANKPIX BIT(18) -#define VC4_HD_CSC_CTL 0x040 # define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5) # define VC4_HD_CSC_CTL_ORDER_SHIFT 5 # define VC4_HD_CSC_CTL_ORDER_RGB 0 @@ -791,14 +744,7 @@ # define VC4_HD_CSC_CTL_RGB2YCC BIT(1) # define VC4_HD_CSC_CTL_ENABLE BIT(0) -#define VC4_HD_CSC_12_11 0x044 -#define VC4_HD_CSC_14_13 0x048 -#define VC4_HD_CSC_22_21 0x04c -#define VC4_HD_CSC_24_23 0x050 -#define VC4_HD_CSC_32_31 0x054 -#define VC4_HD_CSC_34_33 0x058 - -#define VC4_HD_FRAME_COUNT 0x068 +# define VC4_DVP_HT_CLOCK_STOP_PIXEL BIT(1) /* HVS display list information. */ #define HVS_BOOTLOADER_DLIST_END 32 @@ -825,6 +771,8 @@ enum hvs_pixel_format { HVS_PIXEL_FORMAT_PALETTE = 13, HVS_PIXEL_FORMAT_YUV444_RGB = 14, HVS_PIXEL_FORMAT_AYUV444_RGB = 15, + HVS_PIXEL_FORMAT_RGBA1010102 = 16, + HVS_PIXEL_FORMAT_YCBCR_10BIT = 17, }; /* Note: the LSB is the rightmost character shown. Only valid for @@ -879,6 +827,10 @@ enum hvs_pixel_format { #define SCALER_CTL0_RGBA_EXPAND_MSB 2 #define SCALER_CTL0_RGBA_EXPAND_ROUND 3 +#define SCALER5_CTL0_ALPHA_EXPAND BIT(12) + +#define SCALER5_CTL0_RGB_EXPAND BIT(11) + #define SCALER_CTL0_SCL1_MASK VC4_MASK(10, 8) #define SCALER_CTL0_SCL1_SHIFT 8 @@ -896,10 +848,13 @@ enum hvs_pixel_format { /* Set to indicate no scaling. 
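 * (Bit 4 on the older HVS; the HVS5 equivalent below moved to bit 15.)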
*/ #define SCALER_CTL0_UNITY BIT(4) +#define SCALER5_CTL0_UNITY BIT(15) #define SCALER_CTL0_PIXEL_FORMAT_MASK VC4_MASK(3, 0) #define SCALER_CTL0_PIXEL_FORMAT_SHIFT 0 +#define SCALER5_CTL0_PIXEL_FORMAT_MASK VC4_MASK(4, 0) + #define SCALER_POS0_FIXED_ALPHA_MASK VC4_MASK(31, 24) #define SCALER_POS0_FIXED_ALPHA_SHIFT 24 @@ -909,12 +864,48 @@ enum hvs_pixel_format { #define SCALER_POS0_START_X_MASK VC4_MASK(11, 0) #define SCALER_POS0_START_X_SHIFT 0 +#define SCALER5_POS0_START_Y_MASK VC4_MASK(27, 16) +#define SCALER5_POS0_START_Y_SHIFT 16 + +#define SCALER5_POS0_START_X_MASK VC4_MASK(13, 0) +#define SCALER5_POS0_START_X_SHIFT 0 + +#define SCALER5_POS0_VFLIP BIT(31) +#define SCALER5_POS0_HFLIP BIT(15) + +#define SCALER5_CTL2_ALPHA_MODE_MASK VC4_MASK(31, 30) +#define SCALER5_CTL2_ALPHA_MODE_SHIFT 30 +#define SCALER5_CTL2_ALPHA_MODE_PIPELINE 0 +#define SCALER5_CTL2_ALPHA_MODE_FIXED 1 +#define SCALER5_CTL2_ALPHA_MODE_FIXED_NONZERO 2 +#define SCALER5_CTL2_ALPHA_MODE_FIXED_OVER_0x07 3 + +#define SCALER5_CTL2_ALPHA_PREMULT BIT(29) + +#define SCALER5_CTL2_ALPHA_MIX BIT(28) + +#define SCALER5_CTL2_ALPHA_LOC BIT(25) + +#define SCALER5_CTL2_MAP_SEL_MASK VC4_MASK(18, 17) +#define SCALER5_CTL2_MAP_SEL_SHIFT 17 + +#define SCALER5_CTL2_GAMMA BIT(16) + +#define SCALER5_CTL2_ALPHA_MASK VC4_MASK(15, 4) +#define SCALER5_CTL2_ALPHA_SHIFT 4 + #define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16) #define SCALER_POS1_SCL_HEIGHT_SHIFT 16 #define SCALER_POS1_SCL_WIDTH_MASK VC4_MASK(11, 0) #define SCALER_POS1_SCL_WIDTH_SHIFT 0 +#define SCALER5_POS1_SCL_HEIGHT_MASK VC4_MASK(28, 16) +#define SCALER5_POS1_SCL_HEIGHT_SHIFT 16 + +#define SCALER5_POS1_SCL_WIDTH_MASK VC4_MASK(12, 0) +#define SCALER5_POS1_SCL_WIDTH_SHIFT 0 + #define SCALER_POS2_ALPHA_MODE_MASK VC4_MASK(31, 30) #define SCALER_POS2_ALPHA_MODE_SHIFT 30 #define SCALER_POS2_ALPHA_MODE_PIPELINE 0 @@ -930,6 +921,12 @@ enum hvs_pixel_format { #define SCALER_POS2_WIDTH_MASK VC4_MASK(11, 0) #define SCALER_POS2_WIDTH_SHIFT 0 +#define SCALER5_POS2_HEIGHT_MASK VC4_MASK(28, 16) +#define SCALER5_POS2_HEIGHT_SHIFT 16 + +#define SCALER5_POS2_WIDTH_MASK VC4_MASK(12, 0) +#define SCALER5_POS2_WIDTH_SHIFT 0 + /* Color Space Conversion words. 
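 * (A worked decode under the mapping described next: integer bits 0x3 with fractional byte 0x80 means -1 + 128/256 = -0.5.)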
Some values are S2.8 signed * integers, except that the 2 integer bits map as {0x0: 0, 0x1: 1, * 0x2: 2, 0x3: -1} diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index a7c3af0005a0..34612edcabbd 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -13,6 +13,7 @@ #include <linux/of_platform.h> #include <linux/pm_runtime.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_fb_cma_helper.h> @@ -385,31 +386,37 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = { }; static int vc4_txp_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); int ret; - ret = vc4_hvs_atomic_check(crtc, state); + ret = vc4_hvs_atomic_check(crtc, crtc_state); if (ret) return ret; - state->no_vblank = true; + crtc_state->no_vblank = true; vc4_state->feed_txp = true; return 0; } static void vc4_txp_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); drm_crtc_vblank_on(crtc); vc4_hvs_atomic_enable(crtc, old_state); } static void vc4_txp_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; /* Disable vblank irq handling before crtc is disabled. */ @@ -436,7 +443,6 @@ static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = { .atomic_flush = vc4_hvs_atomic_flush, .atomic_enable = vc4_txp_atomic_enable, .atomic_disable = vc4_txp_atomic_disable, - .mode_set_nofb = vc4_hvs_mode_set_nofb, }; static irqreturn_t vc4_txp_interrupt(int irq, void *data) @@ -452,7 +458,8 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data) } static const struct vc4_crtc_data vc4_txp_crtc_data = { - .hvs_channel = 2, + .hvs_available_channels = BIT(2), + .hvs_output = 2, }; static int vc4_txp_bind(struct device *dev, struct device *master, void *data) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 313339bbff90..fa54a6d1403d 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -50,6 +50,8 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 +static const struct drm_gem_object_funcs vgem_gem_object_funcs; + static struct vgem_device { struct drm_device drm; struct platform_device *platform; @@ -167,6 +169,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev, if (!obj) return ERR_PTR(-ENOMEM); + obj->base.funcs = &vgem_gem_object_funcs; + ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE)); if (ret) { kfree(obj); @@ -321,7 +325,7 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) { struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT); + return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT); } static struct drm_gem_object* vgem_prime_import(struct drm_device *dev, @@ -401,20 +405,20 @@ static int vgem_prime_mmap(struct drm_gem_object *obj, return 0; } -static void vgem_release(struct drm_device *dev) -{ - struct 
vgem_device *vgem = container_of(dev, typeof(*vgem), drm); - - platform_device_unregister(vgem->platform); -} +static const struct drm_gem_object_funcs vgem_gem_object_funcs = { + .free = vgem_gem_free_object, + .pin = vgem_prime_pin, + .unpin = vgem_prime_unpin, + .get_sg_table = vgem_prime_get_sg_table, + .vmap = vgem_prime_vmap, + .vunmap = vgem_prime_vunmap, + .vm_ops = &vgem_gem_vm_ops, +}; static struct drm_driver vgem_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, - .release = vgem_release, .open = vgem_open, .postclose = vgem_postclose, - .gem_free_object_unlocked = vgem_gem_free_object, - .gem_vm_ops = &vgem_gem_vm_ops, .ioctls = vgem_ioctls, .num_ioctls = ARRAY_SIZE(vgem_ioctls), .fops = &vgem_driver_fops, @@ -423,13 +427,8 @@ static struct drm_driver vgem_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_pin = vgem_prime_pin, - .gem_prime_unpin = vgem_prime_unpin, .gem_prime_import = vgem_prime_import, .gem_prime_import_sg_table = vgem_prime_import_sg_table, - .gem_prime_get_sg_table = vgem_prime_get_sg_table, - .gem_prime_vmap = vgem_prime_vmap, - .gem_prime_vunmap = vgem_prime_vunmap, .gem_prime_mmap = vgem_prime_mmap, .name = DRIVER_NAME, @@ -442,48 +441,49 @@ static struct drm_driver vgem_driver = { static int __init vgem_init(void) { int ret; + struct platform_device *pdev; - vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL); - if (!vgem_device) - return -ENOMEM; + pdev = platform_device_register_simple("vgem", -1, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); - vgem_device->platform = - platform_device_register_simple("vgem", -1, NULL, 0); - if (IS_ERR(vgem_device->platform)) { - ret = PTR_ERR(vgem_device->platform); - goto out_free; + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_unregister; } - dma_coerce_mask_and_coherent(&vgem_device->platform->dev, + dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - ret = drm_dev_init(&vgem_device->drm, &vgem_driver, - &vgem_device->platform->dev); - if (ret) - goto out_unregister; - drmm_add_final_kfree(&vgem_device->drm, vgem_device); + + vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver, + struct vgem_device, drm); + if (IS_ERR(vgem_device)) { + ret = PTR_ERR(vgem_device); + goto out_devres; + } + vgem_device->platform = pdev; /* Final step: expose the device/driver to userspace */ ret = drm_dev_register(&vgem_device->drm, 0); if (ret) - goto out_put; + goto out_devres; return 0; -out_put: - drm_dev_put(&vgem_device->drm); - platform_device_unregister(vgem_device->platform); - return ret; +out_devres: + devres_release_group(&pdev->dev, NULL); out_unregister: - platform_device_unregister(vgem_device->platform); -out_free: - kfree(vgem_device); + platform_device_unregister(pdev); return ret; } static void __exit vgem_exit(void) { + struct platform_device *pdev = vgem_device->platform; + drm_dev_unregister(&vgem_device->drm); - drm_dev_put(&vgem_device->drm); + devres_release_group(&pdev->dev, NULL); + platform_device_unregister(pdev); } module_init(vgem_init); diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index 45cc9e900260..dae1bacd86c1 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c @@ -129,9 +129,9 @@ int via_mem_alloc(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); if (0 == ((mem->type == VIA_MEM_VIDEO) ? 
dev_priv->vram_initialized : dev_priv->agp_initialized)) { + mutex_unlock(&dev->struct_mutex); DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); - mutex_unlock(&dev->struct_mutex); return -EINVAL; } diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile index 92aa2b3d349d..b99fa4a73b68 100644 --- a/drivers/gpu/drm/virtio/Makefile +++ b/drivers/gpu/drm/virtio/Makefile @@ -3,7 +3,7 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. -virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \ +virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \ virtgpu_display.o virtgpu_vq.o \ virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index 3221520f61f0..f336a8fa6666 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -42,14 +42,21 @@ static void virtio_add_int(struct seq_file *m, const char *name, static int virtio_gpu_features(struct seq_file *m, void *data) { - struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_info_node *node = (struct drm_info_node *)m->private; struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; virtio_add_bool(m, "virgl", vgdev->has_virgl_3d); virtio_add_bool(m, "edid", vgdev->has_edid); virtio_add_bool(m, "indirect", vgdev->has_indirect); + virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid); + virtio_add_bool(m, "blob resources", vgdev->has_resource_blob); virtio_add_int(m, "cap sets", vgdev->num_capsets); virtio_add_int(m, "scanouts", vgdev->num_scanouts); + if (vgdev->host_visible_region.len) { + seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region", + (unsigned long)vgdev->host_visible_region.addr, + (unsigned long)vgdev->host_visible_region.len); + } return 0; } @@ -65,9 +72,27 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) return 0; } +static int +virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; + struct drm_printer p; + + if (!vgdev->has_host_visible) { + seq_puts(m, "Host allocations not visible to guest\n"); + return 0; + } + + p = drm_seq_file_printer(m); + drm_mm_print(&vgdev->host_visible_mm, &p); + return 0; +} + static struct drm_info_list virtio_gpu_debugfs_list[] = { { "virtio-gpu-features", virtio_gpu_features }, { "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL }, + { "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm }, }; #define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list) diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index effea07abe62..4bf74836bd53 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -95,12 +95,12 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc) } static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { } static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct 
virtio_gpu_device *vgdev = dev->dev_private; @@ -111,13 +111,13 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, } static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { return 0; } static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); @@ -325,11 +325,14 @@ static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) { - int i; + int i, ret; + + ret = drmm_mode_config_init(vgdev->ddev); + if (ret) + return ret; - drm_mode_config_init(vgdev->ddev); vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true; vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs; @@ -343,6 +346,7 @@ void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) vgdev_output_init(vgdev, i); drm_mode_config_reset(vgdev->ddev); + return 0; } void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) @@ -351,5 +355,4 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) for (i = 0 ; i < vgdev->num_scanouts; ++i) kfree(vgdev->outputs[i].edid); - drm_mode_config_cleanup(vgdev->ddev); } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b039f493bda9..86330f1ade72 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -166,6 +166,7 @@ static unsigned int features[] = { #endif VIRTIO_GPU_F_EDID, VIRTIO_GPU_F_RESOURCE_UUID, + VIRTIO_GPU_F_RESOURCE_BLOB, }; static struct virtio_driver virtio_gpu_driver = { .feature_table = features, @@ -203,7 +204,6 @@ static struct drm_driver driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_mmap = drm_gem_prime_mmap, - .gem_prime_export = virtgpu_gem_prime_export, .gem_prime_import = virtgpu_gem_prime_import, .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index a52b7a39f286..3c0e17212c33 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -35,6 +35,7 @@ #include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_ioctl.h> @@ -49,18 +50,21 @@ #define DRIVER_MINOR 1 #define DRIVER_PATCHLEVEL 0 -#define UUID_INITIALIZING 0 -#define UUID_INITIALIZED 1 -#define UUID_INITIALIZATION_FAILED 2 +#define STATE_INITIALIZING 0 +#define STATE_OK 1 +#define STATE_ERR 2 struct virtio_gpu_object_params { - uint32_t format; - uint32_t width; - uint32_t height; unsigned long size; bool dumb; /* 3d */ bool virgl; + bool blob; + + /* classic resources only */ + uint32_t format; + uint32_t width; + uint32_t height; uint32_t target; uint32_t bind; uint32_t depth; @@ -68,6 +72,12 @@ struct virtio_gpu_object_params { uint32_t last_level; uint32_t nr_samples; uint32_t flags; + + /* blob resources only */ + uint32_t ctx_id; + uint32_t blob_mem; + uint32_t blob_flags; + uint64_t blob_id; }; struct virtio_gpu_object { @@ -75,6 +85,8 @@ struct virtio_gpu_object { uint32_t hw_res_handle; bool dumb; bool created; + bool host3d_blob, 
guest_blob; + uint32_t blob_mem, blob_flags; int uuid_state; uuid_t uuid; @@ -88,9 +100,19 @@ struct virtio_gpu_object_shmem { uint32_t mapped; }; +struct virtio_gpu_object_vram { + struct virtio_gpu_object base; + uint32_t map_state; + uint32_t map_info; + struct drm_mm_node vram_node; +}; + #define to_virtio_gpu_shmem(virtio_gpu_object) \ container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base) +#define to_virtio_gpu_vram(virtio_gpu_object) \ + container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base) + struct virtio_gpu_object_array { struct ww_acquire_ctx ticket; struct list_head next; @@ -208,6 +230,10 @@ struct virtio_gpu_device { bool has_edid; bool has_indirect; bool has_resource_assign_uuid; + bool has_resource_blob; + bool has_host_visible; + struct virtio_shm_region host_visible_region; + struct drm_mm host_visible_mm; struct work_struct config_changed_work; @@ -219,8 +245,10 @@ struct virtio_gpu_device { uint32_t num_capsets; struct list_head cap_cache; - /* protects resource state when exporting */ + /* protects uuid state when exporting */ spinlock_t resource_export_lock; + /* protects map state and host_visible_mm */ + spinlock_t host_visible_lock; }; struct virtio_gpu_fpriv { @@ -229,8 +257,8 @@ struct virtio_gpu_fpriv { struct mutex context_lock; }; -/* virtgpu_ioctl.c */ -#define DRM_VIRTIO_NUM_IOCTLS 10 +/* virtio_ioctl.c */ +#define DRM_VIRTIO_NUM_IOCTLS 11 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); @@ -323,12 +351,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, uint32_t ctx_id, uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, struct drm_virtgpu_3d_box *box, struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, uint32_t ctx_id, uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, struct drm_virtgpu_3d_box *box, struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); @@ -351,8 +383,28 @@ int virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_array *objs); +int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs, uint64_t offset); + +void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo); + +void +virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_mem_entry *ents, + uint32_t nents); +void +virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev, + uint32_t scanout_id, + struct virtio_gpu_object *bo, + struct drm_framebuffer *fb, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y); + /* virtgpu_display.c */ -void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); /* virtgpu_plane.c */ @@ -381,7 +433,11 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo); +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, + uint32_t *resid); /* virtgpu_prime.c */ +int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object 
*bo); struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj, int flags); struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, @@ -395,4 +451,9 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table( /* virtgpu_debugfs.c */ void virtio_gpu_debugfs_init(struct drm_minor *minor); +/* virtgpu_vram.c */ +bool virtio_gpu_is_vram(struct virtio_gpu_object *bo); +int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr); #endif diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index c8da7adc6b30..5417f365d1a3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -34,6 +34,10 @@ #include "virtgpu_drv.h" +#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \ + VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \ + VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) + void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file) { struct virtio_gpu_device *vgdev = dev->dev_private; @@ -208,11 +212,20 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, switch (param->param) { case VIRTGPU_PARAM_3D_FEATURES: - value = vgdev->has_virgl_3d == true ? 1 : 0; + value = vgdev->has_virgl_3d ? 1 : 0; break; case VIRTGPU_PARAM_CAPSET_QUERY_FIX: value = 1; break; + case VIRTGPU_PARAM_RESOURCE_BLOB: + value = vgdev->has_resource_blob ? 1 : 0; + break; + case VIRTGPU_PARAM_HOST_VISIBLE: + value = vgdev->has_host_visible ? 1 : 0; + break; + case VIRTGPU_PARAM_CROSS_DEVICE: + value = vgdev->has_resource_assign_uuid ? 1 : 0; + break; default: return -EINVAL; } @@ -301,6 +314,9 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data, ri->size = qobj->base.base.size; ri->res_handle = qobj->hw_res_handle; + if (qobj->host3d_blob || qobj->guest_blob) + ri->blob_mem = qobj->blob_mem; + drm_gem_object_put(gobj); return 0; } @@ -312,6 +328,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_from_host *args = data; + struct virtio_gpu_object *bo; struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; int ret; @@ -325,6 +342,17 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, if (objs == NULL) return -ENOENT; + bo = gem_to_virtio_gpu_obj(objs->objs[0]); + if (bo->guest_blob && !bo->host3d_blob) { + ret = -EINVAL; + goto err_put_free; + } + + if (!bo->host3d_blob && (args->stride || args->layer_stride)) { + ret = -EINVAL; + goto err_put_free; + } + ret = virtio_gpu_array_lock_resv(objs); if (ret != 0) goto err_put_free; @@ -334,9 +362,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, ret = -ENOMEM; goto err_unlock; } + virtio_gpu_cmd_transfer_from_host_3d - (vgdev, vfpriv->ctx_id, offset, args->level, - &args->box, objs, fence); + (vgdev, vfpriv->ctx_id, offset, args->level, args->stride, + args->layer_stride, &args->box, objs, fence); dma_fence_put(&fence->f); virtio_gpu_notify(vgdev); return 0; @@ -354,6 +383,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_to_host *args = data; + struct virtio_gpu_object *bo; struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; 
int ret; @@ -363,6 +393,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, if (objs == NULL) return -ENOENT; + bo = gem_to_virtio_gpu_obj(objs->objs[0]); + if (bo->guest_blob && !bo->host3d_blob) { + ret = -EINVAL; + goto err_put_free; + } + if (!vgdev->has_virgl_3d) { virtio_gpu_cmd_transfer_to_host_2d (vgdev, offset, @@ -370,6 +406,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, objs, NULL); } else { virtio_gpu_create_context(dev, file); + + if (!bo->host3d_blob && (args->stride || args->layer_stride)) { + ret = -EINVAL; + goto err_put_free; + } + ret = virtio_gpu_array_lock_resv(objs); if (ret != 0) goto err_put_free; @@ -381,8 +423,9 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, virtio_gpu_cmd_transfer_to_host_3d (vgdev, - vfpriv ? vfpriv->ctx_id : 0, offset, - args->level, &args->box, objs, fence); + vfpriv ? vfpriv->ctx_id : 0, offset, args->level, + args->stride, args->layer_stride, &args->box, objs, + fence); dma_fence_put(&fence->f); } virtio_gpu_notify(vgdev); @@ -491,6 +534,134 @@ copy_exit: return 0; } +static int verify_blob(struct virtio_gpu_device *vgdev, + struct virtio_gpu_fpriv *vfpriv, + struct virtio_gpu_object_params *params, + struct drm_virtgpu_resource_create_blob *rc_blob, + bool *guest_blob, bool *host3d_blob) +{ + if (!vgdev->has_resource_blob) + return -EINVAL; + + if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) || + !rc_blob->blob_flags) + return -EINVAL; + + if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) { + if (!vgdev->has_resource_assign_uuid) + return -EINVAL; + } + + switch (rc_blob->blob_mem) { + case VIRTGPU_BLOB_MEM_GUEST: + *guest_blob = true; + break; + case VIRTGPU_BLOB_MEM_HOST3D_GUEST: + *guest_blob = true; + fallthrough; + case VIRTGPU_BLOB_MEM_HOST3D: + *host3d_blob = true; + break; + default: + return -EINVAL; + } + + if (*host3d_blob) { + if (!vgdev->has_virgl_3d) + return -EINVAL; + + /* Must be dword aligned. 
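 * e.g. a 24-byte command stream passes, while a 21-byte one fails the % 4 check below with -EINVAL.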
*/ + if (rc_blob->cmd_size % 4 != 0) + return -EINVAL; + + params->ctx_id = vfpriv->ctx_id; + params->blob_id = rc_blob->blob_id; + } else { + if (rc_blob->blob_id != 0) + return -EINVAL; + + if (rc_blob->cmd_size != 0) + return -EINVAL; + } + + params->blob_mem = rc_blob->blob_mem; + params->size = rc_blob->size; + params->blob = true; + params->blob_flags = rc_blob->blob_flags; + return 0; +} + +static int virtio_gpu_resource_create_blob(struct drm_device *dev, + void *data, struct drm_file *file) +{ + int ret = 0; + uint32_t handle = 0; + bool guest_blob = false; + bool host3d_blob = false; + struct drm_gem_object *obj; + struct virtio_gpu_object *bo; + struct virtio_gpu_object_params params = { 0 }; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct drm_virtgpu_resource_create_blob *rc_blob = data; + + if (verify_blob(vgdev, vfpriv, ¶ms, rc_blob, + &guest_blob, &host3d_blob)) + return -EINVAL; + + if (vgdev->has_virgl_3d) + virtio_gpu_create_context(dev, file); + + if (rc_blob->cmd_size) { + void *buf; + + buf = memdup_user(u64_to_user_ptr(rc_blob->cmd), + rc_blob->cmd_size); + + if (IS_ERR(buf)) + return PTR_ERR(buf); + + virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size, + vfpriv->ctx_id, NULL, NULL); + } + + if (guest_blob) + ret = virtio_gpu_object_create(vgdev, ¶ms, &bo, NULL); + else if (!guest_blob && host3d_blob) + ret = virtio_gpu_vram_create(vgdev, ¶ms, &bo); + else + return -EINVAL; + + if (ret < 0) + return ret; + + bo->guest_blob = guest_blob; + bo->host3d_blob = host3d_blob; + bo->blob_mem = rc_blob->blob_mem; + bo->blob_flags = rc_blob->blob_flags; + + obj = &bo->base.base; + if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) { + ret = virtio_gpu_resource_assign_uuid(vgdev, bo); + if (ret) { + drm_gem_object_release(obj); + return ret; + } + } + + ret = drm_gem_handle_create(file, obj, &handle); + if (ret) { + drm_gem_object_release(obj); + return ret; + } + drm_gem_object_put(obj); + + rc_blob->res_handle = bo->hw_res_handle; + rc_blob->bo_handle = handle; + + return 0; +} + struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = { DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl, DRM_RENDER_ALLOW), @@ -523,4 +694,8 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = { DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl, DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB, + virtio_gpu_resource_create_blob, + DRM_RENDER_ALLOW), }; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 75d0dc2f6d28..b4ec479c32cd 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, vgdev->capsets[i].id > 0, 5 * HZ); if (ret == 0) { DRM_ERROR("timed out waiting for cap set %d\n", i); + spin_lock(&vgdev->display_info_lock); kfree(vgdev->capsets); vgdev->capsets = NULL; + spin_unlock(&vgdev->display_info_lock); return; } DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n", @@ -103,7 +105,7 @@ int virtio_gpu_init(struct drm_device *dev) /* this will expand later */ struct virtqueue *vqs[2]; u32 num_scanouts, num_capsets; - int ret; + int ret = 0; if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1)) return -ENODEV; @@ -119,6 +121,7 @@ int virtio_gpu_init(struct drm_device *dev) spin_lock_init(&vgdev->display_info_lock); spin_lock_init(&vgdev->resource_export_lock); + 
spin_lock_init(&vgdev->host_visible_lock); ida_init(&vgdev->ctx_id_ida); ida_init(&vgdev->resource_ida); init_waitqueue_head(&vgdev->resp_wq); @@ -150,10 +153,33 @@ int virtio_gpu_init(struct drm_device *dev) if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) { vgdev->has_resource_assign_uuid = true; } + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) { + vgdev->has_resource_blob = true; + } + if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region, + VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) { + if (!devm_request_mem_region(&vgdev->vdev->dev, + vgdev->host_visible_region.addr, + vgdev->host_visible_region.len, + dev_name(&vgdev->vdev->dev))) { + DRM_ERROR("Could not reserve host visible region\n"); + goto err_vqs; + } - DRM_INFO("features: %cvirgl %cedid\n", - vgdev->has_virgl_3d ? '+' : '-', - vgdev->has_edid ? '+' : '-'); + DRM_INFO("Host memory window: 0x%lx +0x%lx\n", + (unsigned long)vgdev->host_visible_region.addr, + (unsigned long)vgdev->host_visible_region.len); + vgdev->has_host_visible = true; + drm_mm_init(&vgdev->host_visible_mm, + (unsigned long)vgdev->host_visible_region.addr, + (unsigned long)vgdev->host_visible_region.len); + } + + DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n", + vgdev->has_virgl_3d ? '+' : '-', + vgdev->has_edid ? '+' : '-', + vgdev->has_resource_blob ? '+' : '-', + vgdev->has_host_visible ? '+' : '-'); ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); if (ret) { @@ -184,7 +210,11 @@ int virtio_gpu_init(struct drm_device *dev) num_capsets, &num_capsets); DRM_INFO("number of cap sets: %d\n", num_capsets); - virtio_gpu_modeset_init(vgdev); + ret = virtio_gpu_modeset_init(vgdev); + if (ret) { + DRM_ERROR("modeset init failed\n"); + goto err_scanouts; + } virtio_device_ready(vgdev->vdev); @@ -236,6 +266,10 @@ void virtio_gpu_release(struct drm_device *dev) virtio_gpu_modeset_fini(vgdev); virtio_gpu_free_vbufs(vgdev); virtio_gpu_cleanup_cap_cache(vgdev); + + if (vgdev->has_host_visible) + drm_mm_takedown(&vgdev->host_visible_mm); + kfree(vgdev->capsets); kfree(vgdev); } diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 842f8b61aa89..2d3aa7baffe4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -31,8 +31,7 @@ static int virtio_gpu_virglrenderer_workaround = 1; module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); -static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, - uint32_t *resid) +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) { if (virtio_gpu_virglrenderer_workaround) { /* @@ -72,9 +71,8 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) if (shmem->pages) { if (shmem->mapped) { - dma_unmap_sg(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->mapped, - DMA_TO_DEVICE); + dma_unmap_sgtable(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE, 0); shmem->mapped = 0; } @@ -85,6 +83,18 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) } drm_gem_shmem_free_object(&bo->base.base); + } else if (virtio_gpu_is_vram(bo)) { + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + + spin_lock(&vgdev->host_visible_lock); + if (drm_mm_node_allocated(&vram->vram_node)) + drm_mm_remove_node(&vram->vram_node); + + spin_unlock(&vgdev->host_visible_lock); + + drm_gem_free_mmap_offset(&vram->base.base.base); + drm_gem_object_release(&vram->base.base.base); + kfree(vram); } } @@ -108,6 
+118,7 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = { .close = virtio_gpu_gem_object_close, .print_info = drm_gem_shmem_print_info, + .export = virtgpu_gem_prime_export, .pin = drm_gem_shmem_pin, .unpin = drm_gem_shmem_unpin, .get_sg_table = drm_gem_shmem_get_sg_table, @@ -164,13 +175,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, } if (use_dma_api) { - shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent, - shmem->pages->sgl, - shmem->pages->nents, - DMA_TO_DEVICE); - *nents = shmem->mapped; + ret = dma_map_sgtable(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE, 0); + if (ret) + return ret; + *nents = shmem->mapped = shmem->pages->nents; } else { - *nents = shmem->pages->nents; + *nents = shmem->pages->orig_nents; } *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry), @@ -180,13 +191,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, return -ENOMEM; } - for_each_sg(shmem->pages->sgl, sg, *nents, si) { - (*ents)[si].addr = cpu_to_le64(use_dma_api - ? sg_dma_address(sg) - : sg_phys(sg)); - (*ents)[si].length = cpu_to_le32(sg->length); - (*ents)[si].padding = 0; + if (use_dma_api) { + for_each_sgtable_dma_sg(shmem->pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg)); + (*ents)[si].length = cpu_to_le32(sg_dma_len(sg)); + (*ents)[si].padding = 0; + } + } else { + for_each_sgtable_sg(shmem->pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_phys(sg)); + (*ents)[si].length = cpu_to_le32(sg->length); + (*ents)[si].padding = 0; + } } + return 0; } @@ -228,21 +246,24 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, goto err_put_objs; } - if (params->virgl) { - virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, - objs, fence); - } else { - virtio_gpu_cmd_create_resource(vgdev, bo, params, - objs, fence); - } - ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents); if (ret != 0) { virtio_gpu_free_object(&shmem_obj->base); return ret; } - virtio_gpu_object_attach(vgdev, bo, ents, nents); + if (params->blob) { + virtio_gpu_cmd_resource_create_blob(vgdev, bo, params, + ents, nents); + } else if (params->virgl) { + virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, + objs, fence); + virtio_gpu_object_attach(vgdev, bo, ents, nents); + } else { + virtio_gpu_cmd_create_resource(vgdev, bo, params, + objs, fence); + virtio_gpu_object_attach(vgdev, bo, ents, nents); + } *bo_ptr = bo; return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 6a311cd93440..42ac08ed1442 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -174,12 +174,23 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, plane->state->src_h >> 16, plane->state->src_x >> 16, plane->state->src_y >> 16); - virtio_gpu_cmd_set_scanout(vgdev, output->index, - bo->hw_res_handle, - plane->state->src_w >> 16, - plane->state->src_h >> 16, - plane->state->src_x >> 16, - plane->state->src_y >> 16); + + if (bo->host3d_blob || bo->guest_blob) { + virtio_gpu_cmd_set_scanout_blob + (vgdev, output->index, bo, + plane->state->fb, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); + } else { + virtio_gpu_cmd_set_scanout(vgdev, output->index, + bo->hw_res_handle, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); + } } virtio_gpu_cmd_resource_flush(vgdev, 
bo->hw_res_handle, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index acd14ef73d56..1ef1e2f22633 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -34,8 +34,8 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); struct virtio_gpu_device *vgdev = obj->dev->dev_private; - wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING); - if (bo->uuid_state != UUID_INITIALIZED) + wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING); + if (bo->uuid_state != STATE_OK) return -ENODEV; uuid_copy(uuid, &bo->uuid); @@ -59,6 +59,24 @@ const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { .get_uuid = virtgpu_virtio_get_uuid, }; +int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) +{ + int ret; + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return -ENOMEM; + + virtio_gpu_array_add_obj(objs, &bo->base.base); + ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs); + if (ret) + return ret; + + return 0; +} + struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj, int flags) { @@ -66,22 +84,20 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj, struct drm_device *dev = obj->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); - struct virtio_gpu_object_array *objs; int ret = 0; + bool blob = bo->host3d_blob || bo->guest_blob; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - if (vgdev->has_resource_assign_uuid) { - objs = virtio_gpu_array_alloc(1); - if (!objs) - return ERR_PTR(-ENOMEM); - virtio_gpu_array_add_obj(objs, &bo->base.base); - - ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs); - if (ret) - return ERR_PTR(ret); - virtio_gpu_notify(vgdev); - } else { - bo->uuid_state = UUID_INITIALIZATION_FAILED; + if (!blob) { + if (vgdev->has_resource_assign_uuid) { + ret = virtio_gpu_resource_assign_uuid(vgdev, bo); + if (ret) + return ERR_PTR(ret); + + virtio_gpu_notify(vgdev); + } else { + bo->uuid_state = STATE_ERR; + } } exp_info.ops = &virtgpu_dmabuf_ops.ops; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index c93c2db35aaf..857f730747b6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -302,7 +302,7 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) return NULL; } - for_each_sg(sgt->sgl, sg, *sg_ents, i) { + for_each_sgtable_sg(sgt, sg, i) { pg = vmalloc_to_page(data); if (!pg) { sg_free_table(sgt); @@ -320,13 +320,13 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) return sgt; } -static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_fence *fence, - int elemcnt, - struct scatterlist **sgs, - int outcnt, - int incnt) +static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence, + int elemcnt, + struct scatterlist **sgs, + int outcnt, + int incnt) { struct virtqueue *vq = vgdev->ctrlq.vq; int ret, idx; @@ -335,7 +335,7 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, if (fence && vbuf->objs) virtio_gpu_array_unlock_resv(vbuf->objs); free_vbuf(vgdev, vbuf); - return; + return -1; } if (vgdev->has_indirect) @@ -373,15 +373,16 @@ 
again: spin_unlock(&vgdev->ctrlq.qlock); drm_dev_exit(idx); + return 0; } -static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_fence *fence) +static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence) { struct scatterlist *sgs[3], vcmd, vout, vresp; struct sg_table *sgt = NULL; - int elemcnt = 0, outcnt = 0, incnt = 0; + int elemcnt = 0, outcnt = 0, incnt = 0, ret; /* set up vcmd */ sg_init_one(&vcmd, vbuf->buf, vbuf->size); @@ -398,7 +399,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, if (!sgt) { if (fence && vbuf->objs) virtio_gpu_array_unlock_resv(vbuf->objs); - return; + return -1; } elemcnt += sg_ents; @@ -419,13 +420,14 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, incnt++; } - virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt, - incnt); + ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt, + incnt); if (sgt) { sg_free_table(sgt); kfree(sgt); } + return ret; } void virtio_gpu_notify(struct virtio_gpu_device *vgdev) @@ -444,10 +446,10 @@ void virtio_gpu_notify(struct virtio_gpu_device *vgdev) virtqueue_notify(vgdev->ctrlq.vq); } -static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) { - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL); + return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL); } static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, @@ -534,6 +536,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, { struct virtio_gpu_resource_unref *cmd_p; struct virtio_gpu_vbuffer *vbuf; + int ret; cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p), virtio_gpu_cmd_unref_cb); @@ -543,7 +546,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); vbuf->resp_cb_data = bo; - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + if (ret < 0) + virtio_gpu_cleanup_object(bo); } void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, @@ -603,9 +608,8 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) - dma_sync_sg_for_device(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->pages->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); @@ -684,9 +688,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev, int i = le32_to_cpu(cmd->capset_index); spin_lock(&vgdev->display_info_lock); - vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); - vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); - vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + if (vgdev->capsets) { + vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); + vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); + vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + } else { + DRM_ERROR("invalid capset memory."); + } spin_unlock(&vgdev->display_info_lock); 
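	/* (The timeout path in virtio_gpu_get_capsets() now frees vgdev->capsets under display_info_lock, which is why this callback re-checks the pointer under the same lock before writing.) */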
wake_up(&vgdev->resp_wq); } @@ -1008,6 +1016,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, uint32_t ctx_id, uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, struct drm_virtgpu_3d_box *box, struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) @@ -1016,12 +1026,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); - struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); - if (use_dma_api) - dma_sync_sg_for_device(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->pages->nents, - DMA_TO_DEVICE); + if (virtio_gpu_is_shmem(bo) && use_dma_api) { + struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE); + } cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); @@ -1034,6 +1044,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, convert_to_hw_box(&cmd_p->box, box); cmd_p->offset = cpu_to_le64(offset); cmd_p->level = cpu_to_le32(level); + cmd_p->stride = cpu_to_le32(stride); + cmd_p->layer_stride = cpu_to_le32(layer_stride); virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } @@ -1041,6 +1053,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, uint32_t ctx_id, uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, struct drm_virtgpu_3d_box *box, struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) @@ -1060,6 +1074,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, convert_to_hw_box(&cmd_p->box, box); cmd_p->offset = cpu_to_le64(offset); cmd_p->level = cpu_to_le32(level); + cmd_p->stride = cpu_to_le32(stride); + cmd_p->layer_stride = cpu_to_le32(layer_stride); virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); } @@ -1118,14 +1134,14 @@ static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev, uint32_t resp_type = le32_to_cpu(resp->hdr.type); spin_lock(&vgdev->resource_export_lock); - WARN_ON(obj->uuid_state != UUID_INITIALIZING); + WARN_ON(obj->uuid_state != STATE_INITIALIZING); if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID && - obj->uuid_state == UUID_INITIALIZING) { - memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b)); - obj->uuid_state = UUID_INITIALIZED; + obj->uuid_state == STATE_INITIALIZING) { + import_uuid(&obj->uuid, resp->uuid); + obj->uuid_state = STATE_OK; } else { - obj->uuid_state = UUID_INITIALIZATION_FAILED; + obj->uuid_state = STATE_ERR; } spin_unlock(&vgdev->resource_export_lock); @@ -1144,7 +1160,7 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); if (!resp_buf) { spin_lock(&vgdev->resource_export_lock); - bo->uuid_state = UUID_INITIALIZATION_FAILED; + bo->uuid_state = STATE_ERR; spin_unlock(&vgdev->resource_export_lock); virtio_gpu_array_put_free(objs); return -ENOMEM; @@ -1162,3 +1178,134 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); return 0; } + +static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct 
virtio_gpu_object *bo = + gem_to_virtio_gpu_obj(vbuf->objs->objs[0]); + struct virtio_gpu_resp_map_info *resp = + (struct virtio_gpu_resp_map_info *)vbuf->resp_buf; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + uint32_t resp_type = le32_to_cpu(resp->hdr.type); + + spin_lock(&vgdev->host_visible_lock); + + if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) { + vram->map_info = resp->map_info; + vram->map_state = STATE_OK; + } else { + vram->map_state = STATE_ERR; + } + + spin_unlock(&vgdev->host_visible_lock); + wake_up_all(&vgdev->resp_wq); +} + +int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs, uint64_t offset) +{ + struct virtio_gpu_resource_map_blob *cmd_p; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_vbuffer *vbuf; + struct virtio_gpu_resp_map_info *resp_buf; + + resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); + if (!resp_buf) { + virtio_gpu_array_put_free(objs); + return -ENOMEM; + } + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p), + sizeof(struct virtio_gpu_resp_map_info), resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->offset = cpu_to_le64(offset); + vbuf->objs = objs; + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + +void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) +{ + struct virtio_gpu_resource_unmap_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void +virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_mem_entry *ents, + uint32_t nents) +{ + struct virtio_gpu_resource_create_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB); + cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->blob_mem = cpu_to_le32(params->blob_mem); + cmd_p->blob_flags = cpu_to_le32(params->blob_flags); + cmd_p->blob_id = cpu_to_le64(params->blob_id); + cmd_p->size = cpu_to_le64(params->size); + cmd_p->nr_entries = cpu_to_le32(nents); + + vbuf->data_buf = ents; + vbuf->data_size = sizeof(*ents) * nents; + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + bo->created = true; +} + +void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev, + uint32_t scanout_id, + struct virtio_gpu_object *bo, + struct drm_framebuffer *fb, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y) +{ + uint32_t i; + struct virtio_gpu_set_scanout_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + uint32_t format = virtio_gpu_translate_format(fb->format->format); + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->scanout_id = cpu_to_le32(scanout_id); + + cmd_p->format = 
cpu_to_le32(format); + cmd_p->width = cpu_to_le32(fb->width); + cmd_p->height = cpu_to_le32(fb->height); + + for (i = 0; i < 4; i++) { + cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]); + cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]); + } + + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c new file mode 100644 index 000000000000..23c21bc4d01e --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_vram.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "virtgpu_drv.h" + +static void virtio_gpu_vram_free(struct drm_gem_object *obj) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + bool unmap; + + if (bo->created) { + spin_lock(&vgdev->host_visible_lock); + unmap = drm_mm_node_allocated(&vram->vram_node); + spin_unlock(&vgdev->host_visible_lock); + + if (unmap) + virtio_gpu_cmd_unmap(vgdev, bo); + + virtio_gpu_cmd_unref_resource(vgdev, bo); + virtio_gpu_notify(vgdev); + return; + } +} + +static const struct vm_operations_struct virtio_gpu_vram_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static int virtio_gpu_vram_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + int ret; + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + unsigned long vm_size = vma->vm_end - vma->vm_start; + + if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) + return -EINVAL; + + wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING); + if (vram->map_state != STATE_OK) + return -EINVAL; + + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + vma->vm_ops = &virtio_gpu_vram_vm_ops; + + if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* Partial mappings of GEM buffers don't happen much in practice. 
*/ + if (vm_size != vram->vram_node.size) + return -EINVAL; + + ret = io_remap_pfn_range(vma, vma->vm_start, + vram->vram_node.start >> PAGE_SHIFT, + vm_size, vma->vm_page_prot); + return ret; +} + +static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = { + .open = virtio_gpu_gem_object_open, + .close = virtio_gpu_gem_object_close, + .free = virtio_gpu_vram_free, + .mmap = virtio_gpu_vram_mmap, +}; + +bool virtio_gpu_is_vram(struct virtio_gpu_object *bo) +{ + return bo->base.base.funcs == &virtio_gpu_vram_funcs; +} + +static int virtio_gpu_vram_map(struct virtio_gpu_object *bo) +{ + int ret; + uint64_t offset; + struct virtio_gpu_object_array *objs; + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + + if (!vgdev->has_host_visible) + return -EINVAL; + + spin_lock(&vgdev->host_visible_lock); + ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node, + bo->base.base.size); + spin_unlock(&vgdev->host_visible_lock); + + if (ret) + return ret; + + objs = virtio_gpu_array_alloc(1); + if (!objs) { + ret = -ENOMEM; + goto err_remove_node; + } + + virtio_gpu_array_add_obj(objs, &bo->base.base); + /*TODO: Add an error checking helper function in drm_mm.h */ + offset = vram->vram_node.start - vgdev->host_visible_region.addr; + + ret = virtio_gpu_cmd_map(vgdev, objs, offset); + if (ret) { + virtio_gpu_array_put_free(objs); + goto err_remove_node; + } + + return 0; + +err_remove_node: + spin_lock(&vgdev->host_visible_lock); + drm_mm_remove_node(&vram->vram_node); + spin_unlock(&vgdev->host_visible_lock); + return ret; +} + +int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr) +{ + struct drm_gem_object *obj; + struct virtio_gpu_object_vram *vram; + int ret; + + vram = kzalloc(sizeof(*vram), GFP_KERNEL); + if (!vram) + return -ENOMEM; + + obj = &vram->base.base.base; + obj->funcs = &virtio_gpu_vram_funcs; + drm_gem_private_object_init(vgdev->ddev, obj, params->size); + + /* Create fake offset */ + ret = drm_gem_create_mmap_offset(obj); + if (ret) { + kfree(vram); + return ret; + } + + ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle); + if (ret) { + kfree(vram); + return ret; + } + + virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL, + 0); + if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) { + ret = virtio_gpu_vram_map(&vram->base); + if (ret) { + virtio_gpu_vram_free(obj); + return ret; + } + } + + *bo_ptr = &vram->base; + return 0; +} diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 0b767d7efa24..72f779cbfedd 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -1,4 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only -vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_composer.o +vkms-y := \ + vkms_drv.o \ + vkms_plane.o \ + vkms_output.o \ + vkms_crtc.o \ + vkms_composer.o \ + vkms_writeback.o obj-$(CONFIG_DRM_VKMS) += vkms.o diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index eaecc5a6c5db..66c6842d70db 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -5,35 +5,46 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_vblank.h> #include "vkms_drv.h" +static u32 get_pixel_from_buffer(int x, 
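/*
 * This helper fetches a single 32-bit pixel from the vmapped buffer. For
 * an XRGB8888 framebuffer the arithmetic is simply (numbers purely
 * illustrative):
 *
 *     int cpp = 4;                // bytes per pixel
 *     int pitch = 1024 * cpp;     // bytes per scanline at 1024px width
 *     int off = offset + 10 * pitch + 7 * cpp;  // pixel (x=7, y=10)
 */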
int y, const u8 *buffer, + const struct vkms_composer *composer) +{ + u32 pixel; + int src_offset = composer->offset + (y * composer->pitch) + + (x * composer->cpp); + + pixel = *(u32 *)&buffer[src_offset]; + + return pixel; +} + /** * compute_crc - Compute CRC value on output frame * - * @vaddr_out: address to final framebuffer + * @vaddr: address to final framebuffer * @composer: framebuffer's metadata * * returns CRC value computed using crc32 on the visible portion of * the final framebuffer at vaddr_out */ -static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer) +static uint32_t compute_crc(const u8 *vaddr, + const struct vkms_composer *composer) { - int i, j, src_offset; + int x, y; + u32 crc = 0, pixel = 0; int x_src = composer->src.x1 >> 16; int y_src = composer->src.y1 >> 16; int h_src = drm_rect_height(&composer->src) >> 16; int w_src = drm_rect_width(&composer->src) >> 16; - u32 crc = 0; - - for (i = y_src; i < y_src + h_src; ++i) { - for (j = x_src; j < x_src + w_src; ++j) { - src_offset = composer->offset - + (i * composer->pitch) - + (j * composer->cpp); - crc = crc32_le(crc, vaddr_out + src_offset, - sizeof(u32)); + + for (y = y_src; y < y_src + h_src; ++y) { + for (x = x_src; x < x_src + w_src; ++x) { + pixel = get_pixel_from_buffer(x, y, vaddr, composer); + crc = crc32_le(crc, (void *)&pixel, sizeof(u32)); } } @@ -119,47 +130,43 @@ static void compose_cursor(struct vkms_composer *cursor_composer, void *vaddr_out) { struct drm_gem_object *cursor_obj; - struct vkms_gem_object *cursor_vkms_obj; + struct drm_gem_shmem_object *cursor_shmem_obj; cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0); - cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj); + cursor_shmem_obj = to_drm_gem_shmem_obj(cursor_obj); - if (WARN_ON(!cursor_vkms_obj->vaddr)) + if (WARN_ON(!cursor_shmem_obj->vaddr)) return; - blend(vaddr_out, cursor_vkms_obj->vaddr, + blend(vaddr_out, cursor_shmem_obj->vaddr, primary_composer, cursor_composer); } -static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer, - struct vkms_composer *cursor_composer) +static int compose_planes(void **vaddr_out, + struct vkms_composer *primary_composer, + struct vkms_composer *cursor_composer) { struct drm_framebuffer *fb = &primary_composer->fb; struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); - struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj); - void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL); - u32 crc = 0; + struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj); - if (!vaddr_out) { - DRM_ERROR("Failed to allocate memory for output frame."); - return 0; + if (!*vaddr_out) { + *vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL); + if (!*vaddr_out) { + DRM_ERROR("Cannot allocate memory for output frame."); + return -ENOMEM; + } } - if (WARN_ON(!vkms_obj->vaddr)) { - kfree(vaddr_out); - return crc; - } + if (WARN_ON(!shmem_obj->vaddr)) + return -EINVAL; - memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size); + memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size); if (cursor_composer) - compose_cursor(cursor_composer, primary_composer, vaddr_out); - - crc = compute_crc(vaddr_out, primary_composer); - - kfree(vaddr_out); + compose_cursor(cursor_composer, primary_composer, *vaddr_out); - return crc; + return 0; } /** @@ -180,14 +187,17 @@ void vkms_composer_worker(struct work_struct *work) struct vkms_output *out = drm_crtc_to_vkms_output(crtc); struct vkms_composer *primary_composer = NULL; struct vkms_composer *cursor_composer = NULL; + 
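/*
 * vkms_composer_worker() now feeds two consumers, CRC clients and the new
 * writeback connector. Both pending flags are sampled and cleared under
 * composer_lock so the vblank hrtimer can keep setting them concurrently;
 * the snapshot pattern below boils down to:
 *
 *     spin_lock_irq(&out->composer_lock);
 *     crc_pending = crtc_state->crc_pending;
 *     wb_pending  = crtc_state->wb_pending;
 *     spin_unlock_irq(&out->composer_lock);
 */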
bool crc_pending, wb_pending; + void *vaddr_out = NULL; u32 crc32 = 0; u64 frame_start, frame_end; - bool crc_pending; + int ret; spin_lock_irq(&out->composer_lock); frame_start = crtc_state->frame_start; frame_end = crtc_state->frame_end; crc_pending = crtc_state->crc_pending; + wb_pending = crtc_state->wb_pending; crtc_state->frame_start = 0; crtc_state->frame_end = 0; crtc_state->crc_pending = false; @@ -206,8 +216,29 @@ void vkms_composer_worker(struct work_struct *work) if (crtc_state->num_active_planes == 2) cursor_composer = crtc_state->active_planes[1]->composer; - if (primary_composer) - crc32 = _vkms_get_crc(primary_composer, cursor_composer); + if (!primary_composer) + return; + + if (wb_pending) + vaddr_out = crtc_state->active_writeback; + + ret = compose_planes(&vaddr_out, primary_composer, cursor_composer); + if (ret) { + if (ret == -EINVAL && !wb_pending) + kfree(vaddr_out); + return; + } + + crc32 = compute_crc(vaddr_out, primary_composer); + + if (wb_pending) { + drm_writeback_signal_completion(&out->wb_connector, 0); + spin_lock_irq(&out->composer_lock); + crtc_state->wb_pending = false; + spin_unlock_irq(&out->composer_lock); + } else { + kfree(vaddr_out); + } /* * The worker can fall behind the vblank hrtimer, make sure we catch up. @@ -256,7 +287,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name, return 0; } -static void vkms_set_composer(struct vkms_output *out, bool enabled) +void vkms_set_composer(struct vkms_output *out, bool enabled) { bool old_enabled; diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 09c012d54d58..0443b7deeaef 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -168,9 +168,11 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = { }; static int vkms_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state); struct drm_plane *plane; struct drm_plane_state *plane_state; int i = 0, ret; @@ -178,12 +180,12 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, if (vkms_state->active_planes) return 0; - ret = drm_atomic_add_affected_planes(state->state, crtc); + ret = drm_atomic_add_affected_planes(crtc_state->state, crtc); if (ret < 0) return ret; - drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(state->state, + drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { + plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); WARN_ON(!plane_state); @@ -199,8 +201,8 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, vkms_state->num_active_planes = i; i = 0; - drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) { - plane_state = drm_atomic_get_existing_plane_state(state->state, + drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { + plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); if (!plane_state->visible) @@ -214,19 +216,19 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc, } static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { drm_crtc_vblank_on(crtc); } static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, - struct 
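/*
 * These CRTC helper callbacks were switched from receiving a single
 * drm_crtc_state to receiving the whole drm_atomic_state; a callback that
 * still needs its own state recovers it with the accessor:
 *
 *     struct drm_crtc_state *crtc_state =
 *             drm_atomic_get_new_crtc_state(state, crtc);
 */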
drm_crtc_state *old_state) + struct drm_atomic_state *state) { drm_crtc_vblank_off(crtc); } static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); @@ -237,7 +239,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, } static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83dd5567de8b..25faba5aac08 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -23,6 +23,7 @@ #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_vblank.h> #include "vkms_drv.h" @@ -39,31 +40,12 @@ bool enable_cursor = true; module_param_named(enable_cursor, enable_cursor, bool, 0444); MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support"); -static const struct file_operations vkms_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = drm_gem_mmap, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .poll = drm_poll, - .read = drm_read, - .llseek = no_llseek, - .release = drm_release, -}; - -static const struct vm_operations_struct vkms_gem_vm_ops = { - .fault = vkms_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; +DEFINE_DRM_GEM_FOPS(vkms_driver_fops); static void vkms_release(struct drm_device *dev) { struct vkms_device *vkms = container_of(dev, struct vkms_device, drm); - platform_device_unregister(vkms->platform); - drm_atomic_helper_shutdown(&vkms->drm); - drm_mode_config_cleanup(&vkms->drm); destroy_workqueue(vkms->output.composer_workq); } @@ -100,11 +82,8 @@ static struct drm_driver vkms_driver = { .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, .release = vkms_release, .fops = &vkms_driver_fops, - .dumb_create = vkms_dumb_create, - .gem_vm_ops = &vkms_gem_vm_ops, - .gem_free_object_unlocked = vkms_gem_free_object, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import_sg_table = vkms_prime_import_sg_table, + .gem_create_object = drm_gem_shmem_create_object_cached, + DRM_GEM_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -135,7 +114,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev) dev->mode_config.max_height = YRES_MAX; dev->mode_config.cursor_width = 512; dev->mode_config.cursor_height = 512; - dev->mode_config.preferred_depth = 24; + dev->mode_config.preferred_depth = 32; dev->mode_config.helper_private = &vkms_mode_config_helpers; return vkms_output_init(vkmsdev, 0); @@ -144,30 +123,31 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev) static int __init vkms_init(void) { int ret; + struct platform_device *pdev; - vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL); - if (!vkms_device) - return -ENOMEM; + pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); - vkms_device->platform = - platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); - if (IS_ERR(vkms_device->platform)) { - ret = PTR_ERR(vkms_device->platform); - goto out_free; + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_unregister; } - ret = drm_dev_init(&vkms_device->drm, &vkms_driver, - 
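/*
 * The open-coded drm_dev_init()/drm_dev_put() dance is being replaced by
 * devm_drm_dev_alloc(), which ties the drm_device's lifetime to the
 * platform device. Because vkms has no bus driver to unwind devres for it,
 * init opens an anonymous devres group so that exit can release everything
 * device-managed in one step before unregistering:
 *
 *     devres_release_group(&pdev->dev, NULL);
 *     platform_device_unregister(pdev);
 */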
&vkms_device->platform->dev); - if (ret) - goto out_unregister; - drmm_add_final_kfree(&vkms_device->drm, vkms_device); + vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver, + struct vkms_device, drm); + if (IS_ERR(vkms_device)) { + ret = PTR_ERR(vkms_device); + goto out_devres; + } + vkms_device->platform = pdev; ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, DMA_BIT_MASK(64)); if (ret) { DRM_ERROR("Could not initialize DMA support\n"); - goto out_put; + goto out_devres; } vkms_device->drm.irq_enabled = true; @@ -175,39 +155,43 @@ static int __init vkms_init(void) ret = drm_vblank_init(&vkms_device->drm, 1); if (ret) { DRM_ERROR("Failed to vblank\n"); - goto out_put; + goto out_devres; } ret = vkms_modeset_init(vkms_device); if (ret) - goto out_put; + goto out_devres; ret = drm_dev_register(&vkms_device->drm, 0); if (ret) - goto out_put; + goto out_devres; + + drm_fbdev_generic_setup(&vkms_device->drm, 0); return 0; -out_put: - drm_dev_put(&vkms_device->drm); - platform_device_unregister(vkms_device->platform); - return ret; +out_devres: + devres_release_group(&pdev->dev, NULL); out_unregister: - platform_device_unregister(vkms_device->platform); -out_free: - kfree(vkms_device); + platform_device_unregister(pdev); return ret; } static void __exit vkms_exit(void) { + struct platform_device *pdev; + if (!vkms_device) { DRM_INFO("vkms_device is NULL.\n"); return; } + pdev = vkms_device->platform; + drm_dev_unregister(&vkms_device->drm); - drm_dev_put(&vkms_device->drm); + drm_atomic_helper_shutdown(&vkms_device->drm); + devres_release_group(&pdev->dev, NULL); + platform_device_unregister(pdev); } module_init(vkms_init); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index f4036bb0b9a8..5ed91ff08cb3 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -8,6 +8,7 @@ #include <drm/drm.h> #include <drm/drm_gem.h> #include <drm/drm_encoder.h> +#include <drm/drm_writeback.h> #define XRES_MIN 20 #define YRES_MIN 20 @@ -52,9 +53,11 @@ struct vkms_crtc_state { int num_active_planes; /* stack of active planes for crc computation, should be in z order */ struct vkms_plane_state **active_planes; + void *active_writeback; - /* below three are protected by vkms_output.composer_lock */ + /* below four are protected by vkms_output.composer_lock */ bool crc_pending; + bool wb_pending; u64 frame_start; u64 frame_end; }; @@ -63,6 +66,7 @@ struct vkms_output { struct drm_crtc crtc; struct drm_encoder encoder; struct drm_connector connector; + struct drm_writeback_connector wb_connector; struct hrtimer vblank_hrtimer; ktime_t period_ns; struct drm_pending_vblank_event *event; @@ -84,23 +88,12 @@ struct vkms_device { struct vkms_output output; }; -struct vkms_gem_object { - struct drm_gem_object gem; - struct mutex pages_lock; /* Page lock used in page fault handler */ - struct page **pages; - unsigned int vmap_count; - void *vaddr; -}; - #define drm_crtc_to_vkms_output(target) \ container_of(target, struct vkms_output, crtc) #define drm_device_to_vkms_device(target) \ container_of(target, struct vkms_device, drm) -#define drm_gem_to_vkms_gem(target)\ - container_of(target, struct vkms_gem_object, gem) - #define to_vkms_crtc_state(target)\ container_of(target, struct vkms_crtc_state, base) @@ -116,24 +109,6 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index); struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, enum drm_plane_type type, int index); -/* Gem stuff */ -vm_fault_t vkms_gem_fault(struct vm_fault 
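/*
 * The GEM declarations deleted here (fault handler, dumb_create, vmap,
 * prime import) are all provided by the shmem helper now. In this era
 * DRM_GEM_SHMEM_DRIVER_OPS expands to roughly:
 *
 *     .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
 *     .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
 *     .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *     .gem_prime_mmap            = drm_gem_prime_mmap,
 *     .dumb_create               = drm_gem_shmem_dumb_create,
 */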
*vmf); - -int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, - struct drm_mode_create_dumb *args); - -void vkms_gem_free_object(struct drm_gem_object *obj); - -int vkms_gem_vmap(struct drm_gem_object *obj); - -void vkms_gem_vunmap(struct drm_gem_object *obj); - -/* Prime */ -struct drm_gem_object * -vkms_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg); - /* CRC Support */ const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, size_t *count); @@ -143,5 +118,9 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name, /* Composer Support */ void vkms_composer_worker(struct work_struct *work); +void vkms_set_composer(struct vkms_output *out, bool enabled); + +/* Writeback */ +int vkms_enable_writeback_connector(struct vkms_device *vkmsdev); #endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c deleted file mode 100644 index a017fc59905e..000000000000 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ /dev/null @@ -1,248 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ - -#include <linux/dma-buf.h> -#include <linux/shmem_fs.h> -#include <linux/vmalloc.h> -#include <drm/drm_prime.h> - -#include "vkms_drv.h" - -static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev, - u64 size) -{ - struct vkms_gem_object *obj; - int ret; - - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (!obj) - return ERR_PTR(-ENOMEM); - - size = roundup(size, PAGE_SIZE); - ret = drm_gem_object_init(dev, &obj->gem, size); - if (ret) { - kfree(obj); - return ERR_PTR(ret); - } - - mutex_init(&obj->pages_lock); - - return obj; -} - -void vkms_gem_free_object(struct drm_gem_object *obj) -{ - struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object, - gem); - - WARN_ON(gem->pages); - WARN_ON(gem->vaddr); - - mutex_destroy(&gem->pages_lock); - drm_gem_object_release(obj); - kfree(gem); -} - -vm_fault_t vkms_gem_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct vkms_gem_object *obj = vma->vm_private_data; - unsigned long vaddr = vmf->address; - pgoff_t page_offset; - loff_t num_pages; - vm_fault_t ret = VM_FAULT_SIGBUS; - - page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT; - num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE); - - if (page_offset > num_pages) - return VM_FAULT_SIGBUS; - - mutex_lock(&obj->pages_lock); - if (obj->pages) { - get_page(obj->pages[page_offset]); - vmf->page = obj->pages[page_offset]; - ret = 0; - } - mutex_unlock(&obj->pages_lock); - if (ret) { - struct page *page; - struct address_space *mapping; - - mapping = file_inode(obj->gem.filp)->i_mapping; - page = shmem_read_mapping_page(mapping, page_offset); - - if (!IS_ERR(page)) { - vmf->page = page; - ret = 0; - } else { - switch (PTR_ERR(page)) { - case -ENOSPC: - case -ENOMEM: - ret = VM_FAULT_OOM; - break; - case -EBUSY: - ret = VM_FAULT_RETRY; - break; - case -EFAULT: - case -EINVAL: - ret = VM_FAULT_SIGBUS; - break; - default: - WARN_ON(PTR_ERR(page)); - ret = VM_FAULT_SIGBUS; - break; - } - } - } - return ret; -} - -static struct drm_gem_object *vkms_gem_create(struct drm_device *dev, - struct drm_file *file, - u32 *handle, - u64 size) -{ - struct vkms_gem_object *obj; - int ret; - - if (!file || !dev || !handle) - return ERR_PTR(-EINVAL); - - obj = __vkms_gem_create(dev, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - ret = drm_gem_handle_create(file, &obj->gem, handle); - if (ret) - return ERR_PTR(ret); - - return &obj->gem; -} - -int 
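/*
 * For comparison, drm_gem_shmem_dumb_create() computes essentially the
 * same layout this deleted code did:
 *
 *     args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *     args->size  = args->pitch * args->height;
 *
 * so userspace sees unchanged dumb-buffer geometry after the switch.
 */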
vkms_dumb_create(struct drm_file *file, struct drm_device *dev, - struct drm_mode_create_dumb *args) -{ - struct drm_gem_object *gem_obj; - u64 pitch, size; - - if (!args || !dev || !file) - return -EINVAL; - - pitch = args->width * DIV_ROUND_UP(args->bpp, 8); - size = pitch * args->height; - - if (!size) - return -EINVAL; - - gem_obj = vkms_gem_create(dev, file, &args->handle, size); - if (IS_ERR(gem_obj)) - return PTR_ERR(gem_obj); - - args->size = gem_obj->size; - args->pitch = pitch; - - drm_gem_object_put(gem_obj); - - DRM_DEBUG_DRIVER("Created object of size %lld\n", size); - - return 0; -} - -static struct page **_get_pages(struct vkms_gem_object *vkms_obj) -{ - struct drm_gem_object *gem_obj = &vkms_obj->gem; - - if (!vkms_obj->pages) { - struct page **pages = drm_gem_get_pages(gem_obj); - - if (IS_ERR(pages)) - return pages; - - if (cmpxchg(&vkms_obj->pages, NULL, pages)) - drm_gem_put_pages(gem_obj, pages, false, true); - } - - return vkms_obj->pages; -} - -void vkms_gem_vunmap(struct drm_gem_object *obj) -{ - struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj); - - mutex_lock(&vkms_obj->pages_lock); - if (vkms_obj->vmap_count < 1) { - WARN_ON(vkms_obj->vaddr); - WARN_ON(vkms_obj->pages); - mutex_unlock(&vkms_obj->pages_lock); - return; - } - - vkms_obj->vmap_count--; - - if (vkms_obj->vmap_count == 0) { - vunmap(vkms_obj->vaddr); - vkms_obj->vaddr = NULL; - drm_gem_put_pages(obj, vkms_obj->pages, false, true); - vkms_obj->pages = NULL; - } - - mutex_unlock(&vkms_obj->pages_lock); -} - -int vkms_gem_vmap(struct drm_gem_object *obj) -{ - struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj); - int ret = 0; - - mutex_lock(&vkms_obj->pages_lock); - - if (!vkms_obj->vaddr) { - unsigned int n_pages = obj->size >> PAGE_SHIFT; - struct page **pages = _get_pages(vkms_obj); - - if (IS_ERR(pages)) { - ret = PTR_ERR(pages); - goto out; - } - - vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL); - if (!vkms_obj->vaddr) - goto err_vmap; - } - - vkms_obj->vmap_count++; - goto out; - -err_vmap: - ret = -ENOMEM; - drm_gem_put_pages(obj, vkms_obj->pages, false, true); - vkms_obj->pages = NULL; -out: - mutex_unlock(&vkms_obj->pages_lock); - return ret; -} - -struct drm_gem_object * -vkms_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg) -{ - struct vkms_gem_object *obj; - int npages; - - obj = __vkms_gem_create(dev, attach->dmabuf->size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE; - DRM_DEBUG_PRIME("Importing %d pages\n", npages); - - obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); - if (!obj->pages) { - vkms_gem_free_object(&obj->gem); - return ERR_PTR(-ENOMEM); - } - - drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); - return &obj->gem; -} diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 85afb77e97f0..4a1848b0318f 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -80,6 +80,10 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) goto err_attach; } + ret = vkms_enable_writeback_connector(vkmsdev); + if (ret) + DRM_ERROR("Failed to init writeback connector\n"); + drm_mode_config_reset(dev); return 0; diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 6d31265a2ab7..9890137bcb8d 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -5,6 +5,7 @@ #include 
<drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_gem_shmem_helper.h> #include "vkms_drv.h" @@ -145,15 +146,15 @@ static int vkms_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { struct drm_gem_object *gem_obj; - int ret; + void *vaddr; if (!state->fb) return 0; gem_obj = drm_gem_fb_get_obj(state->fb, 0); - ret = vkms_gem_vmap(gem_obj); - if (ret) - DRM_ERROR("vmap failed: %d\n", ret); + vaddr = drm_gem_shmem_vmap(gem_obj); + if (IS_ERR(vaddr)) + DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr)); return drm_gem_fb_prepare_fb(plane, state); } @@ -162,12 +163,14 @@ static void vkms_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct drm_gem_object *gem_obj; + struct drm_gem_shmem_object *shmem_obj; if (!old_state->fb) return; gem_obj = drm_gem_fb_get_obj(old_state->fb, 0); - vkms_gem_vunmap(gem_obj); + shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0)); + drm_gem_shmem_vunmap(gem_obj, shmem_obj->vaddr); } static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c new file mode 100644 index 000000000000..26b903926872 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "vkms_drv.h" +#include <drm/drm_fourcc.h> +#include <drm/drm_writeback.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> + +static const u32 vkms_wb_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +static const struct drm_connector_funcs vkms_wb_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_framebuffer *fb; + const struct drm_display_mode *mode = &crtc_state->mode; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + if (fb->format->format != vkms_wb_formats[0]) { + struct drm_format_name_buf format_name; + + DRM_DEBUG_KMS("Invalid pixel format %s\n", + drm_get_format_name(fb->format->format, + &format_name)); + return -EINVAL; + } + + return 0; +} + +static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = { + .atomic_check = vkms_wb_encoder_atomic_check, +}; + +static int vkms_wb_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + return drm_add_modes_noedid(connector, dev->mode_config.max_width, + dev->mode_config.max_height); +} + +static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job) +{ + struct drm_gem_object *gem_obj; + void *vaddr; + + if (!job->fb) + return 0; + + gem_obj = drm_gem_fb_get_obj(job->fb, 0); + vaddr = drm_gem_shmem_vmap(gem_obj); + if (IS_ERR(vaddr)) { + DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr)); + return 
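/*
 * Writeback job lifecycle in this driver: prepare_writeback_job vmaps the
 * target buffer and parks the kernel address in job->priv, atomic_commit
 * hands it to the composer worker, and cleanup_writeback_job undoes the
 * mapping. The pairing is just:
 *
 *     job->priv = drm_gem_shmem_vmap(gem_obj);
 *     ...
 *     drm_gem_shmem_vunmap(gem_obj, job->priv);
 */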
PTR_ERR(vaddr); + } + + job->priv = vaddr; + + return 0; +} + +static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct drm_gem_object *gem_obj; + struct vkms_device *vkmsdev; + + if (!job->fb) + return; + + gem_obj = drm_gem_fb_get_obj(job->fb, 0); + drm_gem_shmem_vunmap(gem_obj, job->priv); + + vkmsdev = drm_device_to_vkms_device(gem_obj->dev); + vkms_set_composer(&vkmsdev->output, false); +} + +static void vkms_wb_atomic_commit(struct drm_connector *conn, + struct drm_connector_state *state) +{ + struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev); + struct vkms_output *output = &vkmsdev->output; + struct drm_writeback_connector *wb_conn = &output->wb_connector; + struct drm_connector_state *conn_state = wb_conn->base.state; + struct vkms_crtc_state *crtc_state = output->composer_state; + + if (!conn_state) + return; + + vkms_set_composer(&vkmsdev->output, true); + + spin_lock_irq(&output->composer_lock); + crtc_state->active_writeback = conn_state->writeback_job->priv; + crtc_state->wb_pending = true; + spin_unlock_irq(&output->composer_lock); + drm_writeback_queue_job(wb_conn, state); +} + +static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = { + .get_modes = vkms_wb_connector_get_modes, + .prepare_writeback_job = vkms_wb_prepare_job, + .cleanup_writeback_job = vkms_wb_cleanup_job, + .atomic_commit = vkms_wb_atomic_commit, +}; + +int vkms_enable_writeback_connector(struct vkms_device *vkmsdev) +{ + struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector; + + vkmsdev->output.wb_connector.encoder.possible_crtcs = 1; + drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs); + + return drm_writeback_connector_init(&vkmsdev->drm, wb, + &vkms_wb_connector_funcs, + &vkms_wb_encoder_helper_funcs, + vkms_wb_formats, + ARRAY_SIZE(vkms_wb_formats)); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 1629427d5734..f21881e087db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -459,19 +459,19 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, int ret = 0; /* Buffer objects need to be either pinned or reserved: */ - if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT)) + if (!(dst->pin_count)) dma_resv_assert_held(dst->base.resv); - if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT)) + if (!(src->pin_count)) dma_resv_assert_held(src->base.resv); - if (dst->ttm->state == tt_unpopulated) { - ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx); + if (!ttm_tt_is_populated(dst->ttm)) { + ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); if (ret) return ret; } - if (src->ttm->state == tt_unpopulated) { - ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx); + if (!ttm_tt_is_populated(src->ttm)) { + ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx); if (ret) return ret; } @@ -484,8 +484,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, d.src_pages = src->ttm->pages; d.dst_num_pages = dst->num_pages; d.src_num_pages = src->num_pages; - d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL); - d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL); + d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL); + d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL); d.diff = diff; for (j = 0; j < h; ++j) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 3229451d0706..263d76ae43f0 100644 --- 
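/*
 * From here the vmwgfx changes track two TTM core reworks: the
 * driver-visible TTM_PL_FLAG_NO_EVICT is gone in favour of the core
 * pin_count in ttm_buffer_object, and ttm_tt population state is queried
 * via ttm_tt_is_populated(). Pinning now follows the common pattern
 * (sketch; error handling omitted, the BO must be reserved around the
 * pin):
 *
 *     ttm_bo_reserve(bo, false, false, NULL);
 *     ttm_bo_pin(bo);
 *     ttm_bo_unreserve(bo);
 */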
a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -106,7 +106,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; - if (buf->pin_count > 0) + if (buf->base.pin_count > 0) ret = ttm_bo_mem_compat(placement, &bo->mem, &new_flags) == true ? 0 : -EINVAL; else @@ -155,7 +155,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, if (unlikely(ret != 0)) goto err; - if (buf->pin_count > 0) { + if (buf->base.pin_count > 0) { ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem, &new_flags) == true ? 0 : -EINVAL; goto out_unreserve; @@ -246,12 +246,12 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, if (bo->mem.mem_type == TTM_PL_VRAM && bo->mem.start < bo->num_pages && bo->mem.start > 0 && - buf->pin_count == 0) { + buf->base.pin_count == 0) { ctx.interruptible = false; (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx); } - if (buf->pin_count > 0) + if (buf->base.pin_count > 0) ret = ttm_bo_mem_compat(&placement, &bo->mem, &new_flags) == true ? 0 : -EINVAL; else @@ -343,21 +343,13 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin) dma_resv_assert_held(bo->base.resv); - if (pin) { - if (vbo->pin_count++ > 0) - return; - } else { - WARN_ON(vbo->pin_count <= 0); - if (--vbo->pin_count > 0) - return; - } + if (pin == !!bo->pin_count) + return; pl.fpfn = 0; pl.lpfn = 0; - pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB - | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; - if (pin) - pl.flags |= TTM_PL_FLAG_NO_EVICT; + pl.mem_type = bo->mem.mem_type; + pl.flags = bo->mem.placement; memset(&placement, 0, sizeof(placement)); placement.num_placement = 1; @@ -366,8 +358,12 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin) ret = ttm_bo_validate(bo, &placement, &ctx); BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); -} + if (pin) + ttm_bo_pin(bo); + else + ttm_bo_unpin(bo); +} /** * vmw_bo_map_and_cache - Map a buffer object and cache the map @@ -485,6 +481,49 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) ttm_prime_object_kfree(vmw_user_bo, prime); } +/** + * vmw_bo_create_kernel - Create a pinned BO for internal kernel use. + * + * @dev_priv: Pointer to the device private struct + * @size: size of the BO we need + * @placement: where to put it + * @p_bo: resulting BO + * + * Creates and pin a simple BO for in kernel use. + */ +int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, + struct ttm_placement *placement, + struct ttm_buffer_object **p_bo) +{ + unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct ttm_operation_ctx ctx = { false, false }; + struct ttm_buffer_object *bo; + size_t acc_size; + int ret; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (unlikely(!bo)) + return -ENOMEM; + + acc_size = ttm_round_pot(sizeof(*bo)); + acc_size += ttm_round_pot(npages * sizeof(void *)); + acc_size += ttm_round_pot(sizeof(struct ttm_tt)); + ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, + ttm_bo_type_device, placement, 0, + &ctx, acc_size, NULL, NULL, NULL); + if (unlikely(ret)) + goto error_free; + + ttm_bo_pin(bo); + ttm_bo_unreserve(bo); + *p_bo = bo; + + return 0; + +error_free: + kfree(bo); + return ret; +} /** * vmw_bo_init - Initialize a vmw buffer object @@ -494,6 +533,7 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) * @size: Buffer object size in bytes. * @placement: Initial placement. * @interruptible: Whether waits should be performed interruptible. 
+ * @pin: If the BO should be created pinned at a fixed location. * @bo_free: The buffer object destructor. * Returns: Zero on success, negative error code on error. * @@ -502,9 +542,10 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) int vmw_bo_init(struct vmw_private *dev_priv, struct vmw_buffer_object *vmw_bo, size_t size, struct ttm_placement *placement, - bool interruptible, + bool interruptible, bool pin, void (*bo_free)(struct ttm_buffer_object *bo)) { + struct ttm_operation_ctx ctx = { interruptible, false }; struct ttm_bo_device *bdev = &dev_priv->bdev; size_t acc_size; int ret; @@ -518,11 +559,16 @@ int vmw_bo_init(struct vmw_private *dev_priv, vmw_bo->base.priority = 3; vmw_bo->res_tree = RB_ROOT; - ret = ttm_bo_init(bdev, &vmw_bo->base, size, - ttm_bo_type_device, placement, - 0, interruptible, acc_size, - NULL, NULL, bo_free); - return ret; + ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size, + ttm_bo_type_device, placement, + 0, &ctx, acc_size, NULL, NULL, bo_free); + if (unlikely(ret)) + return ret; + + if (pin) + ttm_bo_pin(&vmw_bo->base); + ttm_bo_unreserve(&vmw_bo->base); + return 0; } @@ -611,7 +657,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv, ret = vmw_bo_init(dev_priv, &user_bo->vbo, size, (dev_priv->has_mob) ? &vmw_sys_placement : - &vmw_vram_sys_placement, true, + &vmw_vram_sys_placement, true, false, &vmw_user_bo_destroy); if (unlikely(ret != 0)) return ret; @@ -1146,9 +1192,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, { struct vmw_buffer_object *vbo; - if (mem == NULL) - return; - /* Make sure @bo is embedded in a struct vmw_buffer_object? */ if (bo->destroy != vmw_bo_bo_free && bo->destroy != vmw_user_bo_destroy) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 3b41cf63110a..9a9fe10d829b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -1245,9 +1245,9 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, !dev_priv->has_mob) return -ENOMEM; - ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device, - &vmw_mob_ne_placement, 0, false, - &man->cmd_space); + ret = vmw_bo_create_kernel(dev_priv, size, + &vmw_mob_placement, + &man->cmd_space); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 65e8e7a97724..984d8884357d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -410,8 +410,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) if (!buf) return -ENOMEM; - ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement, - true, vmw_bo_bo_free); + ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement, + true, true, vmw_bo_bo_free); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index e60012886065..0c42d2c05f43 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -372,7 +372,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) return -ENOMEM; ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, - &vmw_sys_ne_placement, false, + &vmw_sys_placement, false, true, &vmw_bo_bo_free); if (unlikely(ret != 0)) return ret; @@ -468,7 +468,10 @@ out_no_query_bo: if (dev_priv->cman) vmw_cmdbuf_remove_pool(dev_priv->cman); if (dev_priv->has_mob) { - (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); + struct 
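/*
 * ttm_bo_evict_mm() took a memory-type index; its replacement works on the
 * manager object, so each caller now does the lookup that follows:
 *
 *     man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
 *     ttm_resource_manager_evict_all(&dev_priv->bdev, man);
 */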
ttm_resource_manager *man; + + man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); + ttm_resource_manager_evict_all(&dev_priv->bdev, man); vmw_otables_takedown(dev_priv); } if (dev_priv->cman) @@ -501,7 +504,10 @@ static void vmw_release_device_early(struct vmw_private *dev_priv) vmw_cmdbuf_remove_pool(dev_priv->cman); if (dev_priv->has_mob) { - ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); + ttm_resource_manager_evict_all(&dev_priv->bdev, man); vmw_otables_takedown(dev_priv); } } @@ -589,10 +595,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) else dev_priv->map_mode = vmw_dma_map_populate; - if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) && - (dev_priv->map_mode == vmw_dma_alloc_coherent)) - return -EINVAL; - DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); return 0; } @@ -626,9 +628,8 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv) #ifdef CONFIG_TRANSPARENT_HUGEPAGE ret = vmw_thp_init(dev_priv); #else - ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, - TTM_PL_FLAG_CACHED, TTM_PL_FLAG_CACHED, - false, dev_priv->vram_size >> PAGE_SHIFT); + ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false, + dev_priv->vram_size >> PAGE_SHIFT); #endif ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); return ret; @@ -793,8 +794,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (unlikely(ret != 0)) goto out_err0; - dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK, - SCATTERLIST_MAX_SEGMENT)); + dma_set_max_seg_size(dev->dev, U32_MAX); if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max GMR ids is %u\n", @@ -873,17 +873,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) drm_vma_offset_manager_init(&dev_priv->vma_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE); - ret = ttm_bo_device_init(&dev_priv->bdev, - &vmw_bo_driver, + ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver, + dev_priv->dev->dev, dev->anon_inode->i_mapping, &dev_priv->vma_manager, + dev_priv->map_mode == vmw_dma_alloc_coherent, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); goto out_no_bdev; } - ttm_manager_type(&dev_priv->bdev, TTM_PL_SYSTEM)->available_caching = - TTM_PL_FLAG_CACHED; /* * Enable VRAM, but initially don't use it until SVGA is enabled and @@ -1260,7 +1259,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv) if (ttm_resource_manager_used(man)) { ttm_resource_manager_set_used(man, false); spin_unlock(&dev_priv->svga_lock); - if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) + if (ttm_resource_manager_evict_all(&dev_priv->bdev, man)) DRM_ERROR("Failed evicting VRAM buffers.\n"); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | @@ -1367,6 +1366,10 @@ static int vmw_pm_freeze(struct device *kdev) struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; int ret; /* @@ -1387,7 +1390,7 @@ static int vmw_pm_freeze(struct device *kdev) vmw_execbuf_release_pinned_bo(dev_priv); vmw_resource_evict_all(dev_priv); vmw_release_device_early(dev_priv); - ttm_bo_swapout_all(); + while (ttm_bo_swapout(&ctx) == 0); if (dev_priv->enable_fb) vmw_fifo_resource_dec(dev_priv); if (atomic_read(&dev_priv->num_fifo_resources) != 
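/*
 * ttm_bo_swapout_all() no longer exists; ttm_bo_swapout() swaps out a
 * single buffer per call and returns 0 on success, which is why the freeze
 * path above drains system memory with an explicit loop:
 *
 *     while (ttm_bo_swapout(&ctx) == 0)
 *             ;
 *
 * A non-zero return (nothing left to swap, or an error) ends the loop.
 */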
0) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 871ad738dadb..b45becbb00f8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -82,9 +82,7 @@ VMWGFX_NUM_GB_SCREEN_TARGET) #define VMW_PL_GMR (TTM_PL_PRIV + 0) -#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0) #define VMW_PL_MOB (TTM_PL_PRIV + 1) -#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1) #define VMW_RES_CONTEXT ttm_driver_type0 #define VMW_RES_SURFACE ttm_driver_type1 @@ -101,7 +99,6 @@ struct vmw_fpriv { * struct vmw_buffer_object - TTM buffer object with vmwgfx additions * @base: The TTM buffer object * @res_tree: RB tree of resources using this buffer object as a backing MOB - * @pin_count: pin depth * @cpu_writers: Number of synccpu write grabs. Protected by reservation when * increased. May be decreased without reservation. * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB @@ -112,7 +109,6 @@ struct vmw_fpriv { struct vmw_buffer_object { struct ttm_buffer_object base; struct rb_root res_tree; - s32 pin_count; atomic_t cpu_writers; /* Not ref-counted. Protected by binding_mutex */ struct vmw_resource *dx_query_ctx; @@ -847,10 +843,14 @@ extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf, SVGAGuestPtr *ptr); extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin); extern void vmw_bo_bo_free(struct ttm_buffer_object *bo); +extern int vmw_bo_create_kernel(struct vmw_private *dev_priv, + unsigned long size, + struct ttm_placement *placement, + struct ttm_buffer_object **p_bo); extern int vmw_bo_init(struct vmw_private *dev_priv, struct vmw_buffer_object *vmw_bo, size_t size, struct ttm_placement *placement, - bool interruptible, + bool interruptible, bool pin, void (*bo_free)(struct ttm_buffer_object *bo)); extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, struct ttm_object_file *tfile); @@ -1007,16 +1007,12 @@ extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, extern const size_t vmw_tt_size; extern struct ttm_placement vmw_vram_placement; -extern struct ttm_placement vmw_vram_ne_placement; extern struct ttm_placement vmw_vram_sys_placement; extern struct ttm_placement vmw_vram_gmr_placement; -extern struct ttm_placement vmw_vram_gmr_ne_placement; extern struct ttm_placement vmw_sys_placement; -extern struct ttm_placement vmw_sys_ne_placement; extern struct ttm_placement vmw_evictable_placement; extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_mob_placement; -extern struct ttm_placement vmw_mob_ne_placement; extern struct ttm_placement vmw_nonfixed_placement; extern struct ttm_bo_driver vmw_bo_driver; extern const struct vmw_sg_table * diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index c59806d40e15..4d60201037d1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -406,7 +406,7 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, ret = vmw_bo_init(vmw_priv, vmw_bo, size, &vmw_sys_placement, - false, + false, false, &vmw_bo_bo_free); if (unlikely(ret != 0)) goto err_unlock; /* init frees the buffer on failure */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index bb76acb5b0fc..be325a62c178 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -60,7 +60,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager 
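/*
 * With every *_ne_placement table deleted, callers request pinning via the
 * new vmw_bo_init() parameter declared above, e.g. (sketch):
 *
 *     ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
 *                       true, true,   // interruptible, pin
 *                       vmw_bo_bo_free);
 */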
*man, id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) - return (id != -ENOMEM ? 0 : id); + return id; spin_lock(&gman->lock); @@ -112,8 +112,6 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man = &gman->manager; man->func = &vmw_gmrid_manager_func; - man->available_caching = TTM_PL_FLAG_CACHED; - man->default_caching = TTM_PL_FLAG_CACHED; /* TODO: This is most likely not correct */ man->use_tt = true; ttm_resource_manager_init(man, 0); @@ -145,7 +143,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) ttm_resource_manager_set_used(man, false); - ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); + ttm_resource_manager_evict_all(&dev_priv->bdev, man); ttm_resource_manager_cleanup(man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 312ed0881a99..bc67f2b930e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -522,8 +522,10 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *new_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, + crtc); struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc); int connector_mask = drm_connector_mask(&du->connector); bool has_primary = new_state->plane_mask & @@ -552,13 +554,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { } void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event = crtc->state->event; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 3ee03227607c..03f3694015ce 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -473,11 +473,11 @@ void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps, bool unreference); int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state); + struct drm_atomic_state *state); void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); + struct drm_atomic_state *state); void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); + struct drm_atomic_state *state); void vmw_du_crtc_reset(struct drm_crtc *crtc); struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc); void vmw_du_crtc_destroy_state(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index c4017c7a24db..9d1de5b5cc6a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -214,7 +214,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) * CRTC, it makes more sense to do those at plane update time. 
*/ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { } @@ -224,7 +224,7 @@ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc, * @crtc: CRTC to be turned off */ static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index e9f448a5ebb3..15b5bde69324 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -24,7 +24,7 @@ * */ -#include <linux/frame.h> +#include <linux/objtool.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -599,4 +599,3 @@ out_open: return -EINVAL; } - diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index d4d66532f9c9..0b76b3d17d4c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -206,7 +206,7 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo, * @start: First page of the range within the buffer object. * @end: Last page of the range within the buffer object + 1. * - * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange. + * This is similar to ttm_bo_unmap_virtual() except it takes a subrange. */ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, pgoff_t start, pgoff_t end) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c0f156078dda..00b535831a7a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -370,7 +370,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, ret = vmw_bo_init(res->dev_priv, backup, res->backup_size, res->func->backup_placement, - interruptible, + interruptible, false, &vmw_bo_bo_free); if (unlikely(ret != 0)) goto out_no_bo; @@ -867,7 +867,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, mutex_lock(&dev_priv->binding_mutex); dx_query_mob = container_of(bo, struct vmw_buffer_object, base); - if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) { + if (!dx_query_mob || !dx_query_mob->dx_query_ctx) { mutex_unlock(&dev_priv->binding_mutex); return; } @@ -1002,7 +1002,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) vbo = res->backup; ttm_bo_reserve(&vbo->base, interruptible, false, NULL); - if (!vbo->pin_count) { + if (!vbo->base.pin_count) { ret = ttm_bo_validate (&vbo->base, res->func->backup_placement, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 4bf0f5ec4fc2..4bdad2f2d130 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -279,7 +279,7 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc) * This is called after a mode set has been completed. 
*/ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { } @@ -289,7 +289,7 @@ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, * @crtc: CRTC to be turned off */ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct vmw_private *dev_priv; struct vmw_screen_object_unit *sou; @@ -451,8 +451,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, */ vmw_overlay_pause_all(dev_priv); ret = vmw_bo_init(dev_priv, vps->bo, size, - &vmw_vram_ne_placement, - false, &vmw_bo_bo_free); + &vmw_vram_placement, + false, true, &vmw_bo_bo_free); vmw_overlay_resume_all(dev_priv); if (ret) { vps->bo = NULL; /* vmw_bo_init frees on error */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index e139fdfd1635..f328aa5839a2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -978,8 +978,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, if (unlikely(!buf)) return -ENOMEM; - ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement, - true, vmw_bo_bo_free); + ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement, + true, true, vmw_bo_bo_free); if (unlikely(ret != 0)) goto out; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index cf3aafd00837..5b04ec047ef3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -408,12 +408,12 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc) } static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { } static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 3c00a9e7cfcc..155ca3a5c7e5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -26,6 +26,8 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man) return container_of(man, struct vmw_thp_manager, manager); } +static const struct ttm_resource_manager_func vmw_thp_func; + static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, unsigned long align_pages, const struct ttm_place *place, @@ -101,7 +103,7 @@ found_unlock: mem->start = node->start; } - return 0; + return ret; } @@ -123,25 +125,21 @@ static void vmw_thp_put_node(struct ttm_resource_manager *man, int vmw_thp_init(struct vmw_private *dev_priv) { - struct ttm_resource_manager *man; struct vmw_thp_manager *rman; rman = kzalloc(sizeof(*rman), GFP_KERNEL); if (!rman) return -ENOMEM; - man = &rman->manager; - man->available_caching = TTM_PL_FLAG_CACHED; - man->default_caching = TTM_PL_FLAG_CACHED; - - ttm_resource_manager_init(man, + ttm_resource_manager_init(&rman->manager, dev_priv->vram_size >> PAGE_SHIFT); - drm_mm_init(&rman->mm, 0, man->size); + rman->manager.func = &vmw_thp_func; + drm_mm_init(&rman->mm, 0, rman->manager.size); spin_lock_init(&rman->lock); ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager); - ttm_resource_manager_set_used(man, true); + ttm_resource_manager_set_used(&rman->manager, true); return 0; } @@ -154,7 +152,7 @@ void 
vmw_thp_fini(struct vmw_private *dev_priv) ttm_resource_manager_set_used(man, false); - ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); + ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man); if (ret) return; spin_lock(&rman->lock); @@ -176,7 +174,7 @@ static void vmw_thp_debug(struct ttm_resource_manager *man, spin_unlock(&rman->lock); } -const struct ttm_resource_manager_func vmw_thp_func = { +static const struct ttm_resource_manager_func vmw_thp_func = { .alloc = vmw_thp_get_node, .free = vmw_thp_put_node, .debug = vmw_thp_debug diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index c7f10b2c93d2..51f70bea41cc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -28,54 +28,33 @@ #include "vmwgfx_drv.h" #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_page_alloc.h> static const struct ttm_place vram_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED -}; - -static const struct ttm_place vram_ne_placement_flags = { - .fpfn = 0, - .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = TTM_PL_VRAM, + .flags = 0 }; static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED -}; - -static const struct ttm_place sys_ne_placement_flags = { - .fpfn = 0, - .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = TTM_PL_SYSTEM, + .flags = 0 }; static const struct ttm_place gmr_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED -}; - -static const struct ttm_place gmr_ne_placement_flags = { - .fpfn = 0, - .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = VMW_PL_GMR, + .flags = 0 }; static const struct ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED -}; - -static const struct ttm_place mob_ne_placement_flags = { - .fpfn = 0, - .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT + .mem_type = VMW_PL_MOB, + .flags = 0 }; struct ttm_placement vmw_vram_placement = { @@ -89,11 +68,13 @@ static const struct ttm_place vram_gmr_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_VRAM, + .flags = 0 }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = 0 } }; @@ -101,11 +82,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = 0 }, { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_VRAM, + .flags = 0 } }; @@ -116,27 +99,6 @@ struct ttm_placement vmw_vram_gmr_placement = { .busy_placement = &gmr_placement_flags }; -static const struct ttm_place vram_gmr_ne_placement_flags[] = { - { - .fpfn = 0, - .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT - }, { - .fpfn = 0, - .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT - } -}; - -struct ttm_placement vmw_vram_gmr_ne_placement = { - .num_placement = 2, - .placement = vram_gmr_ne_placement_flags, - .num_busy_placement = 1, - .busy_placement = &gmr_ne_placement_flags -}; - struct 
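The placement tables being rewritten here follow the new struct ttm_place layout: the memory domain moves from a TTM_PL_FLAG_* bit in .flags to the dedicated .mem_type field, and the caching bits disappear from placements entirely (caching is now a property of the ttm_tt, as the later ttm_tt_init(..., ttm_cached) hunks show). One converted placement, in isolation:

#include <drm/ttm/ttm_placement.h>

static const struct ttm_place vram_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,	/* was TTM_PL_FLAG_VRAM in .flags */
	.flags = 0,			/* caching flags are gone */
};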
ttm_placement vmw_vram_sys_placement = { .num_placement = 1, .placement = &vram_placement_flags, @@ -144,13 +106,6 @@ struct ttm_placement vmw_vram_sys_placement = { .busy_placement = &sys_placement_flags }; -struct ttm_placement vmw_vram_ne_placement = { - .num_placement = 1, - .placement = &vram_ne_placement_flags, - .num_busy_placement = 1, - .busy_placement = &vram_ne_placement_flags -}; - struct ttm_placement vmw_sys_placement = { .num_placement = 1, .placement = &sys_placement_flags, @@ -158,30 +113,27 @@ struct ttm_placement vmw_sys_placement = { .busy_placement = &sys_placement_flags }; -struct ttm_placement vmw_sys_ne_placement = { - .num_placement = 1, - .placement = &sys_ne_placement_flags, - .num_busy_placement = 1, - .busy_placement = &sys_ne_placement_flags -}; - static const struct ttm_place evictable_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_SYSTEM, + .flags = 0 }, { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_VRAM, + .flags = 0 }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = 0 }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_MOB, + .flags = 0 } }; @@ -189,15 +141,18 @@ static const struct ttm_place nonfixed_placement_flags[] = { { .fpfn = 0, .lpfn = 0, - .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED + .mem_type = TTM_PL_SYSTEM, + .flags = 0 }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_GMR, + .flags = 0 }, { .fpfn = 0, .lpfn = 0, - .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED + .mem_type = VMW_PL_MOB, + .flags = 0 } }; @@ -222,13 +177,6 @@ struct ttm_placement vmw_mob_placement = { .busy_placement = &mob_placement_flags }; -struct ttm_placement vmw_mob_ne_placement = { - .num_placement = 1, - .num_busy_placement = 1, - .placement = &mob_ne_placement_flags, - .busy_placement = &mob_ne_placement_flags -}; - struct ttm_placement vmw_nonfixed_placement = { .num_placement = 3, .placement = nonfixed_placement_flags, @@ -237,7 +185,7 @@ struct ttm_placement vmw_nonfixed_placement = { }; struct vmw_ttm_tt { - struct ttm_dma_tt dma_ttm; + struct ttm_tt dma_ttm; struct vmw_private *dev_priv; int gmr_id; struct vmw_mob *mob; @@ -246,6 +194,7 @@ struct vmw_ttm_tt { struct vmw_sg_table vsgt; uint64_t sg_alloc_size; bool mapped; + bool bound; }; const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); @@ -362,8 +311,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) { struct device *dev = vmw_tt->dev_priv->dev->dev; - dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents, - DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents; } @@ -383,16 +331,8 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt) { struct device *dev = vmw_tt->dev_priv->dev->dev; - int ret; - ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents, - DMA_BIDIRECTIONAL); - if (unlikely(ret == 0)) - return -ENOMEM; - - vmw_tt->sgt.nents = ret; - - return 0; + return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); } /** @@ -419,13 +359,14 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) int ret = 0; static size_t sgl_size; static size_t sgt_size; + struct scatterlist *sg; if (vmw_tt->mapped) return 0; vsgt->mode = dev_priv->map_mode; - vsgt->pages = 
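The vmw_ttm_map_for_dma()/vmw_ttm_unmap_from_dma() hunks above switch from the raw dma_map_sg() pair to the sg_table-aware wrappers. The gain is the error convention: dma_map_sg() returned 0 on failure and a mapped-entry count on success, while dma_map_sgtable() returns 0 or a negative errno and records the mapped count in sgt->nents itself. A self-contained sketch:

#include <linux/dma-mapping.h>

static int foo_map_buffer(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* already an errno, no translation needed */

	/* ... hardware uses the mapping; sgt->nents entries are live ... */

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}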
vmw_tt->dma_ttm.ttm.pages; - vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; + vsgt->pages = vmw_tt->dma_ttm.pages; + vsgt->num_pages = vmw_tt->dma_ttm.num_pages; vsgt->addrs = vmw_tt->dma_ttm.dma_address; vsgt->sgt = &vmw_tt->sgt; @@ -441,18 +382,20 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (unlikely(ret != 0)) return ret; - ret = __sg_alloc_table_from_pages - (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, - (unsigned long) vsgt->num_pages << PAGE_SHIFT, - dma_get_max_seg_size(dev_priv->dev->dev), - GFP_KERNEL); - if (unlikely(ret != 0)) + sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, + vsgt->num_pages, 0, + (unsigned long) vsgt->num_pages << PAGE_SHIFT, + dma_get_max_seg_size(dev_priv->dev->dev), + NULL, 0, GFP_KERNEL); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); goto out_sg_alloc_fail; + } - if (vsgt->num_pages > vmw_tt->sgt.nents) { + if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { uint64_t over_alloc = sgl_size * (vsgt->num_pages - - vmw_tt->sgt.nents); + vmw_tt->sgt.orig_nents); ttm_mem_global_free(glob, over_alloc); vmw_tt->sg_alloc_size -= over_alloc; @@ -533,17 +476,24 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt) const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) { struct vmw_ttm_tt *vmw_tt = - container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); return &vmw_tt->vsgt; } -static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) +static int vmw_ttm_bind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct vmw_ttm_tt *vmw_be = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); - int ret; + container_of(ttm, struct vmw_ttm_tt, dma_ttm); + int ret = 0; + + if (!bo_mem) + return -EINVAL; + + if (vmw_be->bound) + return 0; ret = vmw_ttm_map_dma(vmw_be); if (unlikely(ret != 0)) @@ -554,8 +504,9 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) switch (bo_mem->mem_type) { case VMW_PL_GMR: - return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, + ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, ttm->num_pages, vmw_be->gmr_id); + break; case VMW_PL_MOB: if (unlikely(vmw_be->mob == NULL)) { vmw_be->mob = @@ -564,19 +515,25 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) return -ENOMEM; } - return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, + ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, &vmw_be->vsgt, ttm->num_pages, vmw_be->gmr_id); + break; default: BUG(); } - return 0; + vmw_be->bound = true; + return ret; } -static void vmw_ttm_unbind(struct ttm_tt *ttm) +static void vmw_ttm_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(ttm, struct vmw_ttm_tt, dma_ttm); + + if (!vmw_be->bound) + return; switch (vmw_be->mem_type) { case VMW_PL_GMR: @@ -591,17 +548,20 @@ static void vmw_ttm_unbind(struct ttm_tt *ttm) if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) vmw_ttm_unmap_dma(vmw_be); + vmw_be->bound = false; } -static void vmw_ttm_destroy(struct ttm_tt *ttm) +static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(ttm, struct vmw_ttm_tt, dma_ttm); + vmw_ttm_unbind(bdev, ttm); + ttm_tt_destroy_common(bdev, ttm); vmw_ttm_unmap_dma(vmw_be); if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) - ttm_dma_tt_fini(&vmw_be->dma_ttm); + 
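With the per-ttm backend_func vtable gone, bind/unbind/destroy become ttm_bo_driver-level hooks taking the ttm_bo_device, and the driver must make binding idempotent itself, hence the new `bound` flag above. A minimal sketch of the guard pattern, with foo_* names hypothetical:

#include <drm/ttm/ttm_bo_driver.h>

struct foo_ttm_tt {
	struct ttm_tt ttm;
	bool bound;
};

static int foo_ttm_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			struct ttm_resource *bo_mem)
{
	struct foo_ttm_tt *be = container_of(ttm, struct foo_ttm_tt, ttm);

	if (!bo_mem)
		return -EINVAL;
	if (be->bound)		/* repeat calls must be no-ops now */
		return 0;

	/* ... program the GMR/MOB/GART entry here ... */

	be->bound = true;
	return 0;
}

static void foo_ttm_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct foo_ttm_tt *be = container_of(ttm, struct foo_ttm_tt, ttm);

	if (!be->bound)
		return;

	/* ... tear the mapping down ... */

	be->bound = false;
}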
ttm_tt_fini(&vmw_be->dma_ttm); else ttm_tt_fini(ttm); @@ -612,41 +572,21 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm) } -static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) +static int vmw_ttm_populate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { - struct vmw_ttm_tt *vmw_tt = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); - struct vmw_private *dev_priv = vmw_tt->dev_priv; - struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); - int ret; - - if (ttm->state != tt_unpopulated) + /* TODO: maybe completely drop this ? */ + if (ttm_tt_is_populated(ttm)) return 0; - if (dev_priv->map_mode == vmw_dma_alloc_coherent) { - size_t size = - ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); - ret = ttm_mem_global_alloc(glob, size, ctx); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev, - ctx); - if (unlikely(ret != 0)) - ttm_mem_global_free(glob, size); - } else - ret = ttm_pool_populate(ttm, ctx); - - return ret; + return ttm_pool_alloc(&bdev->pool, ttm, ctx); } -static void vmw_ttm_unpopulate(struct ttm_tt *ttm) +static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, + struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, - dma_ttm.ttm); - struct vmw_private *dev_priv = vmw_tt->dev_priv; - struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); - + dma_ttm); if (vmw_tt->mob) { vmw_mob_destroy(vmw_tt->mob); @@ -654,22 +594,9 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm) } vmw_ttm_unmap_dma(vmw_tt); - if (dev_priv->map_mode == vmw_dma_alloc_coherent) { - size_t size = - ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); - - ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev); - ttm_mem_global_free(glob, size); - } else - ttm_pool_unpopulate(ttm); + ttm_pool_free(&bdev->pool, ttm); } -static struct ttm_backend_func vmw_ttm_func = { - .bind = vmw_ttm_bind, - .unbind = vmw_ttm_unbind, - .destroy = vmw_ttm_destroy, -}; - static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { @@ -680,18 +607,19 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, if (!vmw_be) return NULL; - vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev); vmw_be->mob = NULL; if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) - ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags); + ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags, + ttm_cached); else - ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags); + ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags, + ttm_cached); if (unlikely(ret != 0)) goto out_no_init; - return &vmw_be->dma_ttm.ttm; + return &vmw_be->dma_ttm; out_no_init: kfree(vmw_be); return NULL; @@ -721,9 +649,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc case VMW_PL_MOB: return 0; case TTM_PL_VRAM: - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = dev_priv->vram_start; + mem->bus.offset = (mem->start << PAGE_SHIFT) + + dev_priv->vram_start; mem->bus.is_iomem = true; + mem->bus.caching = ttm_cached; break; default: return -EINVAL; @@ -745,6 +674,8 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *mem) { + if (!mem) + return; vmw_bo_move_notify(bo, mem); vmw_query_move_notify(bo, mem); } @@ -761,16 +692,65 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo) (void) ttm_bo_wait(bo, false, false); } +static 
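The populate/unpopulate conversion above drops both the driver-managed coherent path and ttm_pool_populate() in favour of the per-device TTM page pool. For a driver with no special allocation needs the pair collapses to almost nothing; a sketch mirroring the hunk:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_pool.h>

static int foo_ttm_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			    struct ttm_operation_ctx *ctx)
{
	if (ttm_tt_is_populated(ttm))
		return 0;

	/* coherent vs. cached allocation is the pool's business now */
	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void foo_ttm_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_pool_free(&bdev->pool, ttm);
}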
int vmw_move(struct ttm_buffer_object *bo, + bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem) +{ + struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type); + struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); + int ret; + + if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) { + ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem); + if (ret) + return ret; + } + + vmw_move_notify(bo, evict, new_mem); + + if (old_man->use_tt && new_man->use_tt) { + if (bo->mem.mem_type == TTM_PL_SYSTEM) { + ttm_bo_assign_mem(bo, new_mem); + return 0; + } + ret = ttm_bo_wait_ctx(bo, ctx); + if (ret) + goto fail; + + vmw_ttm_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); + ttm_bo_assign_mem(bo, new_mem); + return 0; + } else { + ret = ttm_bo_move_memcpy(bo, ctx, new_mem); + if (ret) + goto fail; + } + return 0; +fail: + swap(*new_mem, bo->mem); + vmw_move_notify(bo, false, new_mem); + swap(*new_mem, bo->mem); + return ret; +} + +static void +vmw_delete_mem_notify(struct ttm_buffer_object *bo) +{ + vmw_move_notify(bo, false, NULL); +} struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate, + .ttm_tt_destroy = &vmw_ttm_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = vmw_evict_flags, - .move = NULL, + .move = vmw_move, .verify_access = vmw_verify_access, - .move_notify = vmw_move_notify, + .delete_mem_notify = vmw_delete_mem_notify, .swap_notify = vmw_swap_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, }; @@ -786,20 +766,18 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv, struct ttm_buffer_object *bo; int ret; - ret = ttm_bo_create(&dev_priv->bdev, bo_size, - ttm_bo_type_device, - &vmw_sys_ne_placement, - 0, false, &bo); - + ret = vmw_bo_create_kernel(dev_priv, bo_size, + &vmw_sys_placement, + &bo); if (unlikely(ret != 0)) return ret; ret = ttm_bo_reserve(bo, false, true, NULL); BUG_ON(ret != 0); - ret = vmw_ttm_populate(bo->ttm, &ctx); + ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx); if (likely(ret == 0)) { struct vmw_ttm_tt *vmw_tt = - container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); ret = vmw_ttm_map_dma(vmw_tt); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index e69bc373ae2e..f2e2bf6d1421 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -540,7 +540,7 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, if (atomic_read(&vbo->cpu_writers)) return -EBUSY; - if (vbo->pin_count > 0) + if (vbo->base.pin_count > 0) return 0; if (validate_as_mob) diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index cc93a8c9547b..98b6d2ba088a 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -381,6 +381,23 @@ void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info, fb_cookie); } +void xen_drm_front_gem_object_free(struct drm_gem_object *obj) +{ + struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private; + int idx; + + if (drm_dev_enter(obj->dev, &idx)) { + xen_drm_front_dbuf_destroy(drm_info->front_info, + xen_drm_front_dbuf_to_cookie(obj)); + drm_dev_exit(idx); + } else { + dbuf_free(&drm_info->front_info->dbuf_list, + xen_drm_front_dbuf_to_cookie(obj)); + } + + 
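With .move = NULL no longer meaning "memcpy for me", every TTM driver must supply a move hook; vmw_move() above is the full-featured version with bind/unbind handling. For a driver with no accelerated path, a minimal correct fallback would be to idle and memcpy, roughly (a sketch, not from the patch):

static int foo_move(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem)
{
	int ret;

	ret = ttm_bo_wait_ctx(bo, ctx);	/* idle the BO first */
	if (ret)
		return ret;

	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}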
xen_drm_front_gem_free_object_unlocked(obj); +} + static int xen_drm_drv_dumb_create(struct drm_file *filp, struct drm_device *dev, struct drm_mode_create_dumb *args) @@ -435,23 +452,6 @@ fail: return ret; } -static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj) -{ - struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private; - int idx; - - if (drm_dev_enter(obj->dev, &idx)) { - xen_drm_front_dbuf_destroy(drm_info->front_info, - xen_drm_front_dbuf_to_cookie(obj)); - drm_dev_exit(idx); - } else { - dbuf_free(&drm_info->front_info->dbuf_list, - xen_drm_front_dbuf_to_cookie(obj)); - } - - xen_drm_front_gem_free_object_unlocked(obj); -} - static void xen_drm_drv_release(struct drm_device *dev) { struct xen_drm_front_drm_info *drm_info = dev->dev_private; @@ -483,22 +483,12 @@ static const struct file_operations xen_drm_dev_fops = { .mmap = xen_drm_front_gem_mmap, }; -static const struct vm_operations_struct xen_drm_drv_vm_ops = { - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static struct drm_driver xen_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .release = xen_drm_drv_release, - .gem_vm_ops = &xen_drm_drv_vm_ops, - .gem_free_object_unlocked = xen_drm_drv_free_object_unlocked, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, - .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table, - .gem_prime_vmap = xen_drm_front_gem_prime_vmap, - .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap, .gem_prime_mmap = xen_drm_front_gem_prime_mmap, .dumb_create = xen_drm_drv_dumb_create, .fops = &xen_drm_dev_fops, diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h index 54486d89650e..cefafe859aba 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.h +++ b/drivers/gpu/drm/xen/xen_drm_front.h @@ -160,4 +160,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info, void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info, int conn_idx, u64 fb_cookie); +void xen_drm_front_gem_object_free(struct drm_gem_object *obj); + #endif /* __XEN_DRM_FRONT_H_ */ diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 534daf37c97e..4f34ef34ba60 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -57,6 +57,19 @@ static void gem_free_pages_array(struct xen_gem_object *xen_obj) xen_obj->pages = NULL; } +static const struct vm_operations_struct xen_drm_drv_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = { + .free = xen_drm_front_gem_object_free, + .get_sg_table = xen_drm_front_gem_get_sg_table, + .vmap = xen_drm_front_gem_prime_vmap, + .vunmap = xen_drm_front_gem_prime_vunmap, + .vm_ops = &xen_drm_drv_vm_ops, +}; + static struct xen_gem_object *gem_create_obj(struct drm_device *dev, size_t size) { @@ -67,6 +80,8 @@ static struct xen_gem_object *gem_create_obj(struct drm_device *dev, if (!xen_obj) return ERR_PTR(-ENOMEM); + xen_obj->base.funcs = &xen_drm_front_gem_object_funcs; + ret = drm_gem_object_init(dev, &xen_obj->base, size); if (ret < 0) { kfree(xen_obj); @@ -180,7 +195,8 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj) if (!xen_obj->pages) return ERR_PTR(-ENOMEM); - return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages); + return 
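The xen front conversion above is the standard recipe for moving GEM callbacks off struct drm_driver and onto the object: define one drm_gem_object_funcs table and point every object at it before drm_gem_object_init(). In outline, with foo_* names illustrative:

#include <drm/drm_gem.h>

static void foo_gem_free(struct drm_gem_object *obj);

static const struct vm_operations_struct foo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free = foo_gem_free,	/* was drm_driver.gem_free_object_unlocked */
	.vm_ops = &foo_vm_ops,	/* was drm_driver.gem_vm_ops */
};

static int foo_gem_create(struct drm_device *dev,
			  struct drm_gem_object *obj, size_t size)
{
	obj->funcs = &foo_gem_funcs;	/* per-object vtable */
	return drm_gem_object_init(dev, obj, size);
}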
drm_prime_pages_to_sg(gem_obj->dev, + xen_obj->pages, xen_obj->num_pages); } struct drm_gem_object * @@ -217,7 +233,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, return ERR_PTR(ret); DRM_DEBUG("Imported buffer of size %zu with nents %u\n", - size, sgt->nents); + size, sgt->orig_nents); return &xen_obj->base; } diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index a455cfc1bee5..c685d94409b0 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -28,7 +28,6 @@ #include <linux/dmaengine.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/spinlock.h> @@ -242,12 +241,6 @@ static const u32 scaling_factors_565[] = { ZYNQMP_DISP_AV_BUF_5BIT_SF, }; -static const u32 scaling_factors_666[] = { - ZYNQMP_DISP_AV_BUF_6BIT_SF, - ZYNQMP_DISP_AV_BUF_6BIT_SF, - ZYNQMP_DISP_AV_BUF_6BIT_SF, -}; - static const u32 scaling_factors_888[] = { ZYNQMP_DISP_AV_BUF_8BIT_SF, ZYNQMP_DISP_AV_BUF_8BIT_SF, @@ -1322,8 +1315,7 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp, snprintf(dma_channel_name, sizeof(dma_channel_name), "%s%u", dma_names[layer->id], i); - dma->chan = of_dma_request_slave_channel(disp->dev->of_node, - dma_channel_name); + dma->chan = dma_request_chan(disp->dev, dma_channel_name); if (IS_ERR(dma->chan)) { dev_err(disp->dev, "failed to request dma channel\n"); ret = PTR_ERR(dma->chan); @@ -1455,7 +1447,7 @@ static int zynqmp_disp_crtc_setup_clock(struct drm_crtc *crtc, static void zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { struct zynqmp_disp *disp = crtc_to_disp(crtc); struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; @@ -1486,8 +1478,10 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc, static void zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); struct zynqmp_disp *disp = crtc_to_disp(crtc); struct drm_plane_state *old_plane_state; @@ -1510,21 +1504,21 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc, } static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) + struct drm_atomic_state *state) { - return drm_atomic_add_affected_planes(state->state, crtc); + return drm_atomic_add_affected_planes(state, crtc); } static void zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { drm_crtc_vblank_on(crtc); } static void zynqmp_disp_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_atomic_state *state) { if (crtc->state->event) { struct drm_pending_vblank_event *event; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index 26328c76305b..f3ffc3703a0e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -80,19 +80,7 @@ static struct drm_driver zynqmp_dpsub_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, - .gem_prime_get_sg_table = 
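Two related details in the hunks above: drm_prime_pages_to_sg() grew a struct drm_device argument (upstream did this so segment sizes can respect the device's DMA limits), and the debug output switches from sgt->nents to sgt->orig_nents. orig_nents counts the CPU-side entries; nents is only valid after DMA mapping, which may coalesce entries. The sgtable iteration helpers make the distinction explicit:

#include <linux/scatterlist.h>

static void foo_dump_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)		/* walks orig_nents */
		pr_debug("cpu seg %d: %u bytes\n", i, sg->length);

	for_each_sgtable_dma_sg(sgt, sg, i)	/* walks nents */
		pr_debug("dma seg %d: %pad\n", i, &sg_dma_address(sg));
}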
drm_gem_cma_prime_get_sg_table, - .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, - .gem_prime_vmap = drm_gem_cma_prime_vmap, - .gem_prime_vunmap = drm_gem_cma_prime_vunmap, - .gem_prime_mmap = drm_gem_cma_prime_mmap, - .gem_free_object_unlocked = drm_gem_cma_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, - .dumb_create = zynqmp_dpsub_dumb_create, - .dumb_destroy = drm_gem_dumb_destroy, + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create), .fops = &zynqmp_dpsub_drm_fops, @@ -111,7 +99,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) /* Initialize mode config, vblank and the KMS poll helper. */ ret = drmm_mode_config_init(drm); if (ret < 0) - goto err_dev_put; + return ret; drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs; drm->mode_config.min_width = 0; @@ -121,7 +109,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) ret = drm_vblank_init(drm, 1); if (ret) - goto err_dev_put; + return ret; drm->irq_enabled = 1; @@ -154,8 +142,6 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub) err_poll_fini: drm_kms_helper_poll_fini(drm); -err_dev_put: - drm_dev_put(drm); return ret; } @@ -208,27 +194,16 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) int ret; /* Allocate private data. */ - dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL); - if (!dpsub) - return -ENOMEM; + dpsub = devm_drm_dev_alloc(&pdev->dev, &zynqmp_dpsub_drm_driver, + struct zynqmp_dpsub, drm); + if (IS_ERR(dpsub)) + return PTR_ERR(dpsub); dpsub->dev = &pdev->dev; platform_set_drvdata(pdev, dpsub); dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT)); - /* - * Initialize the DRM device early, as the DRM core mandates usage of - * the managed memory helpers tied to the DRM device. - */ - ret = drm_dev_init(&dpsub->drm, &zynqmp_dpsub_drm_driver, &pdev->dev); - if (ret < 0) { - kfree(dpsub); - return ret; - } - - drmm_add_final_kfree(&dpsub->drm, dpsub); - /* Try the reserved memory. Proceed if there's none. 
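The zynqmp probe rework above is the now-canonical pattern: devm_drm_dev_alloc() allocates the driver structure with the drm_device embedded and ties its lifetime to the last reference, which is what lets every drm_dev_put(), the drmm_add_final_kfree() call and the err_dev_put label disappear. The shape of the pattern, with foo_* hypothetical:

#include <drm/drm_drv.h>
#include <linux/platform_device.h>

struct foo_device {
	struct drm_device drm;
	/* driver-private members */
};

static struct drm_driver foo_drm_driver = {
	/* .fops, .name, ... elided */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;

	foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
				 struct foo_device, drm);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	/* on any later error just return: no kfree(), no drm_dev_put() */
	return drm_dev_register(&foo->drm, 0);
}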
*/ of_reserved_mem_device_init(&pdev->dev); @@ -286,8 +261,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev) clk_disable_unprepare(dpsub->apb_clk); of_reserved_mem_device_release(&pdev->dev); - drm_dev_put(drm); - return 0; } diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c index 5259ff2825f9..904f62f3bfc1 100644 --- a/drivers/gpu/drm/zte/zx_vou.c +++ b/drivers/gpu/drm/zte/zx_vou.c @@ -350,7 +350,7 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc) } static void zx_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_display_mode *mode = &crtc->state->adjusted_mode; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; @@ -455,7 +455,7 @@ static void zx_crtc_atomic_enable(struct drm_crtc *crtc, } static void zx_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct zx_crtc *zcrtc = to_zx_crtc(crtc); const struct zx_crtc_bits *bits = zcrtc->bits; @@ -473,7 +473,7 @@ static void zx_crtc_atomic_disable(struct drm_crtc *crtc, } static void zx_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_pending_vblank_event *event = crtc->state->event; diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 89b6c14b7392..82d0a60ba3f7 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -170,11 +170,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); - if (!err) { - err = -ENOMEM; + err = dma_map_sgtable(dev, sgt, dir, 0); + if (err) goto unpin; - } job->unpins[job->num_unpins].dev = dev; job->unpins[job->num_unpins].dir = dir; @@ -228,7 +226,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) } if (host->domain) { - for_each_sg(sgt->sgl, sg, sgt->nents, j) + for_each_sgtable_sg(sgt, sg, j) gather_size += sg->length; gather_size = iova_align(&host->iova, gather_size); @@ -240,9 +238,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto put; } - err = iommu_map_sg(host->domain, + err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc), - sgt->sgl, sgt->nents, IOMMU_READ); + sgt, IOMMU_READ); if (err == 0) { __free_iova(&host->iova, alloc); err = -EINVAL; @@ -252,12 +250,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) job->unpins[job->num_unpins].size = gather_size; phys_addr = iova_dma_addr(&host->iova, alloc); } else if (sgt) { - err = dma_map_sg(host->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - if (!err) { - err = -ENOMEM; + err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0); + if (err) goto put; - } job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; job->unpins[job->num_unpins].dev = host->dev; @@ -660,8 +655,7 @@ void host1x_job_unpin(struct host1x_job *job) } if (unpin->dev && sgt) - dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents, - unpin->dir); + dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0); host1x_bo_unpin(dev, unpin->bo, sgt); host1x_bo_put(unpin->bo); diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c index e606464aa43c..2efe12dde8bc 100644 --- a/drivers/gpu/host1x/mipi.c +++ b/drivers/gpu/host1x/mipi.c @@ -293,19 +293,13 @@ int tegra_mipi_disable(struct tegra_mipi_device *dev) } EXPORT_SYMBOL(tegra_mipi_disable); -int tegra_mipi_wait(struct tegra_mipi_device *device) +int 
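The host1x conversion above mixes two return conventions worth keeping apart: dma_map_sgtable() returns 0 or a negative errno, while iommu_map_sgtable() returns the number of bytes mapped, with 0 meaning failure — hence the explicit conversion of a zero return to -EINVAL in the hunk. A condensed illustration:

#include <linux/iommu.h>

static int foo_map_through_iommu(struct iommu_domain *domain,
				 unsigned long iova, struct sg_table *sgt)
{
	size_t mapped = iommu_map_sgtable(domain, iova, sgt, IOMMU_READ);

	return mapped ? 0 : -EINVAL;	/* byte count, not an errno */
}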
tegra_mipi_finish_calibration(struct tegra_mipi_device *device) { struct tegra_mipi *mipi = device->mipi; void __iomem *status_reg = mipi->regs + (MIPI_CAL_STATUS << 2); u32 value; int err; - err = clk_enable(device->mipi->clk); - if (err < 0) - return err; - - mutex_lock(&device->mipi->lock); - err = readl_relaxed_poll_timeout(status_reg, value, !(value & MIPI_CAL_STATUS_ACTIVE) && (value & MIPI_CAL_STATUS_DONE), 50, @@ -315,9 +309,9 @@ int tegra_mipi_wait(struct tegra_mipi_device *device) return err; } -EXPORT_SYMBOL(tegra_mipi_wait); +EXPORT_SYMBOL(tegra_mipi_finish_calibration); -int tegra_mipi_calibrate(struct tegra_mipi_device *device) +int tegra_mipi_start_calibration(struct tegra_mipi_device *device) { const struct tegra_mipi_soc *soc = device->mipi->soc; unsigned int i; @@ -381,12 +375,16 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device) value |= MIPI_CAL_CTRL_START; tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL); - mutex_unlock(&device->mipi->lock); - clk_disable(device->mipi->clk); + /* + * Wait for min 72uS to let calibration logic finish calibration + * sequence codes before waiting for pads idle state to apply the + * results. + */ + usleep_range(75, 80); return 0; } -EXPORT_SYMBOL(tegra_mipi_calibrate); +EXPORT_SYMBOL(tegra_mipi_start_calibration); static const struct tegra_mipi_pad tegra114_mipi_pads[] = { { .data = MIPI_CAL_CONFIG_CSIA }, diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 087304b1a5d7..1401fd52f37a 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -1034,17 +1034,12 @@ static int vga_switcheroo_runtime_suspend(struct device *dev) static int vga_switcheroo_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - int ret; mutex_lock(&vgasr_mutex); vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON); mutex_unlock(&vgasr_mutex); pci_wakeup_bus(pdev->bus); - ret = dev->bus->pm->runtime_resume(dev); - if (ret) - return ret; - - return 0; + return dev->bus->pm->runtime_resume(dev); } /**
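The tegra_mipi_calibrate()/tegra_mipi_wait() pair above becomes an explicit start/finish protocol, with a mandatory 72+ us settle inside the start half before results can be latched. From the consumer side (e.g. the Tegra DSI driver) the expected usage would be roughly the following; dsi->mipi as an illustrative tegra_mipi_device handle and the surrounding link-enable logic are assumptions, not shown in this diff:

	int err;

	err = tegra_mipi_start_calibration(dsi->mipi);
	if (err < 0)
		return err;

	/* ... bring the link into the state the calibration logic
	 * needs while it runs ... */

	err = tegra_mipi_finish_calibration(dsi->mipi);
	if (err < 0)
		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);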