Diffstat (limited to 'drivers/gpu')
189 files changed, 39985 insertions, 2385 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 59babd5a5396..8ae7ab68cb97 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -82,13 +82,13 @@ config DRM_TTM config DRM_GEM_CMA_HELPER bool - depends on DRM && HAVE_DMA_ATTRS + depends on DRM help Choose this if you need the GEM CMA helper functions config DRM_KMS_CMA_HELPER bool - depends on DRM && HAVE_DMA_ATTRS + depends on DRM select DRM_GEM_CMA_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index f858aa25fbb2..61766dec6a8d 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -21,8 +21,6 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o drm-$(CONFIG_AGP) += drm_agpsupport.o -drm-y += $(drm-m) - drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o @@ -62,7 +60,7 @@ obj-$(CONFIG_DRM_ARMADA) += armada/ obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ -obj-$(CONFIG_DRM_OMAP) += omapdrm/ +obj-y += omapdrm/ obj-y += tilcdc/ obj-$(CONFIG_DRM_QXL) += qxl/ obj-$(CONFIG_DRM_BOCHS) += bochs/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 66f729eaf00b..20c9539abc36 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o # add asic specific block -amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ +amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ amdgpu_amdkfd_gfx_v7.o @@ -34,6 +34,7 @@ amdgpu-y += \ # add GMC block amdgpu-y += \ + gmc_v7_0.o \ gmc_v8_0.o # add IH block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 313b0cc8d676..82edf95b7740 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2278,60 +2278,60 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) #define amdgpu_dpm_get_temperature(adev) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ - (adev)->pm.funcs->get_temperature((adev)) + (adev)->pm.funcs->get_temperature((adev))) #define amdgpu_dpm_set_fan_control_mode(adev, m) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ - (adev)->pm.funcs->set_fan_control_mode((adev), (m)) + (adev)->pm.funcs->set_fan_control_mode((adev), (m))) #define amdgpu_dpm_get_fan_control_mode(adev) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ - (adev)->pm.funcs->get_fan_control_mode((adev)) + (adev)->pm.funcs->get_fan_control_mode((adev))) #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? 
\ (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ - (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) + (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ - (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) + (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) #define amdgpu_dpm_get_sclk(adev, l) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->get_sclk((adev), (l)) + (adev)->pm.funcs->get_sclk((adev), (l))) #define amdgpu_dpm_get_mclk(adev, l) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->get_mclk((adev), (l)) + (adev)->pm.funcs->get_mclk((adev), (l))) #define amdgpu_dpm_force_performance_level(adev, l) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->force_performance_level((adev), (l)) + (adev)->pm.funcs->force_performance_level((adev), (l))) #define amdgpu_dpm_powergate_uvd(adev, g) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ - (adev)->pm.funcs->powergate_uvd((adev), (g)) + (adev)->pm.funcs->powergate_uvd((adev), (g))) #define amdgpu_dpm_powergate_vce(adev, g) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ - (adev)->pm.funcs->powergate_vce((adev), (g)) + (adev)->pm.funcs->powergate_vce((adev), (g))) #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? 
\ (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ - (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) + (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))) #define amdgpu_dpm_get_current_power_state(adev) \ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 0e1376317683..362bedc9e507 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_fw_version = get_fw_version }; -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions() +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 79fa5c7de856..04b744d64b57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_fw_version = get_fw_version }; -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions() +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6f89f8e034d0..b882e8175615 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -478,9 +478,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; unsigned i; - amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); - if (!error) { + amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); + /* Sort the buffer list from the smallest to largest buffer, * which affects the order of buffers in the LRU list. 
* This assures that the smallest buffers are added first diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b5dbbb573491..9c1af8976bef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -256,11 +256,11 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, #endif /* topaz */ - {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, /* tonga */ {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index cfb6caad2a73..919146780a15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -333,6 +333,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) if (!adev->mode_info.mode_config_initialized) return 0; + /* don't init fbdev if there are no connectors */ + if (list_empty(&adev->ddev->mode_config.connector_list)) + return 0; + /* select 8 bpp console on low vram cards */ if (adev->mc.real_vram_size <= (32*1024*1024)) bpp_sel = 8; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index c3ce103b6a33..b8fbbd7699e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_cache.h> #include "amdgpu.h" #include "amdgpu_trace.h" @@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, AMDGPU_GEM_DOMAIN_OA); bo->flags = flags; + + /* For architectures that don't support WC memory, + * mask out the WC flag from the BO + */ + if (!drm_arch_can_wc_memory()) + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; + amdgpu_fill_placement_to_bo(bo, placement); /* Kernel allocation are uninterruptible */ r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, @@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, } if (fpfn > bo->placements[i].fpfn) bo->placements[i].fpfn = fpfn; - if (lpfn && lpfn < bo->placements[i].lpfn) + if (!bo->placements[i].lpfn || + (lpfn && lpfn < bo->placements[i].lpfn)) bo->placements[i].lpfn = lpfn; bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 5ee9a0690278..b9d0d55f6b47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -99,13 +99,24 @@ static int amdgpu_pp_early_init(void *handle) #ifdef CONFIG_DRM_AMD_POWERPLAY switch (adev->asic_type) { - case CHIP_TONGA: - case CHIP_FIJI: - adev->pp_enabled = (amdgpu_powerplay > 0) ? 
true : false; - break; - default: - adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; - break; + case CHIP_TONGA: + case CHIP_FIJI: + adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; + break; + /* These chips don't have powerplay implemenations */ + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: + case CHIP_KAVERI: + case CHIP_TOPAZ: + default: + adev->pp_enabled = false; + break; } #else adev->pp_enabled = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 78e9b0f14661..d1f234dd2126 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -487,7 +487,7 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); - rptr_next = ~0; + rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8a1752ff3d8e..55cf05e1c81c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -808,7 +808,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, flags |= AMDGPU_PTE_SNOOPED; } - if (adev->asic_type >= CHIP_TOPAZ) + if (adev->asic_type >= CHIP_TONGA) flags |= AMDGPU_PTE_EXECUTABLE; flags |= AMDGPU_PTE_READABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index aefc668e6b5d..9599f7559b3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1282,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT * 8); - unsigned pd_size, pd_entries, pts_size; + unsigned pd_size, pd_entries; int i, r; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -1300,8 +1300,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) pd_entries = amdgpu_vm_num_pdes(adev); /* allocate page table array */ - pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); - vm->page_tables = kzalloc(pts_size, GFP_KERNEL); + vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt)); if (vm->page_tables == NULL) { DRM_ERROR("Cannot allocate memory for page table array\n"); return -ENOMEM; @@ -1361,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) amdgpu_bo_unref(&vm->page_tables[i].entry.robj); - kfree(vm->page_tables); + drm_free_large(vm->page_tables); amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 72793f93e2fc..6c76139de1c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4738,6 +4738,22 @@ static int gfx_v7_0_early_init(void *handle) return 0; } +static int gfx_v7_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); + if (r) + return r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); + if (r) + return r; + + return 0; +} + static int gfx_v7_0_sw_init(void *handle) { struct 
amdgpu_ring *ring; @@ -4890,6 +4906,8 @@ static int gfx_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); gfx_v7_0_cp_enable(adev, false); gfx_v7_0_rlc_stop(adev); gfx_v7_0_fini_pg(adev); @@ -5527,7 +5545,7 @@ static int gfx_v7_0_set_powergating_state(void *handle, const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .early_init = gfx_v7_0_early_init, - .late_init = NULL, + .late_init = gfx_v7_0_late_init, .sw_init = gfx_v7_0_sw_init, .sw_fini = gfx_v7_0_sw_fini, .hw_init = gfx_v7_0_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 13235d84e5a6..8f8ec37ecd88 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin"); MODULE_FIRMWARE("amdgpu/topaz_pfp.bin"); MODULE_FIRMWARE("amdgpu/topaz_me.bin"); MODULE_FIRMWARE("amdgpu/topaz_mec.bin"); -MODULE_FIRMWARE("amdgpu/topaz_mec2.bin"); MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); MODULE_FIRMWARE("amdgpu/fiji_ce.bin"); @@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - if (adev->asic_type != CHIP_STONEY) { + if ((adev->asic_type != CHIP_STONEY) && + (adev->asic_type != CHIP_TOPAZ)) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); if (!err) { @@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) if (r) return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_CP_MEC1); - if (r) - return -EINVAL; + if (adev->asic_type == CHIP_TOPAZ) { + r = gfx_v8_0_cp_compute_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_MEC1); + if (r) + return -EINVAL; + } } } @@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); gfx_v8_0_cp_enable(adev, false); gfx_v8_0_rlc_stop(adev); gfx_v8_0_cp_compute_fini(adev); @@ -4186,7 +4194,18 @@ static int gfx_v8_0_soft_reset(void *handle) gfx_v8_0_cp_gfx_enable(adev, false); /* Disable MEC parsing/prefetching */ - /* XXX todo */ + gfx_v8_0_cp_compute_enable(adev, false); + + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, + GMCON_DEBUG, GFX_STALL, 1); + tmp = REG_SET_FIELD(tmp, + GMCON_DEBUG, GFX_CLEAR, 1); + WREG32(mmGMCON_DEBUG, tmp); + + udelay(50); + } if (grbm_soft_reset) { tmp = RREG32(mmGRBM_SOFT_RESET); @@ -4215,6 +4234,16 @@ static int gfx_v8_0_soft_reset(void *handle) WREG32(mmSRBM_SOFT_RESET, tmp); tmp = RREG32(mmSRBM_SOFT_RESET); } + + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, + GMCON_DEBUG, GFX_STALL, 0); + tmp = REG_SET_FIELD(tmp, + GMCON_DEBUG, GFX_CLEAR, 0); + WREG32(mmGMCON_DEBUG, tmp); + } + /* Wait a little for things to settle down */ udelay(50); gfx_v8_0_print_status((void *)adev); @@ -4308,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; + r = amdgpu_irq_get(adev, 
&adev->gfx.priv_reg_irq, 0); + if (r) + return r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); + if (r) + return r; + /* requires IBs so do in late init after IB pool is initialized */ r = gfx_v8_0_do_edc_gpr_workarounds(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 3f956065d069..8aa2991ab379 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin"); +MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); + +static const u32 golden_settings_iceland_a11[] = +{ + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff +}; + +static const u32 iceland_mgcg_cgcg_init[] = +{ + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 +}; + +static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TOPAZ: + amdgpu_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_iceland_a11, + (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); + break; + default: + break; + } +} /** - * gmc8_mc_wait_for_idle - wait for MC idle callback. + * gmc7_mc_wait_for_idle - wait for MC idle callback. * * @adev: amdgpu_device pointer * @@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) case CHIP_HAWAII: chip_name = "hawaii"; break; + case CHIP_TOPAZ: + chip_name = "topaz"; + break; case CHIP_KAVERI: case CHIP_KABINI: return 0; default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + if (adev->asic_type == CHIP_TOPAZ) + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&adev->mc.fw, fw_name, adev->dev); if (err) goto out; @@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v7_0_init_golden_registers(adev); + gmc_v7_0_mc_program(adev); if (!(adev->flags & AMD_IS_APU)) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c0c9a0101eb4..3efd45546241 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -42,9 +42,7 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); -MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); -MODULE_FIRMWARE("amdgpu/fiji_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] = mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_iceland_a11[] = -{ - mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, - mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, - mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, - mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff -}; - -static const u32 iceland_mgcg_cgcg_init[] = -{ - mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 -}; - static const u32 cz_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 @@ -102,14 +87,6 @@ static const u32 
stoney_mgcg_cgcg_init[] = static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { - case CHIP_TOPAZ: - amdgpu_program_register_sequence(adev, - iceland_mgcg_cgcg_init, - (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_iceland_a11, - (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); - break; case CHIP_FIJI: amdgpu_program_register_sequence(adev, fiji_mgcg_cgcg_init, @@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); switch (adev->asic_type) { - case CHIP_TOPAZ: - chip_name = "topaz"; - break; case CHIP_TONGA: chip_name = "tonga"; break; case CHIP_FIJI: - chip_name = "fiji"; - break; case CHIP_CARRIZO: case CHIP_STONEY: return 0; @@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle) gmc_v8_0_mc_program(adev); - if (!(adev->flags & AMD_IS_APU)) { + if (adev->asic_type == CHIP_TONGA) { r = gmc_v8_0_mc_load_microcode(adev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 966d4b2ed9da..090486c18249 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type) case AMDGPU_UCODE_ID_CP_ME: return UCODE_ID_CP_ME_MASK; case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; + return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; case AMDGPU_UCODE_ID_CP_MEC2: return UCODE_ID_CP_MEC_MASK; case AMDGPU_UCODE_ID_RLC_G: @@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev) return -EINVAL; } - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); - return -EINVAL; - } - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++])) { DRM_ERROR("Failed to get firmware entry for SDMA0\n"); @@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev) UCODE_ID_CP_ME_MASK | UCODE_ID_CP_PFP_MASK | UCODE_ID_CP_MEC_MASK | - UCODE_ID_CP_MEC_JT1_MASK | - UCODE_ID_CP_MEC_JT2_MASK; + UCODE_ID_CP_MEC_JT1_MASK; + if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { DRM_ERROR("Fail to request SMU load ucode\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index f4a1346525fe..0497784b3652 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c @@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle) static int tonga_dpm_suspend(void *handle) { - return 0; + return tonga_dpm_hw_fini(handle); } static int tonga_dpm_resume(void *handle) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - ret = tonga_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - -fail: - mutex_unlock(&adev->pm.mutex); - return ret; + return tonga_dpm_hw_init(handle); } static int tonga_dpm_set_clockgating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 652e76644c31..89f5a1ff6f43 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -61,6 +61,7 @@ #include "vi.h" 
#include "vi_dpm.h" #include "gmc_v8_0.h" +#include "gmc_v7_0.h" #include "gfx_v8_0.h" #include "sdma_v2_4.h" #include "sdma_v3_0.h" @@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = }, { .type = AMD_IP_BLOCK_TYPE_GMC, - .major = 8, - .minor = 0, + .major = 7, + .minor = 4, .rev = 0, - .funcs = &gmc_v8_0_ip_funcs, + .funcs = &gmc_v7_0_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_IH, @@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle) break; case CHIP_FIJI: adev->has_uvd = true; - adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_VCE_MGCG; + adev->cg_flags = 0; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x3c; break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index c6a1b4cc6458..d2b49c026cf6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -558,20 +558,10 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep, return -EINVAL; /* this is the actual buffer to work with */ - - args_buff = kmalloc(args->buf_size_in_bytes - - sizeof(*args), GFP_KERNEL); - if (args_buff == NULL) - return -ENOMEM; - - status = copy_from_user(args_buff, cmd_from_user, + args_buff = memdup_user(cmd_from_user, args->buf_size_in_bytes - sizeof(*args)); - - if (status != 0) { - pr_debug("Failed to copy address watch user data\n"); - kfree(args_buff); - return -EINVAL; - } + if (IS_ERR(args_buff)) + return PTR_ERR(args_buff); aw_info.process = p; @@ -677,22 +667,12 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep, if (cmd_from_user == NULL) return -EINVAL; - /* this is the actual buffer to work with */ - - args_buff = kmalloc(args->buf_size_in_bytes - sizeof(*args), - GFP_KERNEL); - - if (args_buff == NULL) - return -ENOMEM; + /* copy the entire buffer from user */ - /* Now copy the entire buffer from user */ - status = copy_from_user(args_buff, cmd_from_user, + args_buff = memdup_user(cmd_from_user, args->buf_size_in_bytes - sizeof(*args)); - if (status != 0) { - pr_debug("Failed to copy wave control user data\n"); - kfree(args_buff); - return -EINVAL; - } + if (IS_ERR(args_buff)) + return PTR_ERR(args_buff); /* move ptr to the start of the "pay-load" area */ wac_info.process = p; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 9be007081b72..a902ae037398 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work) kfree(p); - kfree((void *)work); + kfree(work); } static void kfd_process_destroy_delayed(struct rcu_head *rcu) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8f5d5edcf193..aa67244a77ae 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -64,6 +64,11 @@ static int pp_sw_init(void *handle) if (ret == 0) ret = hwmgr->hwmgr_func->backend_init(hwmgr); + if (ret) + printk("amdgpu: powerplay initialization failed\n"); + else + printk("amdgpu: powerplay initialized\n"); + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 873a8d264d5c..ec222c665602 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -272,6 +272,9 @@ static int cz_start_smu(struct pp_smumgr *smumgr) 
UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; + if (smumgr->chip_id == CHIP_STONEY) + fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); + cz_request_smu_load_fw(smumgr); cz_check_fw_load_finish(smumgr, fw_to_check); @@ -282,7 +285,7 @@ static int cz_start_smu(struct pp_smumgr *smumgr) return ret; } -static uint8_t cz_translate_firmware_enum_to_arg( +static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, enum cz_scratch_entry firmware_enum) { uint8_t ret = 0; @@ -292,7 +295,10 @@ static uint8_t cz_translate_firmware_enum_to_arg( ret = UCODE_ID_SDMA0; break; case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: - ret = UCODE_ID_SDMA1; + if (smumgr->chip_id == CHIP_STONEY) + ret = UCODE_ID_SDMA0; + else + ret = UCODE_ID_SDMA1; break; case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: ret = UCODE_ID_CP_CE; @@ -307,7 +313,10 @@ static uint8_t cz_translate_firmware_enum_to_arg( ret = UCODE_ID_CP_MEC_JT1; break; case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: - ret = UCODE_ID_CP_MEC_JT2; + if (smumgr->chip_id == CHIP_STONEY) + ret = UCODE_ID_CP_MEC_JT1; + else + ret = UCODE_ID_CP_MEC_JT2; break; case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: ret = UCODE_ID_GMCON_RENG; @@ -396,7 +405,7 @@ static int cz_smu_populate_single_scratch_task( struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = type; - task->arg = cz_translate_firmware_enum_to_arg(fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->scratch_buffer_length; i++) @@ -433,7 +442,7 @@ static int cz_smu_populate_single_ucode_load_task( struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = TASK_TYPE_UCODE_LOAD; - task->arg = cz_translate_firmware_enum_to_arg(fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); task->next = is_last ? 
END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->driver_buffer_length; i++) @@ -509,8 +518,14 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr) CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - cz_smu_populate_single_ucode_load_task(smumgr, + + if (smumgr->chip_id == CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); + else + cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); + cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); @@ -551,7 +566,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - cz_smu_populate_single_ucode_load_task(smumgr, + if (smumgr->chip_id == CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); + else + cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); @@ -561,7 +580,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - cz_smu_populate_single_ucode_load_task(smumgr, + if (smumgr->chip_id == CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(smumgr, + CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); + else + cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); cz_smu_populate_single_ucode_load_task(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); @@ -618,7 +641,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { - firmware_type = cz_translate_firmware_enum_to_arg( + firmware_type = cz_translate_firmware_enum_to_arg(smumgr, firmware_list[i]); ucode_id = cz_convert_fw_type_to_cgs(firmware_type); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index f0c39840bacf..0ab7c24cd7d6 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -945,9 +945,23 @@ static void wait_for_fences(struct drm_device *dev, } } -static bool framebuffer_changed(struct drm_device *dev, - struct drm_atomic_state *old_state, - struct drm_crtc *crtc) +/** + * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed + * @dev: DRM device + * @old_state: atomic state object with old state structures + * @crtc: DRM crtc + * + * Checks whether the framebuffer used for this CRTC changes as a result of + * the atomic update. This is useful for drivers which cannot use + * drm_atomic_helper_wait_for_vblanks() and need to reimplement its + * functionality. + * + * Returns: + * true if the framebuffer changed. 
+ */ +bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, + struct drm_atomic_state *old_state, + struct drm_crtc *crtc) { struct drm_plane *plane; struct drm_plane_state *old_plane_state; @@ -964,6 +978,7 @@ static bool framebuffer_changed(struct drm_device *dev, return false; } +EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed); /** * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs @@ -998,7 +1013,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, if (old_state->legacy_cursor_update) continue; - if (!framebuffer_changed(dev, old_state, crtc)) + if (!drm_atomic_helper_framebuffer_changed(dev, + old_state, crtc)) continue; ret = drm_crtc_vblank_get(crtc); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 6ed90a2437e5..8ae13de272c4 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -803,6 +803,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) return mstb; } +static void drm_dp_free_mst_port(struct kref *kref); + +static void drm_dp_free_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); + if (mstb->port_parent) { + if (list_empty(&mstb->port_parent->next)) + kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); + } + kfree(mstb); +} + static void drm_dp_destroy_mst_branch_device(struct kref *kref) { struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); @@ -810,6 +822,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) bool wake_tx = false; /* + * init kref again to be used by ports to remove mst branch when it is + * not needed anymore + */ + kref_init(kref); + + if (mstb->port_parent && list_empty(&mstb->port_parent->next)) + kref_get(&mstb->port_parent->kref); + + /* * destroy all ports - don't need lock * as there are no more references to the mst branch * device at this point. 
@@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) if (wake_tx) wake_up(&mstb->mgr->tx_waitq); - kfree(mstb); + + kref_put(kref, drm_dp_free_mst_branch_device); } static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) @@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref) * from an EDID retrieval */ mutex_lock(&mgr->destroy_connector_lock); + kref_get(&port->parent->kref); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); @@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) return send_link; } -static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, - struct drm_dp_mst_port *port) +static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) { int ret; - if (port->dpcd_rev >= 0x12) { - port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); - if (!port->guid_valid) { - ret = drm_dp_send_dpcd_write(mstb->mgr, - port, - DP_GUID, - 16, port->guid); - port->guid_valid = true; + + memcpy(mstb->guid, guid, 16); + + if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { + if (mstb->port_parent) { + ret = drm_dp_send_dpcd_write( + mstb->mgr, + mstb->port_parent, + DP_GUID, + 16, + mstb->guid); + } else { + + ret = drm_dp_dpcd_write( + mstb->mgr->aux, + DP_GUID, + mstb->guid, + 16); } } } @@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, port->dpcd_rev = port_msg->dpcd_revision; port->num_sdp_streams = port_msg->num_sdp_streams; port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; - memcpy(port->guid, port_msg->peer_guid, 16); /* manage mstb port lists with mgr lock - take a reference for this list */ @@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, if (old_ddps != port->ddps) { if (port->ddps) { - drm_dp_check_port_guid(mstb, port); if (!port->input) drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); } else { - port->guid_valid = false; port->available_pbn = 0; } } @@ -1130,13 +1159,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_put_port(port); goto out; } - if (port->port_num >= DP_MST_LOGICAL_PORT_0) { - port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); - drm_mode_connector_set_tile_property(port->connector); - } + + drm_mode_connector_set_tile_property(port->connector); + (*mstb->mgr->cbs->register_connector)(port->connector); } - out: /* put reference to this port */ drm_dp_put_port(port); @@ -1161,11 +1188,9 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, port->ddps = conn_stat->displayport_device_plug_status; if (old_ddps != port->ddps) { + dowork = true; if (port->ddps) { - drm_dp_check_port_guid(mstb, port); - dowork = true; } else { - port->guid_valid = false; port->available_pbn = 0; } } @@ -1222,13 +1247,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( struct drm_dp_mst_branch *found_mstb; struct drm_dp_mst_port *port; + if (memcmp(mstb->guid, guid, 16) == 0) + return mstb; + + list_for_each_entry(port, &mstb->ports, next) { if (!port->mstb) continue; - if (port->guid_valid && memcmp(port->guid, guid, 16) == 0) - return port->mstb; - found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); if (found_mstb) @@ -1247,10 +1273,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( /* find the port by iterating down */ mutex_lock(&mgr->lock); - if (mgr->guid_valid && 
memcmp(mgr->guid, guid, 16) == 0) - mstb = mgr->mst_primary; - else - mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); + mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); if (mstb) kref_get(&mstb->kref); @@ -1271,8 +1294,13 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m if (port->input) continue; - if (!port->ddps) + if (!port->ddps) { + if (port->cached_edid) { + kfree(port->cached_edid); + port->cached_edid = NULL; + } continue; + } if (!port->available_pbn) drm_dp_send_enum_path_resources(mgr, mstb, port); @@ -1283,6 +1311,12 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m drm_dp_check_and_send_link_address(mgr, mstb_child); drm_dp_put_mst_branch_device(mstb_child); } + } else if (port->pdt == DP_PEER_DEVICE_SST_SINK || + port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) { + if (!port->cached_edid) { + port->cached_edid = + drm_get_edid(port->connector, &port->aux.ddc); + } } } } @@ -1302,6 +1336,8 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) drm_dp_check_and_send_link_address(mgr, mstb); drm_dp_put_mst_branch_device(mstb); } + + (*mgr->cbs->hotplug)(mgr); } static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -1555,10 +1591,12 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg->reply.u.link_addr.ports[i].num_sdp_streams, txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); } + + drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); } - (*mgr->cbs->hotplug)(mgr); } } else { mstb->link_address_sent = false; @@ -1602,6 +1640,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, return 0; } +static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) +{ + if (!mstb->port_parent) + return NULL; + + if (mstb->port_parent->mstb != mstb) + return mstb->port_parent; + + return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); +} + +static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + int *port_num) +{ + struct drm_dp_mst_branch *rmstb = NULL; + struct drm_dp_mst_port *found_port; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { + found_port = drm_dp_get_last_connected_port_to_mstb(mstb); + + if (found_port) { + rmstb = found_port->parent; + kref_get(&rmstb->kref); + *port_num = found_port->port_num; + } + } + mutex_unlock(&mgr->lock); + return rmstb; +} + static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int id, @@ -1609,13 +1678,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb; - int len, ret; + int len, ret, port_num; u8 sinks[DRM_DP_MAX_SDP_STREAMS]; int i; + port_num = port->port_num; mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); - if (!mstb) - return -EINVAL; + if (!mstb) { + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); + + if (!mstb) + return -EINVAL; + } txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) { @@ -1627,7 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, sinks[i] = i; txmsg->dst = mstb; - len = build_allocate_payload(txmsg, port->port_num, + len = 
build_allocate_payload(txmsg, port_num, id, pbn, port->num_sdp_streams, sinks); @@ -1983,31 +2057,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mgr->mst_primary = mstb; kref_get(&mgr->mst_primary->kref); - { - struct drm_dp_payload reset_pay; - reset_pay.start_slot = 0; - reset_pay.num_slots = 0x3f; - drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); - } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); if (ret < 0) { goto out_unlock; } - - /* sort out guid */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); - if (ret != 16) { - DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); - goto out_unlock; - } - - mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); - if (!mgr->guid_valid) { - ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); - mgr->guid_valid = true; + { + struct drm_dp_payload reset_pay; + reset_pay.start_slot = 0; + reset_pay.num_slots = 0x3f; + drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); } queue_work(system_long_wq, &mgr->work); @@ -2231,9 +2291,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) } drm_dp_update_port(mstb, &msg.u.conn_stat); - DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); - (*mgr->cbs->hotplug)(mgr); + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); if (!mstb) @@ -2320,10 +2379,6 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector case DP_PEER_DEVICE_SST_SINK: status = connector_status_connected; - /* for logical ports - cache the EDID */ - if (port->port_num >= 8 && !port->cached_edid) { - port->cached_edid = drm_get_edid(connector, &port->aux.ddc); - } break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) @@ -2378,10 +2433,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else { - edid = drm_get_edid(connector, &port->aux.ddc); - drm_mode_connector_set_tile_property(connector); - } + port->has_audio = drm_detect_monitor_audio(edid); drm_dp_put_port(port); return edid; @@ -2446,6 +2498,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); if (pbn == port->vcpi.pbn) { *slots = port->vcpi.num_slots; + drm_dp_put_port(port); return true; } } @@ -2605,32 +2658,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status); */ int drm_dp_calc_pbn_mode(int clock, int bpp) { - fixed20_12 pix_bw; - fixed20_12 fbpp; - fixed20_12 result; - fixed20_12 margin, tmp; - u32 res; - - pix_bw.full = dfixed_const(clock); - fbpp.full = dfixed_const(bpp); - tmp.full = dfixed_const(8); - fbpp.full = dfixed_div(fbpp, tmp); - - result.full = dfixed_mul(pix_bw, fbpp); - margin.full = 
dfixed_const(54); - tmp.full = dfixed_const(64); - margin.full = dfixed_div(margin, tmp); - result.full = dfixed_div(result, margin); - - margin.full = dfixed_const(1006); - tmp.full = dfixed_const(1000); - margin.full = dfixed_div(margin, tmp); - result.full = dfixed_mul(result, margin); - - result.full = dfixed_div(result, tmp); - result.full = dfixed_ceil(result); - res = dfixed_trunc(result); - return res; + u64 kbps; + s64 peak_kbps; + u32 numerator; + u32 denominator; + + kbps = clock * bpp; + + /* + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 + * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on + * common multiplier to render an integer PBN for all link rate/lane + * counts combinations + * calculate + * peak_kbps *= (1006/1000) + * peak_kbps *= (64/54) + * peak_kbps *= 8 convert to bytes + */ + + numerator = 64 * 1006; + denominator = 54 * 8 * 1000 * 1000; + + kbps *= numerator; + peak_kbps = drm_fixp_from_fraction(kbps, denominator); + + return drm_fixp2int_ceil(peak_kbps); } EXPORT_SYMBOL(drm_dp_calc_pbn_mode); @@ -2638,11 +2690,23 @@ static int test_calc_pbn_mode(void) { int ret; ret = drm_dp_calc_pbn_mode(154000, 30); - if (ret != 689) + if (ret != 689) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 154000, 30, 689, ret); return -EINVAL; + } ret = drm_dp_calc_pbn_mode(234000, 30); - if (ret != 1047) + if (ret != 1047) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 234000, 30, 1047, ret); return -EINVAL; + } + ret = drm_dp_calc_pbn_mode(297000, 24); + if (ret != 1063) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 297000, 24, 1063, ret); + return -EINVAL; + } return 0; } @@ -2783,6 +2847,13 @@ static void drm_dp_tx_work(struct work_struct *work) mutex_unlock(&mgr->qlock); } +static void drm_dp_free_mst_port(struct kref *kref) +{ + struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); + kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); + kfree(port); +} + static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); @@ -2803,13 +2874,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) list_del(&port->next); mutex_unlock(&mgr->destroy_connector_lock); + kref_init(&port->kref); + INIT_LIST_HEAD(&port->next); + mgr->cbs->destroy_connector(mgr, port->connector); drm_dp_port_teardown_pdt(port, port->pdt); - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - kfree(port); + if (!port->input && port->vcpi.vcpi > 0) { + if (mgr->mst_state) { + drm_dp_mst_reset_vcpi_slots(mgr, port); + drm_dp_update_payload_part1(mgr); + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + } + } + + kref_put(&port->kref, drm_dp_free_mst_port); send_hotplug = true; } if (send_hotplug) @@ -2847,6 +2927,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; mgr->max_payloads = max_payloads; mgr->conn_base_id = conn_base_id; + if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || + max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) + return -EINVAL; mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); if (!mgr->payloads) return -ENOMEM; @@ -2854,7 +2937,9 @@ int 
drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, if (!mgr->proposed_vcpis) return -ENOMEM; set_bit(0, &mgr->payload_mask); - test_calc_pbn_mode(); + if (test_calc_pbn_mode() < 0) + DRM_ERROR("MST PBN self-test failed\n"); + return 0; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index c3b80fd65d62..7b30b307674b 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -198,10 +198,7 @@ EXPORT_SYMBOL(drm_ht_remove_item); void drm_ht_remove(struct drm_open_hash *ht) { if (ht->table) { - if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order) - kfree(ht->table); - else - vfree(ht->table); + kvfree(ht->table); ht->table = NULL; } } diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h index 9e585d51fb78..e881482b5971 100644 --- a/drivers/gpu/drm/etnaviv/common.xml.h +++ b/drivers/gpu/drm/etnaviv/common.xml.h @@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng git clone git://0x04.net/rules-ng-ng The rules-ng-ng source files this header was generated from are: -- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) -- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) +- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53) +- common.xml ( 18379 bytes, from 2015-12-12 09:02:53) Copyright (C) 2015 */ @@ -30,15 +30,19 @@ Copyright (C) 2015 #define ENDIAN_MODE_NO_SWAP 0x00000000 #define ENDIAN_MODE_SWAP_16 0x00000001 #define ENDIAN_MODE_SWAP_32 0x00000002 +#define chipModel_GC200 0x00000200 #define chipModel_GC300 0x00000300 #define chipModel_GC320 0x00000320 +#define chipModel_GC328 0x00000328 #define chipModel_GC350 0x00000350 #define chipModel_GC355 0x00000355 #define chipModel_GC400 0x00000400 #define chipModel_GC410 0x00000410 #define chipModel_GC420 0x00000420 +#define chipModel_GC428 0x00000428 #define chipModel_GC450 0x00000450 #define chipModel_GC500 0x00000500 +#define chipModel_GC520 0x00000520 #define chipModel_GC530 0x00000530 #define chipModel_GC600 0x00000600 #define chipModel_GC700 0x00000700 @@ -46,9 +50,16 @@ Copyright (C) 2015 #define chipModel_GC860 0x00000860 #define chipModel_GC880 0x00000880 #define chipModel_GC1000 0x00001000 +#define chipModel_GC1500 0x00001500 #define chipModel_GC2000 0x00002000 #define chipModel_GC2100 0x00002100 +#define chipModel_GC2200 0x00002200 +#define chipModel_GC2500 0x00002500 +#define chipModel_GC3000 0x00003000 #define chipModel_GC4000 0x00004000 +#define chipModel_GC5000 0x00005000 +#define chipModel_GC5200 0x00005200 +#define chipModel_GC6400 0x00006400 #define RGBA_BITS_R 0x00000001 #define RGBA_BITS_G 0x00000002 #define RGBA_BITS_B 0x00000004 @@ -160,7 +171,7 @@ Copyright (C) 2015 #define chipMinorFeatures2_UNK8 0x00000100 #define chipMinorFeatures2_UNK9 0x00000200 #define chipMinorFeatures2_UNK10 0x00000400 -#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800 +#define chipMinorFeatures2_HALTI1 0x00000800 #define chipMinorFeatures2_UNK12 0x00001000 #define chipMinorFeatures2_UNK13 0x00002000 #define chipMinorFeatures2_UNK14 0x00004000 @@ -189,7 +200,7 @@ Copyright (C) 2015 #define chipMinorFeatures3_UNK5 0x00000020 #define chipMinorFeatures3_UNK6 0x00000040 #define chipMinorFeatures3_UNK7 0x00000080 -#define chipMinorFeatures3_UNK8 0x00000100 +#define chipMinorFeatures3_FAST_MSAA 0x00000100 #define chipMinorFeatures3_UNK9 0x00000200 #define chipMinorFeatures3_BUG_FIXES10 0x00000400 #define chipMinorFeatures3_UNK11 0x00000800 @@ -199,7 +210,7 @@ Copyright (C) 2015 #define 
chipMinorFeatures3_UNK15 0x00008000 #define chipMinorFeatures3_UNK16 0x00010000 #define chipMinorFeatures3_UNK17 0x00020000 -#define chipMinorFeatures3_UNK18 0x00040000 +#define chipMinorFeatures3_ACE 0x00040000 #define chipMinorFeatures3_UNK19 0x00080000 #define chipMinorFeatures3_UNK20 0x00100000 #define chipMinorFeatures3_UNK21 0x00200000 @@ -207,7 +218,7 @@ Copyright (C) 2015 #define chipMinorFeatures3_UNK23 0x00800000 #define chipMinorFeatures3_UNK24 0x01000000 #define chipMinorFeatures3_UNK25 0x02000000 -#define chipMinorFeatures3_UNK26 0x04000000 +#define chipMinorFeatures3_NEW_HZ 0x04000000 #define chipMinorFeatures3_UNK27 0x08000000 #define chipMinorFeatures3_UNK28 0x10000000 #define chipMinorFeatures3_UNK29 0x20000000 @@ -229,9 +240,9 @@ Copyright (C) 2015 #define chipMinorFeatures4_UNK13 0x00002000 #define chipMinorFeatures4_UNK14 0x00004000 #define chipMinorFeatures4_UNK15 0x00008000 -#define chipMinorFeatures4_UNK16 0x00010000 +#define chipMinorFeatures4_HALTI2 0x00010000 #define chipMinorFeatures4_UNK17 0x00020000 -#define chipMinorFeatures4_UNK18 0x00040000 +#define chipMinorFeatures4_SMALL_MSAA 0x00040000 #define chipMinorFeatures4_UNK19 0x00080000 #define chipMinorFeatures4_UNK20 0x00100000 #define chipMinorFeatures4_UNK21 0x00200000 @@ -245,5 +256,37 @@ Copyright (C) 2015 #define chipMinorFeatures4_UNK29 0x20000000 #define chipMinorFeatures4_UNK30 0x40000000 #define chipMinorFeatures4_UNK31 0x80000000 +#define chipMinorFeatures5_UNK0 0x00000001 +#define chipMinorFeatures5_UNK1 0x00000002 +#define chipMinorFeatures5_UNK2 0x00000004 +#define chipMinorFeatures5_UNK3 0x00000008 +#define chipMinorFeatures5_UNK4 0x00000010 +#define chipMinorFeatures5_UNK5 0x00000020 +#define chipMinorFeatures5_UNK6 0x00000040 +#define chipMinorFeatures5_UNK7 0x00000080 +#define chipMinorFeatures5_UNK8 0x00000100 +#define chipMinorFeatures5_HALTI3 0x00000200 +#define chipMinorFeatures5_UNK10 0x00000400 +#define chipMinorFeatures5_UNK11 0x00000800 +#define chipMinorFeatures5_UNK12 0x00001000 +#define chipMinorFeatures5_UNK13 0x00002000 +#define chipMinorFeatures5_UNK14 0x00004000 +#define chipMinorFeatures5_UNK15 0x00008000 +#define chipMinorFeatures5_UNK16 0x00010000 +#define chipMinorFeatures5_UNK17 0x00020000 +#define chipMinorFeatures5_UNK18 0x00040000 +#define chipMinorFeatures5_UNK19 0x00080000 +#define chipMinorFeatures5_UNK20 0x00100000 +#define chipMinorFeatures5_UNK21 0x00200000 +#define chipMinorFeatures5_UNK22 0x00400000 +#define chipMinorFeatures5_UNK23 0x00800000 +#define chipMinorFeatures5_UNK24 0x01000000 +#define chipMinorFeatures5_UNK25 0x02000000 +#define chipMinorFeatures5_UNK26 0x04000000 +#define chipMinorFeatures5_UNK27 0x08000000 +#define chipMinorFeatures5_UNK28 0x10000000 +#define chipMinorFeatures5_UNK29 0x20000000 +#define chipMinorFeatures5_UNK30 0x40000000 +#define chipMinorFeatures5_UNK31 0x80000000 #endif /* COMMON_XML */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 5c89ebb52fd2..e8858985f01e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -668,7 +668,6 @@ static struct platform_driver etnaviv_platform_driver = { .probe = etnaviv_pdev_probe, .remove = etnaviv_pdev_remove, .driver = { - .owner = THIS_MODULE, .name = "etnaviv", .of_match_table = dt_match, }, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index d6bd438bd5be..1cd6046e76b1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ 
b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -85,7 +85,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int etnaviv_gem_prime_pin(struct drm_gem_object *obj); void etnaviv_gem_prime_unpin(struct drm_gem_object *obj); -void *etnaviv_gem_vaddr(struct drm_gem_object *obj); +void *etnaviv_gem_vmap(struct drm_gem_object *obj); int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, struct timespec *timeout); int etnaviv_gem_cpu_fini(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index bf8fa859e8be..4a29eeadbf1e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -201,7 +201,9 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) obj = vram->object; + mutex_lock(&obj->lock); pages = etnaviv_gem_get_pages(obj); + mutex_unlock(&obj->lock); if (pages) { int j; @@ -213,8 +215,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) iter.hdr->iova = cpu_to_le64(vram->iova); - vaddr = etnaviv_gem_vaddr(&obj->base); - if (vaddr && !IS_ERR(vaddr)) + vaddr = etnaviv_gem_vmap(&obj->base); + if (vaddr) memcpy(iter.data, vaddr, obj->base.size); etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data + diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 9f77c3b94cc6..4b519e4309b2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -353,25 +353,39 @@ void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj) drm_gem_object_unreference_unlocked(obj); } -void *etnaviv_gem_vaddr(struct drm_gem_object *obj) +void *etnaviv_gem_vmap(struct drm_gem_object *obj) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - mutex_lock(&etnaviv_obj->lock); - if (!etnaviv_obj->vaddr) { - struct page **pages = etnaviv_gem_get_pages(etnaviv_obj); - - if (IS_ERR(pages)) - return ERR_CAST(pages); + if (etnaviv_obj->vaddr) + return etnaviv_obj->vaddr; - etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, - VM_MAP, pgprot_writecombine(PAGE_KERNEL)); - } + mutex_lock(&etnaviv_obj->lock); + /* + * Need to check again, as we might have raced with another thread + * while waiting for the mutex. 
+ */ + if (!etnaviv_obj->vaddr) + etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj); mutex_unlock(&etnaviv_obj->lock); return etnaviv_obj->vaddr; } +static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj) +{ + struct page **pages; + + lockdep_assert_held(&obj->lock); + + pages = etnaviv_gem_get_pages(obj); + if (IS_ERR(pages)) + return NULL; + + return vmap(pages, obj->base.size >> PAGE_SHIFT, + VM_MAP, pgprot_writecombine(PAGE_KERNEL)); +} + static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) { if (op & ETNA_PREP_READ) @@ -522,6 +536,7 @@ static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { .get_pages = etnaviv_gem_shmem_get_pages, .release = etnaviv_gem_shmem_release, + .vmap = etnaviv_gem_vmap_impl, }; void etnaviv_gem_free_object(struct drm_gem_object *obj) @@ -866,6 +881,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = { .get_pages = etnaviv_gem_userptr_get_pages, .release = etnaviv_gem_userptr_release, + .vmap = etnaviv_gem_vmap_impl, }; int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index a300b4b3d545..ab5df8147a5f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -78,6 +78,7 @@ struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj) struct etnaviv_gem_ops { int (*get_pages)(struct etnaviv_gem_object *); void (*release)(struct etnaviv_gem_object *); + void *(*vmap)(struct etnaviv_gem_object *); }; static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index e94db4f95770..4e67395f5fa1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -31,7 +31,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) { - return etnaviv_gem_vaddr(obj); + return etnaviv_gem_vmap(obj); } void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) @@ -77,9 +77,17 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt); } +static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) +{ + lockdep_assert_held(&etnaviv_obj->lock); + + return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf); +} + static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { /* .get_pages should never be called */ .release = etnaviv_gem_prime_release, + .vmap = etnaviv_gem_prime_vmap_impl, }; struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 056a72e6ed26..a33162cf4f4c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -72,6 +72,14 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) *value = gpu->identity.minor_features3; break; + case ETNAVIV_PARAM_GPU_FEATURES_5: + *value = gpu->identity.minor_features4; + break; + + case ETNAVIV_PARAM_GPU_FEATURES_6: + *value = gpu->identity.minor_features5; + break; + case ETNAVIV_PARAM_GPU_STREAM_COUNT: *value = 
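
The etnaviv_gem_vmap() rework above returns early when a mapping already exists and re-checks the pointer after taking the mutex, since another thread may have created the mapping while this one waited; the per-object ops->vmap() hook then lets the shmem, userptr and dma-buf paths share the same locking. A minimal sketch of that double-checked pattern, using an invented my_obj type rather than the driver's structures:

        #include <linux/mutex.h>

        /* Illustrative only: lazy mapping with a re-check under the lock. */
        struct my_obj {
                struct mutex lock;
                void *vaddr;
                void *(*do_vmap)(struct my_obj *obj);   /* called with lock held */
        };

        static void *my_obj_vmap(struct my_obj *obj)
        {
                if (obj->vaddr)                 /* fast path, already mapped */
                        return obj->vaddr;

                mutex_lock(&obj->lock);
                /* Re-check: someone may have mapped it while we waited. */
                if (!obj->vaddr)
                        obj->vaddr = obj->do_vmap(obj);
                mutex_unlock(&obj->lock);

                return obj->vaddr;
        }

The lock-free fast path is only safe because vaddr makes a single transition from NULL to a valid mapping that stays alive for the lifetime of the object.
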
gpu->identity.stream_count; break; @@ -112,6 +120,10 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) *value = gpu->identity.num_constants; break; + case ETNAVIV_PARAM_GPU_NUM_VARYINGS: + *value = gpu->identity.varyings_count; + break; + default: DBG("%s: invalid param: %u", dev_name(gpu->dev), param); return -EINVAL; @@ -120,46 +132,56 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) return 0; } + +#define etnaviv_is_model_rev(gpu, mod, rev) \ + ((gpu)->identity.model == chipModel_##mod && \ + (gpu)->identity.revision == rev) +#define etnaviv_field(val, field) \ + (((val) & field##__MASK) >> field##__SHIFT) + static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) { if (gpu->identity.minor_features0 & chipMinorFeatures0_MORE_MINOR_FEATURES) { - u32 specs[2]; + u32 specs[4]; + unsigned int streams; specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS); specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2); - - gpu->identity.stream_count = - (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) - >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT; - gpu->identity.register_max = - (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) - >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT; - gpu->identity.thread_count = - (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) - >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT; - gpu->identity.vertex_cache_size = - (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) - >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT; - gpu->identity.shader_core_count = - (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) - >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT; - gpu->identity.pixel_pipes = - (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) - >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT; + specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3); + specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4); + + gpu->identity.stream_count = etnaviv_field(specs[0], + VIVS_HI_CHIP_SPECS_STREAM_COUNT); + gpu->identity.register_max = etnaviv_field(specs[0], + VIVS_HI_CHIP_SPECS_REGISTER_MAX); + gpu->identity.thread_count = etnaviv_field(specs[0], + VIVS_HI_CHIP_SPECS_THREAD_COUNT); + gpu->identity.vertex_cache_size = etnaviv_field(specs[0], + VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE); + gpu->identity.shader_core_count = etnaviv_field(specs[0], + VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT); + gpu->identity.pixel_pipes = etnaviv_field(specs[0], + VIVS_HI_CHIP_SPECS_PIXEL_PIPES); gpu->identity.vertex_output_buffer_size = - (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) - >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT; - - gpu->identity.buffer_size = - (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) - >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT; - gpu->identity.instruction_count = - (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) - >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT; - gpu->identity.num_constants = - (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) - >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT; + etnaviv_field(specs[0], + VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE); + + gpu->identity.buffer_size = etnaviv_field(specs[1], + VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE); + gpu->identity.instruction_count = etnaviv_field(specs[1], + VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT); + gpu->identity.num_constants = etnaviv_field(specs[1], + VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS); + + gpu->identity.varyings_count = etnaviv_field(specs[2], + VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT); + + /* This overrides the value from older register if non-zero */ + streams = 
etnaviv_field(specs[3], + VIVS_HI_CHIP_SPECS_4_STREAM_COUNT); + if (streams) + gpu->identity.stream_count = streams; } /* Fill in the stream count if not specified */ @@ -173,7 +195,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) /* Convert the register max value */ if (gpu->identity.register_max) gpu->identity.register_max = 1 << gpu->identity.register_max; - else if (gpu->identity.model == 0x0400) + else if (gpu->identity.model == chipModel_GC400) gpu->identity.register_max = 32; else gpu->identity.register_max = 64; @@ -181,10 +203,10 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) /* Convert thread count */ if (gpu->identity.thread_count) gpu->identity.thread_count = 1 << gpu->identity.thread_count; - else if (gpu->identity.model == 0x0400) + else if (gpu->identity.model == chipModel_GC400) gpu->identity.thread_count = 64; - else if (gpu->identity.model == 0x0500 || - gpu->identity.model == 0x0530) + else if (gpu->identity.model == chipModel_GC500 || + gpu->identity.model == chipModel_GC530) gpu->identity.thread_count = 128; else gpu->identity.thread_count = 256; @@ -206,7 +228,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) if (gpu->identity.vertex_output_buffer_size) { gpu->identity.vertex_output_buffer_size = 1 << gpu->identity.vertex_output_buffer_size; - } else if (gpu->identity.model == 0x0400) { + } else if (gpu->identity.model == chipModel_GC400) { if (gpu->identity.revision < 0x4000) gpu->identity.vertex_output_buffer_size = 512; else if (gpu->identity.revision < 0x4200) @@ -219,9 +241,8 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) switch (gpu->identity.instruction_count) { case 0: - if ((gpu->identity.model == 0x2000 && - gpu->identity.revision == 0x5108) || - gpu->identity.model == 0x880) + if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) || + gpu->identity.model == chipModel_GC880) gpu->identity.instruction_count = 512; else gpu->identity.instruction_count = 256; @@ -242,6 +263,30 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) if (gpu->identity.num_constants == 0) gpu->identity.num_constants = 168; + + if (gpu->identity.varyings_count == 0) { + if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0) + gpu->identity.varyings_count = 12; + else + gpu->identity.varyings_count = 8; + } + + /* + * For some cores, two varyings are consumed for position, so the + * maximum varying count needs to be reduced by one. + */ + if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) || + etnaviv_is_model_rev(gpu, GC4000, 0x5222) || + etnaviv_is_model_rev(gpu, GC4000, 0x5245) || + etnaviv_is_model_rev(gpu, GC4000, 0x5208) || + etnaviv_is_model_rev(gpu, GC3000, 0x5435) || + etnaviv_is_model_rev(gpu, GC2200, 0x5244) || + etnaviv_is_model_rev(gpu, GC2100, 0x5108) || + etnaviv_is_model_rev(gpu, GC2000, 0x5108) || + etnaviv_is_model_rev(gpu, GC1500, 0x5246) || + etnaviv_is_model_rev(gpu, GC880, 0x5107) || + etnaviv_is_model_rev(gpu, GC880, 0x5106)) + gpu->identity.varyings_count -= 1; } static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) @@ -251,12 +296,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY); /* Special case for older graphic cores. 
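
The specs-decoding rewrite above introduces etnaviv_field(), which token-pastes the __MASK/__SHIFT suffixes onto a register field name, plus etnaviv_is_model_rev() for the recurring model/revision comparisons, shrinking each field read to a single line. The same pasting trick in isolation, with invented register names:

        #include <linux/types.h>

        #define REG_SIZE__MASK   0x00000ff0
        #define REG_SIZE__SHIFT  4

        #define reg_field(val, field) \
                (((val) & field##__MASK) >> field##__SHIFT)

        static u32 decode_size(u32 reg)
        {
                /* Expands to (reg & 0x00000ff0) >> 4. */
                return reg_field(reg, REG_SIZE);
        }
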
*/ - if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK) - >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) { - gpu->identity.model = 0x500; /* gc500 */ - gpu->identity.revision = - (chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK) - >> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT; + if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) { + gpu->identity.model = chipModel_GC500; + gpu->identity.revision = etnaviv_field(chipIdentity, + VIVS_HI_CHIP_IDENTITY_REVISION); } else { gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); @@ -269,13 +312,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) * same. Only for GC400 family. */ if ((gpu->identity.model & 0xff00) == 0x0400 && - gpu->identity.model != 0x0420) { + gpu->identity.model != chipModel_GC420) { gpu->identity.model = gpu->identity.model & 0x0400; } /* Another special case */ - if (gpu->identity.model == 0x300 && - gpu->identity.revision == 0x2201) { + if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) { u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); @@ -295,11 +337,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE); /* Disable fast clear on GC700. */ - if (gpu->identity.model == 0x700) + if (gpu->identity.model == chipModel_GC700) gpu->identity.features &= ~chipFeatures_FAST_CLEAR; - if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) || - (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) { + if ((gpu->identity.model == chipModel_GC500 && + gpu->identity.revision < 2) || + (gpu->identity.model == chipModel_GC300 && + gpu->identity.revision < 0x2000)) { /* * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these @@ -309,6 +353,8 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu->identity.minor_features1 = 0; gpu->identity.minor_features2 = 0; gpu->identity.minor_features3 = 0; + gpu->identity.minor_features4 = 0; + gpu->identity.minor_features5 = 0; } else gpu->identity.minor_features0 = gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0); @@ -321,6 +367,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2); gpu->identity.minor_features3 = gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3); + gpu->identity.minor_features4 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4); + gpu->identity.minor_features5 = + gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); } /* GC600 idle register reports zero bits where modules aren't present */ @@ -441,10 +491,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) { u16 prefetch; - if (gpu->identity.model == chipModel_GC320 && - gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 && - (gpu->identity.revision == 0x5007 || - gpu->identity.revision == 0x5220)) { + if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) || + etnaviv_is_model_rev(gpu, GC320, 0x5220)) && + gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) { u32 mc_memory_debug; mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff; @@ -466,7 +515,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) VIVS_HI_AXI_CONFIG_ARCACHE(2)); /* GC2000 rev 5108 needs a special bus config */ - if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) { + if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) { u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG); bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK | VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK); @@ -511,8 +560,16 @@ int etnaviv_gpu_init(struct etnaviv_gpu 
*gpu) if (gpu->identity.model == 0) { dev_err(gpu->dev, "Unknown GPU model\n"); - pm_runtime_put_autosuspend(gpu->dev); - return -ENXIO; + ret = -ENXIO; + goto fail; + } + + /* Exclude VG cores with FE2.0 */ + if (gpu->identity.features & chipFeatures_PIPE_VG && + gpu->identity.features & chipFeatures_FE20) { + dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n"); + ret = -ENXIO; + goto fail; } ret = etnaviv_hw_reset(gpu); @@ -539,10 +596,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) goto fail; } - /* TODO: we will leak here memory - fix it! */ - gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); if (!gpu->mmu) { + iommu_domain_free(iommu); ret = -ENOMEM; goto fail; } @@ -552,7 +608,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) if (!gpu->buffer) { ret = -ENOMEM; dev_err(gpu->dev, "could not create command buffer\n"); - goto fail; + goto destroy_iommu; } if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) { ret = -EINVAL; @@ -582,6 +638,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) free_buffer: etnaviv_gpu_cmdbuf_free(gpu->buffer); gpu->buffer = NULL; +destroy_iommu: + etnaviv_iommu_destroy(gpu->mmu); + gpu->mmu = NULL; fail: pm_runtime_mark_last_busy(gpu->dev); pm_runtime_put_autosuspend(gpu->dev); @@ -642,6 +701,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) gpu->identity.minor_features2); seq_printf(m, "\t minor_features3: 0x%08x\n", gpu->identity.minor_features3); + seq_printf(m, "\t minor_features4: 0x%08x\n", + gpu->identity.minor_features4); + seq_printf(m, "\t minor_features5: 0x%08x\n", + gpu->identity.minor_features5); seq_puts(m, "\tspecs\n"); seq_printf(m, "\t stream_count: %d\n", @@ -664,6 +727,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) gpu->identity.instruction_count); seq_printf(m, "\t num_constants: %d\n", gpu->identity.num_constants); + seq_printf(m, "\t varyings_count: %d\n", + gpu->identity.varyings_count); seq_printf(m, "\taxi: 0x%08x\n", axi); seq_printf(m, "\tidle: 0x%08x\n", idle); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index c75d50359ab0..f233ac4c7c1c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -46,6 +46,12 @@ struct etnaviv_chip_identity { /* Supported minor feature 3 fields. */ u32 minor_features3; + /* Supported minor feature 4 fields. */ + u32 minor_features4; + + /* Supported minor feature 5 fields. */ + u32 minor_features5; + /* Number of streams supported. 
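
The etnaviv_gpu_init() error handling above frees the IOMMU domain when MMU creation fails and adds a destroy_iommu label, so a later failure (command buffer allocation) unwinds the MMU before reaching the common fail path instead of leaking it, which removes the old "we will leak memory" TODO. The general shape of such an unwind ladder, sketched with an invented my_ctx type and a stubbed hardware step:

        #include <linux/slab.h>

        struct my_ctx {
                void *table;
                void *buffer;
        };

        static int my_hw_start(struct my_ctx *ctx)
        {
                return 0;       /* stand-in for hardware bring-up */
        }

        static int my_init(struct my_ctx *ctx)
        {
                int ret;

                ctx->table = kzalloc(256, GFP_KERNEL);
                if (!ctx->table)
                        return -ENOMEM;

                ctx->buffer = kzalloc(256, GFP_KERNEL);
                if (!ctx->buffer) {
                        ret = -ENOMEM;
                        goto free_table;
                }

                ret = my_hw_start(ctx);
                if (ret)
                        goto free_buffer;

                return 0;

        free_buffer:
                kfree(ctx->buffer);
                ctx->buffer = NULL;
        free_table:
                kfree(ctx->table);
                ctx->table = NULL;
                return ret;
        }

Each label undoes exactly the steps that succeeded before the failing one, so no path frees something that was never allocated.
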
*/ u32 stream_count; @@ -75,6 +81,9 @@ struct etnaviv_chip_identity { /* Buffer size */ u32 buffer_size; + + /* Number of varyings */ + u8 varyings_count; }; struct etnaviv_event { diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 0064f2640396..6a7de5f1454a 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng git clone git://0x04.net/rules-ng-ng The rules-ng-ng source files this header was generated from are: -- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) -- common.xml ( 18437 bytes, from 2015-03-25 11:27:41) +- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53) +- common.xml ( 18437 bytes, from 2015-12-12 09:02:53) Copyright (C) 2015 */ @@ -182,8 +182,25 @@ Copyright (C) 2015 #define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088 +#define VIVS_HI_CHIP_SPECS_3 0x0000008c +#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK 0x000001f0 +#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT 4 +#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK) +#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK 0x00000007 +#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT 0 +#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK) + #define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094 +#define VIVS_HI_CHIP_SPECS_4 0x0000009c +#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK 0x0001f000 +#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT 12 +#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK) + +#define VIVS_HI_CHIP_MINOR_FEATURE_5 0x000000a0 + +#define VIVS_HI_CHIP_PRODUCT_ID 0x000000a8 + #define VIVS_PM 0x00000000 #define VIVS_PM_POWER_CONTROLS 0x00000100 @@ -206,6 +223,11 @@ Copyright (C) 2015 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SH 0x00000008 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PA 0x00000010 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SE 0x00000020 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_RA 0x00000040 +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX 0x00000080 #define VIVS_PM_PULSE_EATER 0x0000010c diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index b79c316c2ad2..673164b331c8 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = { static int exynos_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL; + struct device_node *np = NULL, *endpoint = NULL; struct exynos_dp_device *dp; int ret; @@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dp); /* This is for the backward compatibility. 
*/ - panel_node = of_parse_phandle(dev->of_node, "panel", 0); - if (panel_node) { - dp->panel = of_drm_find_panel(panel_node); - of_node_put(panel_node); + np = of_parse_phandle(dev->of_node, "panel", 0); + if (np) { + dp->panel = of_drm_find_panel(np); + of_node_put(np); if (!dp->panel) return -EPROBE_DEFER; - } else { - endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); - if (endpoint) { - panel_node = of_graph_get_remote_port_parent(endpoint); - if (panel_node) { - dp->panel = of_drm_find_panel(panel_node); - of_node_put(panel_node); - if (!dp->panel) - return -EPROBE_DEFER; - } else { - DRM_ERROR("no port node for panel device.\n"); - return -EINVAL; - } - } - } - - if (endpoint) goto out; + } endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); if (endpoint) { - bridge_node = of_graph_get_remote_port_parent(endpoint); - if (bridge_node) { - dp->ptn_bridge = of_drm_find_bridge(bridge_node); - of_node_put(bridge_node); - if (!dp->ptn_bridge) - return -EPROBE_DEFER; - } else - return -EPROBE_DEFER; + np = of_graph_get_remote_port_parent(endpoint); + if (np) { + /* The remote port can be either a panel or a bridge */ + dp->panel = of_drm_find_panel(np); + if (!dp->panel) { + dp->ptn_bridge = of_drm_find_bridge(np); + if (!dp->ptn_bridge) { + of_node_put(np); + return -EPROBE_DEFER; + } + } + of_node_put(np); + } else { + DRM_ERROR("no remote endpoint device node found.\n"); + return -EINVAL; + } + } else { + DRM_ERROR("no port endpoint subnode found.\n"); + return -EINVAL; } out: diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index d84a498ef099..e977a81af2e6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1906,8 +1906,7 @@ static int exynos_dsi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int exynos_dsi_suspend(struct device *dev) +static int __maybe_unused exynos_dsi_suspend(struct device *dev) { struct drm_encoder *encoder = dev_get_drvdata(dev); struct exynos_dsi *dsi = encoder_to_dsi(encoder); @@ -1938,7 +1937,7 @@ static int exynos_dsi_suspend(struct device *dev) return 0; } -static int exynos_dsi_resume(struct device *dev) +static int __maybe_unused exynos_dsi_resume(struct device *dev) { struct drm_encoder *encoder = dev_get_drvdata(dev); struct exynos_dsi *dsi = encoder_to_dsi(encoder); @@ -1972,7 +1971,6 @@ err_clk: return ret; } -#endif static const struct dev_pm_ops exynos_dsi_pm_ops = { SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 4aae9dd2b0d1..3a9e75b2cf6b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -12,7 +12,7 @@ */ #ifndef _EXYNOS_DRM_FB_H_ -#define _EXYNOS_DRM_FB_H +#define _EXYNOS_DRM_FB_H_ #include "exynos_drm_gem.h" diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 252eb301470c..32358c5e3db4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -14,6 +14,7 @@ #include <linux/shmem_fs.h> #include <linux/dma-buf.h> +#include <linux/pfn_t.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" @@ -490,7 +491,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } pfn = page_to_pfn(exynos_gem->pages[page_offset]); - ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); + ret = vm_insert_mixed(vma, (unsigned 
long)vmf->virtual_address, + __pfn_to_pfn_t(pfn, PFN_DEV)); out: switch (ret) { diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index b5fbc1cbf024..0a5a60005f7e 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_mixer_suspend(struct device *dev) +static int __maybe_unused exynos_mixer_suspend(struct device *dev) { struct mixer_context *ctx = dev_get_drvdata(dev); struct mixer_resources *res = &ctx->mixer_res; @@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev) return 0; } -static int exynos_mixer_resume(struct device *dev) +static int __maybe_unused exynos_mixer_resume(struct device *dev) { struct mixer_context *ctx = dev_get_drvdata(dev); struct mixer_resources *res = &ctx->mixer_res; @@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops exynos_mixer_pm_ops = { SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL) diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index f93654076af0..033d894d030e 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> +#include <linux/pfn_t.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/slab.h> @@ -132,7 +133,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) for (i = 0; i < page_num; i++) { pfn = (phys_addr >> PAGE_SHIFT); - ret = vm_insert_mixed(vma, address, pfn); + ret = vm_insert_mixed(vma, address, + __pfn_to_pfn_t(pfn, PFN_DEV)); if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0))) break; else if (unlikely(ret != 0)) { diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c index 533d1e3d4a99..a02112ba1c3d 100644 --- a/drivers/gpu/drm/i2c/adv7511.c +++ b/drivers/gpu/drm/i2c/adv7511.c @@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg) case ADV7511_REG_BKSV(3): case ADV7511_REG_BKSV(4): case ADV7511_REG_DDC_STATUS: + case ADV7511_REG_EDID_READ_CTRL: case ADV7511_REG_BSTATUS(0): case ADV7511_REG_BSTATUS(1): case ADV7511_REG_CHIP_ID_HIGH: @@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511) { adv7511->current_edid_segment = -1; - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY); - regmap_write(adv7511->regmap, ADV7511_REG_INT(1), - ADV7511_INT1_DDC_ERROR); regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, 0); + if (adv7511->i2c_main->irq) { + /* + * Documentation says the INT_ENABLE registers are reset in + * POWER_DOWN mode. My 7511w preserved the bits, however. + * Still, let's be safe and stick to the documentation. + */ + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), + ADV7511_INT1_DDC_ERROR); + } /* - * Per spec it is allowed to pulse the HDP signal to indicate that the + * Per spec it is allowed to pulse the HPD signal to indicate that the * EDID information has changed. Some monitors do this when they wakeup - * from standby or are enabled. When the HDP goes low the adv7511 is + * from standby or are enabled. 
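
The exynos_dsi and exynos_mixer hunks above drop the #ifdef CONFIG_PM / CONFIG_PM_SLEEP guards and mark the callbacks __maybe_unused instead; SET_RUNTIME_PM_OPS() already compiles to nothing when runtime PM is disabled, and the attribute silences the unused-function warning that would otherwise result. A minimal sketch with a hypothetical foo driver:

        #include <linux/device.h>
        #include <linux/pm.h>

        static int __maybe_unused foo_runtime_suspend(struct device *dev)
        {
                /* ... put the hardware to sleep ... */
                return 0;
        }

        static int __maybe_unused foo_runtime_resume(struct device *dev)
        {
                /* ... bring the hardware back up ... */
                return 0;
        }

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
        };

The functions always compile, so configuration-dependent build breakage is caught even on kernels built without PM support.
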
When the HPD goes low the adv7511 is * reset and the outputs are disabled which might cause the monitor to - * go to standby again. To avoid this we ignore the HDP pin for the + * go to standby again. To avoid this we ignore the HPD pin for the * first few seconds after enabling the output. */ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HDP_SRC_MASK, - ADV7511_REG_POWER2_HDP_SRC_NONE); + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_NONE); /* * Most of the registers are reset during power down or when HPD is low. @@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511) if (ret < 0) return false; - if (irq0 & ADV7511_INT0_HDP) { + if (irq0 & ADV7511_INT0_HPD) { regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_HDP); + ADV7511_INT0_HPD); return true; } @@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511) regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); - if (irq0 & ADV7511_INT0_HDP && adv7511->encoder) + if (irq0 & ADV7511_INT0_HPD && adv7511->encoder) drm_helper_hpd_irq_event(adv7511->encoder->dev); if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { @@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder, /* Reading the EDID only works if the device is powered */ if (!adv7511->powered) { - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY); - regmap_write(adv7511->regmap, ADV7511_REG_INT(1), - ADV7511_INT1_DDC_ERROR); regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, 0); + if (adv7511->i2c_main->irq) { + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), + ADV7511_INT1_DDC_ERROR); + } adv7511->current_edid_segment = -1; } @@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder, if (adv7511->status == connector_status_connected) status = connector_status_disconnected; } else { - /* Renable HDP sensing */ + /* Renable HPD sensing */ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HDP_SRC_MASK, - ADV7511_REG_POWER2_HDP_SRC_BOTH); + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_BOTH); } adv7511->status = status; diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h index 6599ed538426..38515b30cedf 100644 --- a/drivers/gpu/drm/i2c/adv7511.h +++ b/drivers/gpu/drm/i2c/adv7511.h @@ -90,7 +90,7 @@ #define ADV7511_CSC_ENABLE BIT(7) #define ADV7511_CSC_UPDATE_MODE BIT(5) -#define ADV7511_INT0_HDP BIT(7) +#define ADV7511_INT0_HPD BIT(7) #define ADV7511_INT0_VSYNC BIT(5) #define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4) #define ADV7511_INT0_EDID_READY BIT(2) @@ -157,11 +157,11 @@ #define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) -#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0 -#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00 -#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40 -#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80 -#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0 +#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0 +#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00 +#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40 +#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80 +#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0 #define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4) #define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig 
index fcd77b27514d..051eab33e4c7 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -10,7 +10,6 @@ config DRM_I915 # the shmem_readpage() which depends upon tmpfs select SHMEM select TMPFS - select STOP_MACHINE select DRM_KMS_HELPER select DRM_PANEL select DRM_MIPI_DSI diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0fc38bb7276c..c5db23511184 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1331,7 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) struct intel_engine_cs *ring; u64 acthd[I915_NUM_RINGS]; u32 seqno[I915_NUM_RINGS]; - int i; + u32 instdone[I915_NUM_INSTDONE_REG]; + int i, j; if (!i915.enable_hangcheck) { seq_printf(m, "Hangcheck disabled\n"); @@ -1345,6 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) acthd[i] = intel_ring_get_active_head(ring); } + i915_get_extra_instdone(dev, instdone); + intel_runtime_pm_put(dev_priv); if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) { @@ -1365,6 +1368,21 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) (long long)ring->hangcheck.max_acthd); seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); seq_printf(m, "\taction = %d\n", ring->hangcheck.action); + + if (ring->id == RCS) { + seq_puts(m, "\tinstdone read ="); + + for (j = 0; j < I915_NUM_INSTDONE_REG; j++) + seq_printf(m, " 0x%08x", instdone[j]); + + seq_puts(m, "\n\tinstdone accu ="); + + for (j = 0; j < I915_NUM_INSTDONE_REG; j++) + seq_printf(m, " 0x%08x", + ring->hangcheck.instdone[j]); + + seq_puts(m, "\n"); + } } return 0; @@ -1942,11 +1960,8 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_puts(m, "HW context "); describe_ctx(m, ctx); - for_each_ring(ring, dev_priv, i) { - if (ring->default_context == ctx) - seq_printf(m, "(default context %s) ", - ring->name); - } + if (ctx == dev_priv->kernel_context) + seq_printf(m, "(kernel context) "); if (i915.enable_execlists) { seq_putc(m, '\n'); @@ -1976,12 +1991,13 @@ static int i915_context_status(struct seq_file *m, void *unused) } static void i915_dump_lrc_obj(struct seq_file *m, - struct intel_engine_cs *ring, - struct drm_i915_gem_object *ctx_obj) + struct intel_context *ctx, + struct intel_engine_cs *ring) { struct page *page; uint32_t *reg_state; int j; + struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; unsigned long ggtt_offset = 0; if (ctx_obj == NULL) { @@ -1991,7 +2007,7 @@ static void i915_dump_lrc_obj(struct seq_file *m, } seq_printf(m, "CONTEXT: %s %u\n", ring->name, - intel_execlists_ctx_id(ctx_obj)); + intel_execlists_ctx_id(ctx, ring)); if (!i915_gem_obj_ggtt_bound(ctx_obj)) seq_puts(m, "\tNot bound in GGTT\n"); @@ -2037,13 +2053,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) if (ret) return ret; - list_for_each_entry(ctx, &dev_priv->context_list, link) { - for_each_ring(ring, dev_priv, i) { - if (ring->default_context != ctx) - i915_dump_lrc_obj(m, ring, - ctx->engine[i].state); - } - } + list_for_each_entry(ctx, &dev_priv->context_list, link) + if (ctx != dev_priv->kernel_context) + for_each_ring(ring, dev_priv, i) + i915_dump_lrc_obj(m, ctx, ring); mutex_unlock(&dev->struct_mutex); @@ -2092,13 +2105,13 @@ static int i915_execlists(struct seq_file *m, void *data) seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = 
GEN8_CSB_WRITE_PTR(status_pointer); if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", read_pointer, write_pointer); - for (i = 0; i < 6; i++) { + for (i = 0; i < GEN8_CSB_ENTRIES; i++) { status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i)); ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i)); @@ -2115,11 +2128,8 @@ static int i915_execlists(struct seq_file *m, void *data) seq_printf(m, "\t%d requests in queue\n", count); if (head_req) { - struct drm_i915_gem_object *ctx_obj; - - ctx_obj = head_req->ctx->engine[ring_id].state; seq_printf(m, "\tHead request id: %u\n", - intel_execlists_ctx_id(ctx_obj)); + intel_execlists_ctx_id(head_req->ctx, ring)); seq_printf(m, "\tHead request tail: %u\n", head_req->tail); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3ac616d7363b..11d8414edbbe 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -501,7 +501,9 @@ void intel_detect_pch(struct drm_device *dev) WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev)); } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || - (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) { + ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && + pch->subsystem_vendor == 0x1af4 && + pch->subsystem_device == 0x1100)) { dev_priv->pch_type = intel_virt_detect_pch(dev); } else continue; @@ -1077,7 +1079,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv) */ broxton_init_cdclk(dev); broxton_ddi_phy_init(dev); - intel_prepare_ddi(dev); return 0; } @@ -1336,8 +1337,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, return 0; DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", - wait_for_on ? "on" : "off", - I915_READ(VLV_GTLC_PW_STATUS)); + onoff(wait_for_on), + I915_READ(VLV_GTLC_PW_STATUS)); /* * RC6 transitioning can be delayed up to 2 msec (see @@ -1346,7 +1347,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, err = wait_for(COND, 3); if (err) DRM_ERROR("timeout waiting for GT wells to go %s\n", - wait_for_on ? 
"on" : "off"); + onoff(wait_for_on)); return err; #undef COND @@ -1357,7 +1358,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) return; - DRM_ERROR("GT register access while GT waking disabled\n"); + DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); } @@ -1501,6 +1502,10 @@ static int intel_runtime_suspend(struct device *device) enable_rpm_wakeref_asserts(dev_priv); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); + + if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) + DRM_ERROR("Unclaimed access detected prior to suspending\n"); + dev_priv->pm.suspended = true; /* @@ -1549,6 +1554,8 @@ static int intel_runtime_resume(struct device *device) intel_opregion_notify_adapter(dev, PCI_D0); dev_priv->pm.suspended = false; + if (intel_uncore_unclaimed_mmio(dev_priv)) + DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); intel_guc_resume(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f0f75d7c0d94..afb0beee9975 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -34,6 +34,7 @@ #include <uapi/drm/drm_fourcc.h> #include <drm/drmP.h> +#include "i915_params.h" #include "i915_reg.h" #include "intel_bios.h" #include "intel_ringbuffer.h" @@ -58,7 +59,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20151218" +#define DRIVER_DATE "20160124" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -69,11 +70,11 @@ BUILD_BUG_ON(__i915_warn_cond); \ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) #else -#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x ) +#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") #endif #undef WARN_ON_ONCE -#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x ) +#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ (long) (x), __func__); @@ -87,31 +88,25 @@ */ #define I915_STATE_WARN(condition, format...) ({ \ int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) { \ - if (i915.verbose_state_checks) \ - WARN(1, format); \ - else \ + if (unlikely(__ret_warn_on)) \ + if (!WARN(i915.verbose_state_checks, format)) \ DRM_ERROR(format); \ - } \ unlikely(__ret_warn_on); \ }) -#define I915_STATE_WARN_ON(condition) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) { \ - if (i915.verbose_state_checks) \ - WARN(1, "WARN_ON(" #condition ")\n"); \ - else \ - DRM_ERROR("WARN_ON(" #condition ")\n"); \ - } \ - unlikely(__ret_warn_on); \ -}) +#define I915_STATE_WARN_ON(x) \ + I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") static inline const char *yesno(bool v) { return v ? "yes" : "no"; } +static inline const char *onoff(bool v) +{ + return v ? 
"on" : "off"; +} + enum pipe { INVALID_PIPE = -1, PIPE_A = 0, @@ -339,7 +334,7 @@ struct drm_i915_file_private { unsigned boosts; } rps; - struct intel_engine_cs *bsd_ring; + unsigned int bsd_ring; }; enum intel_dpll_id { @@ -633,6 +628,7 @@ struct drm_i915_display_funcs { struct dpll *best_clock); int (*compute_pipe_wm)(struct intel_crtc *crtc, struct drm_atomic_state *state); + void (*program_watermarks)(struct intel_crtc_state *cstate); void (*update_wm)(struct drm_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state); @@ -657,9 +653,6 @@ struct drm_i915_display_funcs { struct drm_i915_gem_object *obj, struct drm_i915_gem_request *req, uint32_t flags); - void (*update_primary_plane)(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y); void (*hpd_irq_setup)(struct drm_device *dev); /* clock updates for mode set */ /* cursor updates */ @@ -726,6 +719,8 @@ struct intel_uncore { i915_reg_t reg_post; u32 val_reset; } fw_domain[FW_DOMAIN_ID_COUNT]; + + int unclaimed_mmio_check; }; /* Iterate over initialised fw domains */ @@ -889,6 +884,9 @@ struct intel_context { struct drm_i915_gem_object *state; struct intel_ringbuffer *ringbuf; int pin_count; + struct i915_vma *lrc_vma; + u64 lrc_desc; + uint32_t *lrc_reg_state; } engine[I915_NUM_RINGS]; struct list_head link; @@ -1301,7 +1299,7 @@ struct i915_gem_mm { bool busy; /* the indicator for dispatch video commands on two BSD rings */ - int bsd_ring_dispatch_index; + unsigned int bsd_ring_dispatch_index; /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; @@ -1487,7 +1485,7 @@ struct intel_vbt_data { u8 seq_version; u32 size; u8 *data; - u8 *sequence[MIPI_SEQ_MAX]; + const u8 *sequence[MIPI_SEQ_MAX]; } dsi; int crt_ddc_pin; @@ -1784,7 +1782,7 @@ struct drm_i915_private { unsigned int fsb_freq, mem_freq, is_ddr3; unsigned int skl_boot_cdclk; - unsigned int cdclk_freq, max_cdclk_freq; + unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; unsigned int max_dotclk_freq; unsigned int hpll_freq; unsigned int czclk_freq; @@ -1829,8 +1827,13 @@ struct drm_i915_private { struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; #endif + /* dpll and cdclk state is protected by connection_mutex */ int num_shared_dpll; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; + + unsigned int active_crtcs; + unsigned int min_pixclk[I915_MAX_PIPES]; + int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; struct i915_workarounds workarounds; @@ -1945,6 +1948,8 @@ struct drm_i915_private { void (*stop_ring)(struct intel_engine_cs *ring); } gt; + struct intel_context *kernel_context; + bool edp_low_vswing; /* perform PHY state sanity checks? */ @@ -2265,9 +2270,9 @@ struct drm_i915_gem_request { }; -int i915_gem_request_alloc(struct intel_engine_cs *ring, - struct intel_context *ctx, - struct drm_i915_gem_request **req_out); +struct drm_i915_gem_request * __must_check +i915_gem_request_alloc(struct intel_engine_cs *engine, + struct intel_context *ctx); void i915_gem_request_cancel(struct drm_i915_gem_request *req); void i915_gem_request_free(struct kref *req_ref); int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, @@ -2576,6 +2581,11 @@ struct drm_i915_cmd_table { /* Early gen2 have a totally busted CS tlb and require pinned batches. 
*/ #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) + +/* WaRsDisableCoarsePowerGating:skl,bxt */ +#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ + ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ + IS_SKL_REVID(dev, 0, SKL_REVID_F0))) /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts * even when in MSI mode. This results in spurious interrupt warnings if the @@ -2665,44 +2675,7 @@ extern int i915_max_ioctl; extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); -/* i915_params.c */ -struct i915_params { - int modeset; - int panel_ignore_lid; - int semaphores; - int lvds_channel_mode; - int panel_use_ssc; - int vbt_sdvo_panel_type; - int enable_rc6; - int enable_dc; - int enable_fbc; - int enable_ppgtt; - int enable_execlists; - int enable_psr; - unsigned int preliminary_hw_support; - int disable_power_well; - int enable_ips; - int invert_brightness; - int enable_cmd_parser; - /* leave bools at the end to not create holes */ - bool enable_hangcheck; - bool fastboot; - bool prefault_disable; - bool load_detect_test; - bool reset; - bool disable_display; - bool disable_vtd_wa; - bool enable_guc_submission; - int guc_log_level; - int use_mmio_flip; - int mmio_debug; - bool verbose_state_checks; - bool nuclear_pageflip; - int edp_vswing; -}; -extern struct i915_params i915 __read_mostly; - - /* i915_dma.c */ +/* i915_dma.c */ extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); @@ -2745,7 +2718,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev); extern void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake); extern void intel_uncore_init(struct drm_device *dev); -extern void intel_uncore_check_errors(struct drm_device *dev); +extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); +extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); extern void intel_uncore_fini(struct drm_device *dev); extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ddc21d4b388d..371bbb28c471 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1251,7 +1251,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; DEFINE_WAIT(wait); unsigned long timeout_expire; - s64 before, now; + s64 before = 0; /* Only to silence a compiler warning. */ int ret; WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); @@ -1271,14 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req, return -ETIME; timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); + + /* + * Record current time in case interrupted by signal, or wedged. 
+ */ + before = ktime_get_raw_ns(); } if (INTEL_INFO(dev_priv)->gen >= 6) gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); - /* Record current time in case interrupted by signal, or wedged */ trace_i915_gem_request_wait_begin(req); - before = ktime_get_raw_ns(); /* Optimistic spin for the next jiffie before touching IRQs */ ret = __i915_spin_request(req, state); @@ -1343,11 +1346,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req, finish_wait(&ring->irq_queue, &wait); out: - now = ktime_get_raw_ns(); trace_i915_gem_request_wait_end(req); if (timeout) { - s64 tres = *timeout - (now - before); + s64 tres = *timeout - (ktime_get_raw_ns() - before); *timeout = tres < 0 ? 0 : tres; @@ -2677,10 +2679,8 @@ void i915_gem_request_free(struct kref *req_ref) i915_gem_request_remove_from_client(req); if (ctx) { - if (i915.enable_execlists) { - if (ctx != req->ring->default_context) - intel_lr_context_unpin(req); - } + if (i915.enable_execlists && ctx != req->i915->kernel_context) + intel_lr_context_unpin(req); i915_gem_context_unreference(ctx); } @@ -2688,9 +2688,10 @@ void i915_gem_request_free(struct kref *req_ref) kmem_cache_free(req->i915->requests, req); } -int i915_gem_request_alloc(struct intel_engine_cs *ring, - struct intel_context *ctx, - struct drm_i915_gem_request **req_out) +static inline int +__i915_gem_request_alloc(struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_request **req_out) { struct drm_i915_private *dev_priv = to_i915(ring->dev); struct drm_i915_gem_request *req; @@ -2753,6 +2754,31 @@ err: return ret; } +/** + * i915_gem_request_alloc - allocate a request structure + * + * @engine: engine that we wish to issue the request on. + * @ctx: context that the request will be associated with. + * This can be NULL if the request is not directly related to + * any specific user context, in which case this function will + * choose an appropriate context to use. + * + * Returns a pointer to the allocated request if successful, + * or an error code if not. + */ +struct drm_i915_gem_request * +i915_gem_request_alloc(struct intel_engine_cs *engine, + struct intel_context *ctx) +{ + struct drm_i915_gem_request *req; + int err; + + if (ctx == NULL) + ctx = to_i915(engine->dev)->kernel_context; + err = __i915_gem_request_alloc(engine, ctx, &req); + return err ? 
ERR_PTR(err) : req; +} + void i915_gem_request_cancel(struct drm_i915_gem_request *req) { intel_ring_reserved_space_cancel(req->ringbuf); @@ -3170,9 +3196,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, return 0; if (*to_req == NULL) { - ret = i915_gem_request_alloc(to, to->default_context, to_req); - if (ret) - return ret; + struct drm_i915_gem_request *req; + + req = i915_gem_request_alloc(to, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); + + *to_req = req; } trace_i915_gem_ring_sync_to(*to_req, from, from_req); @@ -3372,9 +3402,9 @@ int i915_gpu_idle(struct drm_device *dev) if (!i915.enable_execlists) { struct drm_i915_gem_request *req; - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = i915_switch_context(req); if (ret) { @@ -4328,10 +4358,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, if (ret) goto unref; - BUILD_BUG_ON(I915_NUM_RINGS > 16); - args->busy = obj->active << 16; - if (obj->last_write_req) - args->busy |= obj->last_write_req->ring->id; + args->busy = 0; + if (obj->active) { + int i; + + for (i = 0; i < I915_NUM_RINGS; i++) { + struct drm_i915_gem_request *req; + + req = obj->last_read_req[i]; + if (req) + args->busy |= 1 << (16 + req->ring->exec_id); + } + if (obj->last_write_req) + args->busy |= obj->last_write_req->ring->exec_id; + } unref: drm_gem_object_unreference(&obj->base); @@ -4832,7 +4872,7 @@ i915_gem_init_hw(struct drm_device *dev) */ init_unused_rings(dev); - BUG_ON(!dev_priv->ring[RCS].default_context); + BUG_ON(!dev_priv->kernel_context); ret = i915_ppgtt_init_hw(dev); if (ret) { @@ -4869,10 +4909,9 @@ i915_gem_init_hw(struct drm_device *dev) for_each_ring(ring, dev_priv, i) { struct drm_i915_gem_request *req; - WARN_ON(!ring->default_context); - - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) { + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); i915_gem_cleanup_ringbuffer(dev); goto out; } @@ -5112,6 +5151,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); + file_priv->bsd_ring = -1; + ret = i915_gem_context_open(dev, file); if (ret) kfree(file_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index c25083c78ba7..6a4f64b03db6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -347,22 +347,20 @@ void i915_gem_context_reset(struct drm_device *dev) i915_gem_context_unreference(lctx); ring->last_context = NULL; } - - /* Force the GPU state to be reinitialised on enabling */ - if (ring->default_context) - ring->default_context->legacy_hw_ctx.initialized = false; } + + /* Force the GPU state to be reinitialised on enabling */ + dev_priv->kernel_context->legacy_hw_ctx.initialized = false; } int i915_gem_context_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_context *ctx; - int i; /* Init should only be called once per module load. Eventually the * restriction on the context_disabled check can be loosened. 
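
The request-allocation rework above, and the caller conversions that follow, replace the "int return plus **req_out parameter" signature with a pointer return that encodes failures via ERR_PTR(), so callers test IS_ERR() and recover the errno with PTR_ERR(); passing a NULL context now also selects the kernel context. The bare idiom, with an invented thing_alloc():

        #include <linux/err.h>
        #include <linux/slab.h>

        struct thing {
                int id;
        };

        /* Returns a valid pointer or an ERR_PTR()-encoded errno, never NULL. */
        static struct thing *thing_alloc(int id)
        {
                struct thing *t;

                if (id < 0)
                        return ERR_PTR(-EINVAL);

                t = kzalloc(sizeof(*t), GFP_KERNEL);
                if (!t)
                        return ERR_PTR(-ENOMEM);

                t->id = id;
                return t;
        }

        static int thing_user(void)
        {
                struct thing *t = thing_alloc(1);

                if (IS_ERR(t))
                        return PTR_ERR(t);

                /* ... use t ... */
                kfree(t);
                return 0;
        }

Collapsing the two return channels into one removes the partially-initialised out-parameter on error paths and makes the success/failure check uniform across callers.
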
*/ - if (WARN_ON(dev_priv->ring[RCS].default_context)) + if (WARN_ON(dev_priv->kernel_context)) return 0; if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { @@ -392,12 +390,7 @@ int i915_gem_context_init(struct drm_device *dev) return PTR_ERR(ctx); } - for (i = 0; i < I915_NUM_RINGS; i++) { - struct intel_engine_cs *ring = &dev_priv->ring[i]; - - /* NB: RCS will hold a ref for all rings */ - ring->default_context = ctx; - } + dev_priv->kernel_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", i915.enable_execlists ? "LR" : @@ -408,7 +401,7 @@ int i915_gem_context_init(struct drm_device *dev) void i915_gem_context_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *dctx = dev_priv->ring[RCS].default_context; + struct intel_context *dctx = dev_priv->kernel_context; int i; if (dctx->legacy_hw_ctx.rcs_state) { @@ -435,17 +428,17 @@ void i915_gem_context_fini(struct drm_device *dev) i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); } - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = I915_NUM_RINGS; --i >= 0;) { struct intel_engine_cs *ring = &dev_priv->ring[i]; - if (ring->last_context) + if (ring->last_context) { i915_gem_context_unreference(ring->last_context); - - ring->default_context = NULL; - ring->last_context = NULL; + ring->last_context = NULL; + } } i915_gem_context_unreference(dctx); + dev_priv->kernel_context = NULL; } int i915_gem_context_enable(struct drm_i915_gem_request *req) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index dccb517361b3..2dc08ce1079a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) return eb->lut[handle]; } else { struct hlist_head *head; - struct hlist_node *node; + struct i915_vma *vma; head = &eb->buckets[handle & eb->and]; - hlist_for_each(node, head) { - struct i915_vma *vma; - - vma = hlist_entry(node, struct i915_vma, exec_node); + hlist_for_each_entry(vma, head, exec_node) { if (vma->exec_handle == handle) return vma; } @@ -1309,6 +1306,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, exec_start = params->batch_obj_vm_offset + params->args_batch_start_offset; + if (exec_len == 0) + exec_len = params->batch_obj->base.size; + ret = ring->dispatch_execbuffer(params->request, exec_start, exec_len, params->dispatch_flags); @@ -1325,33 +1325,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, /** * Find one BSD ring to dispatch the corresponding BSD command. - * The Ring ID is returned. + * The ring index is returned. 
*/ -static int gen8_dispatch_bsd_ring(struct drm_device *dev, - struct drm_file *file) +static unsigned int +gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_file_private *file_priv = file->driver_priv; - /* Check whether the file_priv is using one ring */ - if (file_priv->bsd_ring) - return file_priv->bsd_ring->id; - else { - /* If no, use the ping-pong mechanism to select one ring */ - int ring_id; - - mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.bsd_ring_dispatch_index == 0) { - ring_id = VCS; - dev_priv->mm.bsd_ring_dispatch_index = 1; - } else { - ring_id = VCS2; - dev_priv->mm.bsd_ring_dispatch_index = 0; - } - file_priv->bsd_ring = &dev_priv->ring[ring_id]; - mutex_unlock(&dev->struct_mutex); - return ring_id; + /* Check whether the file_priv has already selected one ring. */ + if ((int)file_priv->bsd_ring < 0) { + /* If not, use the ping-pong mechanism to select one. */ + mutex_lock(&dev_priv->dev->struct_mutex); + file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index; + dev_priv->mm.bsd_ring_dispatch_index ^= 1; + mutex_unlock(&dev_priv->dev->struct_mutex); } + + return file_priv->bsd_ring; } static struct drm_i915_gem_object * @@ -1374,6 +1364,63 @@ eb_get_batch(struct eb_vmas *eb) return vma->obj; } +#define I915_USER_RINGS (4) + +static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = { + [I915_EXEC_DEFAULT] = RCS, + [I915_EXEC_RENDER] = RCS, + [I915_EXEC_BLT] = BCS, + [I915_EXEC_BSD] = VCS, + [I915_EXEC_VEBOX] = VECS +}; + +static int +eb_select_ring(struct drm_i915_private *dev_priv, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args, + struct intel_engine_cs **ring) +{ + unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; + + if (user_ring_id > I915_USER_RINGS) { + DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id); + return -EINVAL; + } + + if ((user_ring_id != I915_EXEC_BSD) && + ((args->flags & I915_EXEC_BSD_MASK) != 0)) { + DRM_DEBUG("execbuf with non bsd ring but with invalid " + "bsd dispatch flags: %d\n", (int)(args->flags)); + return -EINVAL; + } + + if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) { + unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; + + if (bsd_idx == I915_EXEC_BSD_DEFAULT) { + bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file); + } else if (bsd_idx >= I915_EXEC_BSD_RING1 && + bsd_idx <= I915_EXEC_BSD_RING2) { + bsd_idx--; + } else { + DRM_DEBUG("execbuf with unknown bsd ring: %u\n", + bsd_idx); + return -EINVAL; + } + + *ring = &dev_priv->ring[_VCS(bsd_idx)]; + } else { + *ring = &dev_priv->ring[user_ring_map[user_ring_id]]; + } + + if (!intel_ring_initialized(*ring)) { + DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id); + return -EINVAL; + } + + return 0; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, @@ -1381,6 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec) { struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_request *req = NULL; struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_exec_object2 shadow_exec_entry; @@ -1411,51 +1459,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (args->flags & I915_EXEC_IS_PINNED) dispatch_flags |= I915_DISPATCH_PINNED; - if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) { - DRM_DEBUG("execbuf with unknown ring: %d\n", - (int)(args->flags & 
I915_EXEC_RING_MASK)); - return -EINVAL; - } - - if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) && - ((args->flags & I915_EXEC_BSD_MASK) != 0)) { - DRM_DEBUG("execbuf with non bsd ring but with invalid " - "bsd dispatch flags: %d\n", (int)(args->flags)); - return -EINVAL; - } - - if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT) - ring = &dev_priv->ring[RCS]; - else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) { - if (HAS_BSD2(dev)) { - int ring_id; - - switch (args->flags & I915_EXEC_BSD_MASK) { - case I915_EXEC_BSD_DEFAULT: - ring_id = gen8_dispatch_bsd_ring(dev, file); - ring = &dev_priv->ring[ring_id]; - break; - case I915_EXEC_BSD_RING1: - ring = &dev_priv->ring[VCS]; - break; - case I915_EXEC_BSD_RING2: - ring = &dev_priv->ring[VCS2]; - break; - default: - DRM_DEBUG("execbuf with unknown bsd ring: %d\n", - (int)(args->flags & I915_EXEC_BSD_MASK)); - return -EINVAL; - } - } else - ring = &dev_priv->ring[VCS]; - } else - ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1]; - - if (!intel_ring_initialized(ring)) { - DRM_DEBUG("execbuf with invalid ring: %d\n", - (int)(args->flags & I915_EXEC_RING_MASK)); - return -EINVAL; - } + ret = eb_select_ring(dev_priv, file, args, &ring); + if (ret) + return ret; if (args->buffer_count < 1) { DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); @@ -1602,11 +1608,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); /* Allocate a request for this batch buffer nice and early. */ - ret = i915_gem_request_alloc(ring, ctx, ¶ms->request); - if (ret) + req = i915_gem_request_alloc(ring, ctx); + if (IS_ERR(req)) { + ret = PTR_ERR(req); goto err_batch_unpin; + } - ret = i915_gem_request_add_to_client(params->request, file); + ret = i915_gem_request_add_to_client(req, file); if (ret) goto err_batch_unpin; @@ -1622,6 +1630,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, params->dispatch_flags = dispatch_flags; params->batch_obj = batch_obj; params->ctx = ctx; + params->request = req; ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); @@ -1645,8 +1654,8 @@ err: * must be freed again. If it was submitted then it is being tracked * on the active request list and no clean up is required here. 
*/ - if (ret && params->request) - i915_gem_request_cancel(params->request); + if (ret && req) + i915_gem_request_cancel(req); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56f4f2e58d53..7377b6725c33 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -96,9 +96,11 @@ static int i915_get_ggtt_vma_pages(struct i915_vma *vma); -const struct i915_ggtt_view i915_ggtt_view_normal; +const struct i915_ggtt_view i915_ggtt_view_normal = { + .type = I915_GGTT_VIEW_NORMAL, +}; const struct i915_ggtt_view i915_ggtt_view_rotated = { - .type = I915_GGTT_VIEW_ROTATED + .type = I915_GGTT_VIEW_ROTATED, }; static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) @@ -3329,7 +3331,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, } static struct scatterlist * -rotate_pages(dma_addr_t *in, unsigned int offset, +rotate_pages(const dma_addr_t *in, unsigned int offset, unsigned int width, unsigned int height, struct sg_table *st, struct scatterlist *sg) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index b448ad832dcf..e5737963ab79 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t; #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) - /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f7df54a8ee2b..16da9c1422cc 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) #endif } +static int num_vma_bound(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + int count = 0; + + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (drm_mm_node_allocated(&vma->node)) + count++; + if (vma->pin_count) + count++; + } + + return count; +} + +static bool swap_available(void) +{ + return get_nr_swap_pages() > 0; +} + +static bool can_release_pages(struct drm_i915_gem_object *obj) +{ + /* Only report true if by unbinding the object and putting its pages + * we can actually make forward progress towards freeing physical + * pages. + * + * If the pages are pinned for any other reason than being bound + * to the GPU, simply unbinding from the GPU is not going to succeed + * in releasing our pin count on the pages themselves. + */ + if (obj->pages_pin_count != num_vma_bound(obj)) + return false; + + /* We can only return physical pages to the system if we can either + * discard the contents (because the user has marked them as being + * purgeable) or if we can move their contents out to swap. + */ + return swap_available() || obj->madv == I915_MADV_DONTNEED; +} + /** * i915_gem_shrink - Shrink buffer object caches * @dev_priv: i915 device @@ -129,6 +169,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active) continue; + if (!can_release_pages(obj)) + continue; + drm_gem_object_reference(&obj->base); /* For the unbound phase, this should be a no-op! 
*/ @@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) return true; } -static int num_vma_bound(struct drm_i915_gem_object *obj) -{ - struct i915_vma *vma; - int count = 0; - - list_for_each_entry(vma, &obj->vma_list, vma_link) { - if (drm_mm_node_allocated(&vma->node)) - count++; - if (vma->pin_count) - count++; - } - - return count; -} - static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { @@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) count += obj->base.size >> PAGE_SHIFT; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!obj->active && obj->pages_pin_count == num_vma_bound(obj)) + if (!obj->active && can_release_pages(obj)) count += obj->base.size >> PAGE_SHIFT; } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 3476877fc0d6..c384dc9c8a63 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -569,6 +569,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev, if (obj->pages == NULL) goto cleanup; + obj->get_page.sg = obj->pages->sgl; + obj->get_page.last = 0; + i915_gem_object_pin_pages(obj); obj->stolen = stolen; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 06ca4082735b..7eeb24427785 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1050,7 +1050,7 @@ static void i915_gem_record_rings(struct drm_device *dev, if (request) rbuf = request->ctx->engine[ring->id].ringbuf; else - rbuf = ring->default_context->engine[ring->id].ringbuf; + rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf; } else rbuf = ring->buffer; diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index 685c7991e24f..e4ba5822289b 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -40,6 +40,7 @@ #define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) +#define SOFT_SCRATCH_COUNT 16 #define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4) #define UOS_RSA_SCRATCH_MAX_COUNT 64 diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 05aa7e61cbe0..51ae5c1f806d 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6(dev_priv->dev) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1) || - (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) || - (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0))) + if (!intel_enable_rc6(dev) || + NEEDS_WaRsDisableCoarsePowerGating(dev)) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ @@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) db_exc.cookie = 1; } + /* Finally, update the cached copy of the GuC's WQ head */ + gc->wq_head = desc->head; + kunmap_atomic(base); return ret; } @@ -471,28 +472,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, sizeof(desc) * client->ctx_index); } -/* Get valid workqueue item and return it back to offset */ -static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset) +int i915_guc_wq_check_space(struct i915_guc_client *gc) { struct 
guc_process_desc *desc; void *base; u32 size = sizeof(struct guc_wq_item); int ret = -ETIMEDOUT, timeout_counter = 200; + if (!gc) + return 0; + + /* Quickly return if wq space is available since last time we cache the + * head position. */ + if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) + return 0; + base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0)); desc = base + gc->proc_desc_offset; while (timeout_counter-- > 0) { - if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { - *offset = gc->wq_tail; - - /* advance the tail for next workqueue item */ - gc->wq_tail += size; - gc->wq_tail &= gc->wq_size - 1; + gc->wq_head = desc->head; - /* this will break the loop */ - timeout_counter = 0; + if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) { ret = 0; + break; } if (timeout_counter) @@ -510,12 +513,16 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, enum intel_ring_id ring_id = rq->ring->id; struct guc_wq_item *wqi; void *base; - u32 tail, wq_len, wq_off = 0; - int ret; + u32 tail, wq_len, wq_off, space; + + space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size); + if (WARN_ON(space < sizeof(struct guc_wq_item))) + return -ENOSPC; /* shouldn't happen */ - ret = guc_get_workqueue_space(gc, &wq_off); - if (ret) - return ret; + /* postincrement WQ tail for next time */ + wq_off = gc->wq_tail; + gc->wq_tail += sizeof(struct guc_wq_item); + gc->wq_tail &= gc->wq_size - 1; /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we * should not have the case where structure wqi is across page, neither @@ -832,6 +839,96 @@ static void guc_create_log(struct intel_guc *guc) guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; } +static void init_guc_policies(struct guc_policies *policies) +{ + struct guc_policy *policy; + u32 p, i; + + policies->dpc_promote_time = 500000; + policies->max_num_work_items = POLICY_MAX_NUM_WI; + + for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) { + for (i = 0; i < I915_NUM_RINGS; i++) { + policy = &policies->policy[p][i]; + + policy->execution_quantum = 1000000; + policy->preemption_time = 500000; + policy->fault_time = 250000; + policy->policy_flags = 0; + } + } + + policies->is_valid = 1; +} + +static void guc_create_ads(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_gem_object *obj; + struct guc_ads *ads; + struct guc_policies *policies; + struct guc_mmio_reg_state *reg_state; + struct intel_engine_cs *ring; + struct page *page; + u32 size, i; + + /* The ads obj includes the struct itself and buffers passed to GuC */ + size = sizeof(struct guc_ads) + sizeof(struct guc_policies) + + sizeof(struct guc_mmio_reg_state) + + GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE; + + obj = guc->ads_obj; + if (!obj) { + obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size)); + if (!obj) + return; + + guc->ads_obj = obj; + } + + page = i915_gem_object_get_page(obj, 0); + ads = kmap(page); + + /* + * The GuC requires a "Golden Context" when it reinitialises + * engines after a reset. Here we use the Render ring default + * context, which must already exist and be pinned in the GGTT, + * so its address won't change after we've told the GuC where + * to find it. 
+ */ + ring = &dev_priv->ring[RCS]; + ads->golden_context_lrca = ring->status_page.gfx_addr; + + for_each_ring(ring, dev_priv, i) + ads->eng_state_size[i] = intel_lr_context_size(ring); + + /* GuC scheduling policies */ + policies = (void *)ads + sizeof(struct guc_ads); + init_guc_policies(policies); + + ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) + + sizeof(struct guc_ads); + + /* MMIO reg state */ + reg_state = (void *)policies + sizeof(struct guc_policies); + + for (i = 0; i < I915_NUM_RINGS; i++) { + reg_state->mmio_white_list[i].mmio_start = + dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START; + + /* Nothing to be saved or restored for now. */ + reg_state->mmio_white_list[i].count = 0; + } + + ads->reg_state_addr = ads->scheduler_policies + + sizeof(struct guc_policies); + + ads->reg_state_buffer = ads->reg_state_addr + + sizeof(struct guc_mmio_reg_state); + + kunmap(page); +} + /* * Set up the memory resources to be shared with the GuC. At this point, * we require just one object that can be mapped through the GGTT. @@ -858,6 +955,8 @@ int i915_guc_submission_init(struct drm_device *dev) guc_create_log(guc); + guc_create_ads(guc); + return 0; } @@ -865,7 +964,7 @@ int i915_guc_submission_enable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx = dev_priv->ring[RCS].default_context; + struct intel_context *ctx = dev_priv->kernel_context; struct i915_guc_client *client; /* client for execbuf submission */ @@ -896,6 +995,9 @@ void i915_guc_submission_fini(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; + gem_release_guc_obj(dev_priv->guc.ads_obj); + guc->ads_obj = NULL; + gem_release_guc_obj(dev_priv->guc.log_obj); guc->log_obj = NULL; @@ -919,7 +1021,7 @@ int intel_guc_suspend(struct drm_device *dev) if (!i915.enable_guc_submission) return 0; - ctx = dev_priv->ring[RCS].default_context; + ctx = dev_priv->kernel_context; data[0] = HOST2GUC_ACTION_ENTER_S_STATE; /* any value greater than GUC_POWER_D0 */ @@ -945,7 +1047,7 @@ int intel_guc_resume(struct drm_device *dev) if (!i915.enable_guc_submission) return 0; - ctx = dev_priv->ring[RCS].default_context; + ctx = dev_priv->kernel_context; data[0] = HOST2GUC_ACTION_EXIT_S_STATE; data[1] = GUC_POWER_D0; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index fa8afa7860ae..25a89373df63 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2188,10 +2188,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ disable_rpm_wakeref_asserts(dev_priv); - /* We get interrupts on unclaimed registers, so check for this before we - * do any I915_{READ,WRITE}. 
*/ - intel_uncore_check_errors(dev); - /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); @@ -2268,43 +2264,20 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, intel_hpd_irq_handler(dev, pin_mask, long_mask); } -static irqreturn_t gen8_irq_handler(int irq, void *arg) +static irqreturn_t +gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 master_ctl; + struct drm_device *dev = dev_priv->dev; irqreturn_t ret = IRQ_NONE; - uint32_t tmp = 0; + u32 iir; enum pipe pipe; - u32 aux_mask = GEN8_AUX_CHANNEL_A; - - if (!intel_irqs_enabled(dev_priv)) - return IRQ_NONE; - - /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); - - if (INTEL_INFO(dev_priv)->gen >= 9) - aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | - GEN9_AUX_CHANNEL_D; - - master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); - master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; - if (!master_ctl) - goto out; - - I915_WRITE_FW(GEN8_MASTER_IRQ, 0); - - /* Find, clear, then process each source of interrupt */ - - ret = gen8_gt_irq_handler(dev_priv, master_ctl); if (master_ctl & GEN8_DE_MISC_IRQ) { - tmp = I915_READ(GEN8_DE_MISC_IIR); - if (tmp) { - I915_WRITE(GEN8_DE_MISC_IIR, tmp); + iir = I915_READ(GEN8_DE_MISC_IIR); + if (iir) { + I915_WRITE(GEN8_DE_MISC_IIR, iir); ret = IRQ_HANDLED; - if (tmp & GEN8_DE_MISC_GSE) + if (iir & GEN8_DE_MISC_GSE) intel_opregion_asle_intr(dev); else DRM_ERROR("Unexpected DE Misc interrupt\n"); @@ -2314,33 +2287,40 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } if (master_ctl & GEN8_DE_PORT_IRQ) { - tmp = I915_READ(GEN8_DE_PORT_IIR); - if (tmp) { + iir = I915_READ(GEN8_DE_PORT_IIR); + if (iir) { + u32 tmp_mask; bool found = false; - u32 hotplug_trigger = 0; - - if (IS_BROXTON(dev_priv)) - hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK; - else if (IS_BROADWELL(dev_priv)) - hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG; - I915_WRITE(GEN8_DE_PORT_IIR, tmp); + I915_WRITE(GEN8_DE_PORT_IIR, iir); ret = IRQ_HANDLED; - if (tmp & aux_mask) { + tmp_mask = GEN8_AUX_CHANNEL_A; + if (INTEL_INFO(dev_priv)->gen >= 9) + tmp_mask |= GEN9_AUX_CHANNEL_B | + GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + + if (iir & tmp_mask) { dp_aux_irq_handler(dev); found = true; } - if (hotplug_trigger) { - if (IS_BROXTON(dev)) - bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt); - else - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw); - found = true; + if (IS_BROXTON(dev_priv)) { + tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; + if (tmp_mask) { + bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); + found = true; + } + } else if (IS_BROADWELL(dev_priv)) { + tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; + if (tmp_mask) { + ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); + found = true; + } } - if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) { + if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { gmbus_irq_handler(dev); found = true; } @@ -2353,49 +2333,51 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } for_each_pipe(dev_priv, pipe) { - uint32_t pipe_iir, flip_done = 0, fault_errors = 0; + u32 flip_done, fault_errors; if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) continue; - pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); - if (pipe_iir) { - ret = IRQ_HANDLED; - I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); + if (!iir) 
{ + DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); + continue; + } - if (pipe_iir & GEN8_PIPE_VBLANK && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + ret = IRQ_HANDLED; + I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); - if (INTEL_INFO(dev_priv)->gen >= 9) - flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; - else - flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; + if (iir & GEN8_PIPE_VBLANK && + intel_pipe_handle_vblank(dev, pipe)) + intel_check_page_flip(dev, pipe); - if (flip_done) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + flip_done = iir; + if (INTEL_INFO(dev_priv)->gen >= 9) + flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; + else + flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; - if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + if (flip_done) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) - intel_cpu_fifo_underrun_irq_handler(dev_priv, - pipe); + if (iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev, pipe); + if (iir & GEN8_PIPE_FIFO_UNDERRUN) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - if (INTEL_INFO(dev_priv)->gen >= 9) - fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; - else - fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + fault_errors = iir; + if (INTEL_INFO(dev_priv)->gen >= 9) + fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - if (fault_errors) - DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", - pipe_name(pipe), - pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); - } else - DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); + if (fault_errors) + DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", + pipe_name(pipe), + fault_errors); } if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && @@ -2405,15 +2387,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) * scheme also closed the SDE interrupt handling race we've seen * on older pch-split platforms. But this needs testing. 
*/ - u32 pch_iir = I915_READ(SDEIIR); - if (pch_iir) { - I915_WRITE(SDEIIR, pch_iir); + iir = I915_READ(SDEIIR); + if (iir) { + I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; if (HAS_PCH_SPT(dev_priv)) - spt_irq_handler(dev, pch_iir); + spt_irq_handler(dev, iir); else - cpt_irq_handler(dev, pch_iir); + cpt_irq_handler(dev, iir); } else { /* * Like on previous PCH there seems to be something @@ -2423,10 +2405,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } } + return ret; +} + +static irqreturn_t gen8_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = arg; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 master_ctl; + irqreturn_t ret; + + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + + master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); + master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; + if (!master_ctl) + return IRQ_NONE; + + I915_WRITE_FW(GEN8_MASTER_IRQ, 0); + + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ + disable_rpm_wakeref_asserts(dev_priv); + + /* Find, clear, then process each source of interrupt */ + ret = gen8_gt_irq_handler(dev_priv, master_ctl); + ret |= gen8_de_irq_handler(dev_priv, master_ctl); + I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); POSTING_READ_FW(GEN8_MASTER_IRQ); -out: enable_rpm_wakeref_asserts(dev_priv); return ret; @@ -2949,14 +2957,44 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) ring->hangcheck.deadlock = 0; } -static enum intel_ring_hangcheck_action -ring_stuck(struct intel_engine_cs *ring, u64 acthd) +static bool subunits_stuck(struct intel_engine_cs *ring) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp; + u32 instdone[I915_NUM_INSTDONE_REG]; + bool stuck; + int i; + + if (ring->id != RCS) + return true; + + i915_get_extra_instdone(ring->dev, instdone); + + /* There might be unstable subunit states even when + * actual head is not moving. Filter out the unstable ones by + * accumulating the undone -> done transitions and only + * consider those as progress. + */ + stuck = true; + for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { + const u32 tmp = instdone[i] | ring->hangcheck.instdone[i]; + + if (tmp != ring->hangcheck.instdone[i]) + stuck = false; + + ring->hangcheck.instdone[i] |= tmp; + } + return stuck; +} + +static enum intel_ring_hangcheck_action +head_stuck(struct intel_engine_cs *ring, u64 acthd) +{ if (acthd != ring->hangcheck.acthd) { + + /* Clear subunit states on head movement */ + memset(ring->hangcheck.instdone, 0, + sizeof(ring->hangcheck.instdone)); + if (acthd > ring->hangcheck.max_acthd) { ring->hangcheck.max_acthd = acthd; return HANGCHECK_ACTIVE; @@ -2965,6 +3003,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) return HANGCHECK_ACTIVE_LOOP; } + if (!subunits_stuck(ring)) + return HANGCHECK_ACTIVE; + + return HANGCHECK_HUNG; +} + +static enum intel_ring_hangcheck_action +ring_stuck(struct intel_engine_cs *ring, u64 acthd) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_ring_hangcheck_action ha; + u32 tmp; + + ha = head_stuck(ring, acthd); + if (ha != HANGCHECK_HUNG) + return ha; + if (IS_GEN2(dev)) return HANGCHECK_HUNG; @@ -3032,6 +3088,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work) */ DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); + /* As enabling the GPU requires fairly extensive mmio access, + * periodically arm the mmio checker to see if we are triggering + * any invalid access. 
+ */ + intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + for_each_ring(ring, dev_priv, i) { u64 acthd; u32 seqno; @@ -3106,7 +3168,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (ring->hangcheck.score > 0) ring->hangcheck.score--; + /* Clear head and subunit states on seqno movement */ ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; + + memset(ring->hangcheck.instdone, 0, + sizeof(ring->hangcheck.instdone)); } ring->hangcheck.seqno = seqno; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 835d6099c769..8d90c256520a 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -22,6 +22,7 @@ * IN THE SOFTWARE. */ +#include "i915_params.h" #include "i915_drv.h" struct i915_params i915 __read_mostly = { diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h new file mode 100644 index 000000000000..529929073120 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_params.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef _I915_PARAMS_H_ +#define _I915_PARAMS_H_ + +#include <linux/cache.h> /* for __read_mostly */ + +struct i915_params { + int modeset; + int panel_ignore_lid; + int semaphores; + int lvds_channel_mode; + int panel_use_ssc; + int vbt_sdvo_panel_type; + int enable_rc6; + int enable_dc; + int enable_fbc; + int enable_ppgtt; + int enable_execlists; + int enable_psr; + unsigned int preliminary_hw_support; + int disable_power_well; + int enable_ips; + int invert_brightness; + int enable_cmd_parser; + int guc_log_level; + int use_mmio_flip; + int mmio_debug; + int edp_vswing; + /* leave bools at the end to not create holes */ + bool enable_hangcheck; + bool fastboot; + bool prefault_disable; + bool load_detect_test; + bool reset; + bool disable_display; + bool disable_vtd_wa; + bool enable_guc_submission; + bool verbose_state_checks; + bool nuclear_pageflip; +}; + +extern struct i915_params i915 __read_mostly; + +#endif + diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 007ae83a4086..0a988895165f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1711,6 +1711,11 @@ enum skl_disp_power_wells { #define FPGA_DBG _MMIO(0x42300) #define FPGA_DBG_RM_NOCLAIM (1<<31) +#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028) +#define CLAIM_ER_CLR (1 << 31) +#define CLAIM_ER_OVERFLOW (1 << 16) +#define CLAIM_ER_CTR_MASK 0xffff + #define DERRMR _MMIO(0x44050) /* Note that HBLANK events are reserved on bdw+ */ #define DERRMR_PIPEA_SCANLINE (1<<0) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 37e3f0ddf8e0..c6188dddb341 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -164,7 +164,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t count) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; @@ -200,7 +200,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t count) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; @@ -521,7 +521,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, loff_t off, size_t count) { - struct device *kdev = container_of(kobj, struct device, kobj); + struct device *kdev = kobj_to_dev(kobj); struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; struct i915_error_state_file_priv error_priv; @@ -556,7 +556,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { - struct device *kdev = container_of(kobj, struct device, kobj); + struct device *kdev = kobj_to_dev(kobj); struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; int ret; diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index d0b1c9afa35e..4625f8a9ba12 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -308,5 +308,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) 
{ struct intel_atomic_state *state = to_intel_atomic_state(s); drm_atomic_state_default_clear(&state->base); - state->dpll_set = false; + state->dpll_set = state->modeset = false; } diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index c6bb0fc1edfb..e0b851a0004a 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane, intel_state->clip.x1 = 0; intel_state->clip.y1 = 0; intel_state->clip.x2 = - crtc_state->base.active ? crtc_state->pipe_src_w : 0; + crtc_state->base.enable ? crtc_state->pipe_src_w : 0; intel_state->clip.y2 = - crtc_state->base.active ? crtc_state->pipe_src_h : 0; + crtc_state->base.enable ? crtc_state->pipe_src_h : 0; if (state->fb && intel_rotation_90_or_270(state->rotation)) { if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || @@ -194,8 +194,16 @@ static void intel_plane_atomic_update(struct drm_plane *plane, struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane_state *intel_state = to_intel_plane_state(plane->state); + struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; + struct drm_crtc_state *crtc_state = + drm_atomic_get_existing_crtc_state(old_state->state, crtc); - intel_plane->commit_plane(plane, intel_state); + if (intel_state->visible) + intel_plane->update_plane(plane, + to_intel_crtc_state(crtc_state), + intel_state); + else + intel_plane->disable_plane(plane, crtc); } const struct drm_plane_helper_funcs intel_plane_helper_funcs = { diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index eba3e0f87181..bf62a19c8f69 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -31,11 +31,49 @@ #include "i915_drv.h" #include "intel_bios.h" +/** + * DOC: Video BIOS Table (VBT) + * + * The Video BIOS Table, or VBT, provides platform and board specific + * configuration information to the driver that is not discoverable or available + * through other means. The configuration is mostly related to display + * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in + * the PCI ROM. + * + * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB + * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that + * contain the actual configuration information. The VBT Header, and thus the + * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the + * BDB Header. The data blocks are concatenated after the BDB Header. The data + * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of + * data. (Block 53, the MIPI Sequence Block is an exception.) + * + * The driver parses the VBT during load. The relevant information is stored in + * driver private data for ease of use, and the actual VBT is not read after + * that. + */ + #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 static int panel_type; +/* Get BDB block size given a pointer to Block ID. */ +static u32 _get_blocksize(const u8 *block_base) +{ + /* The MIPI Sequence Block v3+ has a separate size field. */ + if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3) + return *((const u32 *)(block_base + 4)); + else + return *((const u16 *)(block_base + 1)); +} + +/* Get BDB block size give a pointer to data after Block ID and Block Size. 
*/ +static u32 get_blocksize(const void *block_data) +{ + return _get_blocksize(block_data - 3); +} + static const void * find_section(const void *_bdb, int section_id) { @@ -52,14 +90,8 @@ find_section(const void *_bdb, int section_id) /* walk the sections looking for section_id */ while (index + 3 < total) { current_id = *(base + index); - index++; - - current_size = *((const u16 *)(base + index)); - index += 2; - - /* The MIPI Sequence Block v3+ has a separate size field. */ - if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3) - current_size = *((const u32 *)(base + index + 1)); + current_size = _get_blocksize(base + index); + index += 3; if (index + current_size > total) return NULL; @@ -73,16 +105,6 @@ find_section(const void *_bdb, int section_id) return NULL; } -static u16 -get_blocksize(const void *p) -{ - u16 *block_ptr, block_size; - - block_ptr = (u16 *)((char *)p - 2); - block_size = *block_ptr; - return block_size; -} - static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, const struct lvds_dvo_timing *dvo_timing) @@ -675,84 +697,13 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time; } -static u8 *goto_next_sequence(u8 *data, int *size) -{ - u16 len; - int tmp = *size; - - if (--tmp < 0) - return NULL; - - /* goto first element */ - data++; - while (1) { - switch (*data) { - case MIPI_SEQ_ELEM_SEND_PKT: - /* - * skip by this element payload size - * skip elem id, command flag and data type - */ - tmp -= 5; - if (tmp < 0) - return NULL; - - data += 3; - len = *((u16 *)data); - - tmp -= len; - if (tmp < 0) - return NULL; - - /* skip by len */ - data = data + 2 + len; - break; - case MIPI_SEQ_ELEM_DELAY: - /* skip by elem id, and delay is 4 bytes */ - tmp -= 5; - if (tmp < 0) - return NULL; - - data += 5; - break; - case MIPI_SEQ_ELEM_GPIO: - tmp -= 3; - if (tmp < 0) - return NULL; - - data += 3; - break; - default: - DRM_ERROR("Unknown element\n"); - return NULL; - } - - /* end of sequence ? */ - if (*data == 0) - break; - } - - /* goto next sequence or end of block byte */ - if (--tmp < 0) - return NULL; - - data++; - - /* update amount of data left for the sequence block to be parsed */ - *size = tmp; - return data; -} - static void -parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) +parse_mipi_config(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) { const struct bdb_mipi_config *start; - const struct bdb_mipi_sequence *sequence; const struct mipi_config *config; const struct mipi_pps_data *pps; - u8 *data; - const u8 *seq_data; - int i, panel_id, seq_size; - u16 block_size; /* parse MIPI blocks only if LFP type is MIPI */ if (!dev_priv->vbt.has_mipi) @@ -798,104 +749,233 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) /* We have mandatory mipi config blocks. Initialize as generic panel */ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; +} - /* Check if we have sequence block as well */ - sequence = find_section(bdb, BDB_MIPI_SEQUENCE); - if (!sequence) { - DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n"); - return; +/* Find the sequence block and size for the given panel. */ +static const u8 * +find_panel_sequence_block(const struct bdb_mipi_sequence *sequence, + u16 panel_id, u32 *seq_size) +{ + u32 total = get_blocksize(sequence); + const u8 *data = &sequence->data[0]; + u8 current_id; + u32 current_size; + int header_size = sequence->version >= 3 ? 
5 : 3; + int index = 0; + int i; + + /* skip new block size */ + if (sequence->version >= 3) + data += 4; + + for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) { + if (index + header_size > total) { + DRM_ERROR("Invalid sequence block (header)\n"); + return NULL; + } + + current_id = *(data + index); + if (sequence->version >= 3) + current_size = *((const u32 *)(data + index + 1)); + else + current_size = *((const u16 *)(data + index + 1)); + + index += header_size; + + if (index + current_size > total) { + DRM_ERROR("Invalid sequence block\n"); + return NULL; + } + + if (current_id == panel_id) { + *seq_size = current_size; + return data + index; + } + + index += current_size; } - /* Fail gracefully for forward incompatible sequence block. */ - if (sequence->version >= 3) { - DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n"); - return; + DRM_ERROR("Sequence block detected but no valid configuration\n"); + + return NULL; +} + +static int goto_next_sequence(const u8 *data, int index, int total) +{ + u16 len; + + /* Skip Sequence Byte. */ + for (index = index + 1; index < total; index += len) { + u8 operation_byte = *(data + index); + index++; + + switch (operation_byte) { + case MIPI_SEQ_ELEM_END: + return index; + case MIPI_SEQ_ELEM_SEND_PKT: + if (index + 4 > total) + return 0; + + len = *((const u16 *)(data + index + 2)) + 4; + break; + case MIPI_SEQ_ELEM_DELAY: + len = 4; + break; + case MIPI_SEQ_ELEM_GPIO: + len = 2; + break; + case MIPI_SEQ_ELEM_I2C: + if (index + 7 > total) + return 0; + len = *(data + index + 6) + 7; + break; + default: + DRM_ERROR("Unknown operation byte\n"); + return 0; + } } - DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); + return 0; +} - block_size = get_blocksize(sequence); +static int goto_next_sequence_v3(const u8 *data, int index, int total) +{ + int seq_end; + u16 len; + u32 size_of_sequence; /* - * parse the sequence block for individual sequences + * Could skip sequence based on Size of Sequence alone, but also do some + * checking on the structure. */ - dev_priv->vbt.dsi.seq_version = sequence->version; + if (total < 5) { + DRM_ERROR("Too small sequence size\n"); + return 0; + } - seq_data = &sequence->data[0]; + /* Skip Sequence Byte. */ + index++; /* - * sequence block is variable length and hence we need to parse and - * get the sequence data for specific panel id + * Size of Sequence. Excludes the Sequence Byte and the size itself, + * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END + * byte. */ - for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) { - panel_id = *seq_data; - seq_size = *((u16 *) (seq_data + 1)); - if (panel_id == panel_type) - break; + size_of_sequence = *((const uint32_t *)(data + index)); + index += 4; - /* skip the sequence including seq header of 3 bytes */ - seq_data = seq_data + 3 + seq_size; - if ((seq_data - &sequence->data[0]) > block_size) { - DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n"); - return; + seq_end = index + size_of_sequence; + if (seq_end > total) { + DRM_ERROR("Invalid sequence size\n"); + return 0; + } + + for (; index < total; index += len) { + u8 operation_byte = *(data + index); + index++; + + if (operation_byte == MIPI_SEQ_ELEM_END) { + if (index != seq_end) { + DRM_ERROR("Invalid element structure\n"); + return 0; + } + return index; + } + + len = *(data + index); + index++; + + /* + * FIXME: Would be nice to check elements like for v1/v2 in + * goto_next_sequence() above. 
+ */ + switch (operation_byte) { + case MIPI_SEQ_ELEM_SEND_PKT: + case MIPI_SEQ_ELEM_DELAY: + case MIPI_SEQ_ELEM_GPIO: + case MIPI_SEQ_ELEM_I2C: + case MIPI_SEQ_ELEM_SPI: + case MIPI_SEQ_ELEM_PMIC: + break; + default: + DRM_ERROR("Unknown operation byte %u\n", + operation_byte); + break; } } - if (i == MAX_MIPI_CONFIGURATIONS) { - DRM_ERROR("Sequence block detected but no valid configuration\n"); + return 0; +} + +static void +parse_mipi_sequence(struct drm_i915_private *dev_priv, + const struct bdb_header *bdb) +{ + const struct bdb_mipi_sequence *sequence; + const u8 *seq_data; + u32 seq_size; + u8 *data; + int index = 0; + + /* Only our generic panel driver uses the sequence block. */ + if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID) + return; + + sequence = find_section(bdb, BDB_MIPI_SEQUENCE); + if (!sequence) { + DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n"); return; } - /* check if found sequence is completely within the sequence block - * just being paranoid */ - if (seq_size > block_size) { - DRM_ERROR("Corrupted sequence/size, bailing out\n"); + /* Fail gracefully for forward incompatible sequence block. */ + if (sequence->version >= 4) { + DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n", + sequence->version); return; } - /* skip the panel id(1 byte) and seq size(2 bytes) */ - dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL); - if (!dev_priv->vbt.dsi.data) + DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version); + + seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size); + if (!seq_data) return; - /* - * loop into the sequence data and split into multiple sequneces - * There are only 5 types of sequences as of now - */ - data = dev_priv->vbt.dsi.data; - dev_priv->vbt.dsi.size = seq_size; + data = kmemdup(seq_data, seq_size, GFP_KERNEL); + if (!data) + return; - /* two consecutive 0x00 indicate end of all sequences */ - while (1) { - int seq_id = *data; - if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) { - dev_priv->vbt.dsi.sequence[seq_id] = data; - DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id); - } else { - DRM_ERROR("undefined sequence\n"); + /* Parse the sequences, store pointers to each sequence. */ + for (;;) { + u8 seq_id = *(data + index); + if (seq_id == MIPI_SEQ_END) + break; + + if (seq_id >= MIPI_SEQ_MAX) { + DRM_ERROR("Unknown sequence %u\n", seq_id); goto err; } - /* partial parsing to skip elements */ - data = goto_next_sequence(data, &seq_size); + dev_priv->vbt.dsi.sequence[seq_id] = data + index; - if (data == NULL) { - DRM_ERROR("Sequence elements going beyond block itself. 
Sequence block parsing failed\n"); + if (sequence->version >= 3) + index = goto_next_sequence_v3(data, index, seq_size); + else + index = goto_next_sequence(data, index, seq_size); + if (!index) { + DRM_ERROR("Invalid sequence %u\n", seq_id); goto err; } - - if (*data == 0) - break; /* end of sequence reached */ } - DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n"); + dev_priv->vbt.dsi.data = data; + dev_priv->vbt.dsi.size = seq_size; + dev_priv->vbt.dsi.seq_version = sequence->version; + + DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n"); return; -err: - kfree(dev_priv->vbt.dsi.data); - dev_priv->vbt.dsi.data = NULL; - /* error during parsing so set all pointers to null - * because of partial parsing */ +err: + kfree(data); memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence)); } @@ -1088,7 +1168,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } - if (bdb->version < 195) { + if (bdb->version < 106) { + expected_size = 22; + } else if (bdb->version < 109) { + expected_size = 27; + } else if (bdb->version < 195) { + BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); expected_size = sizeof(struct old_child_dev_config); } else if (bdb->version == 195) { expected_size = 37; @@ -1101,18 +1186,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv, bdb->version, expected_size); } - /* The legacy sized child device config is the minimum we need. */ - if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) { - DRM_ERROR("Child device config size %u is too small.\n", - p_defs->child_dev_size); - return; - } - /* Flag an error for unexpected size, but continue anyway. */ if (p_defs->child_dev_size != expected_size) DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n", p_defs->child_dev_size, expected_size, bdb->version); + /* The legacy sized child device config is the minimum we need. */ + if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) { + DRM_DEBUG_KMS("Child device config size %u is too small.\n", + p_defs->child_dev_size); + return; + } + /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ @@ -1285,7 +1370,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size) /** * intel_bios_init - find VBT and initialize settings from the BIOS - * @dev: DRM device + * @dev_priv: i915 device instance * * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * to appropriate values. 
@@ -1337,7 +1422,8 @@ intel_bios_init(struct drm_i915_private *dev_priv) parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); parse_psr(dev_priv, bdb); - parse_mipi(dev_priv, bdb); + parse_mipi_config(dev_priv, bdb); + parse_mipi_sequence(dev_priv, bdb); parse_ddi_ports(dev_priv, bdb); if (bios) diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 54eac1003a1e..350d4e0f75a4 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -25,25 +25,43 @@ * */ -#ifndef _I830_BIOS_H_ -#define _I830_BIOS_H_ - +#ifndef _INTEL_BIOS_H_ +#define _INTEL_BIOS_H_ + +/** + * struct vbt_header - VBT Header structure + * @signature: VBT signature, always starts with "$VBT" + * @version: Version of this structure + * @header_size: Size of this structure + * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks) + * @vbt_checksum: Checksum + * @reserved0: Reserved + * @bdb_offset: Offset of &struct bdb_header from beginning of VBT + * @aim_offset: Offsets of add-in data blocks from beginning of VBT + */ struct vbt_header { - u8 signature[20]; /**< Always starts with 'VBT$' */ - u16 version; /**< decimal */ - u16 header_size; /**< in bytes */ - u16 vbt_size; /**< in bytes */ + u8 signature[20]; + u16 version; + u16 header_size; + u16 vbt_size; u8 vbt_checksum; u8 reserved0; - u32 bdb_offset; /**< from beginning of VBT */ - u32 aim_offset[4]; /**< from beginning of VBT */ + u32 bdb_offset; + u32 aim_offset[4]; } __packed; +/** + * struct bdb_header - BDB Header structure + * @signature: BDB signature "BIOS_DATA_BLOCK" + * @version: Version of the data block definitions + * @header_size: Size of this structure + * @bdb_size: Size of BDB (BDB Header and data blocks) + */ struct bdb_header { - u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */ - u16 version; /**< decimal */ - u16 header_size; /**< in bytes */ - u16 bdb_size; /**< in bytes */ + u8 signature[16]; + u16 version; + u16 header_size; + u16 bdb_size; } __packed; /* strictly speaking, this is a "skip" block, but it has interesting info */ @@ -936,21 +954,29 @@ struct bdb_mipi_sequence { /* MIPI Sequnece Block definitions */ enum mipi_seq { - MIPI_SEQ_UNDEFINED = 0, + MIPI_SEQ_END = 0, MIPI_SEQ_ASSERT_RESET, MIPI_SEQ_INIT_OTP, MIPI_SEQ_DISPLAY_ON, MIPI_SEQ_DISPLAY_OFF, MIPI_SEQ_DEASSERT_RESET, + MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ + MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ + MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ + MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */ + MIPI_SEQ_POWER_ON, /* sequence block v3+ */ + MIPI_SEQ_POWER_OFF, /* sequence block v3+ */ MIPI_SEQ_MAX }; enum mipi_seq_element { - MIPI_SEQ_ELEM_UNDEFINED = 0, + MIPI_SEQ_ELEM_END = 0, MIPI_SEQ_ELEM_SEND_PKT, MIPI_SEQ_ELEM_DELAY, MIPI_SEQ_ELEM_GPIO, - MIPI_SEQ_ELEM_STATUS, + MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */ + MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */ + MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */ MIPI_SEQ_ELEM_MAX }; @@ -965,4 +991,4 @@ enum mipi_gpio_pin_index { MIPI_GPIO_MAX }; -#endif /* _I830_BIOS_H_ */ +#endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 9bb63a85997a..5c2f9a40c81b 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -44,6 +44,8 @@ #define I915_CSR_SKL "i915/skl_dmc_ver1.bin" #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" +#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" + MODULE_FIRMWARE(I915_CSR_SKL); 
MODULE_FIRMWARE(I915_CSR_BXT); @@ -278,10 +280,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = css_header->version; - if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) { + if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && + csr->version < SKL_CSR_VERSION_REQUIRED) { DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u," " please upgrade to v%u.%u or later" - " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n", + " [" FIRMWARE_URL "].\n", CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version), CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED), @@ -399,7 +402,10 @@ out: CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); } else { - DRM_ERROR("Failed to load DMC firmware, disabling rpm\n"); + dev_notice(dev_priv->dev->dev, + "Failed to load DMC firmware" + " [" FIRMWARE_URL "]," + " disabling runtime power management.\n"); } release_firmware(fw); @@ -421,7 +427,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (!HAS_CSR(dev_priv)) return; - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e6408e5583d7..1f9a3687b540 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -133,38 +133,38 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { { 0x00002016, 0x000000A0, 0x0 }, { 0x00005012, 0x0000009B, 0x0 }, { 0x00007011, 0x00000088, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80009010, 0x000000C0, 0x1 }, { 0x00002016, 0x0000009B, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80007011, 0x000000C0, 0x1 }, { 0x00002016, 0x000000DF, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80005012, 0x000000C0, 0x1 }, }; /* Skylake U */ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { { 0x0000201B, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80007011, 0x000000CD, 0x0 }, + { 0x80009010, 0x000000C0, 0x1 }, { 0x0000201B, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ - { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80005012, 0x000000C0, 0x1 }, + { 0x80007011, 0x000000C0, 0x1 }, { 0x00002016, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */ + { 0x80005012, 0x000000C0, 0x1 }, }; /* Skylake Y */ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { { 0x00000018, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x00007011, 0x00000087, 0x0 }, - { 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80007011, 0x000000CD, 0x0 }, + { 0x80009010, 0x000000C0, 0x3 }, { 0x00000018, 0x0000009D, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ - { 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, { 0x00000018, 0x00000088, 0x0 }, - { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */ + { 0x80005012, 0x000000C0, 0x3 }, }; /* @@ -226,26 +226,26 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { { 0x00000018, 0x000000A1, 0x0 }, { 0x00000018, 0x00000098, 0x0 }, { 0x00004013, 
0x00000088, 0x0 }, - { 0x00006012, 0x00000087, 0x0 }, + { 0x80006012, 0x000000CD, 0x1 }, { 0x00000018, 0x000000DF, 0x0 }, - { 0x00003015, 0x00000087, 0x0 }, /* Default */ - { 0x00003015, 0x000000C7, 0x0 }, - { 0x00000018, 0x000000C7, 0x0 }, + { 0x80003015, 0x000000CD, 0x1 }, /* Default */ + { 0x80003015, 0x000000C0, 0x1 }, + { 0x80000018, 0x000000C0, 0x1 }, }; /* Skylake Y */ static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { { 0x00000018, 0x000000A1, 0x0 }, { 0x00005012, 0x000000DF, 0x0 }, - { 0x00007011, 0x00000084, 0x0 }, + { 0x80007011, 0x000000CB, 0x3 }, { 0x00000018, 0x000000A4, 0x0 }, { 0x00000018, 0x0000009D, 0x0 }, { 0x00004013, 0x00000080, 0x0 }, - { 0x00006013, 0x000000C7, 0x0 }, + { 0x80006013, 0x000000C0, 0x3 }, { 0x00000018, 0x0000008A, 0x0 }, - { 0x00003015, 0x000000C7, 0x0 }, /* Default */ - { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */ - { 0x00000018, 0x000000C7, 0x0 }, + { 0x80003015, 0x000000C0, 0x3 }, /* Default */ + { 0x80003015, 0x000000C0, 0x3 }, + { 0x80000018, 0x000000C0, 0x3 }, }; struct bxt_ddi_buf_trans { @@ -301,8 +301,8 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */ }; -static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, - enum port port, int type); +static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, + u32 level, enum port port, int type); static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, struct intel_digital_port **dig_port, @@ -342,81 +342,50 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) return port; } -static bool -intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port) -{ - return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg); -} - -static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, - int *n_entries) +static const struct ddi_buf_trans * +skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) { - const struct ddi_buf_trans *ddi_translations; - - if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { - ddi_translations = skl_y_ddi_translations_dp; + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); - } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { - ddi_translations = skl_u_ddi_translations_dp; + return skl_y_ddi_translations_dp; + } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) { *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); + return skl_u_ddi_translations_dp; } else { - ddi_translations = skl_ddi_translations_dp; *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); + return skl_ddi_translations_dp; } - - return ddi_translations; } -static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev, - int *n_entries) +static const struct ddi_buf_trans * +skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { - struct drm_i915_private *dev_priv = dev->dev_private; - const struct ddi_buf_trans *ddi_translations; - - if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { - if (dev_priv->edp_low_vswing) { - ddi_translations = skl_y_ddi_translations_edp; + if (dev_priv->edp_low_vswing) { + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); - } else { - ddi_translations = skl_y_ddi_translations_dp; - *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); - } - } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) { - if (dev_priv->edp_low_vswing) { - ddi_translations = 
skl_u_ddi_translations_edp; + return skl_y_ddi_translations_edp; + } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) { *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); + return skl_u_ddi_translations_edp; } else { - ddi_translations = skl_u_ddi_translations_dp; - *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); - } - } else { - if (dev_priv->edp_low_vswing) { - ddi_translations = skl_ddi_translations_edp; *n_entries = ARRAY_SIZE(skl_ddi_translations_edp); - } else { - ddi_translations = skl_ddi_translations_dp; - *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); + return skl_ddi_translations_edp; } } - return ddi_translations; + return skl_get_buf_trans_dp(dev_priv, n_entries); } static const struct ddi_buf_trans * -skl_get_buf_trans_hdmi(struct drm_device *dev, - int *n_entries) +skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) { - const struct ddi_buf_trans *ddi_translations; - - if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) { - ddi_translations = skl_y_ddi_translations_hdmi; + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); + return skl_y_ddi_translations_hdmi; } else { - ddi_translations = skl_ddi_translations_hdmi; *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); + return skl_ddi_translations_hdmi; } - - return ddi_translations; } /* @@ -426,42 +395,52 @@ skl_get_buf_trans_hdmi(struct drm_device *dev, * in either FDI or DP modes only, as HDMI connections will work with both * of those */ -static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, - bool supports_hdmi) +void intel_prepare_ddi_buffer(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, size; - int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + int hdmi_level; + enum port port; const struct ddi_buf_trans *ddi_translations_fdi; const struct ddi_buf_trans *ddi_translations_dp; const struct ddi_buf_trans *ddi_translations_edp; const struct ddi_buf_trans *ddi_translations_hdmi; const struct ddi_buf_trans *ddi_translations; - if (IS_BROXTON(dev)) { - if (!supports_hdmi) + port = intel_ddi_get_encoder_port(encoder); + hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + + if (IS_BROXTON(dev_priv)) { + if (encoder->type != INTEL_OUTPUT_HDMI) return; /* Vswing programming for HDMI */ - bxt_ddi_vswing_sequence(dev, hdmi_level, port, + bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port, INTEL_OUTPUT_HDMI); return; - } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + } + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ddi_translations_fdi = NULL; ddi_translations_dp = - skl_get_buf_trans_dp(dev, &n_dp_entries); + skl_get_buf_trans_dp(dev_priv, &n_dp_entries); ddi_translations_edp = - skl_get_buf_trans_edp(dev, &n_edp_entries); + skl_get_buf_trans_edp(dev_priv, &n_edp_entries); ddi_translations_hdmi = - skl_get_buf_trans_hdmi(dev, &n_hdmi_entries); + skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); hdmi_default_entry = 8; /* If we're boosting the current, set bit 31 of trans1 */ if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || dev_priv->vbt.ddi_port_info[port].dp_boost_level) iboost_bit = 1<<31; - } else if (IS_BROADWELL(dev)) { + + if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP && + port != PORT_A && port != PORT_E && + n_edp_entries > 9)) + n_edp_entries = 9; + } else if 
(IS_BROADWELL(dev_priv)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_edp = bdw_ddi_translations_edp; @@ -470,7 +449,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); hdmi_default_entry = 7; - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev_priv)) { ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_dp = hsw_ddi_translations_dp; ddi_translations_edp = hsw_ddi_translations_dp; @@ -490,30 +469,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, hdmi_default_entry = 7; } - switch (port) { - case PORT_A: + switch (encoder->type) { + case INTEL_OUTPUT_EDP: ddi_translations = ddi_translations_edp; size = n_edp_entries; break; - case PORT_B: - case PORT_C: + case INTEL_OUTPUT_DISPLAYPORT: + case INTEL_OUTPUT_HDMI: ddi_translations = ddi_translations_dp; size = n_dp_entries; break; - case PORT_D: - if (intel_dp_is_edp(dev, PORT_D)) { - ddi_translations = ddi_translations_edp; - size = n_edp_entries; - } else { - ddi_translations = ddi_translations_dp; - size = n_dp_entries; - } - break; - case PORT_E: - if (ddi_translations_fdi) - ddi_translations = ddi_translations_fdi; - else - ddi_translations = ddi_translations_dp; + case INTEL_OUTPUT_ANALOG: + ddi_translations = ddi_translations_fdi; size = n_dp_entries; break; default: @@ -527,7 +494,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, ddi_translations[i].trans2); } - if (!supports_hdmi) + if (encoder->type != INTEL_OUTPUT_HDMI) return; /* Choose a good default if VBT is badly populated */ @@ -542,37 +509,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, ddi_translations_hdmi[hdmi_level].trans2); } -/* Program DDI buffers translations for DP. By default, program ports A-D in DP - * mode and port E for FDI. 
- */ -void intel_prepare_ddi(struct drm_device *dev) -{ - struct intel_encoder *intel_encoder; - bool visited[I915_MAX_PORTS] = { 0, }; - - if (!HAS_DDI(dev)) - return; - - for_each_intel_encoder(dev, intel_encoder) { - struct intel_digital_port *intel_dig_port; - enum port port; - bool supports_hdmi; - - if (intel_encoder->type == INTEL_OUTPUT_DSI) - continue; - - ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port); - if (visited[port]) - continue; - - supports_hdmi = intel_dig_port && - intel_dig_port_supports_hdmi(intel_dig_port); - - intel_prepare_ddi_buffers(dev, port, supports_hdmi); - visited[port] = true; - } -} - static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port) { @@ -601,8 +537,14 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; u32 temp, i, rx_ctl_val; + for_each_encoder_on_crtc(dev, crtc, encoder) { + WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG); + intel_prepare_ddi_buffer(encoder); + } + /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the * mode set "sequence for CRT port" document: * - TP1 to TP2 time with the default value @@ -2085,10 +2027,9 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) TRANS_CLK_SEL_DISABLED); } -static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, - enum port port, int type) +static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, + u32 level, enum port port, int type) { - struct drm_i915_private *dev_priv = dev->dev_private; const struct ddi_buf_trans *ddi_translations; uint8_t iboost; uint8_t dp_iboost, hdmi_iboost; @@ -2103,21 +2044,26 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, if (dp_iboost) { iboost = dp_iboost; } else { - ddi_translations = skl_get_buf_trans_dp(dev, &n_entries); + ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries); iboost = ddi_translations[level].i_boost; } } else if (type == INTEL_OUTPUT_EDP) { if (dp_iboost) { iboost = dp_iboost; } else { - ddi_translations = skl_get_buf_trans_edp(dev, &n_entries); + ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries); + + if (WARN_ON(port != PORT_A && + port != PORT_E && n_entries > 9)) + n_entries = 9; + iboost = ddi_translations[level].i_boost; } } else if (type == INTEL_OUTPUT_HDMI) { if (hdmi_iboost) { iboost = hdmi_iboost; } else { - ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries); + ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries); iboost = ddi_translations[level].i_boost; } } else { @@ -2142,10 +2088,9 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg); } -static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level, - enum port port, int type) +static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, + u32 level, enum port port, int type) { - struct drm_i915_private *dev_priv = dev->dev_private; const struct bxt_ddi_buf_trans *ddi_translations; u32 n_entries, i; uint32_t val; @@ -2260,7 +2205,7 @@ static uint32_t translate_signal_level(int signal_levels) uint32_t ddi_signal_levels(struct intel_dp *intel_dp) { struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = dport->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); struct intel_encoder *encoder = &dport->base; uint8_t train_set = 
intel_dp->train_set[0]; int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | @@ -2270,10 +2215,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) level = translate_signal_level(signal_levels); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) - skl_ddi_set_iboost(dev, level, port, encoder->type); - else if (IS_BROXTON(dev)) - bxt_ddi_vswing_sequence(dev, level, port, encoder->type); + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + skl_ddi_set_iboost(dev_priv, level, port, encoder->type); + else if (IS_BROXTON(dev_priv)) + bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); return DDI_BUF_TRANS_SELECT(level); } @@ -2325,12 +2270,12 @@ void intel_ddi_clk_select(struct intel_encoder *encoder, static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); enum port port = intel_ddi_get_encoder_port(intel_encoder); int type = intel_encoder->type; - int hdmi_level; + + intel_prepare_ddi_buffer(intel_encoder); if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2348,17 +2293,11 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); - if (port != PORT_A || INTEL_INFO(dev)->gen >= 9) + if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9) intel_dp_stop_link_train(intel_dp); } else if (type == INTEL_OUTPUT_HDMI) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - if (IS_BROXTON(dev)) { - hdmi_level = dev_priv->vbt. 
- ddi_port_info[port].hdmi_level_shift; - bxt_ddi_vswing_sequence(dev, hdmi_level, port, - INTEL_OUTPUT_HDMI); - } intel_hdmi->set_infoframes(encoder, crtc->config->has_hdmi_sink, &crtc->config->base.adjusted_mode); @@ -3282,6 +3221,33 @@ void intel_ddi_init(struct drm_device *dev, enum port port) struct intel_encoder *intel_encoder; struct drm_encoder *encoder; bool init_hdmi, init_dp; + int max_lanes; + + if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) { + switch (port) { + case PORT_A: + max_lanes = 4; + break; + case PORT_E: + max_lanes = 0; + break; + default: + max_lanes = 4; + break; + } + } else { + switch (port) { + case PORT_A: + max_lanes = 2; + break; + case PORT_E: + max_lanes = 2; + break; + default: + max_lanes = 4; + break; + } + } init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || dev_priv->vbt.ddi_port_info[port].supports_hdmi); @@ -3315,6 +3281,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); + intel_dig_port->max_lanes = max_lanes; /* * Bspec says that DDI_A_4_LANES is the only supported configuration diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8816ba1d6d53..7a5ed95f2cd9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -85,8 +85,6 @@ static const uint32_t intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); - static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); static void ironlake_pch_clock_get(struct intel_crtc *crtc, @@ -1152,11 +1150,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) } } -static const char *state_string(bool enabled) -{ - return enabled ? 
"on" : "off"; -} - /* Only for pre-ILK configs */ void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -1168,7 +1161,7 @@ void assert_pll(struct drm_i915_private *dev_priv, cur_state = !!(val & DPLL_VCO_ENABLE); I915_STATE_WARN(cur_state != state, "PLL state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } /* XXX: the dsi pll is shared between MIPI DSI ports */ @@ -1184,7 +1177,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) cur_state = val & DSI_PLL_VCO_EN; I915_STATE_WARN(cur_state != state, "DSI PLL state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) @@ -1208,14 +1201,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool cur_state; struct intel_dpll_hw_state hw_state; - if (WARN (!pll, - "asserting DPLL %s with no DPLL\n", state_string(state))) + if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) return; cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); I915_STATE_WARN(cur_state != state, "%s assertion failure (expected %s, current %s)\n", - pll->name, state_string(state), state_string(cur_state)); + pll->name, onoff(state), onoff(cur_state)); } static void assert_fdi_tx(struct drm_i915_private *dev_priv, @@ -1235,7 +1227,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, } I915_STATE_WARN(cur_state != state, "FDI TX state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) @@ -1250,7 +1242,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, cur_state = !!(val & FDI_RX_ENABLE); I915_STATE_WARN(cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) @@ -1282,7 +1274,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, cur_state = !!(val & FDI_RX_PLL_ENABLE); I915_STATE_WARN(cur_state != state, "FDI RX PLL assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } void assert_panel_unlocked(struct drm_i915_private *dev_priv, @@ -1340,7 +1332,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv, I915_STATE_WARN(cur_state != state, "cursor on pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), state_string(state), state_string(cur_state)); + pipe_name(pipe), onoff(state), onoff(cur_state)); } #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) @@ -1367,7 +1359,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, I915_STATE_WARN(cur_state != state, "pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), state_string(state), state_string(cur_state)); + pipe_name(pipe), onoff(state), onoff(cur_state)); } static void assert_plane(struct drm_i915_private *dev_priv, @@ -1380,7 +1372,7 @@ static void assert_plane(struct drm_i915_private 
*dev_priv, cur_state = !!(val & DISPLAY_PLANE_ENABLE); I915_STATE_WARN(cur_state != state, "plane %c assertion failure (expected %s, current %s)\n", - plane_name(plane), state_string(state), state_string(cur_state)); + plane_name(plane), onoff(state), onoff(cur_state)); } #define assert_plane_enabled(d, p) assert_plane(d, p, true) @@ -2153,6 +2145,17 @@ static void intel_enable_pipe(struct intel_crtc *crtc) I915_WRITE(reg, val | PIPECONF_ENABLE); POSTING_READ(reg); + + /* + * Until the pipe starts DSL will read as 0, which would cause + * an apparent vblank timestamp jump, which messes up also the + * frame count when it's derived from the timestamps. So let's + * wait for the pipe to start properly before we call + * drm_crtc_vblank_on() + */ + if (dev->max_vblank_count == 0 && + wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) + DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); } /** @@ -2214,67 +2217,75 @@ static bool need_vtd_wa(struct drm_device *dev) return false; } -unsigned int -intel_tile_height(struct drm_device *dev, uint32_t pixel_format, - uint64_t fb_format_modifier, unsigned int plane) +static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) { - unsigned int tile_height; - uint32_t pixel_bytes; + return IS_GEN2(dev_priv) ? 2048 : 4096; +} - switch (fb_format_modifier) { +static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, unsigned int cpp) +{ + switch (fb_modifier) { case DRM_FORMAT_MOD_NONE: - tile_height = 1; - break; + return cpp; case I915_FORMAT_MOD_X_TILED: - tile_height = IS_GEN2(dev) ? 16 : 8; - break; + if (IS_GEN2(dev_priv)) + return 128; + else + return 512; case I915_FORMAT_MOD_Y_TILED: - tile_height = 32; - break; + if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) + return 128; + else + return 512; case I915_FORMAT_MOD_Yf_TILED: - pixel_bytes = drm_format_plane_cpp(pixel_format, plane); - switch (pixel_bytes) { - default: + switch (cpp) { case 1: - tile_height = 64; - break; + return 64; case 2: case 4: - tile_height = 32; - break; + return 128; case 8: - tile_height = 16; - break; case 16: - WARN_ONCE(1, - "128-bit pixels are not supported for display!"); - tile_height = 16; - break; + return 256; + default: + MISSING_CASE(cpp); + return cpp; } break; default: - MISSING_CASE(fb_format_modifier); - tile_height = 1; - break; + MISSING_CASE(fb_modifier); + return cpp; } +} - return tile_height; +unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, unsigned int cpp) +{ + if (fb_modifier == DRM_FORMAT_MOD_NONE) + return 1; + else + return intel_tile_size(dev_priv) / + intel_tile_width(dev_priv, fb_modifier, cpp); } unsigned int intel_fb_align_height(struct drm_device *dev, unsigned int height, - uint32_t pixel_format, uint64_t fb_format_modifier) + uint32_t pixel_format, uint64_t fb_modifier) { - return ALIGN(height, intel_tile_height(dev, pixel_format, - fb_format_modifier, 0)); + unsigned int cpp = drm_format_plane_cpp(pixel_format, 0); + unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp); + + return ALIGN(height, tile_height); } static void intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, const struct drm_plane_state *plane_state) { + struct drm_i915_private *dev_priv = to_i915(fb->dev); struct intel_rotation_info *info = &view->params.rotation_info; - unsigned int tile_height, tile_pitch; + unsigned int tile_size, tile_width, tile_height, cpp; *view = 
i915_ggtt_view_normal; @@ -2292,26 +2303,28 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, info->uv_offset = fb->offsets[1]; info->fb_modifier = fb->modifier[0]; - tile_height = intel_tile_height(fb->dev, fb->pixel_format, - fb->modifier[0], 0); - tile_pitch = PAGE_SIZE / tile_height; - info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch); + tile_size = intel_tile_size(dev_priv); + + cpp = drm_format_plane_cpp(fb->pixel_format, 0); + tile_width = intel_tile_width(dev_priv, cpp, fb->modifier[0]); + tile_height = tile_size / tile_width; + + info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width); info->height_pages = DIV_ROUND_UP(fb->height, tile_height); - info->size = info->width_pages * info->height_pages * PAGE_SIZE; + info->size = info->width_pages * info->height_pages * tile_size; if (info->pixel_format == DRM_FORMAT_NV12) { - tile_height = intel_tile_height(fb->dev, fb->pixel_format, - fb->modifier[0], 1); - tile_pitch = PAGE_SIZE / tile_height; - info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch); - info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, - tile_height); - info->size_uv = info->width_pages_uv * info->height_pages_uv * - PAGE_SIZE; + cpp = drm_format_plane_cpp(fb->pixel_format, 1); + tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp); + tile_height = tile_size / tile_width; + + info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width); + info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height); + info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size; } } -static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) +static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) { if (INTEL_INFO(dev_priv)->gen >= 9) return 256 * 1024; @@ -2324,6 +2337,25 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) return 0; } +static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier) +{ + switch (fb_modifier) { + case DRM_FORMAT_MOD_NONE: + return intel_linear_alignment(dev_priv); + case I915_FORMAT_MOD_X_TILED: + if (INTEL_INFO(dev_priv)->gen >= 9) + return 256 * 1024; + return 0; + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + return 1 * 1024 * 1024; + default: + MISSING_CASE(fb_modifier); + return 0; + } +} + int intel_pin_and_fence_fb_obj(struct drm_plane *plane, struct drm_framebuffer *fb, @@ -2338,29 +2370,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - switch (fb->modifier[0]) { - case DRM_FORMAT_MOD_NONE: - alignment = intel_linear_alignment(dev_priv); - break; - case I915_FORMAT_MOD_X_TILED: - if (INTEL_INFO(dev)->gen >= 9) - alignment = 256 * 1024; - else { - /* pin() will align the object as required by fence */ - alignment = 0; - } - break; - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - if (WARN_ONCE(INTEL_INFO(dev)->gen < 9, - "Y tiling bo slipped through, driver bug!\n")) - return -EINVAL; - alignment = 1 * 1024 * 1024; - break; - default: - MISSING_CASE(fb->modifier[0]); - return -EINVAL; - } + alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); intel_fill_fb_ggtt_view(&view, fb, plane_state); @@ -2438,22 +2448,27 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb, /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel * is assumed to be a power-of-two. 
*/ -unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - unsigned int tiling_mode, - unsigned int cpp, - unsigned int pitch) -{ - if (tiling_mode != I915_TILING_NONE) { +unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv, + int *x, int *y, + uint64_t fb_modifier, + unsigned int cpp, + unsigned int pitch) +{ + if (fb_modifier != DRM_FORMAT_MOD_NONE) { + unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles; - tile_rows = *y / 8; - *y %= 8; + tile_size = intel_tile_size(dev_priv); + tile_width = intel_tile_width(dev_priv, fb_modifier, cpp); + tile_height = tile_size / tile_width; - tiles = *x / (512/cpp); - *x %= 512/cpp; + tile_rows = *y / tile_height; + *y %= tile_height; - return tile_rows * pitch * 8 + tiles * 4096; + tiles = *x / (tile_width/cpp); + *x %= tile_width/cpp; + + return tile_rows * pitch * tile_height + tiles * tile_size; } else { unsigned int alignment = intel_linear_alignment(dev_priv) - 1; unsigned int offset; @@ -2598,6 +2613,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct drm_plane_state *plane_state = primary->state; struct drm_crtc_state *crtc_state = intel_crtc->base.state; struct intel_plane *intel_plane = to_intel_plane(primary); + struct intel_plane_state *intel_state = + to_intel_plane_state(plane_state); struct drm_framebuffer *fb; if (!plane_config->fb) @@ -2659,6 +2676,15 @@ valid_fb: plane_state->crtc_w = fb->width; plane_state->crtc_h = fb->height; + intel_state->src.x1 = plane_state->src_x; + intel_state->src.y1 = plane_state->src_y; + intel_state->src.x2 = plane_state->src_x + plane_state->src_w; + intel_state->src.y2 = plane_state->src_y + plane_state->src_h; + intel_state->dst.x1 = plane_state->crtc_x; + intel_state->dst.y1 = plane_state->crtc_y; + intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w; + intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h; + obj = intel_fb_obj(fb); if (obj->tiling_mode != I915_TILING_NONE) dev_priv->preserve_bios_swizzle = true; @@ -2670,36 +2696,23 @@ valid_fb: obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit; } -static void i9xx_update_primary_plane(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y) +static void i9xx_update_primary_plane(struct drm_plane *primary, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = primary->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_plane *primary = crtc->primary; - bool visible = to_intel_plane_state(primary->state)->visible; - struct drm_i915_gem_object *obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; unsigned long linear_offset; + int x = plane_state->src.x1 >> 16; + int y = plane_state->src.y1 >> 16; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); int pixel_size; - if (!visible || !fb) { - I915_WRITE(reg, 0); - if (INTEL_INFO(dev)->gen >= 4) - I915_WRITE(DSPSURF(plane), 0); - else - I915_WRITE(DSPADDR(plane), 0); - POSTING_READ(reg); - return; - } - - obj = intel_fb_obj(fb); - if (WARN_ON(obj == NULL)) - return; - pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -2714,13 +2727,13 @@ static void 
i9xx_update_primary_plane(struct drm_crtc *crtc, * which should always be the user's requested size. */ I915_WRITE(DSPSIZE(plane), - ((intel_crtc->config->pipe_src_h - 1) << 16) | - (intel_crtc->config->pipe_src_w - 1)); + ((crtc_state->pipe_src_h - 1) << 16) | + (crtc_state->pipe_src_w - 1)); I915_WRITE(DSPPOS(plane), 0); } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) { I915_WRITE(PRIMSIZE(plane), - ((intel_crtc->config->pipe_src_h - 1) << 16) | - (intel_crtc->config->pipe_src_w - 1)); + ((crtc_state->pipe_src_h - 1) << 16) | + (crtc_state->pipe_src_w - 1)); I915_WRITE(PRIMPOS(plane), 0); I915_WRITE(PRIMCNSTALPHA(plane), 0); } @@ -2762,26 +2775,26 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, if (INTEL_INFO(dev)->gen >= 4) { intel_crtc->dspaddr_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, - fb->pitches[0]); + intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], + pixel_size, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; } else { intel_crtc->dspaddr_offset = linear_offset; } - if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { dspcntr |= DISPPLANE_ROTATE_180; - x += (intel_crtc->config->pipe_src_w - 1); - y += (intel_crtc->config->pipe_src_h - 1); + x += (crtc_state->pipe_src_w - 1); + y += (crtc_state->pipe_src_h - 1); /* Finding the last pixel of the last line of the display data and adding to linear_offset*/ linear_offset += - (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] + - (intel_crtc->config->pipe_src_w - 1) * pixel_size; + (crtc_state->pipe_src_h - 1) * fb->pitches[0] + + (crtc_state->pipe_src_w - 1) * pixel_size; } intel_crtc->adjusted_x = x; @@ -2800,37 +2813,40 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(reg); } -static void ironlake_update_primary_plane(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y) +static void i9xx_disable_primary_plane(struct drm_plane *primary, + struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_plane *primary = crtc->primary; - bool visible = to_intel_plane_state(primary->state)->visible; - struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; - unsigned long linear_offset; - u32 dspcntr; - i915_reg_t reg = DSPCNTR(plane); - int pixel_size; - if (!visible || !fb) { - I915_WRITE(reg, 0); + I915_WRITE(DSPCNTR(plane), 0); + if (INTEL_INFO(dev_priv)->gen >= 4) I915_WRITE(DSPSURF(plane), 0); - POSTING_READ(reg); - return; - } - - obj = intel_fb_obj(fb); - if (WARN_ON(obj == NULL)) - return; + else + I915_WRITE(DSPADDR(plane), 0); + POSTING_READ(DSPCNTR(plane)); +} - pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); +static void ironlake_update_primary_plane(struct drm_plane *primary, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_device *dev = primary->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + int plane = intel_crtc->plane; + unsigned long linear_offset; + u32 dspcntr; + i915_reg_t reg = DSPCNTR(plane); + int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + int x = plane_state->src.x1 >> 16; + int y = plane_state->src.y1 >> 
16; dspcntr = DISPPLANE_GAMMA_ENABLE; - dspcntr |= DISPLAY_PLANE_ENABLE; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) @@ -2867,23 +2883,23 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, linear_offset = y * fb->pitches[0] + x * pixel_size; intel_crtc->dspaddr_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, - fb->pitches[0]); + intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], + pixel_size, + fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; - if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { dspcntr |= DISPPLANE_ROTATE_180; if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { - x += (intel_crtc->config->pipe_src_w - 1); - y += (intel_crtc->config->pipe_src_h - 1); + x += (crtc_state->pipe_src_w - 1); + y += (crtc_state->pipe_src_h - 1); /* Finding the last pixel of the last line of the display data and adding to linear_offset*/ linear_offset += - (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] + - (intel_crtc->config->pipe_src_w - 1) * pixel_size; + (crtc_state->pipe_src_h - 1) * fb->pitches[0] + + (crtc_state->pipe_src_w - 1) * pixel_size; } } @@ -2904,37 +2920,15 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(reg); } -u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, - uint32_t pixel_format) +u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, uint32_t pixel_format) { - u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8; - - /* - * The stride is either expressed as a multiple of 64 bytes - * chunks for linear buffers or in number of tiles for tiled - * buffers. - */ - switch (fb_modifier) { - case DRM_FORMAT_MOD_NONE: - return 64; - case I915_FORMAT_MOD_X_TILED: - if (INTEL_INFO(dev)->gen == 2) - return 128; - return 512; - case I915_FORMAT_MOD_Y_TILED: - /* No need to check for old gens and Y tiling since this is - * about the display engine and those will be blocked before - * we get here. 
- */ - return 128; - case I915_FORMAT_MOD_Yf_TILED: - if (bits_per_pixel == 8) - return 64; - else - return 128; - default: - MISSING_CASE(fb_modifier); + if (fb_modifier == DRM_FORMAT_MOD_NONE) { return 64; + } else { + int cpp = drm_format_plane_cpp(pixel_format, 0); + + return intel_tile_width(dev_priv, fb_modifier, cpp); } } @@ -2946,7 +2940,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane, struct i915_vma *vma; u64 offset; - intel_fill_fb_ggtt_view(&view, intel_plane->base.fb, + intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb, intel_plane->base.state); vma = i915_gem_obj_to_ggtt_view(obj, &view); @@ -3074,36 +3068,30 @@ u32 skl_plane_ctl_rotation(unsigned int rotation) return 0; } -static void skylake_update_primary_plane(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y) +static void skylake_update_primary_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_plane *plane = crtc->primary; - bool visible = to_intel_plane_state(plane->state)->visible; - struct drm_i915_gem_object *obj; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_crtc->pipe; u32 plane_ctl, stride_div, stride; u32 tile_height, plane_offset, plane_size; - unsigned int rotation; + unsigned int rotation = plane_state->base.rotation; int x_offset, y_offset; u32 surf_addr; - struct intel_crtc_state *crtc_state = intel_crtc->config; - struct intel_plane_state *plane_state; - int src_x = 0, src_y = 0, src_w = 0, src_h = 0; - int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0; - int scaler_id = -1; - - plane_state = to_intel_plane_state(plane->state); - - if (!visible || !fb) { - I915_WRITE(PLANE_CTL(pipe, 0), 0); - I915_WRITE(PLANE_SURF(pipe, 0), 0); - POSTING_READ(PLANE_CTL(pipe, 0)); - return; - } + int scaler_id = plane_state->scaler_id; + int src_x = plane_state->src.x1 >> 16; + int src_y = plane_state->src.y1 >> 16; + int src_w = drm_rect_width(&plane_state->src) >> 16; + int src_h = drm_rect_height(&plane_state->src) >> 16; + int dst_x = plane_state->dst.x1; + int dst_y = plane_state->dst.y1; + int dst_w = drm_rect_width(&plane_state->dst); + int dst_h = drm_rect_height(&plane_state->dst); plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -3112,41 +3100,27 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, plane_ctl |= skl_plane_ctl_format(fb->pixel_format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; - - rotation = plane->state->rotation; plane_ctl |= skl_plane_ctl_rotation(rotation); - obj = intel_fb_obj(fb); - stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], + stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], fb->pixel_format); surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0); WARN_ON(drm_rect_width(&plane_state->src) == 0); - scaler_id = plane_state->scaler_id; - src_x = plane_state->src.x1 >> 16; - src_y = plane_state->src.y1 >> 16; - src_w = drm_rect_width(&plane_state->src) >> 16; - src_h = drm_rect_height(&plane_state->src) >> 16; - dst_x = plane_state->dst.x1; - dst_y = plane_state->dst.y1; - dst_w = drm_rect_width(&plane_state->dst); 
- dst_h = drm_rect_height(&plane_state->dst); - - WARN_ON(x != src_x || y != src_y); - if (intel_rotation_90_or_270(rotation)) { + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev, fb->pixel_format, - fb->modifier[0], 0); + tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); stride = DIV_ROUND_UP(fb->height, tile_height); - x_offset = stride * tile_height - y - src_h; - y_offset = x; + x_offset = stride * tile_height - src_y - src_h; + y_offset = src_x; plane_size = (src_w - 1) << 16 | (src_h - 1); } else { stride = fb->pitches[0] / stride_div; - x_offset = x; - y_offset = y; + x_offset = src_x; + y_offset = src_y; plane_size = (src_h - 1) << 16 | (src_w - 1); } plane_offset = y_offset << 16 | x_offset; @@ -3179,20 +3153,30 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(PLANE_SURF(pipe, 0)); } -/* Assume fb object is pinned & idle & fenced and just update base pointers */ -static int -intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y, enum mode_set_atomic state) +static void skylake_disable_primary_plane(struct drm_plane *primary, + struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = to_intel_crtc(crtc)->pipe; if (dev_priv->fbc.deactivate) dev_priv->fbc.deactivate(dev_priv); - dev_priv->display.update_primary_plane(crtc, fb, x, y); + I915_WRITE(PLANE_CTL(pipe, 0), 0); + I915_WRITE(PLANE_SURF(pipe, 0), 0); + POSTING_READ(PLANE_SURF(pipe, 0)); +} + +/* Assume fb object is pinned & idle & fenced and just update base pointers */ +static int +intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + /* Support for kgdboc is disabled, this needs a major rework. 
*/ + DRM_ERROR("legacy panic handler not supported any more.\n"); - return 0; + return -ENODEV; } static void intel_complete_page_flips(struct drm_device *dev) @@ -3219,8 +3203,10 @@ static void intel_update_primary_planes(struct drm_device *dev) drm_modeset_lock_crtc(crtc, &plane->base); plane_state = to_intel_plane_state(plane->base.state); - if (crtc->state->active && plane_state->base.fb) - plane->commit_plane(&plane->base, plane_state); + if (plane_state->visible) + plane->update_plane(&plane->base, + to_intel_crtc_state(crtc->state), + plane_state); drm_modeset_unlock_crtc(crtc); } @@ -4452,7 +4438,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, - &state->scaler_state.scaler_id, DRM_ROTATE_0, + &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); } @@ -5370,6 +5356,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) { + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long put_domains[I915_MAX_PIPES] = {}; @@ -5383,13 +5370,9 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) modeset_get_crtc_power_domains(crtc); } - if (dev_priv->display.modeset_commit_cdclk) { - unsigned int cdclk = to_intel_atomic_state(state)->cdclk; - - if (cdclk != dev_priv->cdclk_freq && - !WARN_ON(!state->allow_modeset)) - dev_priv->display.modeset_commit_cdclk(state); - } + if (dev_priv->display.modeset_commit_cdclk && + intel_state->dev_cdclk != dev_priv->cdclk_freq) + dev_priv->display.modeset_commit_cdclk(state); for (i = 0; i < I915_MAX_PIPES; i++) if (put_domains[i]) @@ -6063,22 +6046,31 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, static int intel_mode_max_pixclk(struct drm_device *dev, struct drm_atomic_state *state) { - struct intel_crtc *intel_crtc; - struct intel_crtc_state *crtc_state; - int max_pixclk = 0; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + unsigned max_pixclk = 0, i; + enum pipe pipe; - for_each_intel_crtc(dev, intel_crtc) { - crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, + sizeof(intel_state->min_pixclk)); - if (!crtc_state->base.enable) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) { + int pixclk = 0; - max_pixclk = max(max_pixclk, - crtc_state->base.adjusted_mode.crtc_clock); + if (crtc_state->enable) + pixclk = crtc_state->adjusted_mode.crtc_clock; + + intel_state->min_pixclk[i] = pixclk; } + if (!intel_state->active_crtcs) + return 0; + + for_each_pipe(dev_priv, pipe) + max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk); + return max_pixclk; } @@ -6087,13 +6079,18 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_pixclk = intel_mode_max_pixclk(dev, state); + struct intel_atomic_state *intel_state = + to_intel_atomic_state(state); if (max_pixclk < 0) 
return max_pixclk; - to_intel_atomic_state(state)->cdclk = + intel_state->cdclk = intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0); + return 0; } @@ -6102,13 +6099,18 @@ static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_pixclk = intel_mode_max_pixclk(dev, state); + struct intel_atomic_state *intel_state = + to_intel_atomic_state(state); if (max_pixclk < 0) return max_pixclk; - to_intel_atomic_state(state)->cdclk = + intel_state->cdclk = intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk); + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); + return 0; } @@ -6151,8 +6153,10 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); + unsigned req_cdclk = old_intel_state->dev_cdclk; /* * FIXME: We can end up here with all power domains off, yet @@ -6383,6 +6387,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) for_each_power_domain(domain, domains) intel_display_power_put(dev_priv, domain); intel_crtc->enabled_power_domains = 0; + + dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); + dev_priv->min_pixclk[intel_crtc->pipe] = 0; } /* @@ -7593,26 +7600,34 @@ static void chv_prepare_pll(struct intel_crtc *crtc, * in cases where we need the PLL enabled even when @pipe is not going to * be enabled. 
*/ -void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, - const struct dpll *dpll) +int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, + const struct dpll *dpll) { struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); - struct intel_crtc_state pipe_config = { - .base.crtc = &crtc->base, - .pixel_multiplier = 1, - .dpll = *dpll, - }; + struct intel_crtc_state *pipe_config; + + pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); + if (!pipe_config) + return -ENOMEM; + + pipe_config->base.crtc = &crtc->base; + pipe_config->pixel_multiplier = 1; + pipe_config->dpll = *dpll; if (IS_CHERRYVIEW(dev)) { - chv_compute_dpll(crtc, &pipe_config); - chv_prepare_pll(crtc, &pipe_config); - chv_enable_pll(crtc, &pipe_config); + chv_compute_dpll(crtc, pipe_config); + chv_prepare_pll(crtc, pipe_config); + chv_enable_pll(crtc, pipe_config); } else { - vlv_compute_dpll(crtc, &pipe_config); - vlv_prepare_pll(crtc, &pipe_config); - vlv_enable_pll(crtc, &pipe_config); + vlv_compute_dpll(crtc, pipe_config); + vlv_prepare_pll(crtc, pipe_config); + vlv_enable_pll(crtc, pipe_config); } + + kfree(pipe_config); + + return 0; } /** @@ -9246,7 +9261,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->width = ((val >> 0) & 0x1fff) + 1; val = I915_READ(PLANE_STRIDE(pipe, 0)); - stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0], + stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0], fb->pixel_format); fb->pitches[0] = (val & 0x3ff) * stride_mult; @@ -9662,14 +9677,14 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) val |= PCH_LP_PARTITION_LEVEL_DISABLE; I915_WRITE(SOUTH_DSPCLK_GATE_D, val); } - - intel_prepare_ddi(dev); } static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); + unsigned int req_cdclk = old_intel_state->dev_cdclk; broxton_set_cdclk(dev, req_cdclk); } @@ -9677,29 +9692,41 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) /* compute the max rate for new configuration */ static int ilk_max_pixel_rate(struct drm_atomic_state *state) { - struct intel_crtc *intel_crtc; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; struct intel_crtc_state *crtc_state; - int max_pixel_rate = 0; + unsigned max_pixel_rate = 0, i; + enum pipe pipe; - for_each_intel_crtc(state->dev, intel_crtc) { - int pixel_rate; + memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, + sizeof(intel_state->min_pixclk)); - crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + for_each_crtc_in_state(state, crtc, cstate, i) { + int pixel_rate; - if (!crtc_state->base.enable) + crtc_state = to_intel_crtc_state(cstate); + if (!crtc_state->base.enable) { + intel_state->min_pixclk[i] = 0; continue; + } pixel_rate = ilk_pipe_pixel_rate(crtc_state); /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled) + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); - max_pixel_rate = max(max_pixel_rate, pixel_rate); + intel_state->min_pixclk[i] = pixel_rate; } + if (!intel_state->active_crtcs) + return 0; + 
+ for_each_pipe(dev_priv, pipe) + max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate); + return max_pixel_rate; } @@ -9783,6 +9810,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); int max_pixclk = ilk_max_pixel_rate(state); int cdclk; @@ -9805,7 +9833,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) return -EINVAL; } - to_intel_atomic_state(state)->cdclk = cdclk; + intel_state->cdclk = intel_state->dev_cdclk = cdclk; + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = 337500; return 0; } @@ -9813,7 +9843,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); + unsigned req_cdclk = old_intel_state->dev_cdclk; broadwell_set_cdclk(dev, req_cdclk); } @@ -10026,16 +10058,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, return true; } -static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) +static void i845_update_cursor(struct drm_crtc *crtc, u32 base, + const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; - if (on) { - unsigned int width = intel_crtc->base.cursor->state->crtc_w; - unsigned int height = intel_crtc->base.cursor->state->crtc_h; + if (plane_state && plane_state->visible) { + unsigned int width = plane_state->base.crtc_w; + unsigned int height = plane_state->base.crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; switch (stride) { @@ -10088,7 +10121,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) } } -static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, + const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -10096,9 +10130,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) int pipe = intel_crtc->pipe; uint32_t cntl = 0; - if (on) { + if (plane_state && plane_state->visible) { cntl = MCURSOR_GAMMA_ENABLE; - switch (intel_crtc->base.cursor->state->crtc_w) { + switch (plane_state->base.crtc_w) { case 64: cntl |= CURSOR_MODE_64_ARGB_AX; break; @@ -10109,17 +10143,17 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) cntl |= CURSOR_MODE_256_ARGB_AX; break; default: - MISSING_CASE(intel_crtc->base.cursor->state->crtc_w); + MISSING_CASE(plane_state->base.crtc_w); return; } cntl |= pipe << 28; /* Connect to correct pipe */ if (HAS_DDI(dev)) cntl |= CURSOR_PIPE_CSC_ENABLE; - } - if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) - cntl |= CURSOR_ROTATE_180; + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) + cntl |= CURSOR_ROTATE_180; + } if (intel_crtc->cursor_cntl != cntl) { I915_WRITE(CURCNTR(pipe), cntl); @@ -10136,56 +10170,45 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) /* If no-part of the cursor is visible on the 
framebuffer, then the GPU may hang... */ static void intel_crtc_update_cursor(struct drm_crtc *crtc, - bool on) + const struct intel_plane_state *plane_state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - struct drm_plane_state *cursor_state = crtc->cursor->state; - int x = cursor_state->crtc_x; - int y = cursor_state->crtc_y; - u32 base = 0, pos = 0; - - base = intel_crtc->cursor_addr; - - if (x >= intel_crtc->config->pipe_src_w) - on = false; + u32 base = intel_crtc->cursor_addr; + u32 pos = 0; - if (y >= intel_crtc->config->pipe_src_h) - on = false; + if (plane_state) { + int x = plane_state->base.crtc_x; + int y = plane_state->base.crtc_y; - if (x < 0) { - if (x + cursor_state->crtc_w <= 0) - on = false; - - pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; - x = -x; - } - pos |= x << CURSOR_X_SHIFT; + if (x < 0) { + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + x = -x; + } + pos |= x << CURSOR_X_SHIFT; - if (y < 0) { - if (y + cursor_state->crtc_h <= 0) - on = false; + if (y < 0) { + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + y = -y; + } + pos |= y << CURSOR_Y_SHIFT; - pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; - y = -y; + /* ILK+ do this automagically */ + if (HAS_GMCH_DISPLAY(dev) && + plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + base += (plane_state->base.crtc_h * + plane_state->base.crtc_w - 1) * 4; + } } - pos |= y << CURSOR_Y_SHIFT; I915_WRITE(CURPOS(pipe), pos); - /* ILK+ do this automagically */ - if (HAS_GMCH_DISPLAY(dev) && - crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { - base += (cursor_state->crtc_h * - cursor_state->crtc_w - 1) * 4; - } - if (IS_845G(dev) || IS_I865G(dev)) - i845_update_cursor(crtc, base, on); + i845_update_cursor(crtc, base, plane_state); else - i9xx_update_cursor(crtc, base, on); + i9xx_update_cursor(crtc, base, plane_state); } static bool cursor_size_ok(struct drm_device *dev, @@ -10778,7 +10801,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; struct drm_display_mode *mode; - struct intel_crtc_state pipe_config; + struct intel_crtc_state *pipe_config; int htot = I915_READ(HTOTAL(cpu_transcoder)); int hsync = I915_READ(HSYNC(cpu_transcoder)); int vtot = I915_READ(VTOTAL(cpu_transcoder)); @@ -10789,6 +10812,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, if (!mode) return NULL; + pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); + if (!pipe_config) { + kfree(mode); + return NULL; + } + /* * Construct a pipe_config sufficient for getting the clock info * back out of crtc_clock_get. @@ -10796,14 +10825,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need * to use a real value here instead. 
*/ - pipe_config.cpu_transcoder = (enum transcoder) pipe; - pipe_config.pixel_multiplier = 1; - pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe)); - pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe)); - pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe)); - i9xx_crtc_clock_get(intel_crtc, &pipe_config); - - mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier; + pipe_config->cpu_transcoder = (enum transcoder) pipe; + pipe_config->pixel_multiplier = 1; + pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe)); + pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe)); + pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe)); + i9xx_crtc_clock_get(intel_crtc, pipe_config); + + mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; @@ -10815,6 +10844,8 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, drm_mode_set_name(mode); + kfree(pipe_config); + return mode; } @@ -11319,13 +11350,12 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, */ if (intel_rotation_90_or_270(rotation)) { /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev, fb->pixel_format, - fb->modifier[0], 0); + tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0); stride = DIV_ROUND_UP(fb->height, tile_height); } else { stride = fb->pitches[0] / - intel_fb_stride_alignment(dev, fb->modifier[0], - fb->pixel_format); + intel_fb_stride_alignment(dev_priv, fb->modifier[0], + fb->pixel_format); } /* @@ -11660,9 +11690,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj->last_write_req); } else { if (!request) { - ret = i915_gem_request_alloc(ring, ring->default_context, &request); - if (ret) + request = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(request)) { + ret = PTR_ERR(request); goto cleanup_unpin; + } } ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, @@ -11831,8 +11863,13 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, if (!was_crtc_enabled && WARN_ON(was_visible)) was_visible = false; - if (!is_crtc_enabled && WARN_ON(visible)) - visible = false; + /* + * Visibility is calculated as if the crtc was on, but + * after scaler setup everything depends on it being off + * when the crtc isn't active. + */ + if (!is_crtc_enabled) + to_intel_plane_state(plane_state)->visible = visible = false; if (!was_visible && !visible) return 0; @@ -12075,11 +12112,21 @@ connected_sink_compute_bpp(struct intel_connector *connector, pipe_config->pipe_bpp = connector->base.display_info.bpc*3; } - /* Clamp bpp to 8 on screens without EDID 1.4 */ - if (connector->base.display_info.bpc == 0 && bpp > 24) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", - bpp); - pipe_config->pipe_bpp = 24; + /* Clamp bpp to default limit on screens without EDID 1.4 */ + if (connector->base.display_info.bpc == 0) { + int type = connector->base.connector_type; + int clamp_bpp = 24; + + /* Fall back to 18 bpp when DP sink capability is unknown. 
*/ + if (type == DRM_MODE_CONNECTOR_DisplayPort || + type == DRM_MODE_CONNECTOR_eDP) + clamp_bpp = 18; + + if (bpp > clamp_bpp) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", + bpp, clamp_bpp); + pipe_config->pipe_bpp = clamp_bpp; + } } } @@ -12268,18 +12315,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, static bool check_digital_port_conflicts(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct intel_encoder *encoder; struct drm_connector *connector; - struct drm_connector_state *connector_state; unsigned int used_ports = 0; - int i; /* * Walk the connector list instead of the encoder * list to detect the problem on ddi platforms * where there's just one encoder per digital port. */ - for_each_connector_in_state(state, connector, connector_state, i) { + drm_for_each_connector(connector, dev) { + struct drm_connector_state *connector_state; + struct intel_encoder *encoder; + + connector_state = drm_atomic_get_existing_connector_state(state, connector); + if (!connector_state) + connector_state = connector->state; + if (!connector_state->best_encoder) continue; @@ -12515,19 +12566,22 @@ intel_compare_m_n(unsigned int m, unsigned int n, BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); - if (m > m2) { - while (m > m2) { + if (n > n2) { + while (n > n2) { m2 <<= 1; n2 <<= 1; } - } else if (m < m2) { - while (m < m2) { + } else if (n < n2) { + while (n < n2) { m <<= 1; n <<= 1; } } - return m == m2 && n == n2; + if (n != n2) + return false; + + return intel_fuzzy_clock_check(m, m2); } static bool @@ -13202,15 +13256,27 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) static int intel_modeset_checks(struct drm_atomic_state *state) { - struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = state->dev->dev_private; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int ret = 0, i; if (!check_digital_port_conflicts(state)) { DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); return -EINVAL; } + intel_state->modeset = true; + intel_state->active_crtcs = dev_priv->active_crtcs; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (crtc_state->active) + intel_state->active_crtcs |= 1 << i; + else + intel_state->active_crtcs &= ~(1 << i); + } + /* * See if the config requires any additional preparation, e.g. * to adjust global state with pipes off. We need to do this @@ -13219,22 +13285,19 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * adjusted_mode bits in the crtc directly. 
*/ if (dev_priv->display.modeset_calc_cdclk) { - unsigned int cdclk; - ret = dev_priv->display.modeset_calc_cdclk(state); - cdclk = to_intel_atomic_state(state)->cdclk; - if (!ret && cdclk != dev_priv->cdclk_freq) + if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) ret = intel_modeset_all_pipes(state); if (ret < 0) return ret; } else - to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq; + to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq; intel_modeset_clear_plls(state); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) return haswell_mode_set_planes_workaround(state); return 0; @@ -13452,12 +13515,12 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async) { + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; - int ret = 0; - int i; - bool any_ms = false; + int ret = 0, i; + bool hw_check = intel_state->modeset; ret = intel_atomic_prepare_commit(dev, state, async); if (ret) { @@ -13468,13 +13531,19 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_swap_state(dev, state); dev_priv->wm.config = to_intel_atomic_state(state)->wm_config; + if (intel_state->modeset) { + memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, + sizeof(intel_state->min_pixclk)); + dev_priv->active_crtcs = intel_state->active_crtcs; + dev_priv->atomic_cdclk_freq = intel_state->cdclk; + } + for_each_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (!needs_modeset(crtc->state)) continue; - any_ms = true; intel_pre_plane_update(intel_crtc); if (crtc_state->active) { @@ -13499,7 +13568,7 @@ static int intel_atomic_commit(struct drm_device *dev, * update the the output configuration. */ intel_modeset_update_crtc_state(state); - if (any_ms) { + if (intel_state->modeset) { intel_shared_dpll_commit(state); drm_atomic_helper_update_legacy_modeset_state(state->dev, state); @@ -13526,7 +13595,7 @@ static int intel_atomic_commit(struct drm_device *dev, put_domains = modeset_get_crtc_power_domains(crtc); /* make sure intel_modeset_check_state runs */ - any_ms = true; + hw_check = true; } if (!modeset) @@ -13553,11 +13622,24 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_cleanup_planes(dev, state); mutex_unlock(&dev->struct_mutex); - if (any_ms) + if (hw_check) intel_modeset_check_state(dev, state); drm_atomic_state_free(state); + /* As one of the primary mmio accessors, KMS has a high likelihood + * of triggering bugs in unclaimed access. After we finish + * modesetting, see if an error has been flagged, and if so + * enable debugging for the next modeset - and hope we catch + * the culprit. + * + * XXX note that we assume display power is on at this point. + * This might hold true now but we need to add pm helper to check + * unclaimed only when the hardware is on, as atomic commits + * can happen also when the device is completely off. 
+ */ + intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + return 0; } @@ -13846,7 +13928,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state struct drm_i915_private *dev_priv; int crtc_clock, cdclk; - if (!intel_crtc || !crtc_state) + if (!intel_crtc || !crtc_state->base.enable) return DRM_PLANE_HELPER_NO_SCALING; dev = intel_crtc->base.dev; @@ -13879,11 +13961,12 @@ intel_check_primary_plane(struct drm_plane *plane, int max_scale = DRM_PLANE_HELPER_NO_SCALING; bool can_position = false; - /* use scaler when colorkey is not required */ - if (INTEL_INFO(plane->dev)->gen >= 9 && - state->ckey.flags == I915_SET_COLORKEY_NONE) { - min_scale = 1; - max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); + if (INTEL_INFO(plane->dev)->gen >= 9) { + /* use scaler when colorkey is not required */ + if (state->ckey.flags == I915_SET_COLORKEY_NONE) { + min_scale = 1; + max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); + } can_position = true; } @@ -13894,32 +13977,6 @@ intel_check_primary_plane(struct drm_plane *plane, &state->visible); } -static void -intel_commit_primary_plane(struct drm_plane *plane, - struct intel_plane_state *state) -{ - struct drm_crtc *crtc = state->base.crtc; - struct drm_framebuffer *fb = state->base.fb; - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - crtc = crtc ? crtc : plane->crtc; - - dev_priv->display.update_primary_plane(crtc, fb, - state->src.x1 >> 16, - state->src.y1 >> 16); -} - -static void -intel_disable_primary_plane(struct drm_plane *plane, - struct drm_crtc *crtc) -{ - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - dev_priv->display.update_primary_plane(crtc, NULL, 0, 0); -} - static void intel_begin_crtc_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -14004,20 +14061,33 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->plane = pipe; primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; - primary->commit_plane = intel_commit_primary_plane; - primary->disable_plane = intel_disable_primary_plane; if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) primary->plane = !pipe; if (INTEL_INFO(dev)->gen >= 9) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); + + primary->update_plane = skylake_update_primary_plane; + primary->disable_plane = skylake_disable_primary_plane; + } else if (HAS_PCH_SPLIT(dev)) { + intel_primary_formats = i965_primary_formats; + num_formats = ARRAY_SIZE(i965_primary_formats); + + primary->update_plane = ironlake_update_primary_plane; + primary->disable_plane = i9xx_disable_primary_plane; } else if (INTEL_INFO(dev)->gen >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); + + primary->update_plane = i9xx_update_primary_plane; + primary->disable_plane = i9xx_disable_primary_plane; } else { intel_primary_formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); + + primary->update_plane = i9xx_update_primary_plane; + primary->disable_plane = i9xx_disable_primary_plane; } drm_universal_plane_init(dev, &primary->base, 0, @@ -14116,22 +14186,23 @@ static void intel_disable_cursor_plane(struct drm_plane *plane, struct drm_crtc *crtc) { - intel_crtc_update_cursor(crtc, false); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + intel_crtc->cursor_addr = 0; + 
intel_crtc_update_cursor(crtc, NULL); } static void -intel_commit_cursor_plane(struct drm_plane *plane, - struct intel_plane_state *state) +intel_update_cursor_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *state) { - struct drm_crtc *crtc = state->base.crtc; + struct drm_crtc *crtc = crtc_state->base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = plane->dev; - struct intel_crtc *intel_crtc; struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); uint32_t addr; - crtc = crtc ? crtc : plane->crtc; - intel_crtc = to_intel_crtc(crtc); - if (!obj) addr = 0; else if (!INTEL_INFO(dev)->cursor_needs_physical) @@ -14140,9 +14211,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, addr = obj->phys_handle->busaddr; intel_crtc->cursor_addr = addr; - - if (crtc->state->active) - intel_crtc_update_cursor(crtc, state->visible); + intel_crtc_update_cursor(crtc, state); } static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, @@ -14168,7 +14237,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, cursor->plane = pipe; cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); cursor->check_plane = intel_check_cursor_plane; - cursor->commit_plane = intel_commit_cursor_plane; + cursor->update_plane = intel_update_cursor_plane; cursor->disable_plane = intel_disable_cursor_plane; drm_universal_plane_init(dev, &cursor->base, 0, @@ -14642,6 +14711,7 @@ static int intel_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj) { + struct drm_i915_private *dev_priv = to_i915(dev); unsigned int aligned_height; int ret; u32 pitch_limit, stride_alignment; @@ -14683,7 +14753,8 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } - stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0], + stride_alignment = intel_fb_stride_alignment(dev_priv, + mode_cmd->modifier[0], mode_cmd->pixel_format); if (mode_cmd->pitches[0] & (stride_alignment - 1)) { DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n", @@ -14775,7 +14846,6 @@ static int intel_framebuffer_init(struct drm_device *dev, drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; - intel_fb->obj->framebuffer_references++; ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { @@ -14783,6 +14853,8 @@ static int intel_framebuffer_init(struct drm_device *dev, return ret; } + intel_fb->obj->framebuffer_references++; + return 0; } @@ -14846,8 +14918,6 @@ static void intel_init_display(struct drm_device *dev) haswell_crtc_compute_clock; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.update_primary_plane = - skylake_update_primary_plane; } else if (HAS_DDI(dev)) { dev_priv->display.get_pipe_config = haswell_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14856,8 +14926,6 @@ static void intel_init_display(struct drm_device *dev) haswell_crtc_compute_clock; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.update_primary_plane = - ironlake_update_primary_plane; } else if (HAS_PCH_SPLIT(dev)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14866,8 +14934,6 @@ static void intel_init_display(struct drm_device *dev) 
ironlake_crtc_compute_clock; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; - dev_priv->display.update_primary_plane = - ironlake_update_primary_plane; } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14875,8 +14941,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; dev_priv->display.crtc_enable = valleyview_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; - dev_priv->display.update_primary_plane = - i9xx_update_primary_plane; } else { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = @@ -14884,8 +14948,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; - dev_priv->display.update_primary_plane = - i9xx_update_primary_plane; } /* Returns the core display clock speed */ @@ -15191,12 +15253,89 @@ static void i915_disable_vga(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; + intel_update_cdclk(dev); - intel_prepare_ddi(dev); + + dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; + intel_init_clock_gating(dev); intel_enable_gt_powersave(dev); } +/* + * Calculate what we think the watermarks should be for the state we've read + * out of the hardware and then immediately program those watermarks so that + * we ensure the hardware settings match our internal state. + * + * We can calculate what we think WM's should be by creating a duplicate of the + * current state (which was constructed during hardware readout) and running it + * through the atomic check code to calculate new watermark values in the + * state object. + */ +static void sanitize_watermarks(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_atomic_state *state; + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; + struct drm_modeset_acquire_ctx ctx; + int ret; + int i; + + /* Only supported on platforms that use atomic watermark design */ + if (!dev_priv->display.program_watermarks) + return; + + /* + * We need to hold connection_mutex before calling duplicate_state so + * that the connector loop is protected. + */ + drm_modeset_acquire_init(&ctx, 0); +retry: + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } else if (WARN_ON(ret)) { + goto fail; + } + + state = drm_atomic_helper_duplicate_state(dev, &ctx); + if (WARN_ON(IS_ERR(state))) + goto fail; + + ret = intel_atomic_check(dev, state); + if (ret) { + /* + * If we fail here, it means that the hardware appears to be + * programmed in a way that shouldn't be possible, given our + * understanding of watermark requirements. This might mean a + * mistake in the hardware readout code or a mistake in the + * watermark calculations for a given platform. Raise a WARN + * so that this is noticeable. + * + * If this actually happens, we'll have to just leave the + * BIOS-programmed watermarks untouched and hope for the best. 
+ */ + WARN(true, "Could not determine valid watermarks for inherited state\n"); + goto fail; + } + + /* Write calculated watermark values back */ + to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config; + for_each_crtc_in_state(state, crtc, cstate, i) { + struct intel_crtc_state *cs = to_intel_crtc_state(cstate); + + dev_priv->display.program_watermarks(cs); + } + + drm_atomic_state_free(state); +fail: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + void intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -15317,6 +15456,13 @@ void intel_modeset_init(struct drm_device *dev) */ intel_find_initial_plane_obj(crtc, &plane_config); } + + /* + * Make sure hardware watermarks really match the state we read out. + * Note that we need to do this after reconstructing the BIOS fb's + * since the watermark calculation done here will use pstate->fb. + */ + sanitize_watermarks(dev); } static void intel_enable_pipe_a(struct drm_device *dev) @@ -15589,16 +15735,40 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_connector *connector; int i; + dev_priv->active_crtcs = 0; + for_each_intel_crtc(dev, crtc) { - __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state); - memset(crtc->config, 0, sizeof(*crtc->config)); - crtc->config->base.crtc = &crtc->base; + struct intel_crtc_state *crtc_state = crtc->config; + int pixclk = 0; - crtc->active = dev_priv->display.get_pipe_config(crtc, - crtc->config); + __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base); + memset(crtc_state, 0, sizeof(*crtc_state)); + crtc_state->base.crtc = &crtc->base; - crtc->base.state->active = crtc->active; - crtc->base.enabled = crtc->active; + crtc_state->base.active = crtc_state->base.enable = + dev_priv->display.get_pipe_config(crtc, crtc_state); + + crtc->base.enabled = crtc_state->base.enable; + crtc->active = crtc_state->base.active; + + if (crtc_state->base.active) { + dev_priv->active_crtcs |= 1 << crtc->pipe; + + if (IS_BROADWELL(dev_priv)) { + pixclk = ilk_pipe_pixel_rate(crtc_state); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (crtc_state->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); + } else if (IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv) || + IS_BROXTON(dev_priv)) + pixclk = crtc_state->base.adjusted_mode.crtc_clock; + else + WARN_ON(dev_priv->display.modeset_calc_cdclk); + } + + dev_priv->min_pixclk[crtc->pipe] = pixclk; readout_plane_state(crtc); @@ -16102,7 +16272,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, for_each_pipe(dev_priv, i) { err_printf(m, "Pipe [%d]:\n", i); err_printf(m, " Power: %s\n", - error->pipe[i].power_domain_on ? "on" : "off"); + onoff(error->pipe[i].power_domain_on)); err_printf(m, " SRC: %08x\n", error->pipe[i].source); err_printf(m, " STAT: %08x\n", error->pipe[i].stat); @@ -16130,7 +16300,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, "CPU transcoder: %c\n", transcoder_name(error->transcoder[i].cpu_transcoder)); err_printf(m, " Power: %s\n", - error->transcoder[i].power_domain_on ? 
"on" : "off"); + onoff(error->transcoder[i].power_domain_on)); err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 796e3d313cb9..e2bea710614f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -157,14 +157,9 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; u8 source_max, sink_max; - source_max = 4; - if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && - (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) - source_max = 2; - + source_max = intel_dig_port->max_lanes; sink_max = drm_dp_max_lane_count(intel_dp->dpcd); return min(source_max, sink_max); @@ -340,8 +335,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) release_cl_override = IS_CHERRYVIEW(dev) && !chv_phy_powergate_ch(dev_priv, phy, ch, true); - vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? - &chv_dpll[0].dpll : &vlv_dpll[0].dpll); + if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ? + &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) { + DRM_ERROR("Failed to force on pll for pipe %c!\n", + pipe_name(pipe)); + return; + } } /* @@ -2243,11 +2242,6 @@ static void intel_edp_backlight_power(struct intel_connector *connector, _intel_edp_backlight_off(intel_dp); } -static const char *state_string(bool enabled) -{ - return enabled ? "on" : "off"; -} - static void assert_dp_port(struct intel_dp *intel_dp, bool state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -2257,7 +2251,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) I915_STATE_WARN(cur_state != state, "DP port %c state assertion failure (expected %s, current %s)\n", port_name(dig_port->port), - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_dp_port_disabled(d) assert_dp_port((d), false) @@ -2267,7 +2261,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) I915_STATE_WARN(cur_state != state, "eDP PLL state assertion failure (expected %s, current %s)\n", - state_string(state), state_string(cur_state)); + onoff(state), onoff(cur_state)); } #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) @@ -5839,6 +5833,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_dig_port->port; int type, ret; + if (WARN(intel_dig_port->max_lanes < 1, + "Not enough lanes (%d) for DP on port %c\n", + intel_dig_port->max_lanes, port_name(port))) + return false; + intel_dp->pps_pipe = INVALID_PIPE; /* intel_dp vfuncs */ @@ -6037,6 +6036,7 @@ intel_dp_init(struct drm_device *dev, intel_dig_port->port = port; dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->dp.output_reg = output_reg; + intel_dig_port->max_lanes = 4; intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; if (IS_CHERRYVIEW(dev)) { diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index fa0dabf578dc..2a2ab306ad84 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -184,7 +184,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) intel_mst->port = found->port; if 
(intel_dp->active_mst_links == 0) { - intel_ddi_clk_select(encoder, intel_crtc->config); + intel_prepare_ddi_buffer(&intel_dig_port->base); + + intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config); intel_dp_set_link_params(intel_dp, intel_crtc->config); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 98e434537dd2..bf6f98134b50 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -246,7 +246,18 @@ struct intel_atomic_state { struct drm_atomic_state base; unsigned int cdclk; - bool dpll_set; + + /* + * Calculated device cdclk, can be different from cdclk + * only when all crtc's are DPMS off. + */ + unsigned int dev_cdclk; + + bool dpll_set, modeset; + + unsigned int active_crtcs; + unsigned int min_pixclk[I915_MAX_PIPES]; + struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; struct intel_wm_config wm_config; }; @@ -647,23 +658,17 @@ struct intel_plane { /* * NOTE: Do not place new plane state fields here (e.g., when adding * new plane properties). New runtime state should now be placed in - * the intel_plane_state structure and accessed via drm_plane->state. + * the intel_plane_state structure and accessed via plane_state. */ void (*update_plane)(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h); + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); void (*disable_plane)(struct drm_plane *plane, struct drm_crtc *crtc); int (*check_plane)(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state); - void (*commit_plane)(struct drm_plane *plane, - struct intel_plane_state *state); }; struct intel_watermark_params { @@ -817,6 +822,7 @@ struct intel_digital_port { struct intel_hdmi hdmi; enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); bool release_cl2_override; + uint8_t max_lanes; /* for communication with audio component; protected by av_mutex */ const struct drm_connector *audio_connector; }; @@ -996,7 +1002,7 @@ void intel_crt_init(struct drm_device *dev); /* intel_ddi.c */ void intel_ddi_clk_select(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); -void intel_prepare_ddi(struct drm_device *dev); +void intel_prepare_ddi_buffer(struct intel_encoder *encoder); void hsw_fdi_link_train(struct drm_crtc *crtc); void intel_ddi_init(struct drm_device *dev, enum port port); enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); @@ -1041,8 +1047,8 @@ unsigned int intel_fb_align_height(struct drm_device *dev, uint64_t fb_format_modifier); void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire, enum fb_op_origin origin); -u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, - uint32_t pixel_format); +u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, uint32_t pixel_format); /* intel_audio.c */ void intel_init_audio(struct drm_device *dev); @@ -1126,9 +1132,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane, int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state); -unsigned int -intel_tile_height(struct drm_device *dev, uint32_t pixel_format, - uint64_t fb_format_modifier, unsigned int plane); +unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, + uint64_t 
fb_modifier, unsigned int cpp); static inline bool intel_rotation_90_or_270(unsigned int rotation) @@ -1149,8 +1154,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, struct intel_crtc_state *state); -void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, - const struct dpll *dpll); +int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, + const struct dpll *dpll); void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe); /* modesetting asserts */ @@ -1167,11 +1172,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) -unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - unsigned int tiling_mode, - unsigned int bpp, - unsigned int pitch); +unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv, + int *x, int *y, + uint64_t fb_modifier, + unsigned int cpp, + unsigned int pitch); void intel_prepare_reset(struct drm_device *dev); void intel_finish_reset(struct drm_device *dev); void hsw_enable_pc8(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 44742fa2f616..91cef3525c93 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -702,7 +702,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - u32 pclk = 0; + u32 pclk; DRM_DEBUG_KMS("\n"); pipe_config->has_dsi_encoder = true; @@ -713,12 +713,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, */ pipe_config->dpll_hw_state.dpll_md = 0; - if (IS_BROXTON(encoder->base.dev)) - pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp); - else if (IS_VALLEYVIEW(encoder->base.dev) || - IS_CHERRYVIEW(encoder->base.dev)) - pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); - + pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp); if (!pclk) return; diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 02551ff228c2..de7be7f3fb42 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -126,8 +126,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) extern void intel_enable_dsi_pll(struct intel_encoder *encoder); extern void intel_disable_dsi_pll(struct intel_encoder *encoder); -extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); -extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); +extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp); extern void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index a5e99ac305da..1d43e6f37fc1 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -229,14 +229,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) return data; } +static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data) +{ + return data + *(data + 6) + 7; +} + typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, const u8 *data); static 
const fn_mipi_elem_exec exec_elem[] = { - NULL, /* reserved */ - mipi_exec_send_packet, - mipi_exec_delay, - mipi_exec_gpio, - NULL, /* status read; later */ + [MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet, + [MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay, + [MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio, + [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip, }; /* @@ -246,107 +250,114 @@ static const fn_mipi_elem_exec exec_elem[] = { */ static const char * const seq_name[] = { - "UNDEFINED", - "MIPI_SEQ_ASSERT_RESET", - "MIPI_SEQ_INIT_OTP", - "MIPI_SEQ_DISPLAY_ON", - "MIPI_SEQ_DISPLAY_OFF", - "MIPI_SEQ_DEASSERT_RESET" + [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET", + [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP", + [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON", + [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF", + [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET", + [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON", + [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF", + [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON", + [MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF", + [MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON", + [MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF", }; -static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data) +static const char *sequence_name(enum mipi_seq seq_id) { + if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id]) + return seq_name[seq_id]; + else + return "(unknown)"; +} + +static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id) +{ + struct vbt_panel *vbt_panel = to_vbt_panel(panel); + struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + const u8 *data; fn_mipi_elem_exec mipi_elem_exec; - int index; - if (!data) + if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence))) + return; + + data = dev_priv->vbt.dsi.sequence[seq_id]; + if (!data) { + DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n", + seq_id, sequence_name(seq_id)); return; + } - DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]); + WARN_ON(*data != seq_id); - /* go to the first element of the sequence */ - data++; + DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n", + seq_id, sequence_name(seq_id)); - /* parse each byte till we reach end of sequence byte - 0x00 */ - while (1) { - index = *data; - mipi_elem_exec = exec_elem[index]; - if (!mipi_elem_exec) { - DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n"); - return; - } + /* Skip Sequence Byte. */ + data++; - /* goto element payload */ - data++; + /* Skip Size of Sequence. */ + if (dev_priv->vbt.dsi.seq_version >= 3) + data += 4; - /* execute the element specific rotines */ - data = mipi_elem_exec(intel_dsi, data); + while (1) { + u8 operation_byte = *data++; + u8 operation_size = 0; - /* - * After processing the element, data should point to - * next element or end of sequence - * check if have we reached end of sequence - */ - if (*data == 0x00) + if (operation_byte == MIPI_SEQ_ELEM_END) break; + + if (operation_byte < ARRAY_SIZE(exec_elem)) + mipi_elem_exec = exec_elem[operation_byte]; + else + mipi_elem_exec = NULL; + + /* Size of Operation. */ + if (dev_priv->vbt.dsi.seq_version >= 3) + operation_size = *data++; + + if (mipi_elem_exec) { + data = mipi_elem_exec(intel_dsi, data); + } else if (operation_size) { + /* We have size, skip. */ + DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n", + operation_byte); + data += operation_size; + } else { + /* No size, can't skip without parsing. 
*/ + DRM_ERROR("Unsupported MIPI operation byte %u\n", + operation_byte); + return; + } } } static int vbt_panel_prepare(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET]; - generic_exec_sequence(intel_dsi, sequence); - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET); + generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP); return 0; } static int vbt_panel_unprepare(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET); return 0; } static int vbt_panel_enable(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON); return 0; } static int vbt_panel_disable(struct drm_panel *panel) { - struct vbt_panel *vbt_panel = to_vbt_panel(panel); - struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const u8 *sequence; - - sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF]; - generic_exec_sequence(intel_dsi, sequence); + generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF); return 0; } @@ -666,6 +677,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) /* This is cheating a bit with the cleanup. 
*/ vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL); + if (!vbt_panel) + return NULL; vbt_panel->intel_dsi = intel_dsi; drm_panel_init(&vbt_panel->panel); diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index fbd2b51810ca..bb5e95a1a453 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -30,14 +30,6 @@ #include "i915_drv.h" #include "intel_dsi.h" -#define DSI_HSS_PACKET_SIZE 4 -#define DSI_HSE_PACKET_SIZE 4 -#define DSI_HSA_PACKET_EXTRA_SIZE 6 -#define DSI_HBP_PACKET_EXTRA_SIZE 6 -#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6 -#define DSI_HFP_PACKET_EXTRA_SIZE 6 -#define DSI_EOTP_PACKET_SIZE 4 - static int dsi_pixel_format_bpp(int pixel_format) { int bpp; @@ -71,77 +63,6 @@ static const u32 lfsr_converts[] = { 71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */ }; -#ifdef DSI_CLK_FROM_RR - -static u32 dsi_rr_formula(const struct drm_display_mode *mode, - int pixel_format, int video_mode_format, - int lane_count, bool eotp) -{ - u32 bpp; - u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp; - u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes; - u32 bytes_per_line, bytes_per_frame; - u32 num_frames; - u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes; - u32 dsi_bit_clock_hz; - u32 dsi_clk; - - bpp = dsi_pixel_format_bpp(pixel_format); - - hactive = mode->hdisplay; - vactive = mode->vdisplay; - hfp = mode->hsync_start - mode->hdisplay; - hsync = mode->hsync_end - mode->hsync_start; - hbp = mode->htotal - mode->hsync_end; - - vfp = mode->vsync_start - mode->vdisplay; - vsync = mode->vsync_end - mode->vsync_start; - vbp = mode->vtotal - mode->vsync_end; - - hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8); - hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8); - hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8); - hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8); - - bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes + - DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE + - hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE + - hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE + - hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE; - - /* - * XXX: Need to accurately calculate LP to HS transition timeout and add - * it to bytes_per_line/bytes_per_frame. 
- */ - - if (eotp && video_mode_format == VIDEO_MODE_BURST) - bytes_per_line += DSI_EOTP_PACKET_SIZE; - - bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line + - vactive * bytes_per_line + vfp * bytes_per_line; - - if (eotp && - (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE || - video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS)) - bytes_per_frame += DSI_EOTP_PACKET_SIZE; - - num_frames = drm_mode_vrefresh(mode); - bytes_per_x_frames = num_frames * bytes_per_frame; - - bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count; - - /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */ - dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8; - dsi_clk = dsi_bit_clock_hz / 1000; - - if (eotp && video_mode_format == VIDEO_MODE_BURST) - dsi_clk *= 2; - - return dsi_clk; -} - -#else - /* Get DSI clock from pixel clock */ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) { @@ -155,8 +76,6 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) return dsi_clk_khz; } -#endif - static int dsi_calc_mnp(struct drm_i915_private *dev_priv, struct dsi_mnp *dsi_mnp, int target_dsi_clk) { @@ -322,7 +241,7 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) bpp, pipe_bpp); } -u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) +static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -384,7 +303,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) return pclk; } -u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) +static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) { u32 pclk; u32 dsi_clk; @@ -419,6 +338,14 @@ u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) return pclk; } +u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp) +{ + if (IS_BROXTON(encoder->base.dev)) + return bxt_dsi_get_pclk(encoder, pipe_bpp); + else + return vlv_dsi_get_pclk(encoder, pipe_bpp); +} + static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { u32 temp; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index bea75cafc623..09840f4380f9 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, { struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper); - struct drm_framebuffer *fb = NULL; + struct drm_framebuffer *fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_fb_cmd2 mode_cmd = {}; @@ -171,8 +171,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper, out: mutex_unlock(&dev->struct_mutex); - if (!IS_ERR_OR_NULL(fb)) - drm_framebuffer_unreference(fb); return ret; } diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 822952235dcf..045b1491ff7a 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -43,6 +43,7 @@ struct i915_guc_client { uint32_t wq_offset; uint32_t wq_size; uint32_t wq_tail; + uint32_t wq_head; /* GuC submission statistics & status */ uint64_t submissions[I915_NUM_RINGS]; @@ -88,6 +89,8 @@ struct intel_guc { uint32_t log_flags; struct drm_i915_gem_object *log_obj; + struct drm_i915_gem_object *ads_obj; + struct drm_i915_gem_object 
*ctx_pool_obj; struct ida ctx_ids; @@ -122,5 +125,6 @@ int i915_guc_submit(struct i915_guc_client *client, struct drm_i915_gem_request *rq); void i915_guc_submission_disable(struct drm_device *dev); void i915_guc_submission_fini(struct drm_device *dev); +int i915_guc_wq_check_space(struct i915_guc_client *client); #endif diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 40b2ea572e16..1856a4740b83 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -39,6 +39,7 @@ #define GUC_CTX_PRIORITY_HIGH 1 #define GUC_CTX_PRIORITY_KMD_NORMAL 2 #define GUC_CTX_PRIORITY_NORMAL 3 +#define GUC_CTX_PRIORITY_NUM 4 #define GUC_MAX_GPU_CONTEXTS 1024 #define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS @@ -81,11 +82,14 @@ #define GUC_CTL_CTXINFO 0 #define GUC_CTL_CTXNUM_IN16_SHIFT 0 #define GUC_CTL_BASE_ADDR_SHIFT 12 + #define GUC_CTL_ARAT_HIGH 1 #define GUC_CTL_ARAT_LOW 2 + #define GUC_CTL_DEVICE_INFO 3 #define GUC_CTL_GTTYPE_SHIFT 0 #define GUC_CTL_COREFAMILY_SHIFT 7 + #define GUC_CTL_LOG_PARAMS 4 #define GUC_LOG_VALID (1 << 0) #define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) @@ -97,9 +101,12 @@ #define GUC_LOG_ISR_PAGES 3 #define GUC_LOG_ISR_SHIFT 9 #define GUC_LOG_BUF_ADDR_SHIFT 12 + #define GUC_CTL_PAGE_FAULT_CONTROL 5 + #define GUC_CTL_WA 6 #define GUC_CTL_WA_UK_BY_DRIVER (1 << 3) + #define GUC_CTL_FEATURE 7 #define GUC_CTL_VCS2_ENABLED (1 << 0) #define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1) @@ -109,6 +116,7 @@ #define GUC_CTL_PREEMPTION_LOG (1 << 5) #define GUC_CTL_ENABLE_SLPC (1 << 7) #define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8) + #define GUC_CTL_DEBUG 8 #define GUC_LOG_VERBOSITY_SHIFT 0 #define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) @@ -118,9 +126,19 @@ /* Verbosity range-check limits, without the shift */ #define GUC_LOG_VERBOSITY_MIN 0 #define GUC_LOG_VERBOSITY_MAX 3 +#define GUC_LOG_VERBOSITY_MASK 0x0000000f +#define GUC_LOG_DESTINATION_MASK (3 << 4) +#define GUC_LOG_DISABLED (1 << 6) +#define GUC_PROFILE_ENABLED (1 << 7) +#define GUC_WQ_TRACK_ENABLED (1 << 8) +#define GUC_ADS_ENABLED (1 << 9) +#define GUC_DEBUG_RESERVED (1 << 10) +#define GUC_ADS_ADDR_SHIFT 11 +#define GUC_ADS_ADDR_MASK 0xfffff800 + #define GUC_CTL_RSRVD 9 -#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1) +#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ /** * DOC: GuC Firmware Layout @@ -299,6 +317,99 @@ struct guc_context_desc { #define GUC_POWER_D2 3 #define GUC_POWER_D3 4 +/* Scheduling policy settings */ + +/* Reset engine upon preempt failure */ +#define POLICY_RESET_ENGINE (1<<0) +/* Preempt to idle on quantum expiry */ +#define POLICY_PREEMPT_TO_IDLE (1<<1) + +#define POLICY_MAX_NUM_WI 15 + +struct guc_policy { + /* Time for one workload to execute. (in micro seconds) */ + u32 execution_quantum; + u32 reserved1; + + /* Time to wait for a preemption request to completed before issuing a + * reset. (in micro seconds). */ + u32 preemption_time; + + /* How much time to allow to run after the first fault is observed. + * Then preempt afterwards. (in micro seconds) */ + u32 fault_time; + + u32 policy_flags; + u32 reserved[2]; +} __packed; + +struct guc_policies { + struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS]; + + /* In micro seconds. How much time to allow before DPC processing is + * called back via interrupt (to prevent DPC queue drain starving). + * Typically 1000s of micro seconds (example only, not granularity). */ + u32 dpc_promote_time; + + /* Must be set to take these new values. 
*/ + u32 is_valid; + + /* Max number of WIs to process per call. A large value may keep CS + * idle. */ + u32 max_num_work_items; + + u32 reserved[19]; +} __packed; + +/* GuC MMIO reg state struct */ + +#define GUC_REGSET_FLAGS_NONE 0x0 +#define GUC_REGSET_POWERCYCLE 0x1 +#define GUC_REGSET_MASKED 0x2 +#define GUC_REGSET_ENGINERESET 0x4 +#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8 +#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10 + +#define GUC_REGSET_MAX_REGISTERS 25 +#define GUC_MMIO_WHITE_LIST_START 0x24d0 +#define GUC_MMIO_WHITE_LIST_MAX 12 +#define GUC_S3_SAVE_SPACE_PAGES 10 + +struct guc_mmio_regset { + struct __packed { + u32 offset; + u32 value; + u32 flags; + } registers[GUC_REGSET_MAX_REGISTERS]; + + u32 values_valid; + u32 number_of_registers; +} __packed; + +struct guc_mmio_reg_state { + struct guc_mmio_regset global_reg; + struct guc_mmio_regset engine_reg[I915_NUM_RINGS]; + + /* MMIO registers that are set as non privileged */ + struct __packed { + u32 mmio_start; + u32 offsets[GUC_MMIO_WHITE_LIST_MAX]; + u32 count; + } mmio_white_list[I915_NUM_RINGS]; +} __packed; + +/* GuC Additional Data Struct */ + +struct guc_ads { + u32 reg_state_addr; + u32 reg_state_buffer; + u32 golden_context_lrca; + u32 scheduler_policies; + u32 reserved0[3]; + u32 eng_state_size[I915_NUM_RINGS]; + u32 reserved2[4]; +} __packed; + /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */ enum host2guc_action { HOST2GUC_ACTION_DEFAULT = 0x0, diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 550921f2ef7d..3accd914490f 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -165,6 +165,13 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv) i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } + if (guc->ads_obj) { + u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj) + >> PAGE_SHIFT; + params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; + params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; + } + /* If GuC submission is enabled, set up additional parameters here */ if (i915.enable_guc_submission) { u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj); @@ -438,6 +445,7 @@ fail: direct_interrupts_to_host(dev_priv); i915_guc_submission_disable(dev); + i915_guc_submission_fini(dev); return err; } @@ -554,10 +562,12 @@ fail: DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n", guc_fw->guc_fw_path, err); + mutex_lock(&dev->struct_mutex); obj = guc_fw->guc_fw_obj; if (obj) drm_gem_object_unreference(&obj->base); guc_fw->guc_fw_obj = NULL; + mutex_unlock(&dev->struct_mutex); release_firmware(fw); /* OK even if fw is NULL */ guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; @@ -624,10 +634,11 @@ void intel_guc_ucode_fini(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; + mutex_lock(&dev->struct_mutex); direct_interrupts_to_host(dev_priv); + i915_guc_submission_disable(dev); i915_guc_submission_fini(dev); - mutex_lock(&dev->struct_mutex); if (guc_fw->guc_fw_obj) drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); guc_fw->guc_fw_obj = NULL; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 62141751c2f0..8698a643d027 100644..100755 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1392,7 +1392,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - for (try = 0; 
!live_status && try < 4; try++) { + for (try = 0; !live_status && try < 9; try++) { if (try) msleep(10); live_status = intel_digital_port_connected(dev_priv, @@ -2033,6 +2033,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_dig_port->port; uint8_t alternate_ddc_pin; + if (WARN(intel_dig_port->max_lanes < 4, + "Not enough lanes (%d) for HDMI on port %c\n", + intel_dig_port->max_lanes, port_name(port))) + return; + drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); @@ -2218,6 +2223,7 @@ void intel_hdmi_init(struct drm_device *dev, dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->hdmi.hdmi_reg = hdmi_reg; intel_dig_port->dp.output_reg = INVALID_MMIO_REG; + intel_dig_port->max_lanes = 4; intel_hdmi_init_connector(intel_dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 3aa614731d7e..73d4347429df 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -263,65 +263,92 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists return 0; } +static void +logical_ring_init_platform_invariants(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + + ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && + (ring->id == VCS || ring->id == VCS2); + + ring->ctx_desc_template = GEN8_CTX_VALID; + ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << + GEN8_CTX_ADDRESSING_MODE_SHIFT; + if (IS_GEN8(dev)) + ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; + ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE; + + /* TODO: WaDisableLiteRestore when we start using semaphore + * signalling between Command Streamers */ + /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */ + + /* WaEnableForceRestoreInCtxtDescForVCS:skl */ + /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ + if (ring->disable_lite_restore_wa) + ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; +} + /** - * intel_execlists_ctx_id() - get the Execlists Context ID - * @ctx_obj: Logical Ring Context backing object. + * intel_lr_context_descriptor_update() - calculate & cache the descriptor + * descriptor for a pinned context * - * Do not confuse with ctx->id! Unfortunately we have a name overload - * here: the old context ID we pass to userspace as a handler so that - * they can refer to a context, and the new context ID we pass to the - * ELSP so that the GPU can inform us of the context status via - * interrupts. + * @ctx: Context to work on + * @ring: Engine the descriptor will be used with * - * Return: 20-bits globally unique context ID. + * The context descriptor encodes various attributes of a context, + * including its GTT address and some flags. Because it's fairly + * expensive to calculate, we'll just do it once and cache the result, + * which remains valid until the context is unpinned. + * + * This is what a descriptor looks like, from LSB to MSB: + * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) + * bits 12-31: LRCA, GTT address of (the HWSP of) this context + * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 
+ * bits 52-63: reserved, may encode the engine ID (for GuC) */ -u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj) +static void +intel_lr_context_descriptor_update(struct intel_context *ctx, + struct intel_engine_cs *ring) { - u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) + - LRC_PPHWSP_PN * PAGE_SIZE; + uint64_t lrca, desc; - /* LRCA is required to be 4K aligned so the more significant 20 bits - * are globally unique */ - return lrca >> 12; -} + lrca = ctx->engine[ring->id].lrc_vma->node.start + + LRC_PPHWSP_PN * PAGE_SIZE; -static bool disable_lite_restore_wa(struct intel_engine_cs *ring) -{ - struct drm_device *dev = ring->dev; + desc = ring->ctx_desc_template; /* bits 0-11 */ + desc |= lrca; /* bits 12-31 */ + desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ - return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && - (ring->id == VCS || ring->id == VCS2); + ctx->engine[ring->id].lrc_desc = desc; } uint64_t intel_lr_context_descriptor(struct intel_context *ctx, struct intel_engine_cs *ring) { - struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; - uint64_t desc; - uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) + - LRC_PPHWSP_PN * PAGE_SIZE; - - WARN_ON(lrca & 0xFFFFFFFF00000FFFULL); - - desc = GEN8_CTX_VALID; - desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (IS_GEN8(ctx_obj->base.dev)) - desc |= GEN8_CTX_L3LLC_COHERENT; - desc |= GEN8_CTX_PRIVILEGE; - desc |= lrca; - desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT; - - /* TODO: WaDisableLiteRestore when we start using semaphore - * signalling between Command Streamers */ - /* desc |= GEN8_CTX_FORCE_RESTORE; */ - - /* WaEnableForceRestoreInCtxtDescForVCS:skl */ - /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ - if (disable_lite_restore_wa(ring)) - desc |= GEN8_CTX_FORCE_RESTORE; + return ctx->engine[ring->id].lrc_desc; +} - return desc; +/** + * intel_execlists_ctx_id() - get the Execlists Context ID + * @ctx: Context to get the ID for + * @ring: Engine to get the ID for + * + * Do not confuse with ctx->id! Unfortunately we have a name overload + * here: the old context ID we pass to userspace as a handler so that + * they can refer to a context, and the new context ID we pass to the + * ELSP so that the GPU can inform us of the context status via + * interrupts. + * + * The context ID is a portion of the context descriptor, so we can + * just extract the required part from the cached descriptor. + * + * Return: 20-bits globally unique context ID. 
+ */ +u32 intel_execlists_ctx_id(struct intel_context *ctx, + struct intel_engine_cs *ring) +{ + return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT; } static void execlists_elsp_write(struct drm_i915_gem_request *rq0, @@ -363,20 +390,10 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) { struct intel_engine_cs *ring = rq->ring; struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; - struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; - struct page *page; - uint32_t *reg_state; - - BUG_ON(!ctx_obj); - WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); - WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); - - page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); - reg_state = kmap_atomic(page); + uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state; reg_state[CTX_RING_TAIL+1] = rq->tail; - reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); + reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start; if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { /* True 32b PPGTT with dynamic page allocation: update PDP @@ -390,8 +407,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) ASSIGN_CTX_PDP(ppgtt, reg_state, 0); } - kunmap_atomic(reg_state); - return 0; } @@ -431,9 +446,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) /* Same ctx: ignore first request, as second request * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; - list_del(&req0->execlist_link); - list_add_tail(&req0->execlist_link, - &ring->execlist_retired_req_list); + list_move_tail(&req0->execlist_link, + &ring->execlist_retired_req_list); req0 = cursor; } else { req1 = cursor; @@ -478,16 +492,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring, execlist_link); if (head_req != NULL) { - struct drm_i915_gem_object *ctx_obj = - head_req->ctx->engine[ring->id].state; - if (intel_execlists_ctx_id(ctx_obj) == request_id) { + if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) { WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); if (--head_req->elsp_submitted <= 0) { - list_del(&head_req->execlist_link); - list_add_tail(&head_req->execlist_link, - &ring->execlist_retired_req_list); + list_move_tail(&head_req->execlist_link, + &ring->execlist_retired_req_list); return true; } } @@ -496,6 +507,19 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring, return false; } +static void get_context_status(struct intel_engine_cs *ring, + u8 read_pointer, + u32 *status, u32 *context_id) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES)) + return; + + *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer)); + *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer)); +} + /** * intel_lrc_irq_handler() - handle Context Switch interrupts * @ring: Engine Command Streamer to handle. 
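The context-descriptor layout spelled out in the comment above (flags in bits 0-11, the 4K-aligned LRCA in bits 12-31, the same LRCA reused as a 20-bit context ID from bit 32) can be exercised outside the driver. The snippet below is a minimal stand-alone sketch under those stated assumptions only; the sketch_* names and the shift value of 32 mirror the comment and are not the driver's GEN8_CTX_* definitions.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Stand-in constants; the real driver derives these from its GEN8_CTX_* macros. */
#define SKETCH_CTX_FLAGS_MASK  0xfffULL   /* bits 0-11: flags                */
#define SKETCH_PAGE_SHIFT      12         /* LRCA is 4K aligned              */
#define SKETCH_CTX_ID_SHIFT    32         /* bits 32-51: 20-bit context ID   */

/* Build the descriptor once, the way intel_lr_context_descriptor_update()
 * caches it for a pinned context. */
static uint64_t sketch_make_descriptor(uint64_t lrca, uint64_t flags)
{
	uint64_t desc = flags & SKETCH_CTX_FLAGS_MASK;               /* bits 0-11  */
	desc |= lrca;                                                /* bits 12-31 */
	desc |= (lrca >> SKETCH_PAGE_SHIFT) << SKETCH_CTX_ID_SHIFT;  /* bits 32-51 */
	return desc;
}

/* Extract the 20-bit context ID, as intel_execlists_ctx_id() does above. */
static uint32_t sketch_ctx_id(uint64_t desc)
{
	return (uint32_t)(desc >> SKETCH_CTX_ID_SHIFT);
}

int main(void)
{
	uint64_t desc = sketch_make_descriptor(0x12345000ULL, 0x3ULL);

	printf("desc = %#" PRIx64 ", ctx id = %#x\n", desc, sketch_ctx_id(desc));
	return 0;
}

Because the full descriptor is cached at pin time, extracting the ID is a single shift of the cached value, which is why the patch drops the per-call recomputation that the old intel_execlists_ctx_id() performed.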
@@ -516,16 +540,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & GEN8_CSB_PTR_MASK; + write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); if (read_pointer > write_pointer) write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { - read_pointer++; - status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES)); - status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES)); + + get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES, + &status, &status_id); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -538,14 +562,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) WARN(1, "Preemption without Lite Restore\n"); } - if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) || - (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) { + if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) || + (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) { if (execlists_check_remove_request(ring, status_id)) submit_contexts++; } } - if (disable_lite_restore_wa(ring)) { + if (ring->disable_lite_restore_wa) { /* Prevent a ctx to preempt itself */ if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) && (submit_contexts != 0)) @@ -556,13 +580,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); - WARN(submit_contexts > 2, "More than two context complete events?\n"); + if (unlikely(submit_contexts > 2)) + DRM_ERROR("More than two context complete events?\n"); + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; + /* Update the read pointer to the old write pointer. Manual ringbuffer + * management ftw </sarcasm> */ I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, - ((u32)ring->next_context_status_buffer & - GEN8_CSB_PTR_MASK) << 8)); + _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, + ring->next_context_status_buffer << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -571,7 +598,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) struct drm_i915_gem_request *cursor; int num_elements = 0; - if (request->ctx != ring->default_context) + if (request->ctx != request->i915->kernel_context) intel_lr_context_pin(request); i915_gem_request_reference(request); @@ -592,9 +619,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) if (request->ctx == tail_req->ctx) { WARN(tail_req->elsp_submitted != 0, "More than 2 already-submitted reqs queued\n"); - list_del(&tail_req->execlist_link); - list_add_tail(&tail_req->execlist_link, - &ring->execlist_retired_req_list); + list_move_tail(&tail_req->execlist_link, + &ring->execlist_retired_req_list); } } @@ -660,17 +686,27 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req, int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - int ret; + int ret = 0; request->ringbuf = request->ctx->engine[request->ring->id].ringbuf; - if (request->ctx != request->ring->default_context) { - ret = intel_lr_context_pin(request); + if (i915.enable_guc_submission) { + /* + * Check that the GuC has space for the request before + * going any further, as the i915_add_request() call + * later on mustn't fail ... 
+ */ + struct intel_guc *guc = &request->i915->guc; + + ret = i915_guc_wq_check_space(guc->execbuf_client); if (ret) return ret; } - return 0; + if (request->ctx != request->i915->kernel_context) + ret = intel_lr_context_pin(request); + + return ret; } static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, @@ -724,23 +760,34 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, * on a queue waiting for the ELSP to be ready to accept a new context submission. At that * point, the tail *inside* the context is updated and the ELSP written to. */ -static void +static int intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) { - struct intel_engine_cs *ring = request->ring; + struct intel_ringbuffer *ringbuf = request->ringbuf; struct drm_i915_private *dev_priv = request->i915; - intel_logical_ring_advance(request->ringbuf); + intel_logical_ring_advance(ringbuf); + request->tail = ringbuf->tail; - request->tail = request->ringbuf->tail; + /* + * Here we add two extra NOOPs as padding to avoid + * lite restore of a context with HEAD==TAIL. + * + * Caller must reserve WA_TAIL_DWORDS for us! + */ + intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_logical_ring_advance(ringbuf); - if (intel_ring_stopped(ring)) - return; + if (intel_ring_stopped(request->ring)) + return 0; if (dev_priv->guc.execbuf_client) i915_guc_submit(dev_priv->guc.execbuf_client, request); else execlists_context_queue(request); + + return 0; } static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) @@ -967,7 +1014,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring) struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; - if (ctx_obj && (ctx != ring->default_context)) + if (ctx_obj && (ctx != req->i915->kernel_context)) intel_lr_context_unpin(req); list_del(&req->execlist_link); i915_gem_request_unreference(req); @@ -1013,23 +1060,35 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) } static int intel_lr_context_do_pin(struct intel_engine_cs *ring, - struct drm_i915_gem_object *ctx_obj, - struct intel_ringbuffer *ringbuf) + struct intel_context *ctx) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; + struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; + struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; + struct page *lrc_state_page; + int ret; WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret) return ret; + lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); + if (WARN_ON(!lrc_state_page)) { + ret = -ENODEV; + goto unpin_ctx_obj; + } + ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); if (ret) goto unpin_ctx_obj; + ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); + intel_lr_context_descriptor_update(ctx, ring); + ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page); ctx_obj->dirty = true; /* Invalidate GuC TLB. 
*/ @@ -1048,11 +1107,9 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq) { int ret = 0; struct intel_engine_cs *ring = rq->ring; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; - struct intel_ringbuffer *ringbuf = rq->ringbuf; if (rq->ctx->engine[ring->id].pin_count++ == 0) { - ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf); + ret = intel_lr_context_do_pin(ring, rq->ctx); if (ret) goto reset_pin_count; } @@ -1069,12 +1126,18 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq) struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; struct intel_ringbuffer *ringbuf = rq->ringbuf; - if (ctx_obj) { - WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); - if (--rq->ctx->engine[ring->id].pin_count == 0) { - intel_unpin_ringbuffer_obj(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - } + WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + + if (!ctx_obj) + return; + + if (--rq->ctx->engine[ring->id].pin_count == 0) { + kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state)); + intel_unpin_ringbuffer_obj(ringbuf); + i915_gem_object_ggtt_unpin(ctx_obj); + rq->ctx->engine[ring->id].lrc_vma = NULL; + rq->ctx->engine[ring->id].lrc_desc = 0; + rq->ctx->engine[ring->id].lrc_reg_state = NULL; } } @@ -1087,7 +1150,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) struct drm_i915_private *dev_priv = dev->dev_private; struct i915_workarounds *w = &dev_priv->workarounds; - if (WARN_ON_ONCE(w->count == 0)) + if (w->count == 0) return 0; ring->gpu_caches_dirty = true; @@ -1474,7 +1537,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) u8 next_context_status_buffer_hw; lrc_setup_hardware_status_page(ring, - ring->default_context->engine[ring->id].state); + dev_priv->kernel_context->engine[ring->id].state); I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1493,9 +1556,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | * BDW | CSB regs not reset | CSB regs reset | * CHT | CSB regs not reset | CSB regs not reset | + * SKL | ? | ? | + * BXT | ? | ? | */ - next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) - & GEN8_CSB_PTR_MASK); + next_context_status_buffer_hw = + GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring))); /* * When the CSB registers are reset (also after power-up / gpu reset), @@ -1698,7 +1763,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *ring = ringbuf->ring; u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; - bool vf_flush_wa; + bool vf_flush_wa = false; u32 flags = 0; int ret; @@ -1707,6 +1772,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } @@ -1719,14 +1785,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; - } - /* - * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe - * control. 
- */ - vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 && - flags & PIPE_CONTROL_VF_CACHE_INVALIDATE; + /* + * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL + * pipe control. + */ + if (IS_GEN9(ring->dev)) + vf_flush_wa = true; + } ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); if (ret) @@ -1790,44 +1856,65 @@ static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno) intel_flush_status_page(ring, I915_GEM_HWS_INDEX); } +/* + * Reserve space for 2 NOOPs at the end of each request to be + * used as a workaround for not being allowed to do lite + * restore with HEAD==TAIL (WaIdleLiteRestore). + */ +#define WA_TAIL_DWORDS 2 + +static inline u32 hws_seqno_address(struct intel_engine_cs *engine) +{ + return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; +} + static int gen8_emit_request(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; - u32 cmd; int ret; - /* - * Reserve space for 2 NOOPs at the end of each request to be - * used as a workaround for not being allowed to do lite - * restore with HEAD==TAIL (WaIdleLiteRestore). - */ - ret = intel_logical_ring_begin(request, 8); + ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); if (ret) return ret; - cmd = MI_STORE_DWORD_IMM_GEN4; - cmd |= MI_GLOBAL_GTT; + /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ + BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); - intel_logical_ring_emit(ringbuf, cmd); intel_logical_ring_emit(ringbuf, - (ring->status_page.gfx_addr + - (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); + (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); + intel_logical_ring_emit(ringbuf, + hws_seqno_address(request->ring) | + MI_FLUSH_DW_USE_GTT); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance_and_submit(request); + return intel_logical_ring_advance_and_submit(request); +} - /* - * Here we add two extra NOOPs as padding to avoid - * lite restore of a context with HEAD==TAIL. - */ - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); +static int gen8_emit_request_render(struct drm_i915_gem_request *request) +{ + struct intel_ringbuffer *ringbuf = request->ringbuf; + int ret; - return 0; + ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); + if (ret) + return ret; + + /* w/a for post sync ops following a GPGPU operation we + * need a prior CS_STALL, which is emitted by the flush + * following the batch. 
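The WA_TAIL_DWORDS contract above splits responsibility between two sides: the emitter reserves its own dwords plus the two padding dwords, and the shared advance-and-submit helper fills exactly that padding with NOOPs so a context is never submitted with HEAD == TAIL (WaIdleLiteRestore). Below is a toy ring model of that contract; every name in it is invented.

    #include <assert.h>
    #include <stdio.h>

    #define WA_TAIL_DWORDS 2        /* padding consumed by the submit helper */
    #define TOY_MI_NOOP    0x0u

    struct toy_ring {
        unsigned int dw[64];
        unsigned int tail;          /* in dwords */
        unsigned int reserved;      /* dwords granted by ring_begin() */
    };

    /* The emitter asks for its payload *plus* WA_TAIL_DWORDS up front. */
    static int ring_begin(struct toy_ring *r, unsigned int n)
    {
        r->reserved = n;            /* the real helper may wait or fail here */
        return 0;
    }

    static void ring_emit(struct toy_ring *r, unsigned int v)
    {
        assert(r->reserved > 0);    /* never emit more than was reserved */
        r->dw[r->tail++] = v;
        r->reserved--;
    }

    /* Shared tail path: pads with the two NOOPs the caller reserved. */
    static void advance_and_submit(struct toy_ring *r)
    {
        ring_emit(r, TOY_MI_NOOP);
        ring_emit(r, TOY_MI_NOOP);
        printf("submit with tail=%u\n", r->tail);
    }

    int main(void)
    {
        struct toy_ring r = { .tail = 0 };

        ring_begin(&r, 4 + WA_TAIL_DWORDS);   /* 4 payload dwords + padding */
        ring_emit(&r, 0x1);
        ring_emit(&r, 0x2);
        ring_emit(&r, 0x3);
        ring_emit(&r, 0x4);
        advance_and_submit(&r);
        return 0;
    }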
+ */ + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); + intel_logical_ring_emit(ringbuf, + (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); + intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); + return intel_logical_ring_advance_and_submit(request); } static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req) @@ -1910,12 +1997,44 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) ring->status_page.obj = NULL; } + ring->disable_lite_restore_wa = false; + ring->ctx_desc_template = 0; + lrc_destroy_wa_ctx_obj(ring); ring->dev = NULL; } -static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) +static void +logical_ring_default_vfuncs(struct drm_device *dev, + struct intel_engine_cs *ring) { + /* Default vfuncs which can be overriden by each engine. */ + ring->init_hw = gen8_init_common_ring; + ring->emit_request = gen8_emit_request; + ring->emit_flush = gen8_emit_flush; + ring->irq_get = gen8_logical_ring_get_irq; + ring->irq_put = gen8_logical_ring_put_irq; + ring->emit_bb_start = gen8_emit_bb_start; + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + ring->get_seqno = bxt_a_get_seqno; + ring->set_seqno = bxt_a_set_seqno; + } else { + ring->get_seqno = gen8_get_seqno; + ring->set_seqno = gen8_set_seqno; + } +} + +static inline void +logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift) +{ + ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; + ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; +} + +static int +logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) +{ + struct intel_context *dctx = to_i915(dev)->kernel_context; int ret; /* Intentionally left blank. */ @@ -1932,19 +2051,18 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin INIT_LIST_HEAD(&ring->execlist_retired_req_list); spin_lock_init(&ring->execlist_lock); + logical_ring_init_platform_invariants(ring); + ret = i915_cmd_parser_init_ring(ring); if (ret) goto error; - ret = intel_lr_context_deferred_alloc(ring->default_context, ring); + ret = intel_lr_context_deferred_alloc(dctx, ring); if (ret) goto error; /* As this is the default context, always pin it */ - ret = intel_lr_context_do_pin( - ring, - ring->default_context->engine[ring->id].state, - ring->default_context->engine[ring->id].ringbuf); + ret = intel_lr_context_do_pin(ring, dctx); if (ret) { DRM_ERROR( "Failed to pin and map ringbuffer %s: %d\n", @@ -1967,32 +2085,24 @@ static int logical_render_ring_init(struct drm_device *dev) ring->name = "render ring"; ring->id = RCS; + ring->exec_id = I915_EXEC_RENDER; ring->mmio_base = RENDER_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT; + + logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT); if (HAS_L3_DPF(dev)) ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + logical_ring_default_vfuncs(dev, ring); + + /* Override some for render ring. 
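The refactor above installs one table of default virtual functions per engine and then lets the render engine override only the entries it needs; the same pattern in miniature, with invented names:

    #include <stdio.h>

    struct toy_engine {
        const char *name;
        void (*emit_request)(struct toy_engine *e);
        void (*emit_flush)(struct toy_engine *e);
    };

    static void default_emit_request(struct toy_engine *e)
    {
        printf("%s: default emit_request\n", e->name);
    }

    static void default_emit_flush(struct toy_engine *e)
    {
        printf("%s: default emit_flush\n", e->name);
    }

    static void render_emit_flush(struct toy_engine *e)
    {
        printf("%s: render-specific emit_flush\n", e->name);
    }

    /* Fill in the defaults first ... */
    static void engine_default_vfuncs(struct toy_engine *e)
    {
        e->emit_request = default_emit_request;
        e->emit_flush = default_emit_flush;
    }

    int main(void)
    {
        struct toy_engine rcs = { .name = "render" };
        struct toy_engine vcs = { .name = "bsd" };

        engine_default_vfuncs(&rcs);
        engine_default_vfuncs(&vcs);

        /* ... then override only what this engine does differently. */
        rcs.emit_flush = render_emit_flush;

        rcs.emit_request(&rcs);
        rcs.emit_flush(&rcs);
        vcs.emit_flush(&vcs);
        return 0;
    }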
*/ if (INTEL_INFO(dev)->gen >= 9) ring->init_hw = gen9_init_render_ring; else ring->init_hw = gen8_init_render_ring; ring->init_context = gen8_init_rcs_context; ring->cleanup = intel_fini_pipe_control; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; ring->emit_flush = gen8_emit_flush_render; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + ring->emit_request = gen8_emit_request_render; ring->dev = dev; @@ -2026,25 +2136,11 @@ static int logical_bsd_ring_init(struct drm_device *dev) ring->name = "bsd ring"; ring->id = VCS; + ring->exec_id = I915_EXEC_BSD; ring->mmio_base = GEN6_BSD_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2054,22 +2150,13 @@ static int logical_bsd2_ring_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; - ring->name = "bds2 ring"; + ring->name = "bsd2 ring"; ring->id = VCS2; + ring->exec_id = I915_EXEC_BSD; ring->mmio_base = GEN8_BSD2_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2081,25 +2168,11 @@ static int logical_blt_ring_init(struct drm_device *dev) ring->name = "blitter ring"; ring->id = BCS; + ring->exec_id = I915_EXEC_BLT; ring->mmio_base = BLT_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2111,25 +2184,11 
@@ static int logical_vebox_ring_init(struct drm_device *dev) ring->name = "video enhancement ring"; ring->id = VECS; + ring->exec_id = I915_EXEC_VEBOX; ring->mmio_base = VEBOX_RING_BASE; - ring->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - ring->irq_keep_mask = - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - ring->init_hw = gen8_init_common_ring; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; - } - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, ring); return logical_ring_init(dev, ring); } @@ -2367,26 +2426,39 @@ void intel_lr_context_free(struct intel_context *ctx) { int i; - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = I915_NUM_RINGS; --i >= 0; ) { + struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; - if (ctx_obj) { - struct intel_ringbuffer *ringbuf = - ctx->engine[i].ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; + if (!ctx_obj) + continue; - if (ctx == ring->default_context) { - intel_unpin_ringbuffer_obj(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - } - WARN_ON(ctx->engine[ring->id].pin_count); - intel_ringbuffer_free(ringbuf); - drm_gem_object_unreference(&ctx_obj->base); + if (ctx == ctx->i915->kernel_context) { + intel_unpin_ringbuffer_obj(ringbuf); + i915_gem_object_ggtt_unpin(ctx_obj); } + + WARN_ON(ctx->engine[i].pin_count); + intel_ringbuffer_free(ringbuf); + drm_gem_object_unreference(&ctx_obj->base); } } -static uint32_t get_lr_context_size(struct intel_engine_cs *ring) +/** + * intel_lr_context_size() - return the size of the context for an engine + * @ring: which engine to find the context size for + * + * Each engine may require a different amount of space for a context image, + * so when allocating (or copying) an image, this function can be used to + * find the right size for the specific engine. + * + * Return: size (in bytes) of an engine-specific context image + * + * Note: this size includes the HWSP, which is part of the context image + * in LRC mode, but does not include the "shared data page" used with + * GuC submission. The caller should account for this if using the GuC. 
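The helper documented above deliberately excludes the GuC shared page, so a caller sizing an allocation rounds the per-engine value up to a page and adds the extra page itself when GuC submission is in use. A standalone sketch of that arithmetic follows; the size and names are invented, and the round-up macro is reproduced only so the sketch builds on its own.

    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096u

    /* Portable stand-in for the kernel's round_up(). */
    #define toy_round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

    /* Invented per-engine size; the real value comes from intel_lr_context_size(). */
    static unsigned int toy_lr_context_size(void)
    {
        return 22 * TOY_PAGE_SIZE + 100;    /* deliberately not page aligned */
    }

    int main(void)
    {
        unsigned int size = toy_round_up(toy_lr_context_size(), TOY_PAGE_SIZE);

        /* One extra page shared between the driver and the GuC firmware. */
        size += TOY_PAGE_SIZE;

        printf("allocate %u bytes for the context image\n", size);
        return 0;
    }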
+ */ +uint32_t intel_lr_context_size(struct intel_engine_cs *ring) { int ret = 0; @@ -2443,7 +2515,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, */ int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_gem_object *ctx_obj; @@ -2454,7 +2526,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); WARN_ON(ctx->engine[ring->id].state); - context_size = round_up(get_lr_context_size(ring), 4096); + context_size = round_up(intel_lr_context_size(ring), 4096); /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; @@ -2480,14 +2552,13 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, ctx->engine[ring->id].ringbuf = ringbuf; ctx->engine[ring->id].state = ctx_obj; - if (ctx != ring->default_context && ring->init_context) { + if (ctx != ctx->i915->kernel_context && ring->init_context) { struct drm_i915_gem_request *req; - ret = i915_gem_request_alloc(ring, - ctx, &req); - if (ret) { - DRM_ERROR("ring create req: %d\n", - ret); + req = i915_gem_request_alloc(ring, ctx); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + DRM_ERROR("ring create req: %d\n", ret); goto error_ringbuf; } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 0b821b91723a..49af638f6213 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,8 +25,6 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 -#define GEN8_CSB_ENTRIES 6 -#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230) @@ -40,6 +38,22 @@ #define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4) #define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0) +/* The docs specify that the write pointer wraps around after 5h, "After status + * is written out to the last available status QW at offset 5h, this pointer + * wraps to 0." + * + * Therefore, one must infer than even though there are 3 bits available, 6 and + * 7 appear to be * reserved. 
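The interrupt-handler hunk earlier in this patch turns the raw status-pointer register into two small indices and walks the six-entry buffer modulo GEN8_CSB_ENTRIES, adding one full lap when the write pointer has already wrapped. A self-contained model of that arithmetic, using the same mask/shift layout, is shown below; the register value is faked, and the driver actually tracks its own read pointer rather than reading it back each time.

    #include <stdio.h>

    #define CSB_ENTRIES      6
    #define CSB_PTR_MASK     0x7
    #define CSB_WRITE_PTR(v) ((v) & CSB_PTR_MASK)          /* bits 2:0 */
    #define CSB_READ_PTR(v)  (((v) >> 8) & CSB_PTR_MASK)   /* bits 10:8 */

    int main(void)
    {
        /* Fake RING_CONTEXT_STATUS_PTR value: read pointer 4, write pointer 1. */
        unsigned int status_pointer = (4u << 8) | 1u;

        unsigned int read_pointer = CSB_READ_PTR(status_pointer);
        unsigned int write_pointer = CSB_WRITE_PTR(status_pointer);

        /* The write pointer has already wrapped past the end of the buffer. */
        if (read_pointer > write_pointer)
            write_pointer += CSB_ENTRIES;

        while (read_pointer < write_pointer) {
            unsigned int entry = ++read_pointer % CSB_ENTRIES;

            printf("process CSB entry %u\n", entry);   /* prints 5, 0, 1 */
        }
        return 0;
    }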
+ */ +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x7 +#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8) +#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0) +#define GEN8_CSB_WRITE_PTR(csb_status) \ + (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0) +#define GEN8_CSB_READ_PTR(csb_status) \ + (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8) + /* Logical Rings */ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); @@ -84,6 +98,7 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) void intel_lr_context_free(struct intel_context *ctx); +uint32_t intel_lr_context_size(struct intel_engine_cs *ring); int intel_lr_context_deferred_alloc(struct intel_context *ctx, struct intel_engine_cs *ring); void intel_lr_context_unpin(struct drm_i915_gem_request *req); @@ -92,13 +107,15 @@ void intel_lr_context_reset(struct drm_device *dev, uint64_t intel_lr_context_descriptor(struct intel_context *ctx, struct intel_engine_cs *ring); +u32 intel_execlists_ctx_id(struct intel_context *ctx, + struct intel_engine_cs *ring); + /* Execlists */ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); struct i915_execbuffer_params; int intel_execlists_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas); -u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); void intel_lrc_irq_handler(struct intel_engine_cs *ring); void intel_execlists_retire_requests(struct intel_engine_cs *ring); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 76f1980a7541..9168413fe204 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -240,9 +240,9 @@ static int intel_overlay_on(struct intel_overlay *overlay) WARN_ON(overlay->active); WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 4); if (ret) { @@ -283,9 +283,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 2); if (ret) { @@ -349,9 +349,9 @@ static int intel_overlay_off(struct intel_overlay *overlay) * of the hw. 
Do it in both cases */ flip_addr |= OFC_UPDATE; - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 6); if (ret) { @@ -423,9 +423,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) /* synchronous slowpath */ struct drm_i915_gem_request *req; - ret = i915_gem_request_alloc(ring, ring->default_context, &req); - if (ret) - return ret; + req = i915_gem_request_alloc(ring, NULL); + if (IS_ERR(req)) + return PTR_ERR(req); ret = intel_ring_begin(req, 2); if (ret) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index eb5fa05cf476..20bf854eae8c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -32,6 +32,8 @@ #include <linux/module.h> /** + * DOC: RC6 + * * RC6 is a special power stage which allows the GPU to enter an very * low-voltage mode when idle, using down to 0V while at this stage. This * stage is entered automatically when the GPU is idle when RC6 support is @@ -1672,6 +1674,9 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) if (pipe_h < pfit_h) pipe_h = pfit_h; + if (WARN_ON(!pfit_w || !pfit_h)) + return pixel_rate; + pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, pfit_w * pfit_h); } @@ -1703,6 +1708,8 @@ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, if (WARN(latency == 0, "Latency value missing\n")) return UINT_MAX; + if (WARN_ON(!pipe_htotal)) + return UINT_MAX; ret = (latency * pixel_rate) / (pipe_htotal * 10000); ret = (ret + 1) * horiz_pixels * bytes_per_pixel; @@ -1713,6 +1720,17 @@ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, uint8_t bytes_per_pixel) { + /* + * Neither of these should be possible since this function shouldn't be + * called if the CRTC is off or the plane is invisible. But let's be + * extra paranoid to avoid a potential divide-by-zero if we screw up + * elsewhere in the driver. 
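The watermark helpers above guard each divisor with WARN_ON() before dividing; a tiny userspace version of that defensive shape is sketched here. toy_warn_on() is a stand-in for the kernel macro, and the formula only mirrors the general form of ilk_wm_fbc().

    #include <stdio.h>

    /* Userspace stand-in for WARN_ON(): report the condition, then return it. */
    #define toy_warn_on(cond) \
        ((cond) ? (fprintf(stderr, "warning: %s\n", #cond), 1) : 0)

    /* Bail out with a harmless value instead of dividing by zero. */
    static unsigned int toy_wm_fbc(unsigned int pri_val, unsigned int horiz_pixels,
                                   unsigned int bytes_per_pixel)
    {
        if (toy_warn_on(!horiz_pixels) || toy_warn_on(!bytes_per_pixel))
            return 0;

        return (pri_val * 64 + horiz_pixels * bytes_per_pixel - 1) /
               (horiz_pixels * bytes_per_pixel) + 2;
    }

    int main(void)
    {
        printf("%u\n", toy_wm_fbc(128, 1920, 4));   /* normal path */
        printf("%u\n", toy_wm_fbc(128, 0, 4));      /* guarded path, warns */
        return 0;
    }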
+ */ + if (WARN_ON(!bytes_per_pixel)) + return 0; + if (WARN_ON(!horiz_pixels)) + return 0; + return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; } @@ -1998,14 +2016,19 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, } static uint32_t -hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) +hsw_compute_linetime_wm(struct drm_device *dev, + struct intel_crtc_state *cstate) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = + &cstate->base.adjusted_mode; u32 linetime, ips_linetime; - if (!intel_crtc->active) + if (!cstate->base.active) + return 0; + if (WARN_ON(adjusted_mode->crtc_clock == 0)) + return 0; + if (WARN_ON(dev_priv->cdclk_freq == 0)) return 0; /* The WM are computed with base on how long it takes to fill a single @@ -2277,6 +2300,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc, return PTR_ERR(cstate); pipe_wm = &cstate->wm.optimal.ilk; + memset(pipe_wm, 0, sizeof(*pipe_wm)); for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { ps = drm_atomic_get_plane_state(state, @@ -2313,8 +2337,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc, pristate, sprstate, curstate, &pipe_wm->wm[0]); if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - pipe_wm->linetime = hsw_compute_linetime_wm(dev, - &intel_crtc->base); + pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); /* LP0 watermarks always use 1/2 DDB partitioning */ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); @@ -3597,23 +3620,45 @@ static void skl_update_wm(struct drm_crtc *crtc) dev_priv->wm.skl_hw = *results; } -static void ilk_program_watermarks(struct drm_i915_private *dev_priv) +static void ilk_compute_wm_config(struct drm_device *dev, + struct intel_wm_config *config) { - struct drm_device *dev = dev_priv->dev; + struct intel_crtc *crtc; + + /* Compute the currently _active_ config */ + for_each_intel_crtc(dev, crtc) { + const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; + + if (!wm->pipe_enabled) + continue; + + config->sprites_enabled |= wm->sprites_enabled; + config->sprites_scaled |= wm->sprites_scaled; + config->num_pipes_active++; + } +} + +static void ilk_program_watermarks(struct intel_crtc_state *cstate) +{ + struct drm_crtc *crtc = cstate->base.crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; - struct intel_wm_config *config = &dev_priv->wm.config; + struct intel_wm_config config = {}; struct ilk_wm_values results = {}; enum intel_ddb_partitioning partitioning; - ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max); - ilk_wm_merge(dev, config, &max, &lp_wm_1_2); + ilk_compute_wm_config(dev, &config); + + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); + ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); /* 5/6 split only in single pipe config on IVB+ */ if (INTEL_INFO(dev)->gen >= 7 && - config->num_pipes_active == 1 && config->sprites_enabled) { - ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max); - ilk_wm_merge(dev, config, &max, &lp_wm_5_6); + config.num_pipes_active == 1 && config.sprites_enabled) { + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); + ilk_wm_merge(dev, &config, &max, 
&lp_wm_5_6); best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); } else { @@ -3630,7 +3675,6 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) static void ilk_update_wm(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); @@ -3650,7 +3694,7 @@ static void ilk_update_wm(struct drm_crtc *crtc) intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; - ilk_program_watermarks(dev_priv); + ilk_program_watermarks(cstate); } static void skl_pipe_wm_active_state(uint32_t val, @@ -4036,7 +4080,7 @@ void intel_update_watermarks(struct drm_crtc *crtc) dev_priv->display.update_wm(crtc); } -/** +/* * Lock protecting IPS related data structures */ DEFINE_SPINLOCK(mchdev_lock); @@ -4509,13 +4553,13 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) } if (HAS_RC6p(dev)) DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + onoff(mode & GEN6_RC_CTL_RC6_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), + onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); else DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); + onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); } static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) @@ -4693,8 +4737,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* 3a: Enable RC6 */ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? - "on" : "off"); + DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); /* WaRsUseTimeoutMode */ if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { @@ -4713,8 +4756,7 @@ static void gen9_enable_rc6(struct drm_device *dev) * 3b: Enable Coarse Power Gating only when RC6 is enabled. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ - if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) + if (NEEDS_WaRsDisableCoarsePowerGating(dev)) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? @@ -6981,6 +7023,7 @@ void intel_init_pm(struct drm_device *dev) dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { dev_priv->display.update_wm = ilk_update_wm; dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; + dev_priv->display.program_watermarks = ilk_program_watermarks; } else { DRM_DEBUG_KMS("Failed to read display plane latency. 
" "Disable CxSR\n"); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 339701d7a9a5..9030e2bca0c0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { @@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { @@ -1865,15 +1867,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, offset = cs_offset; } - ret = intel_ring_begin(req, 4); + ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; @@ -1899,6 +1899,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, return 0; } +static void cleanup_phys_status_page(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = to_i915(ring->dev); + + if (!dev_priv->status_page_dmah) + return; + + drm_pci_free(ring->dev, dev_priv->status_page_dmah); + ring->status_page.page_addr = NULL; +} + static void cleanup_status_page(struct intel_engine_cs *ring) { struct drm_i915_gem_object *obj; @@ -1915,9 +1926,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring) static int init_status_page(struct intel_engine_cs *ring) { - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = ring->status_page.obj; - if ((obj = ring->status_page.obj) == NULL) { + if (obj == NULL) { unsigned flags; int ret; @@ -1988,6 +1999,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) else iounmap(ringbuf->virtual_start); ringbuf->virtual_start = NULL; + ringbuf->vma = NULL; i915_gem_object_ggtt_unpin(ringbuf->obj); } @@ -2054,6 +2066,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, } } + ringbuf->vma = i915_gem_obj_to_ggtt(obj); + return 0; } @@ -2162,7 +2176,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto error; } else { - BUG_ON(ring->id != RCS); + WARN_ON(ring->id != RCS); ret = init_phys_status_page(ring); if (ret) goto error; @@ -2208,7 +2222,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) if (ring->cleanup) ring->cleanup(ring); - cleanup_status_page(ring); + if (I915_NEED_GFX_HWS(ring->dev)) { + cleanup_status_page(ring); + } else { + WARN_ON(ring->id != RCS); + cleanup_phys_status_page(ring); + } i915_cmd_parser_fini_ring(ring); i915_gem_batch_pool_fini(&ring->batch_pool); @@ -2664,6 +2683,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->name = "render ring"; ring->id = RCS; + ring->exec_id = I915_EXEC_RENDER; ring->mmio_base = RENDER_RING_BASE; if (INTEL_INFO(dev)->gen >= 8) { @@ -2812,6 +2832,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->name = "bsd ring"; ring->id = VCS; + ring->exec_id = I915_EXEC_BSD; ring->write_tail = ring_write_tail; if (INTEL_INFO(dev)->gen >= 6) 
{ @@ -2888,6 +2909,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) ring->name = "bsd2 ring"; ring->id = VCS2; + ring->exec_id = I915_EXEC_BSD; ring->write_tail = ring_write_tail; ring->mmio_base = GEN8_BSD2_RING_BASE; @@ -2918,6 +2940,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) ring->name = "blitter ring"; ring->id = BCS; + ring->exec_id = I915_EXEC_BLT; ring->mmio_base = BLT_RING_BASE; ring->write_tail = ring_write_tail; @@ -2975,6 +2998,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) ring->name = "video enhancement ring"; ring->id = VECS; + ring->exec_id = I915_EXEC_VEBOX; ring->mmio_base = VEBOX_RING_BASE; ring->write_tail = ring_write_tail; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 49574ffe54bc..b12f2aabd104 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -93,11 +93,13 @@ struct intel_ring_hangcheck { int score; enum intel_ring_hangcheck_action action; int deadlock; + u32 instdone[I915_NUM_INSTDONE_REG]; }; struct intel_ringbuffer { struct drm_i915_gem_object *obj; void __iomem *virtual_start; + struct i915_vma *vma; struct intel_engine_cs *ring; struct list_head link; @@ -147,14 +149,15 @@ struct i915_ctx_workarounds { struct intel_engine_cs { const char *name; enum intel_ring_id { - RCS = 0x0, - VCS, + RCS = 0, BCS, - VECS, - VCS2 + VCS, + VCS2, /* Keep instances of the same type engine together. */ + VECS } id; #define I915_NUM_RINGS 5 -#define LAST_USER_RING (VECS + 1) +#define _VCS(n) (VCS + (n)) + unsigned int exec_id; u32 mmio_base; struct drm_device *dev; struct intel_ringbuffer *buffer; @@ -268,6 +271,8 @@ struct intel_engine_cs { struct list_head execlist_queue; struct list_head execlist_retired_req_list; u8 next_context_status_buffer; + bool disable_lite_restore_wa; + u32 ctx_desc_template; u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ int (*emit_request)(struct drm_i915_gem_request *request); int (*emit_flush)(struct drm_i915_gem_request *request, @@ -305,7 +310,6 @@ struct intel_engine_cs { wait_queue_head_t irq_queue; - struct intel_context *default_context; struct intel_context *last_context; struct intel_ring_hangcheck hangcheck; @@ -406,7 +410,7 @@ intel_write_status_page(struct intel_engine_cs *ring, ring->status_page.page_addr[reg] = value; } -/** +/* * Reads a dword out of the status page, which is written to from the command * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or * MI_STORE_DATA_IMM. @@ -423,6 +427,7 @@ intel_write_status_page(struct intel_engine_cs *ring, * The area from dword 0x30 to 0x3ff is available for driver usage. 
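The status-page layout described here stores driver data at dword indices, and gen8_emit_request() earlier in this patch asserts at build time that the resulting byte address keeps bit 5 clear for the flush/store command. A sketch of both steps follows; the shift of 2 (dwords to bytes) and all names are assumptions made only so the example is self-contained.

    #include <stdio.h>

    /*
     * A dword index into the hardware status page becomes a byte offset by
     * shifting left by 2 (four bytes per dword); the real driver uses
     * MI_STORE_DWORD_INDEX_SHIFT for this, whose value is assumed here.
     */
    #define TOY_HWS_SEQNO_INDEX 0x30
    #define TOY_DWORD_SHIFT     2
    #define TOY_HWS_SEQNO_ADDR  (TOY_HWS_SEQNO_INDEX << TOY_DWORD_SHIFT)

    /* Compile-time version of the "bit 5 must be zero" workaround check. */
    _Static_assert((TOY_HWS_SEQNO_ADDR & (1 << 5)) == 0,
                   "flush/store address must keep bit 5 clear");

    int main(void)
    {
        printf("seqno lives at byte offset 0x%x of the status page\n",
               TOY_HWS_SEQNO_ADDR);
        return 0;
    }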
*/ #define I915_GEM_HWS_INDEX 0x30 +#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT) #define I915_GEM_HWS_SCRATCH_INDEX 0x40 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index ddbdbffe829a..bbca527184d0 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -532,7 +532,8 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2); - WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n"); + WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev), + "Platform doesn't support DC5.\n"); WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); @@ -568,7 +569,8 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n"); + WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev), + "Platform doesn't support DC6.\n"); WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, "Backlight is not disabled.\n"); @@ -595,7 +597,8 @@ static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv) { assert_can_disable_dc5(dev_priv); - if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1) + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + i915.enable_dc != 0 && i915.enable_dc != 1) assert_can_disable_dc6(dev_priv); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -623,7 +626,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv) static void skl_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { - struct drm_device *dev = dev_priv->dev; uint32_t tmp, fuse_status; uint32_t req_mask, state_mask; bool is_enabled, enable_requested, check_fuse_status = false; @@ -667,17 +669,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, !I915_READ(HSW_PWR_WELL_BIOS), "Invalid for power well status to be enabled, unless done by the BIOS, \ when request is to disable!\n"); - if (power_well->data == SKL_DISP_PW_2) { - /* - * DDI buffer programming unnecessary during - * driver-load/resume as it's already done - * during modeset initialization then. It's - * also invalid here as encoder list is still - * uninitialized. 
- */ - if (!dev_priv->power_domains.initializing) - intel_prepare_ddi(dev); - } I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); } @@ -783,7 +774,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1) + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + i915.enable_dc != 0 && i915.enable_dc != 1) skl_enable_dc6(dev_priv); else gen9_enable_dc5(dev_priv); @@ -795,7 +787,8 @@ static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv, if (power_well->count > 0) { gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } else { - if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && + i915.enable_dc != 0 && i915.enable_dc != 1) gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); else @@ -1851,7 +1844,7 @@ void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv) { struct i915_power_well *well; - if (!IS_SKYLAKE(dev_priv)) + if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) return; well = lookup_power_well(dev_priv, SKL_DISP_PW_1); @@ -1865,7 +1858,7 @@ void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv) { struct i915_power_well *well; - if (!IS_SKYLAKE(dev_priv)) + if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) return; well = lookup_power_well(dev_priv, SKL_DISP_PW_1); diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 2e2d4eb4a00d..db0ed499268a 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -24,8 +24,8 @@ * Eric Anholt <eric@anholt.net> */ -/** - * @file SDVO command definitions and structures. +/* + * SDVO command definitions and structures. */ #define SDVO_OUTPUT_FIRST (0) @@ -66,39 +66,39 @@ struct intel_sdvo_caps { #define DTD_FLAG_VSYNC_POSITIVE (1 << 2) #define DTD_FLAG_INTERLACE (1 << 7) -/** This matches the EDID DTD structure, more or less */ +/* This matches the EDID DTD structure, more or less */ struct intel_sdvo_dtd { struct { - u16 clock; /**< pixel clock, in 10kHz units */ - u8 h_active; /**< lower 8 bits (pixels) */ - u8 h_blank; /**< lower 8 bits (pixels) */ - u8 h_high; /**< upper 4 bits each h_active, h_blank */ - u8 v_active; /**< lower 8 bits (lines) */ - u8 v_blank; /**< lower 8 bits (lines) */ - u8 v_high; /**< upper 4 bits each v_active, v_blank */ + u16 clock; /* pixel clock, in 10kHz units */ + u8 h_active; /* lower 8 bits (pixels) */ + u8 h_blank; /* lower 8 bits (pixels) */ + u8 h_high; /* upper 4 bits each h_active, h_blank */ + u8 v_active; /* lower 8 bits (lines) */ + u8 v_blank; /* lower 8 bits (lines) */ + u8 v_high; /* upper 4 bits each v_active, v_blank */ } part1; struct { - u8 h_sync_off; /**< lower 8 bits, from hblank start */ - u8 h_sync_width; /**< lower 8 bits (pixels) */ - /** lower 4 bits each vsync offset, vsync width */ + u8 h_sync_off; /* lower 8 bits, from hblank start */ + u8 h_sync_width; /* lower 8 bits (pixels) */ + /* lower 4 bits each vsync offset, vsync width */ u8 v_sync_off_width; - /** + /* * 2 high bits of hsync offset, 2 high bits of hsync width, * bits 4-5 of vsync offset, and 2 high bits of vsync width. 
*/ u8 sync_off_width_high; u8 dtd_flags; u8 sdvo_flags; - /** bits 6-7 of vsync offset at bits 6-7 */ + /* bits 6-7 of vsync offset at bits 6-7 */ u8 v_sync_off_high; u8 reserved; } part2; } __packed; struct intel_sdvo_pixel_clock_range { - u16 min; /**< pixel clock, in 10kHz units */ - u16 max; /**< pixel clock, in 10kHz units */ + u16 min; /* pixel clock, in 10kHz units */ + u16 max; /* pixel clock, in 10kHz units */ } __packed; struct intel_sdvo_preferred_input_timing_args { @@ -144,7 +144,7 @@ struct intel_sdvo_preferred_input_timing_args { #define SDVO_CMD_RESET 0x01 -/** Returns a struct intel_sdvo_caps */ +/* Returns a struct intel_sdvo_caps */ #define SDVO_CMD_GET_DEVICE_CAPS 0x02 #define SDVO_CMD_GET_FIRMWARE_REV 0x86 @@ -152,7 +152,7 @@ struct intel_sdvo_preferred_input_timing_args { # define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 # define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 -/** +/* * Reports which inputs are trained (managed to sync). * * Devices must have trained within 2 vsyncs of a mode change. @@ -164,10 +164,10 @@ struct intel_sdvo_get_trained_inputs_response { unsigned int pad:6; } __packed; -/** Returns a struct intel_sdvo_output_flags of active outputs. */ +/* Returns a struct intel_sdvo_output_flags of active outputs. */ #define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 -/** +/* * Sets the current set of active outputs. * * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP @@ -175,7 +175,7 @@ struct intel_sdvo_get_trained_inputs_response { */ #define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 -/** +/* * Returns the current mapping of SDVO inputs to outputs on the device. * * Returns two struct intel_sdvo_output_flags structures. @@ -185,29 +185,29 @@ struct intel_sdvo_in_out_map { u16 in0, in1; }; -/** +/* * Sets the current mapping of SDVO inputs to outputs on the device. * * Takes two struct i380_sdvo_output_flags structures. */ #define SDVO_CMD_SET_IN_OUT_MAP 0x07 -/** +/* * Returns a struct intel_sdvo_output_flags of attached displays. */ #define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b -/** +/* * Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging. */ #define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c -/** +/* * Takes a struct intel_sdvo_output_flags. */ #define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d -/** +/* * Returns a struct intel_sdvo_output_flags of displays with hot plug * interrupts enabled. */ @@ -221,7 +221,7 @@ struct intel_sdvo_get_interrupt_event_source_response { unsigned int pad:6; } __packed; -/** +/* * Selects which input is affected by future input commands. * * Commands affected include SET_INPUT_TIMINGS_PART[12], @@ -234,7 +234,7 @@ struct intel_sdvo_set_target_input_args { unsigned int pad:7; } __packed; -/** +/* * Takes a struct intel_sdvo_output_flags of which outputs are targeted by * future output commands. * @@ -280,7 +280,7 @@ struct intel_sdvo_set_target_input_args { # define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4) # define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6 -/** +/* * Generates a DTD based on the given width, height, and flags. 
* * This will be supported by any device supporting scaling or interlaced @@ -300,24 +300,24 @@ struct intel_sdvo_set_target_input_args { #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c -/** Returns a struct intel_sdvo_pixel_clock_range */ +/* Returns a struct intel_sdvo_pixel_clock_range */ #define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d -/** Returns a struct intel_sdvo_pixel_clock_range */ +/* Returns a struct intel_sdvo_pixel_clock_range */ #define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e -/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ +/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ #define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f -/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ +/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ #define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 -/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ +/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ #define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 # define SDVO_CLOCK_RATE_MULT_1X (1 << 0) # define SDVO_CLOCK_RATE_MULT_2X (1 << 1) # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 -/** 6 bytes of bit flags for TV formats shared by all TV format functions */ +/* 6 bytes of bit flags for TV formats shared by all TV format functions */ struct intel_sdvo_tv_format { unsigned int ntsc_m:1; unsigned int ntsc_j:1; @@ -376,7 +376,7 @@ struct intel_sdvo_tv_format { #define SDVO_CMD_SET_TV_FORMAT 0x29 -/** Returns the resolutiosn that can be used with the given TV format */ +/* Returns the resolutiosn that can be used with the given TV format */ #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83 struct intel_sdvo_sdtv_resolution_request { unsigned int ntsc_m:1; @@ -539,7 +539,7 @@ struct intel_sdvo_hdtv_resolution_reply { #define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d #define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e #define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f -/** +/* * The panel power sequencing parameters are in units of milliseconds. * The high fields are bits 8:9 of the 10-bit values. 
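The SDVO structures above repeatedly split values into a low byte plus a few high bits packed elsewhere (the DTD timing fields, and here the 10-bit panel power delays whose bits 8:9 live in separate high fields). A small round-trip example of that packing; the function names are invented.

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Split a 10-bit millisecond value into a low byte and a 2-bit high field. */
    static void pack10(uint16_t val, uint8_t *lo, uint8_t *hi2)
    {
        assert(val < (1u << 10));          /* only 10 bits fit */
        *lo = val & 0xff;                  /* bits 7:0 */
        *hi2 = (val >> 8) & 0x3;           /* bits 9:8 */
    }

    static uint16_t unpack10(uint8_t lo, uint8_t hi2)
    {
        return (uint16_t)(((hi2 & 0x3) << 8) | lo);
    }

    int main(void)
    {
        uint8_t lo, hi2;

        pack10(700, &lo, &hi2);            /* e.g. a 700 ms power-up delay */
        printf("lo=0x%02x hi=0x%x -> %u ms\n",
               (unsigned int)lo, (unsigned int)hi2, (unsigned int)unpack10(lo, hi2));
        return 0;
    }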
*/ diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 4ff7a1f4183e..0875c8e0ec0a 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -178,28 +178,33 @@ void intel_pipe_update_end(struct intel_crtc *crtc) } static void -skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +skl_update_plane(struct drm_plane *drm_plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_device *dev = drm_plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(drm_plane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; u32 plane_ctl, stride_div, stride; - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(drm_plane->state)->ckey; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr; u32 tile_height, plane_offset, plane_size; unsigned int rotation; int x_offset, y_offset; - struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config; - int scaler_id; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; + const struct intel_scaler *scaler = + &crtc_state->scaler_state.scalers[plane_state->scaler_id]; plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -208,14 +213,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, plane_ctl |= skl_plane_ctl_format(fb->pixel_format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); - rotation = drm_plane->state->rotation; + rotation = plane_state->base.rotation; plane_ctl |= skl_plane_ctl_rotation(rotation); - stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], + stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], fb->pixel_format); - scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id; - /* Sizes are 0 based */ src_w--; src_h--; @@ -236,9 +239,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, surf_addr = intel_plane_obj_offset(intel_plane, obj, 0); if (intel_rotation_90_or_270(rotation)) { + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + /* stride: Surface height in tiles */ - tile_height = intel_tile_height(dev, fb->pixel_format, - fb->modifier[0], 0); + tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); stride = DIV_ROUND_UP(fb->height, tile_height); plane_size = (src_w << 16) | src_h; x_offset = stride * tile_height - y - (src_h + 1); @@ -256,13 +260,13 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, I915_WRITE(PLANE_SIZE(pipe, plane), plane_size); /* program plane scaler */ - if (scaler_id >= 0) { + if (plane_state->scaler_id >= 0) { uint32_t ps_ctrl = 0; + int scaler_id = plane_state->scaler_id; DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, PS_PLANE_SEL(plane)); - ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | - 
crtc_state->scaler_state.scalers[scaler_id].mode; + ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); @@ -334,24 +338,29 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format) } static void -vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +vlv_update_plane(struct drm_plane *dplane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_device *dev = dplane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(dplane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; int plane = intel_plane->plane; u32 sprctl; unsigned long sprsurf_offset, linear_offset; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(dplane->state)->ckey; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; sprctl = SP_ENABLE; @@ -414,14 +423,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, crtc_h--; linear_offset = y * fb->pitches[0] + x * pixel_size; - sprsurf_offset = intel_gen4_compute_page_offset(dev_priv, - &x, &y, - obj->tiling_mode, - pixel_size, - fb->pitches[0]); + sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], + pixel_size, + fb->pitches[0]); linear_offset -= sprsurf_offset; - if (dplane->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { sprctl |= SP_ROTATE_180; x += src_w; @@ -474,23 +482,28 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) } static void -ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +ivb_update_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; unsigned long sprsurf_offset, linear_offset; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(plane->state)->ckey; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = 
drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; sprctl = SPRITE_ENABLE; @@ -544,13 +557,13 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; linear_offset = y * fb->pitches[0] + x * pixel_size; - sprsurf_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, fb->pitches[0]); + sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], + pixel_size, + fb->pitches[0]); linear_offset -= sprsurf_offset; - if (plane->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { sprctl |= SPRITE_ROTATE_180; /* HSW and BDW does this automagically in hardware */ @@ -612,23 +625,28 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) } static void -ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t x, uint32_t y, - uint32_t src_w, uint32_t src_h) +ilk_update_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; unsigned long dvssurf_offset, linear_offset; u32 dvscntr, dvsscale; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - const struct drm_intel_sprite_colorkey *key = - &to_intel_plane_state(plane->state)->ckey; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->dst.x1; + int crtc_y = plane_state->dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->dst); + uint32_t crtc_h = drm_rect_height(&plane_state->dst); + uint32_t x = plane_state->src.x1 >> 16; + uint32_t y = plane_state->src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; dvscntr = DVS_ENABLE; @@ -678,13 +696,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; linear_offset = y * fb->pitches[0] + x * pixel_size; - dvssurf_offset = - intel_gen4_compute_page_offset(dev_priv, - &x, &y, obj->tiling_mode, - pixel_size, fb->pitches[0]); + dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, + fb->modifier[0], + pixel_size, + fb->pitches[0]); linear_offset -= dvssurf_offset; - if (plane->state->rotation == BIT(DRM_ROTATE_180)) { + if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { dvscntr |= DVS_ROTATE_180; x += src_w; @@ -913,30 +931,6 @@ intel_check_sprite_plane(struct drm_plane *plane, return 0; } -static void -intel_commit_sprite_plane(struct drm_plane *plane, - struct intel_plane_state *state) -{ - struct drm_crtc *crtc = state->base.crtc; - struct intel_plane *intel_plane = to_intel_plane(plane); - struct drm_framebuffer *fb = state->base.fb; - - crtc = crtc ? 
crtc : plane->crtc; - - if (state->visible) { - intel_plane->update_plane(plane, crtc, fb, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), - drm_rect_height(&state->dst), - state->src.x1 >> 16, - state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16); - } else { - intel_plane->disable_plane(plane, crtc); - } -} - int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1118,7 +1112,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) intel_plane->plane = plane; intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane); intel_plane->check_plane = intel_check_sprite_plane; - intel_plane->commit_plane = intel_commit_sprite_plane; possible_crtcs = (1 << pipe); ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, &intel_plane_funcs, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 277e60ae0e47..bfa79e5c214e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -327,13 +327,54 @@ static void intel_uncore_ellc_detect(struct drm_device *dev) } } +static bool +fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +{ + u32 dbg; + + dbg = __raw_i915_read32(dev_priv, FPGA_DBG); + if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM))) + return false; + + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + + return true; +} + +static bool +vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +{ + u32 cer; + + cer = __raw_i915_read32(dev_priv, CLAIM_ER); + if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK)))) + return false; + + __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR); + + return true; +} + +static bool +check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +{ + if (HAS_FPGA_DBG_UNCLAIMED(dev_priv)) + return fpga_check_for_unclaimed_mmio(dev_priv); + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + return vlv_check_for_unclaimed_mmio(dev_priv); + + return false; +} + static void __intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) { struct drm_i915_private *dev_priv = dev->dev_private; - if (HAS_FPGA_DBG_UNCLAIMED(dev)) - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + /* clear out unclaimed reg detection bit */ + if (check_for_unclaimed_mmio(dev_priv)) + DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* clear out old GT FIFO errors */ if (IS_GEN6(dev) || IS_GEN7(dev)) @@ -585,38 +626,38 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) } static void -hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, - i915_reg_t reg, bool read, bool before) +__unclaimed_reg_debug(struct drm_i915_private *dev_priv, + const i915_reg_t reg, + const bool read, + const bool before) { - const char *op = read ? "reading" : "writing to"; - const char *when = before ? "before" : "after"; - - if (!i915.mmio_debug) + /* XXX. We limit the auto arming traces for mmio + * debugs on these platforms. There are just too many + * revealed by these and CI/Bat suffers from the noise. + * Please fix and then re-enable the automatic traces. 
+ */ + if (i915.mmio_debug < 2 && + (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) return; - if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { - WARN(1, "Unclaimed register detected %s %s register 0x%x\n", - when, op, i915_mmio_reg_offset(reg)); - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + if (WARN(check_for_unclaimed_mmio(dev_priv), + "Unclaimed register detected %s %s register 0x%x\n", + before ? "before" : "after", + read ? "reading" : "writing to", + i915_mmio_reg_offset(reg))) i915.mmio_debug--; /* Only report the first N failures */ - } } -static void -hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) +static inline void +unclaimed_reg_debug(struct drm_i915_private *dev_priv, + const i915_reg_t reg, + const bool read, + const bool before) { - static bool mmio_debug_once = true; - - if (i915.mmio_debug || !mmio_debug_once) + if (likely(!i915.mmio_debug)) return; - if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { - DRM_DEBUG("Unclaimed register detected, " - "enabling oneshot unclaimed register reporting. " - "Please use i915.mmio_debug=N for more information.\n"); - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - i915.mmio_debug = mmio_debug_once--; - } + __unclaimed_reg_debug(dev_priv, reg, read, before); } #define GEN2_READ_HEADER(x) \ @@ -664,9 +705,11 @@ __gen2_read(64) unsigned long irqflags; \ u##x val = 0; \ assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ + unclaimed_reg_debug(dev_priv, reg, true, true) #define GEN6_READ_FOOTER \ + unclaimed_reg_debug(dev_priv, reg, true, false); \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ return val @@ -699,11 +742,9 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv, static u##x \ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ GEN6_READ_HEADER(x); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ if (NEEDS_FORCE_WAKE(offset)) \ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ val = __raw_i915_read##x(dev_priv, reg); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ GEN6_READ_FOOTER; \ } @@ -751,7 +792,6 @@ static u##x \ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ if (!SKL_NEEDS_FORCE_WAKE(offset)) \ fw_engine = 0; \ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ @@ -765,7 +805,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ if (fw_engine) \ __force_wake_get(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ - hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ GEN6_READ_FOOTER; \ } @@ -864,9 +903,11 @@ __gen2_write(64) unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ + unclaimed_reg_debug(dev_priv, reg, false, true) #define GEN6_WRITE_FOOTER \ + unclaimed_reg_debug(dev_priv, reg, false, false); \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) #define __gen6_write(x) \ @@ -892,13 +933,10 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t if (NEEDS_FORCE_WAKE(offset)) { \ __fifo_ret 
= __gen6_gt_wait_for_fifo(dev_priv); \ } \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ __raw_i915_write##x(dev_priv, reg, val); \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ - hsw_unclaimed_reg_detect(dev_priv); \ GEN6_WRITE_FOOTER; \ } @@ -928,12 +966,9 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, static void \ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ GEN6_WRITE_HEADER; \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \ __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ __raw_i915_write##x(dev_priv, reg, val); \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ - hsw_unclaimed_reg_detect(dev_priv); \ GEN6_WRITE_FOOTER; \ } @@ -987,7 +1022,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ if (!SKL_NEEDS_FORCE_WAKE(offset) || \ is_gen9_shadowed(dev_priv, reg)) \ fw_engine = 0; \ @@ -1002,8 +1036,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ if (fw_engine) \ __force_wake_get(dev_priv, fw_engine); \ __raw_i915_write##x(dev_priv, reg, val); \ - hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ - hsw_unclaimed_reg_detect(dev_priv); \ GEN6_WRITE_FOOTER; \ } @@ -1223,6 +1255,8 @@ void intel_uncore_init(struct drm_device *dev) intel_uncore_fw_domains_init(dev); __intel_uncore_early_sanitize(dev, false); + dev_priv->uncore.unclaimed_mmio_check = 1; + switch (INTEL_INFO(dev)->gen) { default: case 9: @@ -1580,13 +1614,26 @@ bool intel_has_gpu_reset(struct drm_device *dev) return intel_get_gpu_reset(dev) != NULL; } -void intel_uncore_check_errors(struct drm_device *dev) +bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + return check_for_unclaimed_mmio(dev_priv); +} + +bool +intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) +{ + if (unlikely(i915.mmio_debug || + dev_priv->uncore.unclaimed_mmio_check <= 0)) + return false; - if (HAS_FPGA_DBG_UNCLAIMED(dev) && - (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unclaimed register before interrupt\n"); - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { + DRM_DEBUG("Unclaimed register detected, " + "enabling oneshot unclaimed register reporting. 
" + "Please use i915.mmio_debug=N for more information.\n"); + i915.mmio_debug++; + dev_priv->uncore.unclaimed_mmio_check--; + return true; } + + return false; } diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 35ca4f007839..a1844b50546c 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -5,7 +5,7 @@ config DRM_IMX select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER - depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS + depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) depends on IMX_IPUV3_CORE help enable i.MX graphics support diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 19c18b7af28a..dc13c4857e6f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1564,7 +1564,7 @@ static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode, int bits_per_pixel) { uint32_t total_area, divisor; - int64_t active_area, pixels_per_second, bandwidth; + uint64_t active_area, pixels_per_second, bandwidth; uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8; divisor = 1024; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index c76cc853b08a..3cedb8d5c855 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -18,6 +18,7 @@ #include <linux/spinlock.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> +#include <linux/pfn_t.h> #include "msm_drv.h" #include "msm_gem.h" @@ -222,7 +223,8 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, pfn, pfn << PAGE_SHIFT); - ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); + ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, + __pfn_to_pfn_t(pfn, PFN_DEV)); out_unlock: mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c index ffa902ece872..05a895496fc6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c @@ -156,6 +156,7 @@ nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, return -ENOMEM; nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object); chan->gr = gr; + chan->fifo = fifoch; *pobject = &chan->object; spin_lock_irqsave(&chan->gr->base.engine.lock, flags); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c index 254094ab7fb8..5da2aa8cc333 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c @@ -141,9 +141,8 @@ gk20a_pllg_calc_rate(struct gk20a_clk *clk) rate = clk->parent_rate * clk->n; divider = clk->m * pl_to_div[clk->pl]; - do_div(rate, divider); - return rate / 2; + return rate / divider / 2; } static int diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 6c220cd3497a..336ad4de9981 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -1,9 +1,8 @@ - config DRM_OMAP tristate "OMAP DRM" depends on DRM depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM - depends on OMAP2_DSS + select OMAP2_DSS select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT @@ -14,13 +13,18 @@ config DRM_OMAP help DRM display driver for OMAP2/3/4 based boards. 
+if DRM_OMAP + config DRM_OMAP_NUM_CRTCS int "Number of CRTCs" range 1 10 default 1 if ARCH_OMAP2 || ARCH_OMAP3 default 2 if ARCH_OMAP4 - depends on DRM_OMAP help Select the number of video overlays which can be used as framebuffers. The remaining overlays are reserved for video. +source "drivers/gpu/drm/omapdrm/dss/Kconfig" +source "drivers/gpu/drm/omapdrm/displays/Kconfig" + +endif diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile index 368c1ec6805a..fe4c2228bc18 100644 --- a/drivers/gpu/drm/omapdrm/Makefile +++ b/drivers/gpu/drm/omapdrm/Makefile @@ -3,6 +3,9 @@ # Direct Rendering Infrastructure (DRI) # +obj-y += dss/ +obj-y += displays/ + ccflags-y := -Iinclude/drm -Werror omapdrm-y := omap_drv.o \ omap_irq.o \ diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig new file mode 100644 index 000000000000..2a618afe0f53 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -0,0 +1,85 @@ +menu "OMAPDRM External Display Device Drivers" + +config DISPLAY_ENCODER_OPA362 + tristate "OPA362 external analog amplifier" + help + Driver for OPA362 external analog TV amplifier controlled + through a GPIO. + +config DISPLAY_ENCODER_TFP410 + tristate "TFP410 DPI to DVI Encoder" + help + Driver for TFP410 DPI to DVI encoder. + +config DISPLAY_ENCODER_TPD12S015 + tristate "TPD12S015 HDMI ESD protection and level shifter" + help + Driver for TPD12S015, which offers HDMI ESD protection and level + shifting. + +config DISPLAY_CONNECTOR_DVI + tristate "DVI Connector" + depends on I2C + help + Driver for a generic DVI connector. + +config DISPLAY_CONNECTOR_HDMI + tristate "HDMI Connector" + help + Driver for a generic HDMI connector. + +config DISPLAY_CONNECTOR_ANALOG_TV + tristate "Analog TV Connector" + help + Driver for a generic analog TV connector. + +config DISPLAY_PANEL_DPI + tristate "Generic DPI panel" + help + Driver for generic DPI panels. + +config DISPLAY_PANEL_DSI_CM + tristate "Generic DSI Command Mode Panel" + depends on BACKLIGHT_CLASS_DEVICE + help + Driver for generic DSI command mode panels. + +config DISPLAY_PANEL_SONY_ACX565AKM + tristate "ACX565AKM Panel" + depends on SPI && BACKLIGHT_CLASS_DEVICE + help + This is the LCD panel used on Nokia N900 + +config DISPLAY_PANEL_LGPHILIPS_LB035Q02 + tristate "LG.Philips LB035Q02 LCD Panel" + depends on SPI + help + LCD Panel used on the Gumstix Overo Palo35 + +config DISPLAY_PANEL_SHARP_LS037V7DW01 + tristate "Sharp LS037V7DW01 LCD Panel" + depends on BACKLIGHT_CLASS_DEVICE + help + LCD Panel used in TI's SDP3430 and EVM boards + +config DISPLAY_PANEL_TPO_TD028TTEC1 + tristate "TPO TD028TTEC1 LCD Panel" + depends on SPI + help + LCD panel used in Openmoko. + +config DISPLAY_PANEL_TPO_TD043MTEA1 + tristate "TPO TD043MTEA1 LCD Panel" + depends on SPI + help + LCD Panel used in OMAP3 Pandora + +config DISPLAY_PANEL_NEC_NL8048HL11 + tristate "NEC NL8048HL11 Panel" + depends on SPI + depends on BACKLIGHT_CLASS_DEVICE + help + This NEC NL8048HL11 panel is TFT LCD used in the + Zoom2/3/3630 sdp boards. 
+ +endmenu diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile new file mode 100644 index 000000000000..9aa176bfbf2e --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/Makefile @@ -0,0 +1,14 @@ +obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o +obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o +obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o +obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o +obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o +obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o +obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o +obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o +obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o +obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o +obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o +obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o +obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o +obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c new file mode 100644 index 000000000000..8511c648a15c --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -0,0 +1,320 @@ +/* + * Analog TV Connector driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct device *dev; + + struct omap_video_timings timings; + + enum omap_dss_venc_type connector_type; + bool invert_polarity; +}; + +static const struct omap_video_timings tvc_pal_timings = { + .x_res = 720, + .y_res = 574, + .pixelclock = 13500000, + .hsw = 64, + .hfp = 12, + .hbp = 68, + .vsw = 5, + .vfp = 5, + .vbp = 41, + + .interlace = true, +}; + +static const struct of_device_id tvc_of_match[]; + +struct tvc_of_data { + enum omap_dss_venc_type connector_type; +}; + +#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) + +static int tvc_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(ddata->dev, "connect\n"); + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.atv->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void tvc_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(ddata->dev, "disconnect\n"); + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.atv->disconnect(in, dssdev); +} + +static int tvc_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(ddata->dev, "enable\n"); + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + 
in->ops.atv->set_timings(in, &ddata->timings); + + if (!ddata->dev->of_node) { + in->ops.atv->set_type(in, ddata->connector_type); + + in->ops.atv->invert_vid_out_polarity(in, + ddata->invert_polarity); + } + + r = in->ops.atv->enable(in); + if (r) + return r; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return r; +} + +static void tvc_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(ddata->dev, "disable\n"); + + if (!omapdss_device_is_enabled(dssdev)) + return; + + in->ops.atv->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void tvc_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->timings = *timings; + dssdev->panel.timings = *timings; + + in->ops.atv->set_timings(in, timings); +} + +static void tvc_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->timings; +} + +static int tvc_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.atv->check_timings(in, timings); +} + +static u32 tvc_get_wss(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.atv->get_wss(in); +} + +static int tvc_set_wss(struct omap_dss_device *dssdev, u32 wss) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.atv->set_wss(in, wss); +} + +static struct omap_dss_driver tvc_driver = { + .connect = tvc_connect, + .disconnect = tvc_disconnect, + + .enable = tvc_enable, + .disable = tvc_disable, + + .set_timings = tvc_set_timings, + .get_timings = tvc_get_timings, + .check_timings = tvc_check_timings, + + .get_resolution = omapdss_default_get_resolution, + + .get_wss = tvc_get_wss, + .set_wss = tvc_set_wss, +}; + +static int tvc_probe_pdata(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct connector_atv_platform_data *pdata; + struct omap_dss_device *in, *dssdev; + + pdata = dev_get_platdata(&pdev->dev); + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&pdev->dev, "Failed to find video source\n"); + return -EPROBE_DEFER; + } + + ddata->in = in; + + ddata->connector_type = pdata->connector_type; + ddata->invert_polarity = pdata->invert_polarity; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int tvc_probe_of(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + struct omap_dss_device *in; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int tvc_probe(struct platform_device *pdev) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + ddata->dev = &pdev->dev; + + if (dev_get_platdata(&pdev->dev)) { + r = 
tvc_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = tvc_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + ddata->timings = tvc_pal_timings; + + dssdev = &ddata->dssdev; + dssdev->driver = &tvc_driver; + dssdev->dev = &pdev->dev; + dssdev->type = OMAP_DISPLAY_TYPE_VENC; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = tvc_pal_timings; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&pdev->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; +err_reg: + omap_dss_put_device(ddata->in); + return r; +} + +static int __exit tvc_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_display(&ddata->dssdev); + + tvc_disable(dssdev); + tvc_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id tvc_of_match[] = { + { .compatible = "omapdss,svideo-connector", }, + { .compatible = "omapdss,composite-video-connector", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, tvc_of_match); + +static struct platform_driver tvc_connector_driver = { + .probe = tvc_probe, + .remove = __exit_p(tvc_remove), + .driver = { + .name = "connector-analog-tv", + .of_match_table = tvc_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(tvc_connector_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("Analog TV Connector driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c new file mode 100644 index 000000000000..d811e6dcaef7 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -0,0 +1,398 @@ +/* + * Generic DVI Connector driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <drm/drm_edid.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +static const struct omap_video_timings dvic_default_timings = { + .x_res = 640, + .y_res = 480, + + .pixelclock = 23500000, + + .hfp = 48, + .hsw = 32, + .hbp = 80, + + .vfp = 3, + .vsw = 4, + .vbp = 7, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, +}; + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct omap_video_timings timings; + + struct i2c_adapter *i2c_adapter; +}; + +#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) + +static int dvic_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dvi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void dvic_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dvi->disconnect(in, dssdev); +} + +static int dvic_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + in->ops.dvi->set_timings(in, &ddata->timings); + + r = in->ops.dvi->enable(in); + if (r) + return r; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void dvic_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + in->ops.dvi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void dvic_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->timings = *timings; + dssdev->panel.timings = *timings; + + in->ops.dvi->set_timings(in, timings); +} + +static void dvic_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->timings; +} + +static int dvic_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.dvi->check_timings(in, timings); +} + +static int dvic_ddc_read(struct i2c_adapter *adapter, + unsigned char *buf, u16 count, u8 offset) +{ + int r, retries; + + for (retries = 3; retries > 0; retries--) { + struct i2c_msg msgs[] = { + { + .addr = DDC_ADDR, + .flags = 0, + .len = 1, + .buf = &offset, + }, { + .addr = DDC_ADDR, + .flags = I2C_M_RD, + .len = count, + .buf = buf, + } + }; + + r = i2c_transfer(adapter, msgs, 2); + if (r == 2) + return 0; + + if (r != -EAGAIN) + break; + } + + return r < 0 ? 
r : -EIO; +} + +static int dvic_read_edid(struct omap_dss_device *dssdev, + u8 *edid, int len) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + int r, l, bytes_read; + + if (!ddata->i2c_adapter) + return -ENODEV; + + l = min(EDID_LENGTH, len); + r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0); + if (r) + return r; + + bytes_read = l; + + /* if there are extensions, read second block */ + if (len > EDID_LENGTH && edid[0x7e] > 0) { + l = min(EDID_LENGTH, len - EDID_LENGTH); + + r = dvic_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH, + l, EDID_LENGTH); + if (r) + return r; + + bytes_read += l; + } + + return bytes_read; +} + +static bool dvic_detect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + unsigned char out; + int r; + + if (!ddata->i2c_adapter) + return true; + + r = dvic_ddc_read(ddata->i2c_adapter, &out, 1, 0); + + return r == 0; +} + +static struct omap_dss_driver dvic_driver = { + .connect = dvic_connect, + .disconnect = dvic_disconnect, + + .enable = dvic_enable, + .disable = dvic_disable, + + .set_timings = dvic_set_timings, + .get_timings = dvic_get_timings, + .check_timings = dvic_check_timings, + + .get_resolution = omapdss_default_get_resolution, + + .read_edid = dvic_read_edid, + .detect = dvic_detect, +}; + +static int dvic_probe_pdata(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct connector_dvi_platform_data *pdata; + struct omap_dss_device *in, *dssdev; + int i2c_bus_num; + + pdata = dev_get_platdata(&pdev->dev); + i2c_bus_num = pdata->i2c_bus_num; + + if (i2c_bus_num != -1) { + struct i2c_adapter *adapter; + + adapter = i2c_get_adapter(i2c_bus_num); + if (!adapter) { + dev_err(&pdev->dev, + "Failed to get I2C adapter, bus %d\n", + i2c_bus_num); + return -EPROBE_DEFER; + } + + ddata->i2c_adapter = adapter; + } + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + i2c_put_adapter(ddata->i2c_adapter); + + dev_err(&pdev->dev, "Failed to find video source\n"); + return -EPROBE_DEFER; + } + + ddata->in = in; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int dvic_probe_of(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + struct omap_dss_device *in; + struct device_node *adapter_node; + struct i2c_adapter *adapter; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); + if (adapter_node) { + adapter = of_get_i2c_adapter_by_node(adapter_node); + if (adapter == NULL) { + dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); + omap_dss_put_device(ddata->in); + return -EPROBE_DEFER; + } + + ddata->i2c_adapter = adapter; + } + + return 0; +} + +static int dvic_probe(struct platform_device *pdev) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + + if (dev_get_platdata(&pdev->dev)) { + r = dvic_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = dvic_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + ddata->timings = dvic_default_timings; + + dssdev = &ddata->dssdev; + dssdev->driver = &dvic_driver; + dssdev->dev = &pdev->dev; + 
dssdev->type = OMAP_DISPLAY_TYPE_DVI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = dvic_default_timings; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&pdev->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: + omap_dss_put_device(ddata->in); + + i2c_put_adapter(ddata->i2c_adapter); + + return r; +} + +static int __exit dvic_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_display(&ddata->dssdev); + + dvic_disable(dssdev); + dvic_disconnect(dssdev); + + omap_dss_put_device(in); + + i2c_put_adapter(ddata->i2c_adapter); + + return 0; +} + +static const struct of_device_id dvic_of_match[] = { + { .compatible = "omapdss,dvi-connector", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, dvic_of_match); + +static struct platform_driver dvi_connector_driver = { + .probe = dvic_probe, + .remove = __exit_p(dvic_remove), + .driver = { + .name = "connector-dvi", + .of_match_table = dvic_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(dvi_connector_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("Generic DVI Connector driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c new file mode 100644 index 000000000000..6ee4129bc0c0 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -0,0 +1,348 @@ +/* + * HDMI Connector driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_gpio.h> + +#include <drm/drm_edid.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +static const struct omap_video_timings hdmic_default_timings = { + .x_res = 640, + .y_res = 480, + .pixelclock = 25175000, + .hsw = 96, + .hfp = 16, + .hbp = 48, + .vsw = 2, + .vfp = 11, + .vbp = 31, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + + .interlace = false, +}; + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct device *dev; + + struct omap_video_timings timings; + + int hpd_gpio; +}; + +#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) + +static int hdmic_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(ddata->dev, "connect\n"); + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.hdmi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void hdmic_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(ddata->dev, "disconnect\n"); + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.hdmi->disconnect(in, dssdev); +} + +static int hdmic_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(ddata->dev, "enable\n"); + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + in->ops.hdmi->set_timings(in, &ddata->timings); + + r = in->ops.hdmi->enable(in); + if (r) + return r; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return r; +} + +static void hdmic_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(ddata->dev, "disable\n"); + + if (!omapdss_device_is_enabled(dssdev)) + return; + + in->ops.hdmi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void hdmic_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->timings = *timings; + dssdev->panel.timings = *timings; + + in->ops.hdmi->set_timings(in, timings); +} + +static void hdmic_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->timings; +} + +static int hdmic_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->check_timings(in, timings); +} + +static int hdmic_read_edid(struct omap_dss_device *dssdev, + u8 *edid, int len) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->read_edid(in, edid, len); +} + +static bool hdmic_detect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (gpio_is_valid(ddata->hpd_gpio)) + return gpio_get_value_cansleep(ddata->hpd_gpio); + 
else + return in->ops.hdmi->detect(in); +} + +static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); +} + +static int hdmic_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_infoframe(in, avi); +} + +static struct omap_dss_driver hdmic_driver = { + .connect = hdmic_connect, + .disconnect = hdmic_disconnect, + + .enable = hdmic_enable, + .disable = hdmic_disable, + + .set_timings = hdmic_set_timings, + .get_timings = hdmic_get_timings, + .check_timings = hdmic_check_timings, + + .get_resolution = omapdss_default_get_resolution, + + .read_edid = hdmic_read_edid, + .detect = hdmic_detect, + .set_hdmi_mode = hdmic_set_hdmi_mode, + .set_hdmi_infoframe = hdmic_set_infoframe, +}; + +static int hdmic_probe_pdata(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct connector_hdmi_platform_data *pdata; + struct omap_dss_device *in, *dssdev; + + pdata = dev_get_platdata(&pdev->dev); + + ddata->hpd_gpio = -ENODEV; + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&pdev->dev, "Failed to find video source\n"); + return -EPROBE_DEFER; + } + + ddata->in = in; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int hdmic_probe_of(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + struct omap_dss_device *in; + int gpio; + + /* HPD GPIO */ + gpio = of_get_named_gpio(node, "hpd-gpios", 0); + if (gpio_is_valid(gpio)) + ddata->hpd_gpio = gpio; + else + ddata->hpd_gpio = -ENODEV; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int hdmic_probe(struct platform_device *pdev) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + ddata->dev = &pdev->dev; + + if (dev_get_platdata(&pdev->dev)) { + r = hdmic_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = hdmic_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + if (gpio_is_valid(ddata->hpd_gpio)) { + r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, + GPIOF_DIR_IN, "hdmi_hpd"); + if (r) + goto err_reg; + } + + ddata->timings = hdmic_default_timings; + + dssdev = &ddata->dssdev; + dssdev->driver = &hdmic_driver; + dssdev->dev = &pdev->dev; + dssdev->type = OMAP_DISPLAY_TYPE_HDMI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = hdmic_default_timings; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&pdev->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; +err_reg: + omap_dss_put_device(ddata->in); + return r; +} + +static int __exit hdmic_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_display(&ddata->dssdev); + + hdmic_disable(dssdev); + 
hdmic_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id hdmic_of_match[] = { + { .compatible = "omapdss,hdmi-connector", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, hdmic_of_match); + +static struct platform_driver hdmi_connector_driver = { + .probe = hdmic_probe, + .remove = __exit_p(hdmic_remove), + .driver = { + .name = "connector-hdmi", + .of_match_table = hdmic_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(hdmi_connector_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("HDMI Connector driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c new file mode 100644 index 000000000000..8c246c213e06 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -0,0 +1,278 @@ +/* + * OPA362 analog video amplifier with output/power control + * + * Copyright (C) 2014 Golden Delicious Computers + * Author: H. Nikolaus Schaller <hns@goldelico.com> + * + * based on encoder-tfp410 + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct gpio_desc *enable_gpio; + + struct omap_video_timings timings; +}; + +#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) + +static int opa362_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(dssdev->dev, "connect\n"); + + if (omapdss_device_is_connected(dssdev)) + return -EBUSY; + + r = in->ops.atv->connect(in, dssdev); + if (r) + return r; + + dst->src = dssdev; + dssdev->dst = dst; + + return 0; +} + +static void opa362_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(dssdev->dev, "disconnect\n"); + + WARN_ON(!omapdss_device_is_connected(dssdev)); + if (!omapdss_device_is_connected(dssdev)) + return; + + WARN_ON(dst != dssdev->dst); + if (dst != dssdev->dst) + return; + + dst->src = NULL; + dssdev->dst = NULL; + + in->ops.atv->disconnect(in, &ddata->dssdev); +} + +static int opa362_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(dssdev->dev, "enable\n"); + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + in->ops.atv->set_timings(in, &ddata->timings); + + r = in->ops.atv->enable(in); + if (r) + return r; + + if (ddata->enable_gpio) + gpiod_set_value_cansleep(ddata->enable_gpio, 1); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void opa362_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(dssdev->dev, "disable\n"); + + if 
(!omapdss_device_is_enabled(dssdev)) + return; + + if (ddata->enable_gpio) + gpiod_set_value_cansleep(ddata->enable_gpio, 0); + + in->ops.atv->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void opa362_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(dssdev->dev, "set_timings\n"); + + ddata->timings = *timings; + dssdev->panel.timings = *timings; + + in->ops.atv->set_timings(in, timings); +} + +static void opa362_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + dev_dbg(dssdev->dev, "get_timings\n"); + + *timings = ddata->timings; +} + +static int opa362_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(dssdev->dev, "check_timings\n"); + + return in->ops.atv->check_timings(in, timings); +} + +static void opa362_set_type(struct omap_dss_device *dssdev, + enum omap_dss_venc_type type) +{ + /* we can only drive a COMPOSITE output */ + WARN_ON(type != OMAP_DSS_VENC_TYPE_COMPOSITE); + +} + +static const struct omapdss_atv_ops opa362_atv_ops = { + .connect = opa362_connect, + .disconnect = opa362_disconnect, + + .enable = opa362_enable, + .disable = opa362_disable, + + .check_timings = opa362_check_timings, + .set_timings = opa362_set_timings, + .get_timings = opa362_get_timings, + + .set_type = opa362_set_type, +}; + +static int opa362_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev, *in; + struct gpio_desc *gpio; + int r; + + dev_dbg(&pdev->dev, "probe\n"); + + if (node == NULL) { + dev_err(&pdev->dev, "Unable to find device tree\n"); + return -EINVAL; + } + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + + gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + ddata->enable_gpio = gpio; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + dssdev = &ddata->dssdev; + dssdev->ops.atv = &opa362_atv_ops; + dssdev->dev = &pdev->dev; + dssdev->type = OMAP_DISPLAY_TYPE_VENC; + dssdev->output_type = OMAP_DISPLAY_TYPE_VENC; + dssdev->owner = THIS_MODULE; + + r = omapdss_register_output(dssdev); + if (r) { + dev_err(&pdev->dev, "Failed to register output\n"); + goto err_reg; + } + + return 0; +err_reg: + omap_dss_put_device(ddata->in); + return r; +} + +static int __exit opa362_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_output(&ddata->dssdev); + + WARN_ON(omapdss_device_is_enabled(dssdev)); + if (omapdss_device_is_enabled(dssdev)) + opa362_disable(dssdev); + + WARN_ON(omapdss_device_is_connected(dssdev)); + if (omapdss_device_is_connected(dssdev)) + opa362_disconnect(dssdev, dssdev->dst); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id opa362_of_match[] = { + { .compatible = "omapdss,ti,opa362", }, + {}, +}; 
+MODULE_DEVICE_TABLE(of, opa362_of_match); + +static struct platform_driver opa362_driver = { + .probe = opa362_probe, + .remove = __exit_p(opa362_remove), + .driver = { + .name = "amplifier-opa362", + .of_match_table = opa362_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(opa362_driver); + +MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>"); +MODULE_DESCRIPTION("OPA362 analog video amplifier with output/power control"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c new file mode 100644 index 000000000000..d9048b3df495 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -0,0 +1,320 @@ +/* + * TFP410 DPI-to-DVI encoder driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + int pd_gpio; + int data_lines; + + struct omap_video_timings timings; +}; + +#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) + +static int tfp410_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return -EBUSY; + + r = in->ops.dpi->connect(in, dssdev); + if (r) + return r; + + dst->src = dssdev; + dssdev->dst = dst; + + return 0; +} + +static void tfp410_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + WARN_ON(!omapdss_device_is_connected(dssdev)); + if (!omapdss_device_is_connected(dssdev)) + return; + + WARN_ON(dst != dssdev->dst); + if (dst != dssdev->dst) + return; + + dst->src = NULL; + dssdev->dst = NULL; + + in->ops.dpi->disconnect(in, &ddata->dssdev); +} + +static int tfp410_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + in->ops.dpi->set_timings(in, &ddata->timings); + if (ddata->data_lines) + in->ops.dpi->set_data_lines(in, ddata->data_lines); + + r = in->ops.dpi->enable(in); + if (r) + return r; + + if (gpio_is_valid(ddata->pd_gpio)) + gpio_set_value_cansleep(ddata->pd_gpio, 1); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void tfp410_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + if (gpio_is_valid(ddata->pd_gpio)) + gpio_set_value_cansleep(ddata->pd_gpio, 0); + + in->ops.dpi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void tfp410_fix_timings(struct omap_video_timings *timings) +{ + timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings->sync_pclk_edge = 
OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH; +} + +static void tfp410_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + tfp410_fix_timings(timings); + + ddata->timings = *timings; + dssdev->panel.timings = *timings; + + in->ops.dpi->set_timings(in, timings); +} + +static void tfp410_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->timings; +} + +static int tfp410_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + tfp410_fix_timings(timings); + + return in->ops.dpi->check_timings(in, timings); +} + +static const struct omapdss_dvi_ops tfp410_dvi_ops = { + .connect = tfp410_connect, + .disconnect = tfp410_disconnect, + + .enable = tfp410_enable, + .disable = tfp410_disable, + + .check_timings = tfp410_check_timings, + .set_timings = tfp410_set_timings, + .get_timings = tfp410_get_timings, +}; + +static int tfp410_probe_pdata(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct encoder_tfp410_platform_data *pdata; + struct omap_dss_device *dssdev, *in; + + pdata = dev_get_platdata(&pdev->dev); + + ddata->pd_gpio = pdata->power_down_gpio; + + ddata->data_lines = pdata->data_lines; + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&pdev->dev, "Failed to find video source\n"); + return -ENODEV; + } + + ddata->in = in; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int tfp410_probe_of(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + struct omap_dss_device *in; + int gpio; + + gpio = of_get_named_gpio(node, "powerdown-gpios", 0); + + if (gpio_is_valid(gpio) || gpio == -ENOENT) { + ddata->pd_gpio = gpio; + } else { + dev_err(&pdev->dev, "failed to parse PD gpio\n"); + return gpio; + } + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int tfp410_probe(struct platform_device *pdev) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + + if (dev_get_platdata(&pdev->dev)) { + r = tfp410_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = tfp410_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + if (gpio_is_valid(ddata->pd_gpio)) { + r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, + GPIOF_OUT_INIT_LOW, "tfp410 PD"); + if (r) { + dev_err(&pdev->dev, "Failed to request PD GPIO %d\n", + ddata->pd_gpio); + goto err_gpio; + } + } + + dssdev = &ddata->dssdev; + dssdev->ops.dvi = &tfp410_dvi_ops; + dssdev->dev = &pdev->dev; + dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; + dssdev->owner = THIS_MODULE; + dssdev->phy.dpi.data_lines = ddata->data_lines; + dssdev->port_num = 1; + + r = omapdss_register_output(dssdev); + if (r) { + dev_err(&pdev->dev, "Failed to register output\n"); + goto 
err_reg; + } + + return 0; +err_reg: +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int __exit tfp410_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_output(&ddata->dssdev); + + WARN_ON(omapdss_device_is_enabled(dssdev)); + if (omapdss_device_is_enabled(dssdev)) + tfp410_disable(dssdev); + + WARN_ON(omapdss_device_is_connected(dssdev)); + if (omapdss_device_is_connected(dssdev)) + tfp410_disconnect(dssdev, dssdev->dst); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id tfp410_of_match[] = { + { .compatible = "omapdss,ti,tfp410", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, tfp410_of_match); + +static struct platform_driver tfp410_driver = { + .probe = tfp410_probe, + .remove = __exit_p(tfp410_remove), + .driver = { + .name = "tfp410", + .of_match_table = tfp410_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(tfp410_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("TFP410 DPI to DVI encoder driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c new file mode 100644 index 000000000000..990af6baeb0f --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -0,0 +1,379 @@ +/* + * TPD12S015 HDMI ESD protection & level shifter chip driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/gpio.h> +#include <linux/platform_device.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + int ct_cp_hpd_gpio; + int ls_oe_gpio; + int hpd_gpio; + + struct omap_video_timings timings; +}; + +#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) + +static int tpd_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + r = in->ops.hdmi->connect(in, dssdev); + if (r) + return r; + + dst->src = dssdev; + dssdev->dst = dst; + + gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1); + /* DC-DC converter needs at max 300us to get to 90% of 5V */ + udelay(300); + + return 0; +} + +static void tpd_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + WARN_ON(dst != dssdev->dst); + + if (dst != dssdev->dst) + return; + + gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0); + + dst->src = NULL; + dssdev->dst = NULL; + + in->ops.hdmi->disconnect(in, &ddata->dssdev); +} + +static int tpd_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) + return 0; + + in->ops.hdmi->set_timings(in, &ddata->timings); + + r = in->ops.hdmi->enable(in); + if (r) + return r; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return r; +} + +static void tpd_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) + return; + + in->ops.hdmi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void tpd_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->timings = *timings; + dssdev->panel.timings = *timings; + + in->ops.hdmi->set_timings(in, timings); +} + +static void tpd_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->timings; +} + +static int tpd_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + r = in->ops.hdmi->check_timings(in, timings); + + return r; +} + +static int tpd_read_edid(struct omap_dss_device *dssdev, + u8 *edid, int len) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!gpio_get_value_cansleep(ddata->hpd_gpio)) + return -ENODEV; + + if (gpio_is_valid(ddata->ls_oe_gpio)) + gpio_set_value_cansleep(ddata->ls_oe_gpio, 1); + + r = in->ops.hdmi->read_edid(in, edid, len); + + if (gpio_is_valid(ddata->ls_oe_gpio)) + gpio_set_value_cansleep(ddata->ls_oe_gpio, 0); + + return r; +} + +static bool tpd_detect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + return 
gpio_get_value_cansleep(ddata->hpd_gpio); +} + +static int tpd_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_infoframe(in, avi); +} + +static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev, + bool hdmi_mode) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); +} + +static const struct omapdss_hdmi_ops tpd_hdmi_ops = { + .connect = tpd_connect, + .disconnect = tpd_disconnect, + + .enable = tpd_enable, + .disable = tpd_disable, + + .check_timings = tpd_check_timings, + .set_timings = tpd_set_timings, + .get_timings = tpd_get_timings, + + .read_edid = tpd_read_edid, + .detect = tpd_detect, + .set_infoframe = tpd_set_infoframe, + .set_hdmi_mode = tpd_set_hdmi_mode, +}; + +static int tpd_probe_pdata(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct encoder_tpd12s015_platform_data *pdata; + struct omap_dss_device *dssdev, *in; + + pdata = dev_get_platdata(&pdev->dev); + + ddata->ct_cp_hpd_gpio = pdata->ct_cp_hpd_gpio; + ddata->ls_oe_gpio = pdata->ls_oe_gpio; + ddata->hpd_gpio = pdata->hpd_gpio; + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&pdev->dev, "Failed to find video source\n"); + return -ENODEV; + } + + ddata->in = in; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int tpd_probe_of(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + struct omap_dss_device *in; + int gpio; + + /* CT CP HPD GPIO */ + gpio = of_get_gpio(node, 0); + if (!gpio_is_valid(gpio)) { + dev_err(&pdev->dev, "failed to parse CT CP HPD gpio\n"); + return gpio; + } + ddata->ct_cp_hpd_gpio = gpio; + + /* LS OE GPIO */ + gpio = of_get_gpio(node, 1); + if (gpio_is_valid(gpio) || gpio == -ENOENT) { + ddata->ls_oe_gpio = gpio; + } else { + dev_err(&pdev->dev, "failed to parse LS OE gpio\n"); + return gpio; + } + + /* HPD GPIO */ + gpio = of_get_gpio(node, 2); + if (!gpio_is_valid(gpio)) { + dev_err(&pdev->dev, "failed to parse HPD gpio\n"); + return gpio; + } + ddata->hpd_gpio = gpio; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int tpd_probe(struct platform_device *pdev) +{ + struct omap_dss_device *in, *dssdev; + struct panel_drv_data *ddata; + int r; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + + if (dev_get_platdata(&pdev->dev)) { + r = tpd_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = tpd_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + r = devm_gpio_request_one(&pdev->dev, ddata->ct_cp_hpd_gpio, + GPIOF_OUT_INIT_LOW, "hdmi_ct_cp_hpd"); + if (r) + goto err_gpio; + + if (gpio_is_valid(ddata->ls_oe_gpio)) { + r = devm_gpio_request_one(&pdev->dev, ddata->ls_oe_gpio, + GPIOF_OUT_INIT_LOW, "hdmi_ls_oe"); + if (r) + goto err_gpio; + } + + r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, + GPIOF_DIR_IN, "hdmi_hpd"); + if (r) + goto err_gpio; + + dssdev = &ddata->dssdev; + dssdev->ops.hdmi = &tpd_hdmi_ops; + dssdev->dev = &pdev->dev; + 
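	/*
	 * The tpd12s015 registers as an HDMI output of its own: the
	 * tpd_hdmi_ops above largely forward to the upstream HDMI encoder
	 * (ddata->in), with CT_CP_HPD gating the 5V supply on connect,
	 * LS_OE enabling the level shifter around EDID reads, and HPD
	 * feeding detect() and read_edid().
	 */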
dssdev->type = OMAP_DISPLAY_TYPE_HDMI; + dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; + dssdev->owner = THIS_MODULE; + dssdev->port_num = 1; + + in = ddata->in; + + r = omapdss_register_output(dssdev); + if (r) { + dev_err(&pdev->dev, "Failed to register output\n"); + goto err_reg; + } + + return 0; +err_reg: +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int __exit tpd_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_output(&ddata->dssdev); + + WARN_ON(omapdss_device_is_enabled(dssdev)); + if (omapdss_device_is_enabled(dssdev)) + tpd_disable(dssdev); + + WARN_ON(omapdss_device_is_connected(dssdev)); + if (omapdss_device_is_connected(dssdev)) + tpd_disconnect(dssdev, dssdev->dst); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id tpd_of_match[] = { + { .compatible = "omapdss,ti,tpd12s015", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, tpd_of_match); + +static struct platform_driver tpd_driver = { + .probe = tpd_probe, + .remove = __exit_p(tpd_remove), + .driver = { + .name = "tpd12s015", + .of_match_table = tpd_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(tpd_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("TPD12S015 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c new file mode 100644 index 000000000000..e780fd4f8b46 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -0,0 +1,328 @@ +/* + * Generic MIPI DPI Panel Driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> +#include <video/of_display_timing.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + int data_lines; + + struct omap_video_timings videomode; + + /* used for non-DT boot, to be removed */ + int backlight_gpio; + + struct gpio_desc *enable_gpio; +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static int panel_dpi_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dpi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void panel_dpi_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dpi->disconnect(in, dssdev); +} + +static int panel_dpi_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + if (ddata->data_lines) + in->ops.dpi->set_data_lines(in, ddata->data_lines); + in->ops.dpi->set_timings(in, &ddata->videomode); + + r = in->ops.dpi->enable(in); + if (r) + return r; + + gpiod_set_value_cansleep(ddata->enable_gpio, 1); + + if (gpio_is_valid(ddata->backlight_gpio)) + gpio_set_value_cansleep(ddata->backlight_gpio, 1); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void panel_dpi_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + if (gpio_is_valid(ddata->backlight_gpio)) + gpio_set_value_cansleep(ddata->backlight_gpio, 0); + + gpiod_set_value_cansleep(ddata->enable_gpio, 0); + + in->ops.dpi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void panel_dpi_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->videomode = *timings; + dssdev->panel.timings = *timings; + + in->ops.dpi->set_timings(in, timings); +} + +static void panel_dpi_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->videomode; +} + +static int panel_dpi_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.dpi->check_timings(in, timings); +} + +static struct omap_dss_driver panel_dpi_ops = { + .connect = panel_dpi_connect, + .disconnect = panel_dpi_disconnect, + + .enable = panel_dpi_enable, + .disable = panel_dpi_disable, + + .set_timings = panel_dpi_set_timings, + .get_timings = panel_dpi_get_timings, + .check_timings = panel_dpi_check_timings, + + .get_resolution = omapdss_default_get_resolution, +}; + +static int panel_dpi_probe_pdata(struct platform_device 
*pdev) +{ + const struct panel_dpi_platform_data *pdata; + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev, *in; + struct videomode vm; + int r; + + pdata = dev_get_platdata(&pdev->dev); + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&pdev->dev, "failed to find video source '%s'\n", + pdata->source); + return -EPROBE_DEFER; + } + + ddata->in = in; + + ddata->data_lines = pdata->data_lines; + + videomode_from_timing(pdata->display_timing, &vm); + videomode_to_omap_video_timings(&vm, &ddata->videomode); + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + r = devm_gpio_request_one(&pdev->dev, pdata->enable_gpio, + GPIOF_OUT_INIT_LOW, "panel enable"); + if (r) + goto err_gpio; + + ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio); + + ddata->backlight_gpio = pdata->backlight_gpio; + + return 0; + +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int panel_dpi_probe_of(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + struct omap_dss_device *in; + int r; + struct display_timing timing; + struct videomode vm; + struct gpio_desc *gpio; + + gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + ddata->enable_gpio = gpio; + + ddata->backlight_gpio = -ENOENT; + + r = of_get_display_timing(node, "panel-timing", &timing); + if (r) { + dev_err(&pdev->dev, "failed to get video timing\n"); + return r; + } + + videomode_from_timing(&timing, &vm); + videomode_to_omap_video_timings(&vm, &ddata->videomode); + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int panel_dpi_probe(struct platform_device *pdev) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (ddata == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + + if (dev_get_platdata(&pdev->dev)) { + r = panel_dpi_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = panel_dpi_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + if (gpio_is_valid(ddata->backlight_gpio)) { + r = devm_gpio_request_one(&pdev->dev, ddata->backlight_gpio, + GPIOF_OUT_INIT_LOW, "panel backlight"); + if (r) + goto err_gpio; + } + + dssdev = &ddata->dssdev; + dssdev->dev = &pdev->dev; + dssdev->driver = &panel_dpi_ops; + dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = ddata->videomode; + dssdev->phy.dpi.data_lines = ddata->data_lines; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&pdev->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int __exit panel_dpi_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_display(dssdev); + + panel_dpi_disable(dssdev); + panel_dpi_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id panel_dpi_of_match[] = { + { .compatible = "omapdss,panel-dpi", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, panel_dpi_of_match); + 
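As a usage illustration for the legacy probe path above: panel_dpi_probe_pdata() expects a board file to register a "panel-dpi" platform device whose platform data names the DPI source output, the data-line count, a display timing and the enable/backlight GPIOs. The sketch below only mirrors the fields referenced by that function; the source name "dpi.0", the GPIO numbers and the timing values are illustrative assumptions, and the authoritative panel_dpi_platform_data layout is the one in <video/omap-panel-data.h>.

#include <linux/platform_device.h>
#include <video/display_timing.h>
#include <video/omap-panel-data.h>

/* Illustrative only: timing numbers and GPIOs are made up for the example. */
static const struct display_timing board_lcd_timing = {
	.pixelclock	= { 33500000, 33500000, 33500000 },
	.hactive	= { 800, 800, 800 },
	.hfront_porch	= { 164, 164, 164 },
	.hback_porch	= { 89, 89, 89 },
	.hsync_len	= { 10, 10, 10 },
	.vactive	= { 480, 480, 480 },
	.vfront_porch	= { 10, 10, 10 },
	.vback_porch	= { 23, 23, 23 },
	.vsync_len	= { 10, 10, 10 },
};

static struct panel_dpi_platform_data board_lcd_pdata = {
	.name		= "lcd",
	.source		= "dpi.0",	/* must match an omapdss output name */
	.data_lines	= 24,
	.display_timing	= &board_lcd_timing,
	.enable_gpio	= 27,		/* panel enable GPIO */
	.backlight_gpio	= -1,		/* no backlight GPIO on this board */
};

/* Registered from board init code with platform_device_register(). */
static struct platform_device board_lcd_device = {
	.name			= "panel-dpi",
	.id			= 0,
	.dev.platform_data	= &board_lcd_pdata,
};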
+static struct platform_driver panel_dpi_driver = { + .probe = panel_dpi_probe, + .remove = __exit_p(panel_dpi_remove), + .driver = { + .name = "panel-dpi", + .of_match_table = panel_dpi_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(panel_dpi_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("Generic MIPI DPI Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c new file mode 100644 index 000000000000..3414c2609320 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -0,0 +1,1388 @@ +/* + * Generic DSI Command Mode panel driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +/* #define DEBUG */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/fb.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> +#include <video/mipi_display.h> + +/* DSI Virtual channel. Hardcoded for now. */ +#define TCH 0 + +#define DCS_READ_NUM_ERRORS 0x05 +#define DCS_BRIGHTNESS 0x51 +#define DCS_CTRL_DISPLAY 0x53 +#define DCS_GET_ID1 0xda +#define DCS_GET_ID2 0xdb +#define DCS_GET_ID3 0xdc + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct omap_video_timings timings; + + struct platform_device *pdev; + + struct mutex lock; + + struct backlight_device *bldev; + + unsigned long hw_guard_end; /* next value of jiffies when we can + * issue the next sleep in/out command + */ + unsigned long hw_guard_wait; /* max guard time in jiffies */ + + /* panel HW configuration from DT or platform data */ + int reset_gpio; + int ext_te_gpio; + + bool use_dsi_backlight; + + struct omap_dsi_pin_config pin_config; + + /* runtime variables */ + bool enabled; + + bool te_enabled; + + atomic_t do_update; + int channel; + + struct delayed_work te_timeout_work; + + bool intro_printed; + + struct workqueue_struct *workqueue; + + bool ulps_enabled; + unsigned ulps_timeout; + struct delayed_work ulps_work; +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static irqreturn_t dsicm_te_isr(int irq, void *data); +static void dsicm_te_timeout_work_callback(struct work_struct *work); +static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable); + +static int dsicm_panel_reset(struct panel_drv_data *ddata); + +static void dsicm_ulps_work(struct work_struct *work); + +static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec) +{ + ddata->hw_guard_wait = msecs_to_jiffies(guard_msec); + ddata->hw_guard_end = jiffies + ddata->hw_guard_wait; +} + +static void hw_guard_wait(struct panel_drv_data *ddata) +{ + unsigned long wait = ddata->hw_guard_end - jiffies; + + if ((long)wait > 0 && wait <= ddata->hw_guard_wait) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(wait); + } +} + +static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) +{ + struct omap_dss_device *in = 
ddata->in; + int r; + u8 buf[1]; + + r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, buf, 1); + + if (r < 0) + return r; + + *data = buf[0]; + + return 0; +} + +static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd) +{ + struct omap_dss_device *in = ddata->in; + return in->ops.dsi->dcs_write(in, ddata->channel, &dcs_cmd, 1); +} + +static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) +{ + struct omap_dss_device *in = ddata->in; + u8 buf[2] = { dcs_cmd, param }; + + return in->ops.dsi->dcs_write(in, ddata->channel, buf, 2); +} + +static int dsicm_sleep_in(struct panel_drv_data *ddata) + +{ + struct omap_dss_device *in = ddata->in; + u8 cmd; + int r; + + hw_guard_wait(ddata); + + cmd = MIPI_DCS_ENTER_SLEEP_MODE; + r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, &cmd, 1); + if (r) + return r; + + hw_guard_start(ddata, 120); + + usleep_range(5000, 10000); + + return 0; +} + +static int dsicm_sleep_out(struct panel_drv_data *ddata) +{ + int r; + + hw_guard_wait(ddata); + + r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE); + if (r) + return r; + + hw_guard_start(ddata, 120); + + usleep_range(5000, 10000); + + return 0; +} + +static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3) +{ + int r; + + r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1); + if (r) + return r; + r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2); + if (r) + return r; + r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3); + if (r) + return r; + + return 0; +} + +static int dsicm_set_update_window(struct panel_drv_data *ddata, + u16 x, u16 y, u16 w, u16 h) +{ + struct omap_dss_device *in = ddata->in; + int r; + u16 x1 = x; + u16 x2 = x + w - 1; + u16 y1 = y; + u16 y2 = y + h - 1; + + u8 buf[5]; + buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS; + buf[1] = (x1 >> 8) & 0xff; + buf[2] = (x1 >> 0) & 0xff; + buf[3] = (x2 >> 8) & 0xff; + buf[4] = (x2 >> 0) & 0xff; + + r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + if (r) + return r; + + buf[0] = MIPI_DCS_SET_PAGE_ADDRESS; + buf[1] = (y1 >> 8) & 0xff; + buf[2] = (y1 >> 0) & 0xff; + buf[3] = (y2 >> 8) & 0xff; + buf[4] = (y2 >> 0) & 0xff; + + r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + if (r) + return r; + + in->ops.dsi->bta_sync(in, ddata->channel); + + return r; +} + +static void dsicm_queue_ulps_work(struct panel_drv_data *ddata) +{ + if (ddata->ulps_timeout > 0) + queue_delayed_work(ddata->workqueue, &ddata->ulps_work, + msecs_to_jiffies(ddata->ulps_timeout)); +} + +static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata) +{ + cancel_delayed_work(&ddata->ulps_work); +} + +static int dsicm_enter_ulps(struct panel_drv_data *ddata) +{ + struct omap_dss_device *in = ddata->in; + int r; + + if (ddata->ulps_enabled) + return 0; + + dsicm_cancel_ulps_work(ddata); + + r = _dsicm_enable_te(ddata, false); + if (r) + goto err; + + if (gpio_is_valid(ddata->ext_te_gpio)) + disable_irq(gpio_to_irq(ddata->ext_te_gpio)); + + in->ops.dsi->disable(in, false, true); + + ddata->ulps_enabled = true; + + return 0; + +err: + dev_err(&ddata->pdev->dev, "enter ULPS failed"); + dsicm_panel_reset(ddata); + + ddata->ulps_enabled = false; + + dsicm_queue_ulps_work(ddata); + + return r; +} + +static int dsicm_exit_ulps(struct panel_drv_data *ddata) +{ + struct omap_dss_device *in = ddata->in; + int r; + + if (!ddata->ulps_enabled) + return 0; + + r = in->ops.dsi->enable(in); + if (r) { + dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); + goto err1; + } + + 
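	/*
	 * The DSI link is powered back up at this point: switch the virtual
	 * channel back to HS mode, restore the tearing-effect setting and the
	 * external TE interrupt, then re-arm the ULPS inactivity timer.
	 */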
in->ops.dsi->enable_hs(in, ddata->channel, true); + + r = _dsicm_enable_te(ddata, true); + if (r) { + dev_err(&ddata->pdev->dev, "failed to re-enable TE"); + goto err2; + } + + if (gpio_is_valid(ddata->ext_te_gpio)) + enable_irq(gpio_to_irq(ddata->ext_te_gpio)); + + dsicm_queue_ulps_work(ddata); + + ddata->ulps_enabled = false; + + return 0; + +err2: + dev_err(&ddata->pdev->dev, "failed to exit ULPS"); + + r = dsicm_panel_reset(ddata); + if (!r) { + if (gpio_is_valid(ddata->ext_te_gpio)) + enable_irq(gpio_to_irq(ddata->ext_te_gpio)); + ddata->ulps_enabled = false; + } +err1: + dsicm_queue_ulps_work(ddata); + + return r; +} + +static int dsicm_wake_up(struct panel_drv_data *ddata) +{ + if (ddata->ulps_enabled) + return dsicm_exit_ulps(ddata); + + dsicm_cancel_ulps_work(ddata); + dsicm_queue_ulps_work(ddata); + return 0; +} + +static int dsicm_bl_update_status(struct backlight_device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); + struct omap_dss_device *in = ddata->in; + int r; + int level; + + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) + level = dev->props.brightness; + else + level = 0; + + dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level); + + mutex_lock(&ddata->lock); + + if (ddata->enabled) { + in->ops.dsi->bus_lock(in); + + r = dsicm_wake_up(ddata); + if (!r) + r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level); + + in->ops.dsi->bus_unlock(in); + } else { + r = 0; + } + + mutex_unlock(&ddata->lock); + + return r; +} + +static int dsicm_bl_get_intensity(struct backlight_device *dev) +{ + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) + return dev->props.brightness; + + return 0; +} + +static const struct backlight_ops dsicm_bl_ops = { + .get_brightness = dsicm_bl_get_intensity, + .update_status = dsicm_bl_update_status, +}; + +static void dsicm_get_resolution(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres) +{ + *xres = dssdev->panel.timings.x_res; + *yres = dssdev->panel.timings.y_res; +} + +static ssize_t dsicm_num_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *in = ddata->in; + u8 errors = 0; + int r; + + mutex_lock(&ddata->lock); + + if (ddata->enabled) { + in->ops.dsi->bus_lock(in); + + r = dsicm_wake_up(ddata); + if (!r) + r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS, + &errors); + + in->ops.dsi->bus_unlock(in); + } else { + r = -ENODEV; + } + + mutex_unlock(&ddata->lock); + + if (r) + return r; + + return snprintf(buf, PAGE_SIZE, "%d\n", errors); +} + +static ssize_t dsicm_hw_revision_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *in = ddata->in; + u8 id1, id2, id3; + int r; + + mutex_lock(&ddata->lock); + + if (ddata->enabled) { + in->ops.dsi->bus_lock(in); + + r = dsicm_wake_up(ddata); + if (!r) + r = dsicm_get_id(ddata, &id1, &id2, &id3); + + in->ops.dsi->bus_unlock(in); + } else { + r = -ENODEV; + } + + mutex_unlock(&ddata->lock); + + if (r) + return r; + + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); +} + +static ssize_t dsicm_store_ulps(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); 
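	/*
	 * sysfs "ulps" store handler: a non-zero write forces the link into
	 * ULPS via dsicm_enter_ulps(), a zero write wakes it up via
	 * dsicm_wake_up(); the matching DEVICE_ATTR() entries are defined
	 * further below.
	 */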
+ struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *in = ddata->in; + unsigned long t; + int r; + + r = kstrtoul(buf, 0, &t); + if (r) + return r; + + mutex_lock(&ddata->lock); + + if (ddata->enabled) { + in->ops.dsi->bus_lock(in); + + if (t) + r = dsicm_enter_ulps(ddata); + else + r = dsicm_wake_up(ddata); + + in->ops.dsi->bus_unlock(in); + } + + mutex_unlock(&ddata->lock); + + if (r) + return r; + + return count; +} + +static ssize_t dsicm_show_ulps(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + unsigned t; + + mutex_lock(&ddata->lock); + t = ddata->ulps_enabled; + mutex_unlock(&ddata->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static ssize_t dsicm_store_ulps_timeout(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *in = ddata->in; + unsigned long t; + int r; + + r = kstrtoul(buf, 0, &t); + if (r) + return r; + + mutex_lock(&ddata->lock); + ddata->ulps_timeout = t; + + if (ddata->enabled) { + /* dsicm_wake_up will restart the timer */ + in->ops.dsi->bus_lock(in); + r = dsicm_wake_up(ddata); + in->ops.dsi->bus_unlock(in); + } + + mutex_unlock(&ddata->lock); + + if (r) + return r; + + return count; +} + +static ssize_t dsicm_show_ulps_timeout(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + unsigned t; + + mutex_lock(&ddata->lock); + t = ddata->ulps_timeout; + mutex_unlock(&ddata->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL); +static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL); +static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR, + dsicm_show_ulps, dsicm_store_ulps); +static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR, + dsicm_show_ulps_timeout, dsicm_store_ulps_timeout); + +static struct attribute *dsicm_attrs[] = { + &dev_attr_num_dsi_errors.attr, + &dev_attr_hw_revision.attr, + &dev_attr_ulps.attr, + &dev_attr_ulps_timeout.attr, + NULL, +}; + +static struct attribute_group dsicm_attr_group = { + .attrs = dsicm_attrs, +}; + +static void dsicm_hw_reset(struct panel_drv_data *ddata) +{ + if (!gpio_is_valid(ddata->reset_gpio)) + return; + + gpio_set_value(ddata->reset_gpio, 1); + udelay(10); + /* reset the panel */ + gpio_set_value(ddata->reset_gpio, 0); + /* assert reset */ + udelay(10); + gpio_set_value(ddata->reset_gpio, 1); + /* wait after releasing reset */ + usleep_range(5000, 10000); +} + +static int dsicm_power_on(struct panel_drv_data *ddata) +{ + struct omap_dss_device *in = ddata->in; + u8 id1, id2, id3; + int r; + struct omap_dss_dsi_config dsi_config = { + .mode = OMAP_DSS_DSI_CMD_MODE, + .pixel_format = OMAP_DSS_DSI_FMT_RGB888, + .timings = &ddata->timings, + .hs_clk_min = 150000000, + .hs_clk_max = 300000000, + .lp_clk_min = 7000000, + .lp_clk_max = 10000000, + }; + + if (ddata->pin_config.num_pins > 0) { + r = in->ops.dsi->configure_pins(in, &ddata->pin_config); + if (r) { + dev_err(&ddata->pdev->dev, + "failed to configure DSI pins\n"); + goto err0; + } + } + + r = in->ops.dsi->set_config(in, &dsi_config); + if (r) { + dev_err(&ddata->pdev->dev, "failed to 
configure DSI\n"); + goto err0; + } + + r = in->ops.dsi->enable(in); + if (r) { + dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); + goto err0; + } + + dsicm_hw_reset(ddata); + + in->ops.dsi->enable_hs(in, ddata->channel, false); + + r = dsicm_sleep_out(ddata); + if (r) + goto err; + + r = dsicm_get_id(ddata, &id1, &id2, &id3); + if (r) + goto err; + + r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff); + if (r) + goto err; + + r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY, + (1<<2) | (1<<5)); /* BL | BCTRL */ + if (r) + goto err; + + r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT, + MIPI_DCS_PIXEL_FMT_24BIT); + if (r) + goto err; + + r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON); + if (r) + goto err; + + r = _dsicm_enable_te(ddata, ddata->te_enabled); + if (r) + goto err; + + r = in->ops.dsi->enable_video_output(in, ddata->channel); + if (r) + goto err; + + ddata->enabled = 1; + + if (!ddata->intro_printed) { + dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n", + id1, id2, id3); + ddata->intro_printed = true; + } + + in->ops.dsi->enable_hs(in, ddata->channel, true); + + return 0; +err: + dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n"); + + dsicm_hw_reset(ddata); + + in->ops.dsi->disable(in, true, false); +err0: + return r; +} + +static void dsicm_power_off(struct panel_drv_data *ddata) +{ + struct omap_dss_device *in = ddata->in; + int r; + + in->ops.dsi->disable_video_output(in, ddata->channel); + + r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF); + if (!r) + r = dsicm_sleep_in(ddata); + + if (r) { + dev_err(&ddata->pdev->dev, + "error disabling panel, issuing HW reset\n"); + dsicm_hw_reset(ddata); + } + + in->ops.dsi->disable(in, true, false); + + ddata->enabled = 0; +} + +static int dsicm_panel_reset(struct panel_drv_data *ddata) +{ + dev_err(&ddata->pdev->dev, "performing LCD reset\n"); + + dsicm_power_off(ddata); + dsicm_hw_reset(ddata); + return dsicm_power_on(ddata); +} + +static int dsicm_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + struct device *dev = &ddata->pdev->dev; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dsi->connect(in, dssdev); + if (r) { + dev_err(dev, "Failed to connect to video source\n"); + return r; + } + + r = in->ops.dsi->request_vc(ddata->in, &ddata->channel); + if (r) { + dev_err(dev, "failed to get virtual channel\n"); + goto err_req_vc; + } + + r = in->ops.dsi->set_vc_id(ddata->in, ddata->channel, TCH); + if (r) { + dev_err(dev, "failed to set VC_ID\n"); + goto err_vc_id; + } + + return 0; + +err_vc_id: + in->ops.dsi->release_vc(ddata->in, ddata->channel); +err_req_vc: + in->ops.dsi->disconnect(in, dssdev); + return r; +} + +static void dsicm_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dsi->release_vc(in, ddata->channel); + in->ops.dsi->disconnect(in, dssdev); +} + +static int dsicm_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(&ddata->pdev->dev, "enable\n"); + + mutex_lock(&ddata->lock); + + if (!omapdss_device_is_connected(dssdev)) { + r = -ENODEV; + goto err; + } + + if (omapdss_device_is_enabled(dssdev)) { + r = 0; + goto err; + } + + in->ops.dsi->bus_lock(in); + + r = 
dsicm_power_on(ddata); + + in->ops.dsi->bus_unlock(in); + + if (r) + goto err; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + mutex_unlock(&ddata->lock); + + return 0; +err: + dev_dbg(&ddata->pdev->dev, "enable failed\n"); + mutex_unlock(&ddata->lock); + return r; +} + +static void dsicm_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(&ddata->pdev->dev, "disable\n"); + + mutex_lock(&ddata->lock); + + dsicm_cancel_ulps_work(ddata); + + in->ops.dsi->bus_lock(in); + + if (omapdss_device_is_enabled(dssdev)) { + r = dsicm_wake_up(ddata); + if (!r) + dsicm_power_off(ddata); + } + + in->ops.dsi->bus_unlock(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; + + mutex_unlock(&ddata->lock); +} + +static void dsicm_framedone_cb(int err, void *data) +{ + struct panel_drv_data *ddata = data; + struct omap_dss_device *in = ddata->in; + + dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err); + in->ops.dsi->bus_unlock(ddata->in); +} + +static irqreturn_t dsicm_te_isr(int irq, void *data) +{ + struct panel_drv_data *ddata = data; + struct omap_dss_device *in = ddata->in; + int old; + int r; + + old = atomic_cmpxchg(&ddata->do_update, 1, 0); + + if (old) { + cancel_delayed_work(&ddata->te_timeout_work); + + r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb, + ddata); + if (r) + goto err; + } + + return IRQ_HANDLED; +err: + dev_err(&ddata->pdev->dev, "start update failed\n"); + in->ops.dsi->bus_unlock(in); + return IRQ_HANDLED; +} + +static void dsicm_te_timeout_work_callback(struct work_struct *work) +{ + struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, + te_timeout_work.work); + struct omap_dss_device *in = ddata->in; + + dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n"); + + atomic_set(&ddata->do_update, 0); + in->ops.dsi->bus_unlock(in); +} + +static int dsicm_update(struct omap_dss_device *dssdev, + u16 x, u16 y, u16 w, u16 h) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); + + mutex_lock(&ddata->lock); + in->ops.dsi->bus_lock(in); + + r = dsicm_wake_up(ddata); + if (r) + goto err; + + if (!ddata->enabled) { + r = 0; + goto err; + } + + /* XXX no need to send this every frame, but dsi break if not done */ + r = dsicm_set_update_window(ddata, 0, 0, + dssdev->panel.timings.x_res, + dssdev->panel.timings.y_res); + if (r) + goto err; + + if (ddata->te_enabled && gpio_is_valid(ddata->ext_te_gpio)) { + schedule_delayed_work(&ddata->te_timeout_work, + msecs_to_jiffies(250)); + atomic_set(&ddata->do_update, 1); + } else { + r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb, + ddata); + if (r) + goto err; + } + + /* note: no bus_unlock here. 
unlock is in framedone_cb */ + mutex_unlock(&ddata->lock); + return 0; +err: + in->ops.dsi->bus_unlock(in); + mutex_unlock(&ddata->lock); + return r; +} + +static int dsicm_sync(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(&ddata->pdev->dev, "sync\n"); + + mutex_lock(&ddata->lock); + in->ops.dsi->bus_lock(in); + in->ops.dsi->bus_unlock(in); + mutex_unlock(&ddata->lock); + + dev_dbg(&ddata->pdev->dev, "sync done\n"); + + return 0; +} + +static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) +{ + struct omap_dss_device *in = ddata->in; + int r; + + if (enable) + r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0); + else + r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF); + + if (!gpio_is_valid(ddata->ext_te_gpio)) + in->ops.dsi->enable_te(in, enable); + + /* possible panel bug */ + msleep(100); + + return r; +} + +static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + mutex_lock(&ddata->lock); + + if (ddata->te_enabled == enable) + goto end; + + in->ops.dsi->bus_lock(in); + + if (ddata->enabled) { + r = dsicm_wake_up(ddata); + if (r) + goto err; + + r = _dsicm_enable_te(ddata, enable); + if (r) + goto err; + } + + ddata->te_enabled = enable; + + in->ops.dsi->bus_unlock(in); +end: + mutex_unlock(&ddata->lock); + + return 0; +err: + in->ops.dsi->bus_unlock(in); + mutex_unlock(&ddata->lock); + + return r; +} + +static int dsicm_get_te(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + int r; + + mutex_lock(&ddata->lock); + r = ddata->te_enabled; + mutex_unlock(&ddata->lock); + + return r; +} + +static int dsicm_memory_read(struct omap_dss_device *dssdev, + void *buf, size_t size, + u16 x, u16 y, u16 w, u16 h) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + int first = 1; + int plen; + unsigned buf_used = 0; + + if (size < w * h * 3) + return -ENOMEM; + + mutex_lock(&ddata->lock); + + if (!ddata->enabled) { + r = -ENODEV; + goto err1; + } + + size = min(w * h * 3, + dssdev->panel.timings.x_res * + dssdev->panel.timings.y_res * 3); + + in->ops.dsi->bus_lock(in); + + r = dsicm_wake_up(ddata); + if (r) + goto err2; + + /* plen 1 or 2 goes into short packet. until checksum error is fixed, + * use short packets. plen 32 works, but bigger packets seem to cause + * an error. */ + if (size % 2) + plen = 1; + else + plen = 2; + + dsicm_set_update_window(ddata, x, y, w, h); + + r = in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, plen); + if (r) + goto err2; + + while (buf_used < size) { + u8 dcs_cmd = first ? 
0x2e : 0x3e; + first = 0; + + r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, + buf + buf_used, size - buf_used); + + if (r < 0) { + dev_err(dssdev->dev, "read error\n"); + goto err3; + } + + buf_used += r; + + if (r < plen) { + dev_err(&ddata->pdev->dev, "short read\n"); + break; + } + + if (signal_pending(current)) { + dev_err(&ddata->pdev->dev, "signal pending, " + "aborting memory read\n"); + r = -ERESTARTSYS; + goto err3; + } + } + + r = buf_used; + +err3: + in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, 1); +err2: + in->ops.dsi->bus_unlock(in); +err1: + mutex_unlock(&ddata->lock); + return r; +} + +static void dsicm_ulps_work(struct work_struct *work) +{ + struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, + ulps_work.work); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + mutex_lock(&ddata->lock); + + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) { + mutex_unlock(&ddata->lock); + return; + } + + in->ops.dsi->bus_lock(in); + + dsicm_enter_ulps(ddata); + + in->ops.dsi->bus_unlock(in); + mutex_unlock(&ddata->lock); +} + +static struct omap_dss_driver dsicm_ops = { + .connect = dsicm_connect, + .disconnect = dsicm_disconnect, + + .enable = dsicm_enable, + .disable = dsicm_disable, + + .update = dsicm_update, + .sync = dsicm_sync, + + .get_resolution = dsicm_get_resolution, + .get_recommended_bpp = omapdss_default_get_recommended_bpp, + + .enable_te = dsicm_enable_te, + .get_te = dsicm_get_te, + + .memory_read = dsicm_memory_read, +}; + +static int dsicm_probe_pdata(struct platform_device *pdev) +{ + const struct panel_dsicm_platform_data *pdata; + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev, *in; + + pdata = dev_get_platdata(&pdev->dev); + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&pdev->dev, "failed to find video source\n"); + return -EPROBE_DEFER; + } + ddata->in = in; + + ddata->reset_gpio = pdata->reset_gpio; + + if (pdata->use_ext_te) + ddata->ext_te_gpio = pdata->ext_te_gpio; + else + ddata->ext_te_gpio = -1; + + ddata->ulps_timeout = pdata->ulps_timeout; + + ddata->use_dsi_backlight = pdata->use_dsi_backlight; + + ddata->pin_config = pdata->pin_config; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int dsicm_probe_of(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *in; + int gpio; + + gpio = of_get_named_gpio(node, "reset-gpios", 0); + if (!gpio_is_valid(gpio)) { + dev_err(&pdev->dev, "failed to parse reset gpio\n"); + return gpio; + } + ddata->reset_gpio = gpio; + + gpio = of_get_named_gpio(node, "te-gpios", 0); + if (gpio_is_valid(gpio) || gpio == -ENOENT) { + ddata->ext_te_gpio = gpio; + } else { + dev_err(&pdev->dev, "failed to parse TE gpio\n"); + return gpio; + } + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + /* TODO: ulps, backlight */ + + return 0; +} + +static int dsicm_probe(struct platform_device *pdev) +{ + struct backlight_properties props; + struct panel_drv_data *ddata; + struct backlight_device *bldev = NULL; + struct device *dev = &pdev->dev; + struct omap_dss_device *dssdev; + int r; + + dev_dbg(dev, "probe\n"); + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return 
-ENOMEM; + + platform_set_drvdata(pdev, ddata); + ddata->pdev = pdev; + + if (dev_get_platdata(dev)) { + r = dsicm_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = dsicm_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + ddata->timings.x_res = 864; + ddata->timings.y_res = 480; + ddata->timings.pixelclock = 864 * 480 * 60; + + dssdev = &ddata->dssdev; + dssdev->dev = dev; + dssdev->driver = &dsicm_ops; + dssdev->panel.timings = ddata->timings; + dssdev->type = OMAP_DISPLAY_TYPE_DSI; + dssdev->owner = THIS_MODULE; + + dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; + dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | + OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(dev, "Failed to register panel\n"); + goto err_reg; + } + + mutex_init(&ddata->lock); + + atomic_set(&ddata->do_update, 0); + + if (gpio_is_valid(ddata->reset_gpio)) { + r = devm_gpio_request_one(dev, ddata->reset_gpio, + GPIOF_OUT_INIT_LOW, "taal rst"); + if (r) { + dev_err(dev, "failed to request reset gpio\n"); + return r; + } + } + + if (gpio_is_valid(ddata->ext_te_gpio)) { + r = devm_gpio_request_one(dev, ddata->ext_te_gpio, + GPIOF_IN, "taal irq"); + if (r) { + dev_err(dev, "GPIO request failed\n"); + return r; + } + + r = devm_request_irq(dev, gpio_to_irq(ddata->ext_te_gpio), + dsicm_te_isr, + IRQF_TRIGGER_RISING, + "taal vsync", ddata); + + if (r) { + dev_err(dev, "IRQ request failed\n"); + return r; + } + + INIT_DEFERRABLE_WORK(&ddata->te_timeout_work, + dsicm_te_timeout_work_callback); + + dev_dbg(dev, "Using GPIO TE\n"); + } + + ddata->workqueue = create_singlethread_workqueue("dsicm_wq"); + if (ddata->workqueue == NULL) { + dev_err(dev, "can't create workqueue\n"); + return -ENOMEM; + } + INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work); + + dsicm_hw_reset(ddata); + + if (ddata->use_dsi_backlight) { + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 255; + + props.type = BACKLIGHT_RAW; + bldev = backlight_device_register(dev_name(dev), + dev, ddata, &dsicm_bl_ops, &props); + if (IS_ERR(bldev)) { + r = PTR_ERR(bldev); + goto err_bl; + } + + ddata->bldev = bldev; + + bldev->props.fb_blank = FB_BLANK_UNBLANK; + bldev->props.power = FB_BLANK_UNBLANK; + bldev->props.brightness = 255; + + dsicm_bl_update_status(bldev); + } + + r = sysfs_create_group(&dev->kobj, &dsicm_attr_group); + if (r) { + dev_err(dev, "failed to create sysfs files\n"); + goto err_sysfs_create; + } + + return 0; + +err_sysfs_create: + if (bldev != NULL) + backlight_device_unregister(bldev); +err_bl: + destroy_workqueue(ddata->workqueue); +err_reg: + return r; +} + +static int __exit dsicm_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct backlight_device *bldev; + + dev_dbg(&pdev->dev, "remove\n"); + + omapdss_unregister_display(dssdev); + + dsicm_disable(dssdev); + dsicm_disconnect(dssdev); + + sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); + + bldev = ddata->bldev; + if (bldev != NULL) { + bldev->props.power = FB_BLANK_POWERDOWN; + dsicm_bl_update_status(bldev); + backlight_device_unregister(bldev); + } + + omap_dss_put_device(ddata->in); + + dsicm_cancel_ulps_work(ddata); + destroy_workqueue(ddata->workqueue); + + /* reset, to be sure that the panel is in a valid state */ + dsicm_hw_reset(ddata); + + return 0; +} + +static const struct of_device_id dsicm_of_match[] = { + { .compatible = 
"omapdss,panel-dsi-cm", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, dsicm_of_match); + +static struct platform_driver dsicm_driver = { + .probe = dsicm_probe, + .remove = __exit_p(dsicm_remove), + .driver = { + .name = "panel-dsi-cm", + .of_match_table = dsicm_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(dsicm_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c new file mode 100644 index 000000000000..18eb60e9c9ec --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -0,0 +1,404 @@ +/* + * LG.Philips LB035Q02 LCD Panel driver + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * Based on a driver by: Steve Sakoman <steve@sakoman.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <linux/mutex.h> +#include <linux/gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +static struct omap_video_timings lb035q02_timings = { + .x_res = 320, + .y_res = 240, + + .pixelclock = 6500000, + + .hsw = 2, + .hfp = 20, + .hbp = 68, + + .vsw = 2, + .vfp = 4, + .vbp = 18, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, +}; + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct spi_device *spi; + + int data_lines; + + struct omap_video_timings videomode; + + /* used for non-DT boot, to be removed */ + int backlight_gpio; + + struct gpio_desc *enable_gpio; +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val) +{ + struct spi_message msg; + struct spi_transfer index_xfer = { + .len = 3, + .cs_change = 1, + }; + struct spi_transfer value_xfer = { + .len = 3, + }; + u8 buffer[16]; + + spi_message_init(&msg); + + /* register index */ + buffer[0] = 0x70; + buffer[1] = 0x00; + buffer[2] = reg & 0x7f; + index_xfer.tx_buf = buffer; + spi_message_add_tail(&index_xfer, &msg); + + /* register value */ + buffer[4] = 0x72; + buffer[5] = val >> 8; + buffer[6] = val; + value_xfer.tx_buf = buffer + 4; + spi_message_add_tail(&value_xfer, &msg); + + return spi_sync(spi, &msg); +} + +static void init_lb035q02_panel(struct spi_device *spi) +{ + /* Init sequence from page 28 of the lb035q02 spec */ + lb035q02_write_reg(spi, 0x01, 0x6300); + lb035q02_write_reg(spi, 0x02, 0x0200); + lb035q02_write_reg(spi, 0x03, 0x0177); + lb035q02_write_reg(spi, 0x04, 0x04c7); + lb035q02_write_reg(spi, 0x05, 0xffc0); + lb035q02_write_reg(spi, 0x06, 0xe806); + lb035q02_write_reg(spi, 0x0a, 0x4008); + lb035q02_write_reg(spi, 0x0b, 0x0000); + lb035q02_write_reg(spi, 0x0d, 0x0030); + lb035q02_write_reg(spi, 0x0e, 0x2800); + lb035q02_write_reg(spi, 0x0f, 0x0000); + lb035q02_write_reg(spi, 0x16, 0x9f80); + lb035q02_write_reg(spi, 0x17, 0x0a0f); + lb035q02_write_reg(spi, 0x1e, 0x00c1); + lb035q02_write_reg(spi, 0x30, 0x0300); + 
lb035q02_write_reg(spi, 0x31, 0x0007); + lb035q02_write_reg(spi, 0x32, 0x0000); + lb035q02_write_reg(spi, 0x33, 0x0000); + lb035q02_write_reg(spi, 0x34, 0x0707); + lb035q02_write_reg(spi, 0x35, 0x0004); + lb035q02_write_reg(spi, 0x36, 0x0302); + lb035q02_write_reg(spi, 0x37, 0x0202); + lb035q02_write_reg(spi, 0x3a, 0x0a0d); + lb035q02_write_reg(spi, 0x3b, 0x0806); +} + +static int lb035q02_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dpi->connect(in, dssdev); + if (r) + return r; + + init_lb035q02_panel(ddata->spi); + + return 0; +} + +static void lb035q02_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dpi->disconnect(in, dssdev); +} + +static int lb035q02_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + if (ddata->data_lines) + in->ops.dpi->set_data_lines(in, ddata->data_lines); + in->ops.dpi->set_timings(in, &ddata->videomode); + + r = in->ops.dpi->enable(in); + if (r) + return r; + + if (ddata->enable_gpio) + gpiod_set_value_cansleep(ddata->enable_gpio, 1); + + if (gpio_is_valid(ddata->backlight_gpio)) + gpio_set_value_cansleep(ddata->backlight_gpio, 1); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void lb035q02_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + if (ddata->enable_gpio) + gpiod_set_value_cansleep(ddata->enable_gpio, 0); + + if (gpio_is_valid(ddata->backlight_gpio)) + gpio_set_value_cansleep(ddata->backlight_gpio, 0); + + in->ops.dpi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void lb035q02_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->videomode = *timings; + dssdev->panel.timings = *timings; + + in->ops.dpi->set_timings(in, timings); +} + +static void lb035q02_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->videomode; +} + +static int lb035q02_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.dpi->check_timings(in, timings); +} + +static struct omap_dss_driver lb035q02_ops = { + .connect = lb035q02_connect, + .disconnect = lb035q02_disconnect, + + .enable = lb035q02_enable, + .disable = lb035q02_disable, + + .set_timings = lb035q02_set_timings, + .get_timings = lb035q02_get_timings, + .check_timings = lb035q02_check_timings, + + .get_resolution = omapdss_default_get_resolution, +}; + +static int lb035q02_probe_pdata(struct spi_device *spi) +{ + const struct panel_lb035q02_platform_data *pdata; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev, *in; + int r; + + 
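	/*
	 * Legacy (non-DT) path: the panel name, video source, data-line count
	 * and the enable/backlight GPIOs all come from the
	 * panel_lb035q02_platform_data supplied by the board file.
	 */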
pdata = dev_get_platdata(&spi->dev); + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&spi->dev, "failed to find video source '%s'\n", + pdata->source); + return -EPROBE_DEFER; + } + + ddata->in = in; + + ddata->data_lines = pdata->data_lines; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio, + GPIOF_OUT_INIT_LOW, "panel enable"); + if (r) + goto err_gpio; + + ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio); + + ddata->backlight_gpio = pdata->backlight_gpio; + + return 0; +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int lb035q02_probe_of(struct spi_device *spi) +{ + struct device_node *node = spi->dev.of_node; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *in; + struct gpio_desc *gpio; + + gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) { + dev_err(&spi->dev, "failed to parse enable gpio\n"); + return PTR_ERR(gpio); + } + + ddata->enable_gpio = gpio; + + ddata->backlight_gpio = -ENOENT; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&spi->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int lb035q02_panel_spi_probe(struct spi_device *spi) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); + if (ddata == NULL) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, ddata); + + ddata->spi = spi; + + if (dev_get_platdata(&spi->dev)) { + r = lb035q02_probe_pdata(spi); + if (r) + return r; + } else if (spi->dev.of_node) { + r = lb035q02_probe_of(spi); + if (r) + return r; + } else { + return -ENODEV; + } + + if (gpio_is_valid(ddata->backlight_gpio)) { + r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, + GPIOF_OUT_INIT_LOW, "panel backlight"); + if (r) + goto err_gpio; + } + + ddata->videomode = lb035q02_timings; + + dssdev = &ddata->dssdev; + dssdev->dev = &spi->dev; + dssdev->driver = &lb035q02_ops; + dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = ddata->videomode; + dssdev->phy.dpi.data_lines = ddata->data_lines; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&spi->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int lb035q02_panel_spi_remove(struct spi_device *spi) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_display(dssdev); + + lb035q02_disable(dssdev); + lb035q02_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id lb035q02_of_match[] = { + { .compatible = "omapdss,lgphilips,lb035q02", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, lb035q02_of_match); + +static struct spi_driver lb035q02_spi_driver = { + .probe = lb035q02_panel_spi_probe, + .remove = lb035q02_panel_spi_remove, + .driver = { + .name = "panel_lgphilips_lb035q02", + .of_match_table = lb035q02_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_spi_driver(lb035q02_spi_driver); + +MODULE_ALIAS("spi:lgphilips,lb035q02"); +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c new file mode 100644 index 000000000000..8a928c9a2fc9 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -0,0 +1,437 @@ +/* + * NEC NL8048HL11 Panel driver + * + * Copyright (C) 2010 Texas Instruments Inc. + * Author: Erik Gilling <konkers@android.com> + * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <linux/fb.h> +#include <linux/gpio.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct omap_video_timings videomode; + + int data_lines; + + int res_gpio; + int qvga_gpio; + + struct spi_device *spi; +}; + +#define LCD_XRES 800 +#define LCD_YRES 480 +/* + * NEC PIX Clock Ratings + * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz + */ +#define LCD_PIXEL_CLOCK 23800000 + +static const struct { + unsigned char addr; + unsigned char dat; +} nec_8048_init_seq[] = { + { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 }, + { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 }, + { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 }, + { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F }, + { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F }, + { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F }, + { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F }, + { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 }, + { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 }, + { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C }, + { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, + { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 }, + { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 }, + { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 }, + { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC }, + { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, + { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 }, + { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 }, +}; + +static const struct omap_video_timings nec_8048_panel_timings = { + .x_res = LCD_XRES, + .y_res = LCD_YRES, + .pixelclock = LCD_PIXEL_CLOCK, + .hfp = 6, + .hsw = 1, + .hbp = 4, + .vfp = 3, + .vsw = 1, + .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr, + unsigned char reg_data) +{ + int ret = 0; + unsigned int cmd = 0, data = 0; + + cmd = 0x0000 | reg_addr; /* register address write */ + data = 0x0100 | reg_data; /* register data write */ + data = (cmd << 16) | data; + + ret = spi_write(spi, (unsigned char *)&data, 4); + if (ret) + pr_err("error in spi_write 
%x\n", data); + + return ret; +} + +static int init_nec_8048_wvga_lcd(struct spi_device *spi) +{ + unsigned int i; + /* Initialization Sequence */ + /* nec_8048_spi_send(spi, REG, VAL) */ + for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++) + nec_8048_spi_send(spi, nec_8048_init_seq[i].addr, + nec_8048_init_seq[i].dat); + udelay(20); + nec_8048_spi_send(spi, nec_8048_init_seq[i].addr, + nec_8048_init_seq[i].dat); + return 0; +} + +static int nec_8048_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dpi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void nec_8048_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dpi->disconnect(in, dssdev); +} + +static int nec_8048_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + if (ddata->data_lines) + in->ops.dpi->set_data_lines(in, ddata->data_lines); + in->ops.dpi->set_timings(in, &ddata->videomode); + + r = in->ops.dpi->enable(in); + if (r) + return r; + + if (gpio_is_valid(ddata->res_gpio)) + gpio_set_value_cansleep(ddata->res_gpio, 1); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void nec_8048_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + if (gpio_is_valid(ddata->res_gpio)) + gpio_set_value_cansleep(ddata->res_gpio, 0); + + in->ops.dpi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void nec_8048_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->videomode = *timings; + dssdev->panel.timings = *timings; + + in->ops.dpi->set_timings(in, timings); +} + +static void nec_8048_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->videomode; +} + +static int nec_8048_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.dpi->check_timings(in, timings); +} + +static struct omap_dss_driver nec_8048_ops = { + .connect = nec_8048_connect, + .disconnect = nec_8048_disconnect, + + .enable = nec_8048_enable, + .disable = nec_8048_disable, + + .set_timings = nec_8048_set_timings, + .get_timings = nec_8048_get_timings, + .check_timings = nec_8048_check_timings, + + .get_resolution = omapdss_default_get_resolution, +}; + + +static int nec_8048_probe_pdata(struct spi_device *spi) +{ + const struct panel_nec_nl8048hl11_platform_data *pdata; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev, *in; + + pdata = dev_get_platdata(&spi->dev); + + ddata->qvga_gpio = pdata->qvga_gpio; + ddata->res_gpio = pdata->res_gpio; + + in = 
omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&spi->dev, "failed to find video source '%s'\n", + pdata->source); + return -EPROBE_DEFER; + } + ddata->in = in; + + ddata->data_lines = pdata->data_lines; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int nec_8048_probe_of(struct spi_device *spi) +{ + struct device_node *node = spi->dev.of_node; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *in; + int gpio; + + gpio = of_get_named_gpio(node, "reset-gpios", 0); + if (!gpio_is_valid(gpio)) { + dev_err(&spi->dev, "failed to parse enable gpio\n"); + return gpio; + } + ddata->res_gpio = gpio; + + /* XXX the panel spec doesn't mention any QVGA pin?? */ + ddata->qvga_gpio = -ENOENT; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&spi->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int nec_8048_probe(struct spi_device *spi) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + dev_dbg(&spi->dev, "%s\n", __func__); + + spi->mode = SPI_MODE_0; + spi->bits_per_word = 32; + + r = spi_setup(spi); + if (r < 0) { + dev_err(&spi->dev, "spi_setup failed: %d\n", r); + return r; + } + + init_nec_8048_wvga_lcd(spi); + + ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); + if (ddata == NULL) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, ddata); + + ddata->spi = spi; + + if (dev_get_platdata(&spi->dev)) { + r = nec_8048_probe_pdata(spi); + if (r) + return r; + } else if (spi->dev.of_node) { + r = nec_8048_probe_of(spi); + if (r) + return r; + } else { + return -ENODEV; + } + + if (gpio_is_valid(ddata->qvga_gpio)) { + r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, + GPIOF_OUT_INIT_HIGH, "lcd QVGA"); + if (r) + goto err_gpio; + } + + if (gpio_is_valid(ddata->res_gpio)) { + r = devm_gpio_request_one(&spi->dev, ddata->res_gpio, + GPIOF_OUT_INIT_LOW, "lcd RES"); + if (r) + goto err_gpio; + } + + ddata->videomode = nec_8048_panel_timings; + + dssdev = &ddata->dssdev; + dssdev->dev = &spi->dev; + dssdev->driver = &nec_8048_ops; + dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = ddata->videomode; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&spi->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int nec_8048_remove(struct spi_device *spi) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + dev_dbg(&ddata->spi->dev, "%s\n", __func__); + + omapdss_unregister_display(dssdev); + + nec_8048_disable(dssdev); + nec_8048_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int nec_8048_suspend(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + + nec_8048_spi_send(spi, 2, 0x01); + mdelay(40); + + return 0; +} + +static int nec_8048_resume(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + + /* reinitialize the panel */ + spi_setup(spi); + nec_8048_spi_send(spi, 2, 0x00); + init_nec_8048_wvga_lcd(spi); + + return 0; +} +static SIMPLE_DEV_PM_OPS(nec_8048_pm_ops, nec_8048_suspend, + nec_8048_resume); +#define NEC_8048_PM_OPS (&nec_8048_pm_ops) +#else +#define NEC_8048_PM_OPS NULL +#endif + +static const struct of_device_id 
nec_8048_of_match[] = { + { .compatible = "omapdss,nec,nl8048hl11", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, nec_8048_of_match); + +static struct spi_driver nec_8048_driver = { + .driver = { + .name = "panel-nec-nl8048hl11", + .pm = NEC_8048_PM_OPS, + .of_match_table = nec_8048_of_match, + .suppress_bind_attrs = true, + }, + .probe = nec_8048_probe, + .remove = nec_8048_remove, +}; + +module_spi_driver(nec_8048_driver); + +MODULE_ALIAS("spi:nec,nl8048hl11"); +MODULE_AUTHOR("Erik Gilling <konkers@android.com>"); +MODULE_DESCRIPTION("NEC-NL8048HL11 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c new file mode 100644 index 000000000000..abfd1f6e3327 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -0,0 +1,415 @@ +/* + * LCD panel driver for Sharp LS037V7DW01 + * + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/regulator/consumer.h> +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + struct regulator *vcc; + + int data_lines; + + struct omap_video_timings videomode; + + struct gpio_desc *resb_gpio; /* low = reset active min 20 us */ + struct gpio_desc *ini_gpio; /* high = power on */ + struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */ + struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */ + struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */ +}; + +static const struct omap_video_timings sharp_ls_timings = { + .x_res = 480, + .y_res = 640, + + .pixelclock = 19200000, + + .hsw = 2, + .hfp = 1, + .hbp = 28, + + .vsw = 1, + .vfp = 1, + .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static int sharp_ls_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dpi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void sharp_ls_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dpi->disconnect(in, dssdev); +} + +static int sharp_ls_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + if (ddata->data_lines) + in->ops.dpi->set_data_lines(in, ddata->data_lines); + in->ops.dpi->set_timings(in, &ddata->videomode); + + if (ddata->vcc) { + r = 
regulator_enable(ddata->vcc); + if (r != 0) + return r; + } + + r = in->ops.dpi->enable(in); + if (r) { + regulator_disable(ddata->vcc); + return r; + } + + /* wait couple of vsyncs until enabling the LCD */ + msleep(50); + + if (ddata->resb_gpio) + gpiod_set_value_cansleep(ddata->resb_gpio, 1); + + if (ddata->ini_gpio) + gpiod_set_value_cansleep(ddata->ini_gpio, 1); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void sharp_ls_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + if (ddata->ini_gpio) + gpiod_set_value_cansleep(ddata->ini_gpio, 0); + + if (ddata->resb_gpio) + gpiod_set_value_cansleep(ddata->resb_gpio, 0); + + /* wait at least 5 vsyncs after disabling the LCD */ + + msleep(100); + + in->ops.dpi->disable(in); + + if (ddata->vcc) + regulator_disable(ddata->vcc); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void sharp_ls_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->videomode = *timings; + dssdev->panel.timings = *timings; + + in->ops.dpi->set_timings(in, timings); +} + +static void sharp_ls_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->videomode; +} + +static int sharp_ls_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.dpi->check_timings(in, timings); +} + +static struct omap_dss_driver sharp_ls_ops = { + .connect = sharp_ls_connect, + .disconnect = sharp_ls_disconnect, + + .enable = sharp_ls_enable, + .disable = sharp_ls_disable, + + .set_timings = sharp_ls_set_timings, + .get_timings = sharp_ls_get_timings, + .check_timings = sharp_ls_check_timings, + + .get_resolution = omapdss_default_get_resolution, +}; + +static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, + char *desc, struct gpio_desc **gpiod) +{ + struct gpio_desc *gd; + int r; + + *gpiod = NULL; + + r = devm_gpio_request_one(dev, gpio, flags, desc); + if (r) + return r == -ENOENT ? 0 : r; + + gd = gpio_to_desc(gpio); + if (IS_ERR(gd)) + return PTR_ERR(gd) == -ENOENT ? 
0 : PTR_ERR(gd); + + *gpiod = gd; + return 0; +} + +static int sharp_ls_probe_pdata(struct platform_device *pdev) +{ + const struct panel_sharp_ls037v7dw01_platform_data *pdata; + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev, *in; + int r; + + pdata = dev_get_platdata(&pdev->dev); + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&pdev->dev, "failed to find video source '%s'\n", + pdata->source); + return -EPROBE_DEFER; + } + + ddata->in = in; + + ddata->data_lines = pdata->data_lines; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW, + "lcd MO", &ddata->mo_gpio); + if (r) + return r; + r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH, + "lcd LR", &ddata->lr_gpio); + if (r) + return r; + r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH, + "lcd UD", &ddata->ud_gpio); + if (r) + return r; + r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW, + "lcd RESB", &ddata->resb_gpio); + if (r) + return r; + r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW, + "lcd INI", &ddata->ini_gpio); + if (r) + return r; + + return 0; +} + +static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, + const char *desc, struct gpio_desc **gpiod) +{ + struct gpio_desc *gd; + + *gpiod = NULL; + + gd = devm_gpiod_get_index(dev, desc, index, GPIOD_OUT_LOW); + if (IS_ERR(gd)) + return PTR_ERR(gd); + + *gpiod = gd; + return 0; +} + +static int sharp_ls_probe_of(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + struct omap_dss_device *in; + int r; + + ddata->vcc = devm_regulator_get(&pdev->dev, "envdd"); + if (IS_ERR(ddata->vcc)) { + dev_err(&pdev->dev, "failed to get regulator\n"); + return PTR_ERR(ddata->vcc); + } + + /* lcd INI */ + r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "enable", &ddata->ini_gpio); + if (r) + return r; + + /* lcd RESB */ + r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "reset", &ddata->resb_gpio); + if (r) + return r; + + /* lcd MO */ + r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "mode", &ddata->mo_gpio); + if (r) + return r; + + /* lcd LR */ + r = sharp_ls_get_gpio_of(&pdev->dev, 1, 1, "mode", &ddata->lr_gpio); + if (r) + return r; + + /* lcd UD */ + r = sharp_ls_get_gpio_of(&pdev->dev, 2, 1, "mode", &ddata->ud_gpio); + if (r) + return r; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&pdev->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int sharp_ls_probe(struct platform_device *pdev) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (ddata == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, ddata); + + if (dev_get_platdata(&pdev->dev)) { + r = sharp_ls_probe_pdata(pdev); + if (r) + return r; + } else if (pdev->dev.of_node) { + r = sharp_ls_probe_of(pdev); + if (r) + return r; + } else { + return -ENODEV; + } + + ddata->videomode = sharp_ls_timings; + + dssdev = &ddata->dssdev; + dssdev->dev = &pdev->dev; + dssdev->driver = &sharp_ls_ops; + dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = ddata->videomode; + dssdev->phy.dpi.data_lines = ddata->data_lines; + + r = omapdss_register_display(dssdev); + if (r) { + 
dev_err(&pdev->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: + omap_dss_put_device(ddata->in); + return r; +} + +static int __exit sharp_ls_remove(struct platform_device *pdev) +{ + struct panel_drv_data *ddata = platform_get_drvdata(pdev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + omapdss_unregister_display(dssdev); + + sharp_ls_disable(dssdev); + sharp_ls_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id sharp_ls_of_match[] = { + { .compatible = "omapdss,sharp,ls037v7dw01", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, sharp_ls_of_match); + +static struct platform_driver sharp_ls_driver = { + .probe = sharp_ls_probe, + .remove = __exit_p(sharp_ls_remove), + .driver = { + .name = "panel-sharp-ls037v7dw01", + .of_match_table = sharp_ls_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_platform_driver(sharp_ls_driver); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c new file mode 100644 index 000000000000..31efcca801bd --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -0,0 +1,917 @@ +/* + * Sony ACX565AKM LCD Panel driver + * + * Copyright (C) 2010 Nokia Corporation + * + * Original Driver Author: Imre Deak <imre.deak@nokia.com> + * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com> + * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <linux/jiffies.h> +#include <linux/sched.h> +#include <linux/backlight.h> +#include <linux/fb.h> +#include <linux/gpio.h> +#include <linux/of.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +#define MIPID_CMD_READ_DISP_ID 0x04 +#define MIPID_CMD_READ_RED 0x06 +#define MIPID_CMD_READ_GREEN 0x07 +#define MIPID_CMD_READ_BLUE 0x08 +#define MIPID_CMD_READ_DISP_STATUS 0x09 +#define MIPID_CMD_RDDSDR 0x0F +#define MIPID_CMD_SLEEP_IN 0x10 +#define MIPID_CMD_SLEEP_OUT 0x11 +#define MIPID_CMD_DISP_OFF 0x28 +#define MIPID_CMD_DISP_ON 0x29 +#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51 +#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52 +#define MIPID_CMD_WRITE_CTRL_DISP 0x53 + +#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5) +#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4) +#define CTRL_DISP_BACKLIGHT_ON (1 << 2) +#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1) + +#define MIPID_CMD_READ_CTRL_DISP 0x54 +#define MIPID_CMD_WRITE_CABC 0x55 +#define MIPID_CMD_READ_CABC 0x56 + +#define MIPID_VER_LPH8923 3 +#define MIPID_VER_LS041Y3 4 +#define MIPID_VER_L4F00311 8 +#define MIPID_VER_ACX565AKM 9 + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + int reset_gpio; + int datapairs; + + struct omap_video_timings videomode; + + char *name; + int enabled; + int model; + int revision; + u8 display_id[3]; + unsigned has_bc:1; + unsigned has_cabc:1; + unsigned cabc_mode; + unsigned long hw_guard_end; /* next value of jiffies + when we can issue the + next sleep in/out command */ + unsigned long hw_guard_wait; /* max guard time in jiffies */ + + struct spi_device *spi; + struct mutex mutex; + + struct backlight_device *bl_dev; +}; + +static const struct omap_video_timings acx565akm_panel_timings = { + .x_res = 800, + .y_res = 480, + .pixelclock = 24000000, + .hfp = 28, + .hsw = 4, + .hbp = 24, + .vfp = 3, + .vsw = 3, + .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static void acx565akm_transfer(struct panel_drv_data *ddata, int cmd, + const u8 *wbuf, int wlen, u8 *rbuf, int rlen) +{ + struct spi_message m; + struct spi_transfer *x, xfer[5]; + int r; + + BUG_ON(ddata->spi == NULL); + + spi_message_init(&m); + + memset(xfer, 0, sizeof(xfer)); + x = &xfer[0]; + + cmd &= 0xff; + x->tx_buf = &cmd; + x->bits_per_word = 9; + x->len = 2; + + if (rlen > 1 && wlen == 0) { + /* + * Between the command and the response data there is a + * dummy clock cycle. Add an extra bit after the command + * word to account for this. 
+ */ + x->bits_per_word = 10; + cmd <<= 1; + } + spi_message_add_tail(x, &m); + + if (wlen) { + x++; + x->tx_buf = wbuf; + x->len = wlen; + x->bits_per_word = 9; + spi_message_add_tail(x, &m); + } + + if (rlen) { + x++; + x->rx_buf = rbuf; + x->len = rlen; + spi_message_add_tail(x, &m); + } + + r = spi_sync(ddata->spi, &m); + if (r < 0) + dev_dbg(&ddata->spi->dev, "spi_sync %d\n", r); +} + +static inline void acx565akm_cmd(struct panel_drv_data *ddata, int cmd) +{ + acx565akm_transfer(ddata, cmd, NULL, 0, NULL, 0); +} + +static inline void acx565akm_write(struct panel_drv_data *ddata, + int reg, const u8 *buf, int len) +{ + acx565akm_transfer(ddata, reg, buf, len, NULL, 0); +} + +static inline void acx565akm_read(struct panel_drv_data *ddata, + int reg, u8 *buf, int len) +{ + acx565akm_transfer(ddata, reg, NULL, 0, buf, len); +} + +static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec) +{ + ddata->hw_guard_wait = msecs_to_jiffies(guard_msec); + ddata->hw_guard_end = jiffies + ddata->hw_guard_wait; +} + +static void hw_guard_wait(struct panel_drv_data *ddata) +{ + unsigned long wait = ddata->hw_guard_end - jiffies; + + if ((long)wait > 0 && wait <= ddata->hw_guard_wait) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(wait); + } +} + +static void set_sleep_mode(struct panel_drv_data *ddata, int on) +{ + int cmd; + + if (on) + cmd = MIPID_CMD_SLEEP_IN; + else + cmd = MIPID_CMD_SLEEP_OUT; + /* + * We have to keep 120msec between sleep in/out commands. + * (8.2.15, 8.2.16). + */ + hw_guard_wait(ddata); + acx565akm_cmd(ddata, cmd); + hw_guard_start(ddata, 120); +} + +static void set_display_state(struct panel_drv_data *ddata, int enabled) +{ + int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF; + + acx565akm_cmd(ddata, cmd); +} + +static int panel_enabled(struct panel_drv_data *ddata) +{ + u32 disp_status; + int enabled; + + acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, + (u8 *)&disp_status, 4); + disp_status = __be32_to_cpu(disp_status); + enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10)); + dev_dbg(&ddata->spi->dev, + "LCD panel %senabled by bootloader (status 0x%04x)\n", + enabled ? 
"" : "not ", disp_status); + return enabled; +} + +static int panel_detect(struct panel_drv_data *ddata) +{ + acx565akm_read(ddata, MIPID_CMD_READ_DISP_ID, ddata->display_id, 3); + dev_dbg(&ddata->spi->dev, "MIPI display ID: %02x%02x%02x\n", + ddata->display_id[0], + ddata->display_id[1], + ddata->display_id[2]); + + switch (ddata->display_id[0]) { + case 0x10: + ddata->model = MIPID_VER_ACX565AKM; + ddata->name = "acx565akm"; + ddata->has_bc = 1; + ddata->has_cabc = 1; + break; + case 0x29: + ddata->model = MIPID_VER_L4F00311; + ddata->name = "l4f00311"; + break; + case 0x45: + ddata->model = MIPID_VER_LPH8923; + ddata->name = "lph8923"; + break; + case 0x83: + ddata->model = MIPID_VER_LS041Y3; + ddata->name = "ls041y3"; + break; + default: + ddata->name = "unknown"; + dev_err(&ddata->spi->dev, "invalid display ID\n"); + return -ENODEV; + } + + ddata->revision = ddata->display_id[1]; + + dev_info(&ddata->spi->dev, "omapfb: %s rev %02x LCD detected\n", + ddata->name, ddata->revision); + + return 0; +} + +/*----------------------Backlight Control-------------------------*/ + +static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable) +{ + u16 ctrl; + + acx565akm_read(ddata, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1); + if (enable) { + ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON | + CTRL_DISP_BACKLIGHT_ON; + } else { + ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON | + CTRL_DISP_BACKLIGHT_ON); + } + + ctrl |= 1 << 8; + acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2); +} + +static void set_cabc_mode(struct panel_drv_data *ddata, unsigned mode) +{ + u16 cabc_ctrl; + + ddata->cabc_mode = mode; + if (!ddata->enabled) + return; + cabc_ctrl = 0; + acx565akm_read(ddata, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1); + cabc_ctrl &= ~3; + cabc_ctrl |= (1 << 8) | (mode & 3); + acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2); +} + +static unsigned get_cabc_mode(struct panel_drv_data *ddata) +{ + return ddata->cabc_mode; +} + +static unsigned get_hw_cabc_mode(struct panel_drv_data *ddata) +{ + u8 cabc_ctrl; + + acx565akm_read(ddata, MIPID_CMD_READ_CABC, &cabc_ctrl, 1); + return cabc_ctrl & 3; +} + +static void acx565akm_set_brightness(struct panel_drv_data *ddata, int level) +{ + int bv; + + bv = level | (1 << 8); + acx565akm_write(ddata, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2); + + if (level) + enable_backlight_ctrl(ddata, 1); + else + enable_backlight_ctrl(ddata, 0); +} + +static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata) +{ + u8 bv; + + acx565akm_read(ddata, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1); + + return bv; +} + + +static int acx565akm_bl_update_status(struct backlight_device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); + int level; + + dev_dbg(&ddata->spi->dev, "%s\n", __func__); + + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) + level = dev->props.brightness; + else + level = 0; + + if (ddata->has_bc) + acx565akm_set_brightness(ddata, level); + else + return -ENODEV; + + return 0; +} + +static int acx565akm_bl_get_intensity(struct backlight_device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); + + dev_dbg(&dev->dev, "%s\n", __func__); + + if (!ddata->has_bc) + return -ENODEV; + + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) { + if (ddata->has_bc) + return acx565akm_get_actual_brightness(ddata); + else + return dev->props.brightness; + } + + return 0; +} + +static int 
acx565akm_bl_update_status_locked(struct backlight_device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); + int r; + + mutex_lock(&ddata->mutex); + r = acx565akm_bl_update_status(dev); + mutex_unlock(&ddata->mutex); + + return r; +} + +static int acx565akm_bl_get_intensity_locked(struct backlight_device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); + int r; + + mutex_lock(&ddata->mutex); + r = acx565akm_bl_get_intensity(dev); + mutex_unlock(&ddata->mutex); + + return r; +} + +static const struct backlight_ops acx565akm_bl_ops = { + .get_brightness = acx565akm_bl_get_intensity_locked, + .update_status = acx565akm_bl_update_status_locked, +}; + +/*--------------------Auto Brightness control via Sysfs---------------------*/ + +static const char * const cabc_modes[] = { + "off", /* always used when CABC is not supported */ + "ui", + "still-image", + "moving-image", +}; + +static ssize_t show_cabc_mode(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + const char *mode_str; + int mode; + int len; + + if (!ddata->has_cabc) + mode = 0; + else + mode = get_cabc_mode(ddata); + mode_str = "unknown"; + if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes)) + mode_str = cabc_modes[mode]; + len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str); + + return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1; +} + +static ssize_t store_cabc_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { + const char *mode_str = cabc_modes[i]; + int cmp_len = strlen(mode_str); + + if (count > 0 && buf[count - 1] == '\n') + count--; + if (count != cmp_len) + continue; + + if (strncmp(buf, mode_str, cmp_len) == 0) + break; + } + + if (i == ARRAY_SIZE(cabc_modes)) + return -EINVAL; + + if (!ddata->has_cabc && i != 0) + return -EINVAL; + + mutex_lock(&ddata->mutex); + set_cabc_mode(ddata, i); + mutex_unlock(&ddata->mutex); + + return count; +} + +static ssize_t show_cabc_available_modes(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + int len; + int i; + + if (!ddata->has_cabc) + return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]); + + for (i = 0, len = 0; + len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) + len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s", + i ? " " : "", cabc_modes[i], + i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : ""); + + return len < PAGE_SIZE ? 
len : PAGE_SIZE - 1; +} + +static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, + show_cabc_mode, store_cabc_mode); +static DEVICE_ATTR(cabc_available_modes, S_IRUGO, + show_cabc_available_modes, NULL); + +static struct attribute *bldev_attrs[] = { + &dev_attr_cabc_mode.attr, + &dev_attr_cabc_available_modes.attr, + NULL, +}; + +static struct attribute_group bldev_attr_group = { + .attrs = bldev_attrs, +}; + +static int acx565akm_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.sdi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void acx565akm_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.sdi->disconnect(in, dssdev); +} + +static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + dev_dbg(&ddata->spi->dev, "%s\n", __func__); + + in->ops.sdi->set_timings(in, &ddata->videomode); + + if (ddata->datapairs > 0) + in->ops.sdi->set_datapairs(in, ddata->datapairs); + + r = in->ops.sdi->enable(in); + if (r) { + pr_err("%s sdi enable failed\n", __func__); + return r; + } + + /*FIXME tweak me */ + msleep(50); + + if (gpio_is_valid(ddata->reset_gpio)) + gpio_set_value(ddata->reset_gpio, 1); + + if (ddata->enabled) { + dev_dbg(&ddata->spi->dev, "panel already enabled\n"); + return 0; + } + + /* + * We have to meet all the following delay requirements: + * 1. tRW: reset pulse width 10usec (7.12.1) + * 2. tRT: reset cancel time 5msec (7.12.1) + * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst + * case (7.6.2) + * 4. 120msec before the sleep out command (7.12.1) + */ + msleep(120); + + set_sleep_mode(ddata, 0); + ddata->enabled = 1; + + /* 5msec between sleep out and the next command. (8.2.16) */ + usleep_range(5000, 10000); + set_display_state(ddata, 1); + set_cabc_mode(ddata, ddata->cabc_mode); + + return acx565akm_bl_update_status(ddata->bl_dev); +} + +static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + dev_dbg(dssdev->dev, "%s\n", __func__); + + if (!ddata->enabled) + return; + + set_display_state(ddata, 0); + set_sleep_mode(ddata, 1); + ddata->enabled = 0; + /* + * We have to provide PCLK,HS,VS signals for 2 frames (worst case + * ~50msec) after sending the sleep in command and asserting the + * reset signal. We probably could assert the reset w/o the delay + * but we still delay to avoid possible artifacts. 
(7.6.1) + */ + msleep(50); + + if (gpio_is_valid(ddata->reset_gpio)) + gpio_set_value(ddata->reset_gpio, 0); + + /* FIXME need to tweak this delay */ + msleep(100); + + in->ops.sdi->disable(in); +} + +static int acx565akm_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + int r; + + dev_dbg(dssdev->dev, "%s\n", __func__); + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + mutex_lock(&ddata->mutex); + r = acx565akm_panel_power_on(dssdev); + mutex_unlock(&ddata->mutex); + if (r) + return r; + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void acx565akm_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + dev_dbg(dssdev->dev, "%s\n", __func__); + + if (!omapdss_device_is_enabled(dssdev)) + return; + + mutex_lock(&ddata->mutex); + acx565akm_panel_power_off(dssdev); + mutex_unlock(&ddata->mutex); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void acx565akm_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->videomode = *timings; + dssdev->panel.timings = *timings; + + in->ops.sdi->set_timings(in, timings); +} + +static void acx565akm_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->videomode; +} + +static int acx565akm_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.sdi->check_timings(in, timings); +} + +static struct omap_dss_driver acx565akm_ops = { + .connect = acx565akm_connect, + .disconnect = acx565akm_disconnect, + + .enable = acx565akm_enable, + .disable = acx565akm_disable, + + .set_timings = acx565akm_set_timings, + .get_timings = acx565akm_get_timings, + .check_timings = acx565akm_check_timings, + + .get_resolution = omapdss_default_get_resolution, +}; + +static int acx565akm_probe_pdata(struct spi_device *spi) +{ + const struct panel_acx565akm_platform_data *pdata; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev, *in; + + pdata = dev_get_platdata(&spi->dev); + + ddata->reset_gpio = pdata->reset_gpio; + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&spi->dev, "failed to find video source '%s'\n", + pdata->source); + return -EPROBE_DEFER; + } + ddata->in = in; + + ddata->datapairs = pdata->datapairs; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int acx565akm_probe_of(struct spi_device *spi) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct device_node *np = spi->dev.of_node; + + ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0); + + ddata->in = omapdss_of_find_source_for_first_ep(np); + if (IS_ERR(ddata->in)) { + dev_err(&spi->dev, "failed to find video source\n"); + return PTR_ERR(ddata->in); + } + + return 0; +} + +static int acx565akm_probe(struct spi_device *spi) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + struct backlight_device *bldev; + int max_brightness, brightness; + struct backlight_properties props; + int r; + + dev_dbg(&spi->dev, "%s\n", __func__); + + spi->mode = SPI_MODE_3; + + ddata = 
devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); + if (ddata == NULL) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, ddata); + + ddata->spi = spi; + + mutex_init(&ddata->mutex); + + if (dev_get_platdata(&spi->dev)) { + r = acx565akm_probe_pdata(spi); + if (r) + return r; + } else if (spi->dev.of_node) { + r = acx565akm_probe_of(spi); + if (r) + return r; + } else { + dev_err(&spi->dev, "platform data missing!\n"); + return -ENODEV; + } + + if (gpio_is_valid(ddata->reset_gpio)) { + r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio, + GPIOF_OUT_INIT_LOW, "lcd reset"); + if (r) + goto err_gpio; + } + + if (gpio_is_valid(ddata->reset_gpio)) + gpio_set_value(ddata->reset_gpio, 1); + + /* + * After reset we have to wait 5 msec before the first + * command can be sent. + */ + usleep_range(5000, 10000); + + ddata->enabled = panel_enabled(ddata); + + r = panel_detect(ddata); + + if (!ddata->enabled && gpio_is_valid(ddata->reset_gpio)) + gpio_set_value(ddata->reset_gpio, 0); + + if (r) { + dev_err(&spi->dev, "%s panel detect error\n", __func__); + goto err_detect; + } + + memset(&props, 0, sizeof(props)); + props.fb_blank = FB_BLANK_UNBLANK; + props.power = FB_BLANK_UNBLANK; + props.type = BACKLIGHT_RAW; + + bldev = backlight_device_register("acx565akm", &ddata->spi->dev, + ddata, &acx565akm_bl_ops, &props); + if (IS_ERR(bldev)) { + r = PTR_ERR(bldev); + goto err_reg_bl; + } + ddata->bl_dev = bldev; + if (ddata->has_cabc) { + r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group); + if (r) { + dev_err(&bldev->dev, + "%s failed to create sysfs files\n", __func__); + goto err_sysfs; + } + ddata->cabc_mode = get_hw_cabc_mode(ddata); + } + + max_brightness = 255; + + if (ddata->has_bc) + brightness = acx565akm_get_actual_brightness(ddata); + else + brightness = 0; + + bldev->props.max_brightness = max_brightness; + bldev->props.brightness = brightness; + + acx565akm_bl_update_status(bldev); + + + ddata->videomode = acx565akm_panel_timings; + + dssdev = &ddata->dssdev; + dssdev->dev = &spi->dev; + dssdev->driver = &acx565akm_ops; + dssdev->type = OMAP_DISPLAY_TYPE_SDI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = ddata->videomode; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&spi->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: + sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group); +err_sysfs: + backlight_device_unregister(bldev); +err_reg_bl: +err_detect: +err_gpio: + omap_dss_put_device(ddata->in); + return r; +} + +static int acx565akm_remove(struct spi_device *spi) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + dev_dbg(&ddata->spi->dev, "%s\n", __func__); + + sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group); + backlight_device_unregister(ddata->bl_dev); + + omapdss_unregister_display(dssdev); + + acx565akm_disable(dssdev); + acx565akm_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id acx565akm_of_match[] = { + { .compatible = "omapdss,sony,acx565akm", }, + {}, +}; +MODULE_DEVICE_TABLE(of, acx565akm_of_match); + +static struct spi_driver acx565akm_driver = { + .driver = { + .name = "acx565akm", + .of_match_table = acx565akm_of_match, + .suppress_bind_attrs = true, + }, + .probe = acx565akm_probe, + .remove = acx565akm_remove, +}; + +module_spi_driver(acx565akm_driver); + +MODULE_AUTHOR("Nokia Corporation"); +MODULE_DESCRIPTION("acx565akm LCD 
Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c new file mode 100644 index 000000000000..4d657f3ab679 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -0,0 +1,511 @@ +/* + * Toppoly TD028TTEC1 panel support + * + * Copyright (C) 2008 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Neo 1973 code (jbt6k74.c): + * Copyright (C) 2006-2007 by OpenMoko, Inc. + * Author: Harald Welte <laforge@openmoko.org> + * + * Ported and adapted from Neo 1973 U-Boot by: + * H. Nikolaus Schaller <hns@goldelico.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <linux/gpio.h> +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + int data_lines; + + struct omap_video_timings videomode; + + struct spi_device *spi_dev; +}; + +static struct omap_video_timings td028ttec1_panel_timings = { + .x_res = 480, + .y_res = 640, + .pixelclock = 22153000, + .hfp = 24, + .hsw = 8, + .hbp = 8, + .vfp = 4, + .vsw = 2, + .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, +}; + +#define JBT_COMMAND 0x000 +#define JBT_DATA 0x100 + +static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg) +{ + int rc; + u16 tx_buf = JBT_COMMAND | reg; + + rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf, + 1*sizeof(u16)); + if (rc != 0) + dev_err(&ddata->spi_dev->dev, + "jbt_ret_write_0 spi_write ret %d\n", rc); + + return rc; +} + +static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data) +{ + int rc; + u16 tx_buf[2]; + + tx_buf[0] = JBT_COMMAND | reg; + tx_buf[1] = JBT_DATA | data; + rc = spi_write(ddata->spi_dev, (u8 *)tx_buf, + 2*sizeof(u16)); + if (rc != 0) + dev_err(&ddata->spi_dev->dev, + "jbt_reg_write_1 spi_write ret %d\n", rc); + + return rc; +} + +static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 reg, u16 data) +{ + int rc; + u16 tx_buf[3]; + + tx_buf[0] = JBT_COMMAND | reg; + tx_buf[1] = JBT_DATA | (data >> 8); + tx_buf[2] = JBT_DATA | (data & 0xff); + + rc = spi_write(ddata->spi_dev, (u8 *)tx_buf, + 3*sizeof(u16)); + + if (rc != 0) + dev_err(&ddata->spi_dev->dev, + "jbt_reg_write_2 spi_write ret %d\n", rc); + + return rc; +} + +enum jbt_register { + JBT_REG_SLEEP_IN = 0x10, + JBT_REG_SLEEP_OUT = 0x11, + + JBT_REG_DISPLAY_OFF = 0x28, + JBT_REG_DISPLAY_ON = 0x29, + + JBT_REG_RGB_FORMAT = 0x3a, + JBT_REG_QUAD_RATE = 0x3b, + + JBT_REG_POWER_ON_OFF = 0xb0, + JBT_REG_BOOSTER_OP = 0xb1, + JBT_REG_BOOSTER_MODE = 0xb2, + JBT_REG_BOOSTER_FREQ = 0xb3, + JBT_REG_OPAMP_SYSCLK = 0xb4, + JBT_REG_VSC_VOLTAGE = 0xb5, + 
JBT_REG_VCOM_VOLTAGE = 0xb6, + JBT_REG_EXT_DISPL = 0xb7, + JBT_REG_OUTPUT_CONTROL = 0xb8, + JBT_REG_DCCLK_DCEV = 0xb9, + JBT_REG_DISPLAY_MODE1 = 0xba, + JBT_REG_DISPLAY_MODE2 = 0xbb, + JBT_REG_DISPLAY_MODE = 0xbc, + JBT_REG_ASW_SLEW = 0xbd, + JBT_REG_DUMMY_DISPLAY = 0xbe, + JBT_REG_DRIVE_SYSTEM = 0xbf, + + JBT_REG_SLEEP_OUT_FR_A = 0xc0, + JBT_REG_SLEEP_OUT_FR_B = 0xc1, + JBT_REG_SLEEP_OUT_FR_C = 0xc2, + JBT_REG_SLEEP_IN_LCCNT_D = 0xc3, + JBT_REG_SLEEP_IN_LCCNT_E = 0xc4, + JBT_REG_SLEEP_IN_LCCNT_F = 0xc5, + JBT_REG_SLEEP_IN_LCCNT_G = 0xc6, + + JBT_REG_GAMMA1_FINE_1 = 0xc7, + JBT_REG_GAMMA1_FINE_2 = 0xc8, + JBT_REG_GAMMA1_INCLINATION = 0xc9, + JBT_REG_GAMMA1_BLUE_OFFSET = 0xca, + + JBT_REG_BLANK_CONTROL = 0xcf, + JBT_REG_BLANK_TH_TV = 0xd0, + JBT_REG_CKV_ON_OFF = 0xd1, + JBT_REG_CKV_1_2 = 0xd2, + JBT_REG_OEV_TIMING = 0xd3, + JBT_REG_ASW_TIMING_1 = 0xd4, + JBT_REG_ASW_TIMING_2 = 0xd5, + + JBT_REG_HCLOCK_VGA = 0xec, + JBT_REG_HCLOCK_QVGA = 0xed, +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dpi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dpi->disconnect(in, dssdev); +} + +static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + if (ddata->data_lines) + in->ops.dpi->set_data_lines(in, ddata->data_lines); + in->ops.dpi->set_timings(in, &ddata->videomode); + + r = in->ops.dpi->enable(in); + if (r) + return r; + + dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n", + dssdev->state); + + /* three times command zero */ + r |= jbt_ret_write_0(ddata, 0x00); + usleep_range(1000, 2000); + r |= jbt_ret_write_0(ddata, 0x00); + usleep_range(1000, 2000); + r |= jbt_ret_write_0(ddata, 0x00); + usleep_range(1000, 2000); + + if (r) { + dev_warn(dssdev->dev, "transfer error\n"); + goto transfer_err; + } + + /* deep standby out */ + r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17); + + /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */ + r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80); + + /* Quad mode off */ + r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00); + + /* AVDD on, XVDD on */ + r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16); + + /* Output control */ + r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9); + + /* Sleep mode off */ + r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT); + + /* at this point we have like 50% grey */ + + /* initialize register set */ + r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01); + r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00); + r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60); + r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10); + r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56); + r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33); + r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11); + r |= 
jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11); + r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02); + r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b); + r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40); + r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03); + r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04); + /* + * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement + * to avoid red / blue flicker + */ + r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04); + r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00); + + r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11); + r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11); + r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11); + r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040); + r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0); + r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020); + r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0); + + r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533); + r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00); + r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00); + r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00); + + r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0); + r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02); + r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804); + + r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01); + r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000); + + r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e); + r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4); + r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e); + + r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + +transfer_err: + + return r ? 
-EIO : 0; +} + +static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n"); + + jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF); + jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002); + jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN); + jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00); + + in->ops.dpi->disable(in); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->videomode = *timings; + dssdev->panel.timings = *timings; + + in->ops.dpi->set_timings(in, timings); +} + +static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->videomode; +} + +static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.dpi->check_timings(in, timings); +} + +static struct omap_dss_driver td028ttec1_ops = { + .connect = td028ttec1_panel_connect, + .disconnect = td028ttec1_panel_disconnect, + + .enable = td028ttec1_panel_enable, + .disable = td028ttec1_panel_disable, + + .set_timings = td028ttec1_panel_set_timings, + .get_timings = td028ttec1_panel_get_timings, + .check_timings = td028ttec1_panel_check_timings, +}; + +static int td028ttec1_panel_probe_pdata(struct spi_device *spi) +{ + const struct panel_tpo_td028ttec1_platform_data *pdata; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev, *in; + + pdata = dev_get_platdata(&spi->dev); + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&spi->dev, "failed to find video source '%s'\n", + pdata->source); + return -EPROBE_DEFER; + } + + ddata->in = in; + + ddata->data_lines = pdata->data_lines; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int td028ttec1_probe_of(struct spi_device *spi) +{ + struct device_node *node = spi->dev.of_node; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *in; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&spi->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int td028ttec1_panel_probe(struct spi_device *spi) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + dev_dbg(&spi->dev, "%s\n", __func__); + + spi->bits_per_word = 9; + spi->mode = SPI_MODE_3; + + r = spi_setup(spi); + if (r < 0) { + dev_err(&spi->dev, "spi_setup failed: %d\n", r); + return r; + } + + ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); + if (ddata == NULL) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, ddata); + + ddata->spi_dev = spi; + + if (dev_get_platdata(&spi->dev)) { + r = td028ttec1_panel_probe_pdata(spi); + if (r) + return r; + } else if (spi->dev.of_node) { + r = td028ttec1_probe_of(spi); + if (r) + return r; + } else { + return -ENODEV; + } + + ddata->videomode = td028ttec1_panel_timings; + + dssdev = &ddata->dssdev; + dssdev->dev = 
&spi->dev; + dssdev->driver = &td028ttec1_ops; + dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = ddata->videomode; + dssdev->phy.dpi.data_lines = ddata->data_lines; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&spi->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: + omap_dss_put_device(ddata->in); + return r; +} + +static int td028ttec1_panel_remove(struct spi_device *spi) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__); + + omapdss_unregister_display(dssdev); + + td028ttec1_panel_disable(dssdev); + td028ttec1_panel_disconnect(dssdev); + + omap_dss_put_device(in); + + return 0; +} + +static const struct of_device_id td028ttec1_of_match[] = { + { .compatible = "omapdss,toppoly,td028ttec1", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, td028ttec1_of_match); + +static struct spi_driver td028ttec1_spi_driver = { + .probe = td028ttec1_panel_probe, + .remove = td028ttec1_panel_remove, + + .driver = { + .name = "panel-tpo-td028ttec1", + .of_match_table = td028ttec1_of_match, + .suppress_bind_attrs = true, + }, +}; + +module_spi_driver(td028ttec1_spi_driver); + +MODULE_ALIAS("spi:toppoly,td028ttec1"); +MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>"); +MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c new file mode 100644 index 000000000000..68e3b68a2920 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -0,0 +1,686 @@ +/* + * TPO TD043MTEA1 Panel driver + * + * Author: Gražvydas Ignotas <notasas@gmail.com> + * Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/spi/spi.h> +#include <linux/regulator/consumer.h> +#include <linux/gpio.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/of_gpio.h> + +#include <video/omapdss.h> +#include <video/omap-panel-data.h> + +#define TPO_R02_MODE(x) ((x) & 7) +#define TPO_R02_MODE_800x480 7 +#define TPO_R02_NCLK_RISING BIT(3) +#define TPO_R02_HSYNC_HIGH BIT(4) +#define TPO_R02_VSYNC_HIGH BIT(5) + +#define TPO_R03_NSTANDBY BIT(0) +#define TPO_R03_EN_CP_CLK BIT(1) +#define TPO_R03_EN_VGL_PUMP BIT(2) +#define TPO_R03_EN_PWM BIT(3) +#define TPO_R03_DRIVING_CAP_100 BIT(4) +#define TPO_R03_EN_PRE_CHARGE BIT(6) +#define TPO_R03_SOFTWARE_CTL BIT(7) + +#define TPO_R04_NFLIP_H BIT(0) +#define TPO_R04_NFLIP_V BIT(1) +#define TPO_R04_CP_CLK_FREQ_1H BIT(2) +#define TPO_R04_VGL_FREQ_1H BIT(4) + +#define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \ + TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \ + TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \ + TPO_R03_SOFTWARE_CTL) + +#define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \ + TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL) + +static const u16 tpo_td043_def_gamma[12] = { + 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023 +}; + +struct panel_drv_data { + struct omap_dss_device dssdev; + struct omap_dss_device *in; + + struct omap_video_timings videomode; + + int data_lines; + + struct spi_device *spi; + struct regulator *vcc_reg; + int nreset_gpio; + u16 gamma[12]; + u32 mode; + u32 hmirror:1; + u32 vmirror:1; + u32 powered_on:1; + u32 spi_suspended:1; + u32 power_on_resume:1; +}; + +static const struct omap_video_timings tpo_td043_timings = { + .x_res = 800, + .y_res = 480, + + .pixelclock = 36000000, + + .hsw = 1, + .hfp = 68, + .hbp = 214, + + .vsw = 1, + .vfp = 39, + .vbp = 34, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, +}; + +#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) + +static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data) +{ + struct spi_message m; + struct spi_transfer xfer; + u16 w; + int r; + + spi_message_init(&m); + + memset(&xfer, 0, sizeof(xfer)); + + w = ((u16)addr << 10) | (1 << 8) | data; + xfer.tx_buf = &w; + xfer.bits_per_word = 16; + xfer.len = 2; + spi_message_add_tail(&xfer, &m); + + r = spi_sync(spi, &m); + if (r < 0) + dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r); + return r; +} + +static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12]) +{ + u8 i, val; + + /* gamma bits [9:8] */ + for (val = i = 0; i < 4; i++) + val |= (gamma[i] & 0x300) >> ((i + 1) * 2); + tpo_td043_write(spi, 0x11, val); + + for (val = i = 0; i < 4; i++) + val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2); + tpo_td043_write(spi, 0x12, val); + + for (val = i = 0; i < 4; i++) + val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2); + tpo_td043_write(spi, 0x13, val); + + /* gamma bits [7:0] */ + for (val = i = 0; i < 12; i++) + tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff); +} + +static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v) +{ + u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V | + TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H; + if (h) + reg4 &= ~TPO_R04_NFLIP_H; + if (v) + reg4 &= ~TPO_R04_NFLIP_V; + + return tpo_td043_write(spi, 4, reg4); +} + +static int tpo_td043_set_hmirror(struct omap_dss_device 
*dssdev, bool enable) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev); + + ddata->hmirror = enable; + return tpo_td043_write_mirror(ddata->spi, ddata->hmirror, + ddata->vmirror); +} + +static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev); + + return ddata->hmirror; +} + +static ssize_t tpo_td043_vmirror_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror); +} + +static ssize_t tpo_td043_vmirror_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + int val; + int ret; + + ret = kstrtoint(buf, 0, &val); + if (ret < 0) + return ret; + + val = !!val; + + ret = tpo_td043_write_mirror(ddata->spi, ddata->hmirror, val); + if (ret < 0) + return ret; + + ddata->vmirror = val; + + return count; +} + +static ssize_t tpo_td043_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode); +} + +static ssize_t tpo_td043_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + long val; + int ret; + + ret = kstrtol(buf, 0, &val); + if (ret != 0 || val & ~7) + return -EINVAL; + + ddata->mode = val; + + val |= TPO_R02_NCLK_RISING; + tpo_td043_write(ddata->spi, 2, val); + + return count; +} + +static ssize_t tpo_td043_gamma_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + ssize_t len = 0; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) { + ret = snprintf(buf + len, PAGE_SIZE - len, "%u ", + ddata->gamma[i]); + if (ret < 0) + return ret; + len += ret; + } + buf[len - 1] = '\n'; + + return len; +} + +static ssize_t tpo_td043_gamma_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + unsigned int g[12]; + int ret; + int i; + + ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u", + &g[0], &g[1], &g[2], &g[3], &g[4], &g[5], + &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]); + + if (ret != 12) + return -EINVAL; + + for (i = 0; i < 12; i++) + ddata->gamma[i] = g[i]; + + tpo_td043_write_gamma(ddata->spi, ddata->gamma); + + return count; +} + +static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR, + tpo_td043_vmirror_show, tpo_td043_vmirror_store); +static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, + tpo_td043_mode_show, tpo_td043_mode_store); +static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR, + tpo_td043_gamma_show, tpo_td043_gamma_store); + +static struct attribute *tpo_td043_attrs[] = { + &dev_attr_vmirror.attr, + &dev_attr_mode.attr, + &dev_attr_gamma.attr, + NULL, +}; + +static struct attribute_group tpo_td043_attr_group = { + .attrs = tpo_td043_attrs, +}; + +static int tpo_td043_power_on(struct panel_drv_data *ddata) +{ + int r; + + if (ddata->powered_on) + return 0; + + r = regulator_enable(ddata->vcc_reg); + if (r != 0) + return r; + + /* wait for panel to stabilize */ + msleep(160); + + if (gpio_is_valid(ddata->nreset_gpio)) + gpio_set_value(ddata->nreset_gpio, 1); + + tpo_td043_write(ddata->spi, 2, + TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING); + tpo_td043_write(ddata->spi, 3, 
TPO_R03_VAL_NORMAL); + tpo_td043_write(ddata->spi, 0x20, 0xf0); + tpo_td043_write(ddata->spi, 0x21, 0xf0); + tpo_td043_write_mirror(ddata->spi, ddata->hmirror, + ddata->vmirror); + tpo_td043_write_gamma(ddata->spi, ddata->gamma); + + ddata->powered_on = 1; + return 0; +} + +static void tpo_td043_power_off(struct panel_drv_data *ddata) +{ + if (!ddata->powered_on) + return; + + tpo_td043_write(ddata->spi, 3, + TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM); + + if (gpio_is_valid(ddata->nreset_gpio)) + gpio_set_value(ddata->nreset_gpio, 0); + + /* wait for at least 2 vsyncs before cutting off power */ + msleep(50); + + tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY); + + regulator_disable(ddata->vcc_reg); + + ddata->powered_on = 0; +} + +static int tpo_td043_connect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (omapdss_device_is_connected(dssdev)) + return 0; + + r = in->ops.dpi->connect(in, dssdev); + if (r) + return r; + + return 0; +} + +static void tpo_td043_disconnect(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_connected(dssdev)) + return; + + in->ops.dpi->disconnect(in, dssdev); +} + +static int tpo_td043_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + int r; + + if (!omapdss_device_is_connected(dssdev)) + return -ENODEV; + + if (omapdss_device_is_enabled(dssdev)) + return 0; + + if (ddata->data_lines) + in->ops.dpi->set_data_lines(in, ddata->data_lines); + in->ops.dpi->set_timings(in, &ddata->videomode); + + r = in->ops.dpi->enable(in); + if (r) + return r; + + /* + * If we are resuming from system suspend, SPI clocks might not be + * enabled yet, so we'll program the LCD from SPI PM resume callback. 
+ */ + if (!ddata->spi_suspended) { + r = tpo_td043_power_on(ddata); + if (r) { + in->ops.dpi->disable(in); + return r; + } + } + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + + return 0; +} + +static void tpo_td043_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + if (!omapdss_device_is_enabled(dssdev)) + return; + + in->ops.dpi->disable(in); + + if (!ddata->spi_suspended) + tpo_td043_power_off(ddata); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} + +static void tpo_td043_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + ddata->videomode = *timings; + dssdev->panel.timings = *timings; + + in->ops.dpi->set_timings(in, timings); +} + +static void tpo_td043_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + + *timings = ddata->videomode; +} + +static int tpo_td043_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.dpi->check_timings(in, timings); +} + +static struct omap_dss_driver tpo_td043_ops = { + .connect = tpo_td043_connect, + .disconnect = tpo_td043_disconnect, + + .enable = tpo_td043_enable, + .disable = tpo_td043_disable, + + .set_timings = tpo_td043_set_timings, + .get_timings = tpo_td043_get_timings, + .check_timings = tpo_td043_check_timings, + + .set_mirror = tpo_td043_set_hmirror, + .get_mirror = tpo_td043_get_hmirror, + + .get_resolution = omapdss_default_get_resolution, +}; + + +static int tpo_td043_probe_pdata(struct spi_device *spi) +{ + const struct panel_tpo_td043mtea1_platform_data *pdata; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev, *in; + + pdata = dev_get_platdata(&spi->dev); + + ddata->nreset_gpio = pdata->nreset_gpio; + + in = omap_dss_find_output(pdata->source); + if (in == NULL) { + dev_err(&spi->dev, "failed to find video source '%s'\n", + pdata->source); + return -EPROBE_DEFER; + } + ddata->in = in; + + ddata->data_lines = pdata->data_lines; + + dssdev = &ddata->dssdev; + dssdev->name = pdata->name; + + return 0; +} + +static int tpo_td043_probe_of(struct spi_device *spi) +{ + struct device_node *node = spi->dev.of_node; + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *in; + int gpio; + + gpio = of_get_named_gpio(node, "reset-gpios", 0); + if (!gpio_is_valid(gpio)) { + dev_err(&spi->dev, "failed to parse enable gpio\n"); + return gpio; + } + ddata->nreset_gpio = gpio; + + in = omapdss_of_find_source_for_first_ep(node); + if (IS_ERR(in)) { + dev_err(&spi->dev, "failed to find video source\n"); + return PTR_ERR(in); + } + + ddata->in = in; + + return 0; +} + +static int tpo_td043_probe(struct spi_device *spi) +{ + struct panel_drv_data *ddata; + struct omap_dss_device *dssdev; + int r; + + dev_dbg(&spi->dev, "%s\n", __func__); + + spi->bits_per_word = 16; + spi->mode = SPI_MODE_0; + + r = spi_setup(spi); + if (r < 0) { + dev_err(&spi->dev, "spi_setup failed: %d\n", r); + return r; + } + + ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); + if (ddata == NULL) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, ddata); + + ddata->spi = spi; + + if (dev_get_platdata(&spi->dev)) { + r = 
tpo_td043_probe_pdata(spi); + if (r) + return r; + } else if (spi->dev.of_node) { + r = tpo_td043_probe_of(spi); + if (r) + return r; + } else { + return -ENODEV; + } + + ddata->mode = TPO_R02_MODE_800x480; + memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); + + ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc"); + if (IS_ERR(ddata->vcc_reg)) { + dev_err(&spi->dev, "failed to get LCD VCC regulator\n"); + r = PTR_ERR(ddata->vcc_reg); + goto err_regulator; + } + + if (gpio_is_valid(ddata->nreset_gpio)) { + r = devm_gpio_request_one(&spi->dev, + ddata->nreset_gpio, GPIOF_OUT_INIT_LOW, + "lcd reset"); + if (r < 0) { + dev_err(&spi->dev, "couldn't request reset GPIO\n"); + goto err_gpio_req; + } + } + + r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group); + if (r) { + dev_err(&spi->dev, "failed to create sysfs files\n"); + goto err_sysfs; + } + + ddata->videomode = tpo_td043_timings; + + dssdev = &ddata->dssdev; + dssdev->dev = &spi->dev; + dssdev->driver = &tpo_td043_ops; + dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->owner = THIS_MODULE; + dssdev->panel.timings = ddata->videomode; + + r = omapdss_register_display(dssdev); + if (r) { + dev_err(&spi->dev, "Failed to register panel\n"); + goto err_reg; + } + + return 0; + +err_reg: + sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); +err_sysfs: +err_gpio_req: +err_regulator: + omap_dss_put_device(ddata->in); + return r; +} + +static int tpo_td043_remove(struct spi_device *spi) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); + struct omap_dss_device *dssdev = &ddata->dssdev; + struct omap_dss_device *in = ddata->in; + + dev_dbg(&ddata->spi->dev, "%s\n", __func__); + + omapdss_unregister_display(dssdev); + + tpo_td043_disable(dssdev); + tpo_td043_disconnect(dssdev); + + omap_dss_put_device(in); + + sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int tpo_td043_spi_suspend(struct device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + + dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", ddata); + + ddata->power_on_resume = ddata->powered_on; + tpo_td043_power_off(ddata); + ddata->spi_suspended = 1; + + return 0; +} + +static int tpo_td043_spi_resume(struct device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + int ret; + + dev_dbg(dev, "tpo_td043_spi_resume\n"); + + if (ddata->power_on_resume) { + ret = tpo_td043_power_on(ddata); + if (ret) + return ret; + } + ddata->spi_suspended = 0; + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm, + tpo_td043_spi_suspend, tpo_td043_spi_resume); + +static const struct of_device_id tpo_td043_of_match[] = { + { .compatible = "omapdss,tpo,td043mtea1", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, tpo_td043_of_match); + +static struct spi_driver tpo_td043_spi_driver = { + .driver = { + .name = "panel-tpo-td043mtea1", + .pm = &tpo_td043_spi_pm, + .of_match_table = tpo_td043_of_match, + .suppress_bind_attrs = true, + }, + .probe = tpo_td043_probe, + .remove = tpo_td043_remove, +}; + +module_spi_driver(tpo_td043_spi_driver); + +MODULE_ALIAS("spi:tpo,td043mtea1"); +MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>"); +MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig new file mode 100644 index 000000000000..d1fa730c7d54 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/Kconfig @@ -0,0 +1,135 @@ +config OMAP2_DSS_INIT + bool + +menuconfig OMAP2_DSS + 
tristate "OMAP2+ Display Subsystem support" + select VIDEOMODE_HELPERS + select OMAP2_DSS_INIT + select HDMI + help + OMAP2+ Display Subsystem support. + +if OMAP2_DSS + +config OMAP2_DSS_DEBUG + bool "Debug support" + default n + help + This enables printing of debug messages. Alternatively, debug messages + can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting + appropriate flags in <debugfs>/dynamic_debug/control. + +config OMAP2_DSS_DEBUGFS + bool "Debugfs filesystem support" + depends on DEBUG_FS + default n + help + This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables + querying about clock configuration and register configuration of dss, + dispc, dsi, hdmi and rfbi. + +config OMAP2_DSS_COLLECT_IRQ_STATS + bool "Collect DSS IRQ statistics" + depends on OMAP2_DSS_DEBUGFS + default n + help + Collect DSS IRQ statistics, printable via debugfs. + + The statistics can be found from + <debugfs>/omapdss/dispc_irq for DISPC interrupts, and + <debugfs>/omapdss/dsi_irq for DSI interrupts. + +config OMAP2_DSS_DPI + bool "DPI support" + default y + help + DPI Interface. This is the Parallel Display Interface. + +config OMAP2_DSS_RFBI + bool "RFBI support" + depends on BROKEN + default n + help + MIPI DBI support (RFBI, Remote Framebuffer Interface, in Texas + Instrument's terminology). + + DBI is a bus between the host processor and a peripheral, + such as a display or a framebuffer chip. + + See http://www.mipi.org/ for DBI specifications. + +config OMAP2_DSS_VENC + bool "VENC support" + default y + help + OMAP Video Encoder support for S-Video and composite TV-out. + +config OMAP2_DSS_HDMI_COMMON + bool + +config OMAP4_DSS_HDMI + bool "HDMI support for OMAP4" + default y + select OMAP2_DSS_HDMI_COMMON + help + HDMI support for OMAP4 based SoCs. + +config OMAP5_DSS_HDMI + bool "HDMI support for OMAP5" + default n + select OMAP2_DSS_HDMI_COMMON + help + HDMI Interface for OMAP5 and similar cores. This adds the High + Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI + specification. + +config OMAP2_DSS_SDI + bool "SDI support" + default n + help + SDI (Serial Display Interface) support. + + SDI is a high speed one-way display serial bus between the host + processor and a display. + +config OMAP2_DSS_DSI + bool "DSI support" + default n + help + MIPI DSI (Display Serial Interface) support. + + DSI is a high speed half-duplex serial interface between the host + processor and a peripheral, such as a display or a framebuffer chip. + + See http://www.mipi.org/ for DSI specifications. + +config OMAP2_DSS_MIN_FCK_PER_PCK + int "Minimum FCK/PCK ratio (for scaling)" + range 0 32 + default 0 + help + This can be used to adjust the minimum FCK/PCK ratio. + + With this you can make sure that DISPC FCK is at least + n x PCK. Video plane scaling requires higher FCK than + normally. + + If this is set to 0, there's no extra constraint on the + DISPC FCK. However, the FCK will at minimum be + 2xPCK (if active matrix) or 3xPCK (if passive matrix). + + Max FCK is 173MHz, so this doesn't work if your PCK + is very high. + +config OMAP2_DSS_SLEEP_AFTER_VENC_RESET + bool "Sleep 20ms after VENC reset" + default y + help + There is a 20ms sleep after VENC reset which seemed to fix the + reset. The reason for the bug is unclear, and it's also unclear + on what platforms this happens. + + This option enables the sleep, and is enabled by default. You can + disable the sleep if it doesn't cause problems on your platform. 
+ +endif diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile new file mode 100644 index 000000000000..b5136d3d4b77 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/Makefile @@ -0,0 +1,18 @@ +obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o +obj-$(CONFIG_OMAP2_DSS) += omapdss.o +# Core DSS files +omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \ + output.o dss-of.o pll.o video-pll.o +# DSS compat layer files +omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \ + dispc-compat.o display-sysfs.o +omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o +omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o +omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o +omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o +omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o +omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \ + hdmi_phy.o +omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o +omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o +ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG diff --git a/drivers/gpu/drm/omapdrm/dss/apply.c b/drivers/gpu/drm/omapdrm/dss/apply.c new file mode 100644 index 000000000000..663ccc3bf4e5 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/apply.c @@ -0,0 +1,1702 @@ +/* + * Copyright (C) 2011 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "APPLY" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/jiffies.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" +#include "dispc-compat.h" + +/* + * We have 4 levels of cache for the dispc settings. First two are in SW and + * the latter two in HW. + * + * set_info() + * v + * +--------------------+ + * | user_info | + * +--------------------+ + * v + * apply() + * v + * +--------------------+ + * | info | + * +--------------------+ + * v + * write_regs() + * v + * +--------------------+ + * | shadow registers | + * +--------------------+ + * v + * VFP or lcd/digit_enable + * v + * +--------------------+ + * | registers | + * +--------------------+ + */ + +struct ovl_priv_data { + + bool user_info_dirty; + struct omap_overlay_info user_info; + + bool info_dirty; + struct omap_overlay_info info; + + bool shadow_info_dirty; + + bool extra_info_dirty; + bool shadow_extra_info_dirty; + + bool enabled; + u32 fifo_low, fifo_high; + + /* + * True if overlay is to be enabled. Used to check and calculate configs + * for the overlay before it is enabled in the HW. + */ + bool enabling; +}; + +struct mgr_priv_data { + + bool user_info_dirty; + struct omap_overlay_manager_info user_info; + + bool info_dirty; + struct omap_overlay_manager_info info; + + bool shadow_info_dirty; + + /* If true, GO bit is up and shadow registers cannot be written. 
+ * Never true for manual update displays */ + bool busy; + + /* If true, dispc output is enabled */ + bool updating; + + /* If true, a display is enabled using this manager */ + bool enabled; + + bool extra_info_dirty; + bool shadow_extra_info_dirty; + + struct omap_video_timings timings; + struct dss_lcd_mgr_config lcd_config; + + void (*framedone_handler)(void *); + void *framedone_handler_data; +}; + +static struct { + struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS]; + struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS]; + + bool irq_enabled; +} dss_data; + +/* protects dss_data */ +static spinlock_t data_lock; +/* lock for blocking functions */ +static DEFINE_MUTEX(apply_lock); +static DECLARE_COMPLETION(extra_updated_completion); + +static void dss_register_vsync_isr(void); + +static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl) +{ + return &dss_data.ovl_priv_data_array[ovl->id]; +} + +static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr) +{ + return &dss_data.mgr_priv_data_array[mgr->id]; +} + +static void apply_init_priv(void) +{ + const int num_ovls = dss_feat_get_num_ovls(); + struct mgr_priv_data *mp; + int i; + + spin_lock_init(&data_lock); + + for (i = 0; i < num_ovls; ++i) { + struct ovl_priv_data *op; + + op = &dss_data.ovl_priv_data_array[i]; + + op->info.color_mode = OMAP_DSS_COLOR_RGB16; + op->info.rotation_type = OMAP_DSS_ROT_DMA; + + op->info.global_alpha = 255; + + switch (i) { + case 0: + op->info.zorder = 0; + break; + case 1: + op->info.zorder = + dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0; + break; + case 2: + op->info.zorder = + dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0; + break; + case 3: + op->info.zorder = + dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0; + break; + } + + op->user_info = op->info; + } + + /* + * Initialize some of the lcd_config fields for TV manager, this lets + * us prevent checking if the manager is LCD or TV at some places + */ + mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT]; + + mp->lcd_config.video_port_width = 24; + mp->lcd_config.clock_info.lck_div = 1; + mp->lcd_config.clock_info.pck_div = 1; +} + +/* + * A LCD manager's stallmode decides whether it is in manual or auto update. 
TV + * manager is always auto update, stallmode field for TV manager is false by + * default + */ +static bool ovl_manual_update(struct omap_overlay *ovl) +{ + struct mgr_priv_data *mp = get_mgr_priv(ovl->manager); + + return mp->lcd_config.stallmode; +} + +static bool mgr_manual_update(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + return mp->lcd_config.stallmode; +} + +static int dss_check_settings_low(struct omap_overlay_manager *mgr, + bool applying) +{ + struct omap_overlay_info *oi; + struct omap_overlay_manager_info *mi; + struct omap_overlay *ovl; + struct omap_overlay_info *ois[MAX_DSS_OVERLAYS]; + struct ovl_priv_data *op; + struct mgr_priv_data *mp; + + mp = get_mgr_priv(mgr); + + if (!mp->enabled) + return 0; + + if (applying && mp->user_info_dirty) + mi = &mp->user_info; + else + mi = &mp->info; + + /* collect the infos to be tested into the array */ + list_for_each_entry(ovl, &mgr->overlays, list) { + op = get_ovl_priv(ovl); + + if (!op->enabled && !op->enabling) + oi = NULL; + else if (applying && op->user_info_dirty) + oi = &op->user_info; + else + oi = &op->info; + + ois[ovl->id] = oi; + } + + return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois); +} + +/* + * check manager and overlay settings using overlay_info from data->info + */ +static int dss_check_settings(struct omap_overlay_manager *mgr) +{ + return dss_check_settings_low(mgr, false); +} + +/* + * check manager and overlay settings using overlay_info from ovl->info if + * dirty and from data->info otherwise + */ +static int dss_check_settings_apply(struct omap_overlay_manager *mgr) +{ + return dss_check_settings_low(mgr, true); +} + +static bool need_isr(void) +{ + const int num_mgrs = dss_feat_get_num_mgrs(); + int i; + + for (i = 0; i < num_mgrs; ++i) { + struct omap_overlay_manager *mgr; + struct mgr_priv_data *mp; + struct omap_overlay *ovl; + + mgr = omap_dss_get_overlay_manager(i); + mp = get_mgr_priv(mgr); + + if (!mp->enabled) + continue; + + if (mgr_manual_update(mgr)) { + /* to catch FRAMEDONE */ + if (mp->updating) + return true; + } else { + /* to catch GO bit going down */ + if (mp->busy) + return true; + + /* to write new values to registers */ + if (mp->info_dirty) + return true; + + /* to set GO bit */ + if (mp->shadow_info_dirty) + return true; + + /* + * NOTE: we don't check extra_info flags for disabled + * managers, once the manager is enabled, the extra_info + * related manager changes will be taken in by HW. + */ + + /* to write new values to registers */ + if (mp->extra_info_dirty) + return true; + + /* to set GO bit */ + if (mp->shadow_extra_info_dirty) + return true; + + list_for_each_entry(ovl, &mgr->overlays, list) { + struct ovl_priv_data *op; + + op = get_ovl_priv(ovl); + + /* + * NOTE: we check extra_info flags even for + * disabled overlays, as extra_infos need to be + * always written. 
+ */ + + /* to write new values to registers */ + if (op->extra_info_dirty) + return true; + + /* to set GO bit */ + if (op->shadow_extra_info_dirty) + return true; + + if (!op->enabled) + continue; + + /* to write new values to registers */ + if (op->info_dirty) + return true; + + /* to set GO bit */ + if (op->shadow_info_dirty) + return true; + } + } + } + + return false; +} + +static bool need_go(struct omap_overlay_manager *mgr) +{ + struct omap_overlay *ovl; + struct mgr_priv_data *mp; + struct ovl_priv_data *op; + + mp = get_mgr_priv(mgr); + + if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty) + return true; + + list_for_each_entry(ovl, &mgr->overlays, list) { + op = get_ovl_priv(ovl); + if (op->shadow_info_dirty || op->shadow_extra_info_dirty) + return true; + } + + return false; +} + +/* returns true if an extra_info field is currently being updated */ +static bool extra_info_update_ongoing(void) +{ + const int num_mgrs = dss_feat_get_num_mgrs(); + int i; + + for (i = 0; i < num_mgrs; ++i) { + struct omap_overlay_manager *mgr; + struct omap_overlay *ovl; + struct mgr_priv_data *mp; + + mgr = omap_dss_get_overlay_manager(i); + mp = get_mgr_priv(mgr); + + if (!mp->enabled) + continue; + + if (!mp->updating) + continue; + + if (mp->extra_info_dirty || mp->shadow_extra_info_dirty) + return true; + + list_for_each_entry(ovl, &mgr->overlays, list) { + struct ovl_priv_data *op = get_ovl_priv(ovl); + + if (op->extra_info_dirty || op->shadow_extra_info_dirty) + return true; + } + } + + return false; +} + +/* wait until no extra_info updates are pending */ +static void wait_pending_extra_info_updates(void) +{ + bool updating; + unsigned long flags; + unsigned long t; + int r; + + spin_lock_irqsave(&data_lock, flags); + + updating = extra_info_update_ongoing(); + + if (!updating) { + spin_unlock_irqrestore(&data_lock, flags); + return; + } + + init_completion(&extra_updated_completion); + + spin_unlock_irqrestore(&data_lock, flags); + + t = msecs_to_jiffies(500); + r = wait_for_completion_timeout(&extra_updated_completion, t); + if (r == 0) + DSSWARN("timeout in wait_pending_extra_info_updates\n"); +} + +static struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr) +{ + struct omap_dss_device *dssdev; + + dssdev = mgr->output; + if (dssdev == NULL) + return NULL; + + while (dssdev->dst) + dssdev = dssdev->dst; + + if (dssdev->driver) + return dssdev; + else + return NULL; +} + +static struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl) +{ + return ovl->manager ? 
dss_mgr_get_device(ovl->manager) : NULL; +} + +static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr) +{ + unsigned long timeout = msecs_to_jiffies(500); + u32 irq; + int r; + + if (mgr->output == NULL) + return -ENODEV; + + r = dispc_runtime_get(); + if (r) + return r; + + switch (mgr->output->id) { + case OMAP_DSS_OUTPUT_VENC: + irq = DISPC_IRQ_EVSYNC_ODD; + break; + case OMAP_DSS_OUTPUT_HDMI: + irq = DISPC_IRQ_EVSYNC_EVEN; + break; + default: + irq = dispc_mgr_get_vsync_irq(mgr->id); + break; + } + + r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); + + dispc_runtime_put(); + + return r; +} + +static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) +{ + unsigned long timeout = msecs_to_jiffies(500); + struct mgr_priv_data *mp = get_mgr_priv(mgr); + u32 irq; + unsigned long flags; + int r; + int i; + + spin_lock_irqsave(&data_lock, flags); + + if (mgr_manual_update(mgr)) { + spin_unlock_irqrestore(&data_lock, flags); + return 0; + } + + if (!mp->enabled) { + spin_unlock_irqrestore(&data_lock, flags); + return 0; + } + + spin_unlock_irqrestore(&data_lock, flags); + + r = dispc_runtime_get(); + if (r) + return r; + + irq = dispc_mgr_get_vsync_irq(mgr->id); + + i = 0; + while (1) { + bool shadow_dirty, dirty; + + spin_lock_irqsave(&data_lock, flags); + dirty = mp->info_dirty; + shadow_dirty = mp->shadow_info_dirty; + spin_unlock_irqrestore(&data_lock, flags); + + if (!dirty && !shadow_dirty) { + r = 0; + break; + } + + /* 4 iterations is the worst case: + * 1 - initial iteration, dirty = true (between VFP and VSYNC) + * 2 - first VSYNC, dirty = true + * 3 - dirty = false, shadow_dirty = true + * 4 - shadow_dirty = false */ + if (i++ == 3) { + DSSERR("mgr(%d)->wait_for_go() not finishing\n", + mgr->id); + r = 0; + break; + } + + r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); + if (r == -ERESTARTSYS) + break; + + if (r) { + DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id); + break; + } + } + + dispc_runtime_put(); + + return r; +} + +static int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl) +{ + unsigned long timeout = msecs_to_jiffies(500); + struct ovl_priv_data *op; + struct mgr_priv_data *mp; + u32 irq; + unsigned long flags; + int r; + int i; + + if (!ovl->manager) + return 0; + + mp = get_mgr_priv(ovl->manager); + + spin_lock_irqsave(&data_lock, flags); + + if (ovl_manual_update(ovl)) { + spin_unlock_irqrestore(&data_lock, flags); + return 0; + } + + if (!mp->enabled) { + spin_unlock_irqrestore(&data_lock, flags); + return 0; + } + + spin_unlock_irqrestore(&data_lock, flags); + + r = dispc_runtime_get(); + if (r) + return r; + + irq = dispc_mgr_get_vsync_irq(ovl->manager->id); + + op = get_ovl_priv(ovl); + i = 0; + while (1) { + bool shadow_dirty, dirty; + + spin_lock_irqsave(&data_lock, flags); + dirty = op->info_dirty; + shadow_dirty = op->shadow_info_dirty; + spin_unlock_irqrestore(&data_lock, flags); + + if (!dirty && !shadow_dirty) { + r = 0; + break; + } + + /* 4 iterations is the worst case: + * 1 - initial iteration, dirty = true (between VFP and VSYNC) + * 2 - first VSYNC, dirty = true + * 3 - dirty = false, shadow_dirty = true + * 4 - shadow_dirty = false */ + if (i++ == 3) { + DSSERR("ovl(%d)->wait_for_go() not finishing\n", + ovl->id); + r = 0; + break; + } + + r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); + if (r == -ERESTARTSYS) + break; + + if (r) { + DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id); + break; + } + } + + dispc_runtime_put(); + + return r; +} + +static void 
dss_ovl_write_regs(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + struct omap_overlay_info *oi; + bool replication; + struct mgr_priv_data *mp; + int r; + + DSSDBG("writing ovl %d regs\n", ovl->id); + + if (!op->enabled || !op->info_dirty) + return; + + oi = &op->info; + + mp = get_mgr_priv(ovl->manager); + + replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode); + + r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings, false); + if (r) { + /* + * We can't do much here, as this function can be called from + * vsync interrupt. + */ + DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id); + + /* This will leave fifo configurations in a nonoptimal state */ + op->enabled = false; + dispc_ovl_enable(ovl->id, false); + return; + } + + op->info_dirty = false; + if (mp->updating) + op->shadow_info_dirty = true; +} + +static void dss_ovl_write_regs_extra(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + struct mgr_priv_data *mp; + + DSSDBG("writing ovl %d regs extra\n", ovl->id); + + if (!op->extra_info_dirty) + return; + + /* note: write also when op->enabled == false, so that the ovl gets + * disabled */ + + dispc_ovl_enable(ovl->id, op->enabled); + dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high); + + mp = get_mgr_priv(ovl->manager); + + op->extra_info_dirty = false; + if (mp->updating) + op->shadow_extra_info_dirty = true; +} + +static void dss_mgr_write_regs(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + struct omap_overlay *ovl; + + DSSDBG("writing mgr %d regs\n", mgr->id); + + if (!mp->enabled) + return; + + WARN_ON(mp->busy); + + /* Commit overlay settings */ + list_for_each_entry(ovl, &mgr->overlays, list) { + dss_ovl_write_regs(ovl); + dss_ovl_write_regs_extra(ovl); + } + + if (mp->info_dirty) { + dispc_mgr_setup(mgr->id, &mp->info); + + mp->info_dirty = false; + if (mp->updating) + mp->shadow_info_dirty = true; + } +} + +static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + DSSDBG("writing mgr %d regs extra\n", mgr->id); + + if (!mp->extra_info_dirty) + return; + + dispc_mgr_set_timings(mgr->id, &mp->timings); + + /* lcd_config parameters */ + if (dss_mgr_is_lcd(mgr->id)) + dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config); + + mp->extra_info_dirty = false; + if (mp->updating) + mp->shadow_extra_info_dirty = true; +} + +static void dss_write_regs(void) +{ + const int num_mgrs = omap_dss_get_num_overlay_managers(); + int i; + + for (i = 0; i < num_mgrs; ++i) { + struct omap_overlay_manager *mgr; + struct mgr_priv_data *mp; + int r; + + mgr = omap_dss_get_overlay_manager(i); + mp = get_mgr_priv(mgr); + + if (!mp->enabled || mgr_manual_update(mgr) || mp->busy) + continue; + + r = dss_check_settings(mgr); + if (r) { + DSSERR("cannot write registers for manager %s: " + "illegal configuration\n", mgr->name); + continue; + } + + dss_mgr_write_regs(mgr); + dss_mgr_write_regs_extra(mgr); + } +} + +static void dss_set_go_bits(void) +{ + const int num_mgrs = omap_dss_get_num_overlay_managers(); + int i; + + for (i = 0; i < num_mgrs; ++i) { + struct omap_overlay_manager *mgr; + struct mgr_priv_data *mp; + + mgr = omap_dss_get_overlay_manager(i); + mp = get_mgr_priv(mgr); + + if (!mp->enabled || mgr_manual_update(mgr) || mp->busy) + continue; + + if (!need_go(mgr)) + continue; + + mp->busy = true; + + if (!dss_data.irq_enabled && need_isr()) + dss_register_vsync_isr(); + + 
dispc_mgr_go(mgr->id); + } + +} + +static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr) +{ + struct omap_overlay *ovl; + struct mgr_priv_data *mp; + struct ovl_priv_data *op; + + mp = get_mgr_priv(mgr); + mp->shadow_info_dirty = false; + mp->shadow_extra_info_dirty = false; + + list_for_each_entry(ovl, &mgr->overlays, list) { + op = get_ovl_priv(ovl); + op->shadow_info_dirty = false; + op->shadow_extra_info_dirty = false; + } +} + +static int dss_mgr_connect_compat(struct omap_overlay_manager *mgr, + struct omap_dss_device *dst) +{ + return mgr->set_output(mgr, dst); +} + +static void dss_mgr_disconnect_compat(struct omap_overlay_manager *mgr, + struct omap_dss_device *dst) +{ + mgr->unset_output(mgr); +} + +static void dss_mgr_start_update_compat(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + unsigned long flags; + int r; + + spin_lock_irqsave(&data_lock, flags); + + WARN_ON(mp->updating); + + r = dss_check_settings(mgr); + if (r) { + DSSERR("cannot start manual update: illegal configuration\n"); + spin_unlock_irqrestore(&data_lock, flags); + return; + } + + dss_mgr_write_regs(mgr); + dss_mgr_write_regs_extra(mgr); + + mp->updating = true; + + if (!dss_data.irq_enabled && need_isr()) + dss_register_vsync_isr(); + + dispc_mgr_enable_sync(mgr->id); + + spin_unlock_irqrestore(&data_lock, flags); +} + +static void dss_apply_irq_handler(void *data, u32 mask); + +static void dss_register_vsync_isr(void) +{ + const int num_mgrs = dss_feat_get_num_mgrs(); + u32 mask; + int r, i; + + mask = 0; + for (i = 0; i < num_mgrs; ++i) + mask |= dispc_mgr_get_vsync_irq(i); + + for (i = 0; i < num_mgrs; ++i) + mask |= dispc_mgr_get_framedone_irq(i); + + r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask); + WARN_ON(r); + + dss_data.irq_enabled = true; +} + +static void dss_unregister_vsync_isr(void) +{ + const int num_mgrs = dss_feat_get_num_mgrs(); + u32 mask; + int r, i; + + mask = 0; + for (i = 0; i < num_mgrs; ++i) + mask |= dispc_mgr_get_vsync_irq(i); + + for (i = 0; i < num_mgrs; ++i) + mask |= dispc_mgr_get_framedone_irq(i); + + r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask); + WARN_ON(r); + + dss_data.irq_enabled = false; +} + +static void dss_apply_irq_handler(void *data, u32 mask) +{ + const int num_mgrs = dss_feat_get_num_mgrs(); + int i; + bool extra_updating; + + spin_lock(&data_lock); + + /* clear busy, updating flags, shadow_dirty flags */ + for (i = 0; i < num_mgrs; i++) { + struct omap_overlay_manager *mgr; + struct mgr_priv_data *mp; + + mgr = omap_dss_get_overlay_manager(i); + mp = get_mgr_priv(mgr); + + if (!mp->enabled) + continue; + + mp->updating = dispc_mgr_is_enabled(i); + + if (!mgr_manual_update(mgr)) { + bool was_busy = mp->busy; + mp->busy = dispc_mgr_go_busy(i); + + if (was_busy && !mp->busy) + mgr_clear_shadow_dirty(mgr); + } + } + + dss_write_regs(); + dss_set_go_bits(); + + extra_updating = extra_info_update_ongoing(); + if (!extra_updating) + complete_all(&extra_updated_completion); + + /* call framedone handlers for manual update displays */ + for (i = 0; i < num_mgrs; i++) { + struct omap_overlay_manager *mgr; + struct mgr_priv_data *mp; + + mgr = omap_dss_get_overlay_manager(i); + mp = get_mgr_priv(mgr); + + if (!mgr_manual_update(mgr) || !mp->framedone_handler) + continue; + + if (mask & dispc_mgr_get_framedone_irq(i)) + mp->framedone_handler(mp->framedone_handler_data); + } + + if (!need_isr()) + dss_unregister_vsync_isr(); + + spin_unlock(&data_lock); +} + +static void 
omap_dss_mgr_apply_ovl(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op; + + op = get_ovl_priv(ovl); + + if (!op->user_info_dirty) + return; + + op->user_info_dirty = false; + op->info_dirty = true; + op->info = op->user_info; +} + +static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp; + + mp = get_mgr_priv(mgr); + + if (!mp->user_info_dirty) + return; + + mp->user_info_dirty = false; + mp->info_dirty = true; + mp->info = mp->user_info; +} + +static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) +{ + unsigned long flags; + struct omap_overlay *ovl; + int r; + + DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name); + + spin_lock_irqsave(&data_lock, flags); + + r = dss_check_settings_apply(mgr); + if (r) { + spin_unlock_irqrestore(&data_lock, flags); + DSSERR("failed to apply settings: illegal configuration.\n"); + return r; + } + + /* Configure overlays */ + list_for_each_entry(ovl, &mgr->overlays, list) + omap_dss_mgr_apply_ovl(ovl); + + /* Configure manager */ + omap_dss_mgr_apply_mgr(mgr); + + dss_write_regs(); + dss_set_go_bits(); + + spin_unlock_irqrestore(&data_lock, flags); + + return 0; +} + +static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable) +{ + struct ovl_priv_data *op; + + op = get_ovl_priv(ovl); + + if (op->enabled == enable) + return; + + op->enabled = enable; + op->extra_info_dirty = true; +} + +static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl, + u32 fifo_low, u32 fifo_high) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + + if (op->fifo_low == fifo_low && op->fifo_high == fifo_high) + return; + + op->fifo_low = fifo_low; + op->fifo_high = fifo_high; + op->extra_info_dirty = true; +} + +static void dss_ovl_setup_fifo(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + u32 fifo_low, fifo_high; + bool use_fifo_merge = false; + + if (!op->enabled && !op->enabling) + return; + + dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high, + use_fifo_merge, ovl_manual_update(ovl)); + + dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high); +} + +static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr) +{ + struct omap_overlay *ovl; + struct mgr_priv_data *mp; + + mp = get_mgr_priv(mgr); + + if (!mp->enabled) + return; + + list_for_each_entry(ovl, &mgr->overlays, list) + dss_ovl_setup_fifo(ovl); +} + +static void dss_setup_fifos(void) +{ + const int num_mgrs = omap_dss_get_num_overlay_managers(); + struct omap_overlay_manager *mgr; + int i; + + for (i = 0; i < num_mgrs; ++i) { + mgr = omap_dss_get_overlay_manager(i); + dss_mgr_setup_fifos(mgr); + } +} + +static int dss_mgr_enable_compat(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + unsigned long flags; + int r; + + mutex_lock(&apply_lock); + + if (mp->enabled) + goto out; + + spin_lock_irqsave(&data_lock, flags); + + mp->enabled = true; + + r = dss_check_settings(mgr); + if (r) { + DSSERR("failed to enable manager %d: check_settings failed\n", + mgr->id); + goto err; + } + + dss_setup_fifos(); + + dss_write_regs(); + dss_set_go_bits(); + + if (!mgr_manual_update(mgr)) + mp->updating = true; + + if (!dss_data.irq_enabled && need_isr()) + dss_register_vsync_isr(); + + spin_unlock_irqrestore(&data_lock, flags); + + if (!mgr_manual_update(mgr)) + dispc_mgr_enable_sync(mgr->id); + +out: + mutex_unlock(&apply_lock); + + return 0; + +err: + mp->enabled = false; + spin_unlock_irqrestore(&data_lock, flags); + mutex_unlock(&apply_lock); + return r; +} + +static 
void dss_mgr_disable_compat(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + unsigned long flags; + + mutex_lock(&apply_lock); + + if (!mp->enabled) + goto out; + + wait_pending_extra_info_updates(); + + if (!mgr_manual_update(mgr)) + dispc_mgr_disable_sync(mgr->id); + + spin_lock_irqsave(&data_lock, flags); + + mp->updating = false; + mp->enabled = false; + + spin_unlock_irqrestore(&data_lock, flags); + +out: + mutex_unlock(&apply_lock); +} + +static int dss_mgr_set_info(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + unsigned long flags; + int r; + + r = dss_mgr_simple_check(mgr, info); + if (r) + return r; + + spin_lock_irqsave(&data_lock, flags); + + mp->user_info = *info; + mp->user_info_dirty = true; + + spin_unlock_irqrestore(&data_lock, flags); + + return 0; +} + +static void dss_mgr_get_info(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + unsigned long flags; + + spin_lock_irqsave(&data_lock, flags); + + *info = mp->user_info; + + spin_unlock_irqrestore(&data_lock, flags); +} + +static int dss_mgr_set_output(struct omap_overlay_manager *mgr, + struct omap_dss_device *output) +{ + int r; + + mutex_lock(&apply_lock); + + if (mgr->output) { + DSSERR("manager %s is already connected to an output\n", + mgr->name); + r = -EINVAL; + goto err; + } + + if ((mgr->supported_outputs & output->id) == 0) { + DSSERR("output does not support manager %s\n", + mgr->name); + r = -EINVAL; + goto err; + } + + output->manager = mgr; + mgr->output = output; + + mutex_unlock(&apply_lock); + + return 0; +err: + mutex_unlock(&apply_lock); + return r; +} + +static int dss_mgr_unset_output(struct omap_overlay_manager *mgr) +{ + int r; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + unsigned long flags; + + mutex_lock(&apply_lock); + + if (!mgr->output) { + DSSERR("failed to unset output, output not set\n"); + r = -EINVAL; + goto err; + } + + spin_lock_irqsave(&data_lock, flags); + + if (mp->enabled) { + DSSERR("output can't be unset when manager is enabled\n"); + r = -EINVAL; + goto err1; + } + + spin_unlock_irqrestore(&data_lock, flags); + + mgr->output->manager = NULL; + mgr->output = NULL; + + mutex_unlock(&apply_lock); + + return 0; +err1: + spin_unlock_irqrestore(&data_lock, flags); +err: + mutex_unlock(&apply_lock); + + return r; +} + +static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr, + const struct omap_video_timings *timings) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mp->timings = *timings; + mp->extra_info_dirty = true; +} + +static void dss_mgr_set_timings_compat(struct omap_overlay_manager *mgr, + const struct omap_video_timings *timings) +{ + unsigned long flags; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + spin_lock_irqsave(&data_lock, flags); + + if (mp->updating) { + DSSERR("cannot set timings for %s: manager needs to be disabled\n", + mgr->name); + goto out; + } + + dss_apply_mgr_timings(mgr, timings); +out: + spin_unlock_irqrestore(&data_lock, flags); +} + +static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mp->lcd_config = *config; + mp->extra_info_dirty = true; +} + +static void dss_mgr_set_lcd_config_compat(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + unsigned long flags; + struct mgr_priv_data *mp = 
get_mgr_priv(mgr); + + spin_lock_irqsave(&data_lock, flags); + + if (mp->enabled) { + DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n", + mgr->name); + goto out; + } + + dss_apply_mgr_lcd_config(mgr, config); +out: + spin_unlock_irqrestore(&data_lock, flags); +} + +static int dss_ovl_set_info(struct omap_overlay *ovl, + struct omap_overlay_info *info) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + unsigned long flags; + int r; + + r = dss_ovl_simple_check(ovl, info); + if (r) + return r; + + spin_lock_irqsave(&data_lock, flags); + + op->user_info = *info; + op->user_info_dirty = true; + + spin_unlock_irqrestore(&data_lock, flags); + + return 0; +} + +static void dss_ovl_get_info(struct omap_overlay *ovl, + struct omap_overlay_info *info) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + unsigned long flags; + + spin_lock_irqsave(&data_lock, flags); + + *info = op->user_info; + + spin_unlock_irqrestore(&data_lock, flags); +} + +static int dss_ovl_set_manager(struct omap_overlay *ovl, + struct omap_overlay_manager *mgr) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + unsigned long flags; + int r; + + if (!mgr) + return -EINVAL; + + mutex_lock(&apply_lock); + + if (ovl->manager) { + DSSERR("overlay '%s' already has a manager '%s'\n", + ovl->name, ovl->manager->name); + r = -EINVAL; + goto err; + } + + r = dispc_runtime_get(); + if (r) + goto err; + + spin_lock_irqsave(&data_lock, flags); + + if (op->enabled) { + spin_unlock_irqrestore(&data_lock, flags); + DSSERR("overlay has to be disabled to change the manager\n"); + r = -EINVAL; + goto err1; + } + + dispc_ovl_set_channel_out(ovl->id, mgr->id); + + ovl->manager = mgr; + list_add_tail(&ovl->list, &mgr->overlays); + + spin_unlock_irqrestore(&data_lock, flags); + + dispc_runtime_put(); + + mutex_unlock(&apply_lock); + + return 0; + +err1: + dispc_runtime_put(); +err: + mutex_unlock(&apply_lock); + return r; +} + +static int dss_ovl_unset_manager(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + unsigned long flags; + int r; + + mutex_lock(&apply_lock); + + if (!ovl->manager) { + DSSERR("failed to detach overlay: manager not set\n"); + r = -EINVAL; + goto err; + } + + spin_lock_irqsave(&data_lock, flags); + + if (op->enabled) { + spin_unlock_irqrestore(&data_lock, flags); + DSSERR("overlay has to be disabled to unset the manager\n"); + r = -EINVAL; + goto err; + } + + spin_unlock_irqrestore(&data_lock, flags); + + /* wait for pending extra_info updates to ensure the ovl is disabled */ + wait_pending_extra_info_updates(); + + /* + * For a manual update display, there is no guarantee that the overlay + * is really disabled in HW, we may need an extra update from this + * manager before the configurations can go in. Return an error if the + * overlay needed an update from the manager. + * + * TODO: Instead of returning an error, try to do a dummy manager update + * here to disable the overlay in hardware. Use the *GATED fields in + * the DISPC_CONFIG registers to do a dummy update. 
+ */ + spin_lock_irqsave(&data_lock, flags); + + if (ovl_manual_update(ovl) && op->extra_info_dirty) { + spin_unlock_irqrestore(&data_lock, flags); + DSSERR("need an update to change the manager\n"); + r = -EINVAL; + goto err; + } + + ovl->manager = NULL; + list_del(&ovl->list); + + spin_unlock_irqrestore(&data_lock, flags); + + mutex_unlock(&apply_lock); + + return 0; +err: + mutex_unlock(&apply_lock); + return r; +} + +static bool dss_ovl_is_enabled(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + unsigned long flags; + bool e; + + spin_lock_irqsave(&data_lock, flags); + + e = op->enabled; + + spin_unlock_irqrestore(&data_lock, flags); + + return e; +} + +static int dss_ovl_enable(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + unsigned long flags; + int r; + + mutex_lock(&apply_lock); + + if (op->enabled) { + r = 0; + goto err1; + } + + if (ovl->manager == NULL || ovl->manager->output == NULL) { + r = -EINVAL; + goto err1; + } + + spin_lock_irqsave(&data_lock, flags); + + op->enabling = true; + + r = dss_check_settings(ovl->manager); + if (r) { + DSSERR("failed to enable overlay %d: check_settings failed\n", + ovl->id); + goto err2; + } + + dss_setup_fifos(); + + op->enabling = false; + dss_apply_ovl_enable(ovl, true); + + dss_write_regs(); + dss_set_go_bits(); + + spin_unlock_irqrestore(&data_lock, flags); + + mutex_unlock(&apply_lock); + + return 0; +err2: + op->enabling = false; + spin_unlock_irqrestore(&data_lock, flags); +err1: + mutex_unlock(&apply_lock); + return r; +} + +static int dss_ovl_disable(struct omap_overlay *ovl) +{ + struct ovl_priv_data *op = get_ovl_priv(ovl); + unsigned long flags; + int r; + + mutex_lock(&apply_lock); + + if (!op->enabled) { + r = 0; + goto err; + } + + if (ovl->manager == NULL || ovl->manager->output == NULL) { + r = -EINVAL; + goto err; + } + + spin_lock_irqsave(&data_lock, flags); + + dss_apply_ovl_enable(ovl, false); + dss_write_regs(); + dss_set_go_bits(); + + spin_unlock_irqrestore(&data_lock, flags); + + mutex_unlock(&apply_lock); + + return 0; + +err: + mutex_unlock(&apply_lock); + return r; +} + +static int dss_mgr_register_framedone_handler_compat(struct omap_overlay_manager *mgr, + void (*handler)(void *), void *data) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + if (mp->framedone_handler) + return -EBUSY; + + mp->framedone_handler = handler; + mp->framedone_handler_data = data; + + return 0; +} + +static void dss_mgr_unregister_framedone_handler_compat(struct omap_overlay_manager *mgr, + void (*handler)(void *), void *data) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + WARN_ON(mp->framedone_handler != handler || + mp->framedone_handler_data != data); + + mp->framedone_handler = NULL; + mp->framedone_handler_data = NULL; +} + +static const struct dss_mgr_ops apply_mgr_ops = { + .connect = dss_mgr_connect_compat, + .disconnect = dss_mgr_disconnect_compat, + .start_update = dss_mgr_start_update_compat, + .enable = dss_mgr_enable_compat, + .disable = dss_mgr_disable_compat, + .set_timings = dss_mgr_set_timings_compat, + .set_lcd_config = dss_mgr_set_lcd_config_compat, + .register_framedone_handler = dss_mgr_register_framedone_handler_compat, + .unregister_framedone_handler = dss_mgr_unregister_framedone_handler_compat, +}; + +static int compat_refcnt; +static DEFINE_MUTEX(compat_init_lock); + +int omapdss_compat_init(void) +{ + struct platform_device *pdev = dss_get_core_pdev(); + int i, r; + + mutex_lock(&compat_init_lock); + + if (compat_refcnt++ > 0) + goto 
out; + + apply_init_priv(); + + dss_init_overlay_managers_sysfs(pdev); + dss_init_overlays(pdev); + + for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) { + struct omap_overlay_manager *mgr; + + mgr = omap_dss_get_overlay_manager(i); + + mgr->set_output = &dss_mgr_set_output; + mgr->unset_output = &dss_mgr_unset_output; + mgr->apply = &omap_dss_mgr_apply; + mgr->set_manager_info = &dss_mgr_set_info; + mgr->get_manager_info = &dss_mgr_get_info; + mgr->wait_for_go = &dss_mgr_wait_for_go; + mgr->wait_for_vsync = &dss_mgr_wait_for_vsync; + mgr->get_device = &dss_mgr_get_device; + } + + for (i = 0; i < omap_dss_get_num_overlays(); i++) { + struct omap_overlay *ovl = omap_dss_get_overlay(i); + + ovl->is_enabled = &dss_ovl_is_enabled; + ovl->enable = &dss_ovl_enable; + ovl->disable = &dss_ovl_disable; + ovl->set_manager = &dss_ovl_set_manager; + ovl->unset_manager = &dss_ovl_unset_manager; + ovl->set_overlay_info = &dss_ovl_set_info; + ovl->get_overlay_info = &dss_ovl_get_info; + ovl->wait_for_go = &dss_mgr_wait_for_go_ovl; + ovl->get_device = &dss_ovl_get_device; + } + + r = dss_install_mgr_ops(&apply_mgr_ops); + if (r) + goto err_mgr_ops; + + r = display_init_sysfs(pdev); + if (r) + goto err_disp_sysfs; + + dispc_runtime_get(); + + r = dss_dispc_initialize_irq(); + if (r) + goto err_init_irq; + + dispc_runtime_put(); + +out: + mutex_unlock(&compat_init_lock); + + return 0; + +err_init_irq: + dispc_runtime_put(); + display_uninit_sysfs(pdev); + +err_disp_sysfs: + dss_uninstall_mgr_ops(); + +err_mgr_ops: + dss_uninit_overlay_managers_sysfs(pdev); + dss_uninit_overlays(pdev); + + compat_refcnt--; + + mutex_unlock(&compat_init_lock); + + return r; +} +EXPORT_SYMBOL(omapdss_compat_init); + +void omapdss_compat_uninit(void) +{ + struct platform_device *pdev = dss_get_core_pdev(); + + mutex_lock(&compat_init_lock); + + if (--compat_refcnt > 0) + goto out; + + dss_dispc_uninitialize_irq(); + + display_uninit_sysfs(pdev); + + dss_uninstall_mgr_ops(); + + dss_uninit_overlay_managers_sysfs(pdev); + dss_uninit_overlays(pdev); +out: + mutex_unlock(&compat_init_lock); +} +EXPORT_SYMBOL(omapdss_compat_uninit); diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c new file mode 100644 index 000000000000..54eeb507f9b3 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/core.c @@ -0,0 +1,343 @@ +/* + * linux/drivers/video/omap2/dss/core.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "CORE" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/io.h> +#include <linux/device.h> +#include <linux/regulator/consumer.h> +#include <linux/suspend.h> +#include <linux/slab.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +static struct { + struct platform_device *pdev; + + const char *default_display_name; +} core; + +static char *def_disp_name; +module_param_named(def_disp, def_disp_name, charp, 0); +MODULE_PARM_DESC(def_disp, "default display name"); + +const char *omapdss_get_default_display_name(void) +{ + return core.default_display_name; +} +EXPORT_SYMBOL(omapdss_get_default_display_name); + +enum omapdss_version omapdss_get_version(void) +{ + struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; + return pdata->version; +} +EXPORT_SYMBOL(omapdss_get_version); + +struct platform_device *dss_get_core_pdev(void) +{ + return core.pdev; +} + +int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask) +{ + struct omap_dss_board_info *board_data = core.pdev->dev.platform_data; + + if (!board_data->dsi_enable_pads) + return -ENOENT; + + return board_data->dsi_enable_pads(dsi_id, lane_mask); +} + +void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask) +{ + struct omap_dss_board_info *board_data = core.pdev->dev.platform_data; + + if (!board_data->dsi_disable_pads) + return; + + return board_data->dsi_disable_pads(dsi_id, lane_mask); +} + +int dss_set_min_bus_tput(struct device *dev, unsigned long tput) +{ + struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; + + if (pdata->set_min_bus_tput) + return pdata->set_min_bus_tput(dev, tput); + else + return 0; +} + +#if defined(CONFIG_OMAP2_DSS_DEBUGFS) +static int dss_debug_show(struct seq_file *s, void *unused) +{ + void (*func)(struct seq_file *) = s->private; + func(s); + return 0; +} + +static int dss_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, dss_debug_show, inode->i_private); +} + +static const struct file_operations dss_debug_fops = { + .open = dss_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *dss_debugfs_dir; + +static int dss_initialize_debugfs(void) +{ + dss_debugfs_dir = debugfs_create_dir("omapdss", NULL); + if (IS_ERR(dss_debugfs_dir)) { + int err = PTR_ERR(dss_debugfs_dir); + dss_debugfs_dir = NULL; + return err; + } + + debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, + &dss_debug_dump_clocks, &dss_debug_fops); + + return 0; +} + +static void dss_uninitialize_debugfs(void) +{ + if (dss_debugfs_dir) + debugfs_remove_recursive(dss_debugfs_dir); +} + +int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) +{ + struct dentry *d; + + d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir, + write, &dss_debug_fops); + + return PTR_ERR_OR_ZERO(d); +} +#else /* CONFIG_OMAP2_DSS_DEBUGFS */ +static inline int dss_initialize_debugfs(void) +{ + return 0; +} +static inline void dss_uninitialize_debugfs(void) +{ +} +int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) +{ + return 0; +} +#endif /* CONFIG_OMAP2_DSS_DEBUGFS */ + +/* PLATFORM DEVICE */ +static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d) +{ + DSSDBG("pm notif %lu\n", v); + + switch (v) { + case PM_SUSPEND_PREPARE: + case 
PM_HIBERNATION_PREPARE: + case PM_RESTORE_PREPARE: + DSSDBG("suspending displays\n"); + return dss_suspend_all_devices(); + + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + DSSDBG("resuming displays\n"); + return dss_resume_all_devices(); + + default: + return 0; + } +} + +static struct notifier_block omap_dss_pm_notif_block = { + .notifier_call = omap_dss_pm_notif, +}; + +static int __init omap_dss_probe(struct platform_device *pdev) +{ + struct omap_dss_board_info *pdata = pdev->dev.platform_data; + int r; + + core.pdev = pdev; + + dss_features_init(omapdss_get_version()); + + r = dss_initialize_debugfs(); + if (r) + goto err_debugfs; + + if (def_disp_name) + core.default_display_name = def_disp_name; + else if (pdata->default_display_name) + core.default_display_name = pdata->default_display_name; + else if (pdata->default_device) + core.default_display_name = pdata->default_device->name; + + register_pm_notifier(&omap_dss_pm_notif_block); + + return 0; + +err_debugfs: + + return r; +} + +static int omap_dss_remove(struct platform_device *pdev) +{ + unregister_pm_notifier(&omap_dss_pm_notif_block); + + dss_uninitialize_debugfs(); + + return 0; +} + +static void omap_dss_shutdown(struct platform_device *pdev) +{ + DSSDBG("shutdown\n"); + dss_disable_all_devices(); +} + +static struct platform_driver omap_dss_driver = { + .remove = omap_dss_remove, + .shutdown = omap_dss_shutdown, + .driver = { + .name = "omapdss", + }, +}; + +/* INIT */ +static int (*dss_output_drv_reg_funcs[])(void) __initdata = { + dss_init_platform_driver, + dispc_init_platform_driver, +#ifdef CONFIG_OMAP2_DSS_DSI + dsi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_DPI + dpi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_SDI + sdi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_RFBI + rfbi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_VENC + venc_init_platform_driver, +#endif +#ifdef CONFIG_OMAP4_DSS_HDMI + hdmi4_init_platform_driver, +#endif +#ifdef CONFIG_OMAP5_DSS_HDMI + hdmi5_init_platform_driver, +#endif +}; + +static void (*dss_output_drv_unreg_funcs[])(void) = { +#ifdef CONFIG_OMAP5_DSS_HDMI + hdmi5_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP4_DSS_HDMI + hdmi4_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_VENC + venc_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_RFBI + rfbi_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_SDI + sdi_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_DPI + dpi_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_DSI + dsi_uninit_platform_driver, +#endif + dispc_uninit_platform_driver, + dss_uninit_platform_driver, +}; + +static int __init omap_dss_init(void) +{ + int r; + int i; + + r = platform_driver_probe(&omap_dss_driver, omap_dss_probe); + if (r) + return r; + + for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) { + r = dss_output_drv_reg_funcs[i](); + if (r) + goto err_reg; + } + + return 0; + +err_reg: + for (i = ARRAY_SIZE(dss_output_drv_reg_funcs) - i; + i < ARRAY_SIZE(dss_output_drv_reg_funcs); + ++i) + dss_output_drv_unreg_funcs[i](); + + platform_driver_unregister(&omap_dss_driver); + + return r; +} + +static void __exit omap_dss_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) + dss_output_drv_unreg_funcs[i](); + + platform_driver_unregister(&omap_dss_driver); +} + +module_init(omap_dss_init); +module_exit(omap_dss_exit); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>"); +MODULE_DESCRIPTION("OMAP2/3 
Display Subsystem"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/gpu/drm/omapdrm/dss/dispc-compat.c b/drivers/gpu/drm/omapdrm/dss/dispc-compat.c new file mode 100644 index 000000000000..0918b3bfe82a --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dispc-compat.c @@ -0,0 +1,667 @@ +/* + * Copyright (C) 2012 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "APPLY" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/jiffies.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/seq_file.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" +#include "dispc-compat.h" + +#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \ + DISPC_IRQ_OCP_ERR | \ + DISPC_IRQ_VID1_FIFO_UNDERFLOW | \ + DISPC_IRQ_VID2_FIFO_UNDERFLOW | \ + DISPC_IRQ_SYNC_LOST | \ + DISPC_IRQ_SYNC_LOST_DIGIT) + +#define DISPC_MAX_NR_ISRS 8 + +struct omap_dispc_isr_data { + omap_dispc_isr_t isr; + void *arg; + u32 mask; +}; + +struct dispc_irq_stats { + unsigned long last_reset; + unsigned irq_count; + unsigned irqs[32]; +}; + +static struct { + spinlock_t irq_lock; + u32 irq_error_mask; + struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS]; + u32 error_irqs; + struct work_struct error_work; + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + spinlock_t irq_stats_lock; + struct dispc_irq_stats irq_stats; +#endif +} dispc_compat; + + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS +static void dispc_dump_irqs(struct seq_file *s) +{ + unsigned long flags; + struct dispc_irq_stats stats; + + spin_lock_irqsave(&dispc_compat.irq_stats_lock, flags); + + stats = dispc_compat.irq_stats; + memset(&dispc_compat.irq_stats, 0, sizeof(dispc_compat.irq_stats)); + dispc_compat.irq_stats.last_reset = jiffies; + + spin_unlock_irqrestore(&dispc_compat.irq_stats_lock, flags); + + seq_printf(s, "period %u ms\n", + jiffies_to_msecs(jiffies - stats.last_reset)); + + seq_printf(s, "irqs %d\n", stats.irq_count); +#define PIS(x) \ + seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]); + + PIS(FRAMEDONE); + PIS(VSYNC); + PIS(EVSYNC_EVEN); + PIS(EVSYNC_ODD); + PIS(ACBIAS_COUNT_STAT); + PIS(PROG_LINE_NUM); + PIS(GFX_FIFO_UNDERFLOW); + PIS(GFX_END_WIN); + PIS(PAL_GAMMA_MASK); + PIS(OCP_ERR); + PIS(VID1_FIFO_UNDERFLOW); + PIS(VID1_END_WIN); + PIS(VID2_FIFO_UNDERFLOW); + PIS(VID2_END_WIN); + if (dss_feat_get_num_ovls() > 3) { + PIS(VID3_FIFO_UNDERFLOW); + PIS(VID3_END_WIN); + } + PIS(SYNC_LOST); + PIS(SYNC_LOST_DIGIT); + PIS(WAKEUP); + if (dss_has_feature(FEAT_MGR_LCD2)) { + PIS(FRAMEDONE2); + PIS(VSYNC2); + PIS(ACBIAS_COUNT_STAT2); + PIS(SYNC_LOST2); + } + if (dss_has_feature(FEAT_MGR_LCD3)) { + PIS(FRAMEDONE3); + PIS(VSYNC3); + PIS(ACBIAS_COUNT_STAT3); + PIS(SYNC_LOST3); + } +#undef PIS +} +#endif + +/* dispc.irq_lock has to be locked by the caller */ +static void 
_omap_dispc_set_irqs(void) +{ + u32 mask; + int i; + struct omap_dispc_isr_data *isr_data; + + mask = dispc_compat.irq_error_mask; + + for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { + isr_data = &dispc_compat.registered_isr[i]; + + if (isr_data->isr == NULL) + continue; + + mask |= isr_data->mask; + } + + dispc_write_irqenable(mask); +} + +int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask) +{ + int i; + int ret; + unsigned long flags; + struct omap_dispc_isr_data *isr_data; + + if (isr == NULL) + return -EINVAL; + + spin_lock_irqsave(&dispc_compat.irq_lock, flags); + + /* check for duplicate entry */ + for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { + isr_data = &dispc_compat.registered_isr[i]; + if (isr_data->isr == isr && isr_data->arg == arg && + isr_data->mask == mask) { + ret = -EINVAL; + goto err; + } + } + + isr_data = NULL; + ret = -EBUSY; + + for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { + isr_data = &dispc_compat.registered_isr[i]; + + if (isr_data->isr != NULL) + continue; + + isr_data->isr = isr; + isr_data->arg = arg; + isr_data->mask = mask; + ret = 0; + + break; + } + + if (ret) + goto err; + + _omap_dispc_set_irqs(); + + spin_unlock_irqrestore(&dispc_compat.irq_lock, flags); + + return 0; +err: + spin_unlock_irqrestore(&dispc_compat.irq_lock, flags); + + return ret; +} +EXPORT_SYMBOL(omap_dispc_register_isr); + +int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask) +{ + int i; + unsigned long flags; + int ret = -EINVAL; + struct omap_dispc_isr_data *isr_data; + + spin_lock_irqsave(&dispc_compat.irq_lock, flags); + + for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { + isr_data = &dispc_compat.registered_isr[i]; + if (isr_data->isr != isr || isr_data->arg != arg || + isr_data->mask != mask) + continue; + + /* found the correct isr */ + + isr_data->isr = NULL; + isr_data->arg = NULL; + isr_data->mask = 0; + + ret = 0; + break; + } + + if (ret == 0) + _omap_dispc_set_irqs(); + + spin_unlock_irqrestore(&dispc_compat.irq_lock, flags); + + return ret; +} +EXPORT_SYMBOL(omap_dispc_unregister_isr); + +static void print_irq_status(u32 status) +{ + if ((status & dispc_compat.irq_error_mask) == 0) + return; + +#define PIS(x) (status & DISPC_IRQ_##x) ? (#x " ") : "" + + pr_debug("DISPC IRQ: 0x%x: %s%s%s%s%s%s%s%s%s\n", + status, + PIS(OCP_ERR), + PIS(GFX_FIFO_UNDERFLOW), + PIS(VID1_FIFO_UNDERFLOW), + PIS(VID2_FIFO_UNDERFLOW), + dss_feat_get_num_ovls() > 3 ? PIS(VID3_FIFO_UNDERFLOW) : "", + PIS(SYNC_LOST), + PIS(SYNC_LOST_DIGIT), + dss_has_feature(FEAT_MGR_LCD2) ? PIS(SYNC_LOST2) : "", + dss_has_feature(FEAT_MGR_LCD3) ? PIS(SYNC_LOST3) : ""); +#undef PIS +} + +/* Called from dss.c. Note that we don't touch clocks here, + * but we presume they are on because we got an IRQ. However, + * an irq handler may turn the clocks off, so we may not have + * clock later in the function. 
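+ *
+ * The handlers dispatched below are the ones added with
+ * omap_dispc_register_isr(); a typical caller pairs that with a
+ * completion and a matching omap_dispc_unregister_isr(), as
+ * omap_dispc_wait_for_irq_interruptible_timeout() at the end of this
+ * file does. A minimal user of the mechanism (sketch only; my_vsync_isr
+ * and the 100 ms timeout are placeholders, not part of this driver)
+ * would look like:
+ *
+ *	DECLARE_COMPLETION_ONSTACK(compl);
+ *
+ *	r = omap_dispc_register_isr(my_vsync_isr, &compl, DISPC_IRQ_VSYNC);
+ *	if (!r) {
+ *		wait_for_completion_timeout(&compl, msecs_to_jiffies(100));
+ *		omap_dispc_unregister_isr(my_vsync_isr, &compl,
+ *				DISPC_IRQ_VSYNC);
+ *	}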
*/ +static irqreturn_t omap_dispc_irq_handler(int irq, void *arg) +{ + int i; + u32 irqstatus, irqenable; + u32 handledirqs = 0; + u32 unhandled_errors; + struct omap_dispc_isr_data *isr_data; + struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS]; + + spin_lock(&dispc_compat.irq_lock); + + irqstatus = dispc_read_irqstatus(); + irqenable = dispc_read_irqenable(); + + /* IRQ is not for us */ + if (!(irqstatus & irqenable)) { + spin_unlock(&dispc_compat.irq_lock); + return IRQ_NONE; + } + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + spin_lock(&dispc_compat.irq_stats_lock); + dispc_compat.irq_stats.irq_count++; + dss_collect_irq_stats(irqstatus, dispc_compat.irq_stats.irqs); + spin_unlock(&dispc_compat.irq_stats_lock); +#endif + + print_irq_status(irqstatus); + + /* Ack the interrupt. Do it here before clocks are possibly turned + * off */ + dispc_clear_irqstatus(irqstatus); + /* flush posted write */ + dispc_read_irqstatus(); + + /* make a copy and unlock, so that isrs can unregister + * themselves */ + memcpy(registered_isr, dispc_compat.registered_isr, + sizeof(registered_isr)); + + spin_unlock(&dispc_compat.irq_lock); + + for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { + isr_data = ®istered_isr[i]; + + if (!isr_data->isr) + continue; + + if (isr_data->mask & irqstatus) { + isr_data->isr(isr_data->arg, irqstatus); + handledirqs |= isr_data->mask; + } + } + + spin_lock(&dispc_compat.irq_lock); + + unhandled_errors = irqstatus & ~handledirqs & dispc_compat.irq_error_mask; + + if (unhandled_errors) { + dispc_compat.error_irqs |= unhandled_errors; + + dispc_compat.irq_error_mask &= ~unhandled_errors; + _omap_dispc_set_irqs(); + + schedule_work(&dispc_compat.error_work); + } + + spin_unlock(&dispc_compat.irq_lock); + + return IRQ_HANDLED; +} + +static void dispc_error_worker(struct work_struct *work) +{ + int i; + u32 errors; + unsigned long flags; + static const unsigned fifo_underflow_bits[] = { + DISPC_IRQ_GFX_FIFO_UNDERFLOW, + DISPC_IRQ_VID1_FIFO_UNDERFLOW, + DISPC_IRQ_VID2_FIFO_UNDERFLOW, + DISPC_IRQ_VID3_FIFO_UNDERFLOW, + }; + + spin_lock_irqsave(&dispc_compat.irq_lock, flags); + errors = dispc_compat.error_irqs; + dispc_compat.error_irqs = 0; + spin_unlock_irqrestore(&dispc_compat.irq_lock, flags); + + dispc_runtime_get(); + + for (i = 0; i < omap_dss_get_num_overlays(); ++i) { + struct omap_overlay *ovl; + unsigned bit; + + ovl = omap_dss_get_overlay(i); + bit = fifo_underflow_bits[i]; + + if (bit & errors) { + DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n", + ovl->name); + ovl->disable(ovl); + msleep(50); + } + } + + for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { + struct omap_overlay_manager *mgr; + unsigned bit; + + mgr = omap_dss_get_overlay_manager(i); + bit = dispc_mgr_get_sync_lost_irq(i); + + if (bit & errors) { + int j; + + DSSERR("SYNC_LOST on channel %s, restarting the output " + "with video overlays disabled\n", + mgr->name); + + dss_mgr_disable(mgr); + + for (j = 0; j < omap_dss_get_num_overlays(); ++j) { + struct omap_overlay *ovl; + ovl = omap_dss_get_overlay(j); + + if (ovl->id != OMAP_DSS_GFX && + ovl->manager == mgr) + ovl->disable(ovl); + } + + dss_mgr_enable(mgr); + } + } + + if (errors & DISPC_IRQ_OCP_ERR) { + DSSERR("OCP_ERR\n"); + for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { + struct omap_overlay_manager *mgr; + + mgr = omap_dss_get_overlay_manager(i); + dss_mgr_disable(mgr); + } + } + + spin_lock_irqsave(&dispc_compat.irq_lock, flags); + dispc_compat.irq_error_mask |= errors; + _omap_dispc_set_irqs(); + 
spin_unlock_irqrestore(&dispc_compat.irq_lock, flags); + + dispc_runtime_put(); +} + +int dss_dispc_initialize_irq(void) +{ + int r; + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + spin_lock_init(&dispc_compat.irq_stats_lock); + dispc_compat.irq_stats.last_reset = jiffies; + dss_debugfs_create_file("dispc_irq", dispc_dump_irqs); +#endif + + spin_lock_init(&dispc_compat.irq_lock); + + memset(dispc_compat.registered_isr, 0, + sizeof(dispc_compat.registered_isr)); + + dispc_compat.irq_error_mask = DISPC_IRQ_MASK_ERROR; + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST2; + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST3; + if (dss_feat_get_num_ovls() > 3) + dispc_compat.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW; + + /* + * there's SYNC_LOST_DIGIT waiting after enabling the DSS, + * so clear it + */ + dispc_clear_irqstatus(dispc_read_irqstatus()); + + INIT_WORK(&dispc_compat.error_work, dispc_error_worker); + + _omap_dispc_set_irqs(); + + r = dispc_request_irq(omap_dispc_irq_handler, &dispc_compat); + if (r) { + DSSERR("dispc_request_irq failed\n"); + return r; + } + + return 0; +} + +void dss_dispc_uninitialize_irq(void) +{ + dispc_free_irq(&dispc_compat); +} + +static void dispc_mgr_disable_isr(void *data, u32 mask) +{ + struct completion *compl = data; + complete(compl); +} + +static void dispc_mgr_enable_lcd_out(enum omap_channel channel) +{ + dispc_mgr_enable(channel, true); +} + +static void dispc_mgr_disable_lcd_out(enum omap_channel channel) +{ + DECLARE_COMPLETION_ONSTACK(framedone_compl); + int r; + u32 irq; + + if (!dispc_mgr_is_enabled(channel)) + return; + + /* + * When we disable LCD output, we need to wait for FRAMEDONE to know + * that DISPC has finished with the LCD output. + */ + + irq = dispc_mgr_get_framedone_irq(channel); + + r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl, + irq); + if (r) + DSSERR("failed to register FRAMEDONE isr\n"); + + dispc_mgr_enable(channel, false); + + /* if we couldn't register for framedone, just sleep and exit */ + if (r) { + msleep(100); + return; + } + + if (!wait_for_completion_timeout(&framedone_compl, + msecs_to_jiffies(100))) + DSSERR("timeout waiting for FRAME DONE\n"); + + r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl, + irq); + if (r) + DSSERR("failed to unregister FRAMEDONE isr\n"); +} + +static void dispc_digit_out_enable_isr(void *data, u32 mask) +{ + struct completion *compl = data; + + /* ignore any sync lost interrupts */ + if (mask & (DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD)) + complete(compl); +} + +static void dispc_mgr_enable_digit_out(void) +{ + DECLARE_COMPLETION_ONSTACK(vsync_compl); + int r; + u32 irq_mask; + + if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT)) + return; + + /* + * Digit output produces some sync lost interrupts during the first + * frame when enabling. Those need to be ignored, so we register for the + * sync lost irq to prevent the error handler from triggering. 
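+ *
+ * dispc_digit_out_enable_isr() above completes the waiter only on the
+ * EVSYNC bits and silently ignores any SYNC_LOST_DIGIT that fires while
+ * we wait for the first frame.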
+ */ + + irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT) | + dispc_mgr_get_sync_lost_irq(OMAP_DSS_CHANNEL_DIGIT); + + r = omap_dispc_register_isr(dispc_digit_out_enable_isr, &vsync_compl, + irq_mask); + if (r) { + DSSERR("failed to register %x isr\n", irq_mask); + return; + } + + dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, true); + + /* wait for the first evsync */ + if (!wait_for_completion_timeout(&vsync_compl, msecs_to_jiffies(100))) + DSSERR("timeout waiting for digit out to start\n"); + + r = omap_dispc_unregister_isr(dispc_digit_out_enable_isr, &vsync_compl, + irq_mask); + if (r) + DSSERR("failed to unregister %x isr\n", irq_mask); +} + +static void dispc_mgr_disable_digit_out(void) +{ + DECLARE_COMPLETION_ONSTACK(framedone_compl); + int r, i; + u32 irq_mask; + int num_irqs; + + if (!dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT)) + return; + + /* + * When we disable the digit output, we need to wait for FRAMEDONE to + * know that DISPC has finished with the output. + */ + + irq_mask = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_DIGIT); + num_irqs = 1; + + if (!irq_mask) { + /* + * omap 2/3 don't have framedone irq for TV, so we need to use + * vsyncs for this. + */ + + irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT); + /* + * We need to wait for both even and odd vsyncs. Note that this + * is not totally reliable, as we could get a vsync interrupt + * before we disable the output, which leads to timeout in the + * wait_for_completion. + */ + num_irqs = 2; + } + + r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl, + irq_mask); + if (r) + DSSERR("failed to register %x isr\n", irq_mask); + + dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, false); + + /* if we couldn't register the irq, just sleep and exit */ + if (r) { + msleep(100); + return; + } + + for (i = 0; i < num_irqs; ++i) { + if (!wait_for_completion_timeout(&framedone_compl, + msecs_to_jiffies(100))) + DSSERR("timeout waiting for digit out to stop\n"); + } + + r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl, + irq_mask); + if (r) + DSSERR("failed to unregister %x isr\n", irq_mask); +} + +void dispc_mgr_enable_sync(enum omap_channel channel) +{ + if (dss_mgr_is_lcd(channel)) + dispc_mgr_enable_lcd_out(channel); + else if (channel == OMAP_DSS_CHANNEL_DIGIT) + dispc_mgr_enable_digit_out(); + else + WARN_ON(1); +} + +void dispc_mgr_disable_sync(enum omap_channel channel) +{ + if (dss_mgr_is_lcd(channel)) + dispc_mgr_disable_lcd_out(channel); + else if (channel == OMAP_DSS_CHANNEL_DIGIT) + dispc_mgr_disable_digit_out(); + else + WARN_ON(1); +} + +static inline void dispc_irq_wait_handler(void *data, u32 mask) +{ + complete((struct completion *)data); +} + +int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask, + unsigned long timeout) +{ + + int r; + DECLARE_COMPLETION_ONSTACK(completion); + + r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion, + irqmask); + + if (r) + return r; + + timeout = wait_for_completion_interruptible_timeout(&completion, + timeout); + + omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask); + + if (timeout == 0) + return -ETIMEDOUT; + + if (timeout == -ERESTARTSYS) + return -ERESTARTSYS; + + return 0; +} diff --git a/drivers/gpu/drm/omapdrm/dss/dispc-compat.h b/drivers/gpu/drm/omapdrm/dss/dispc-compat.h new file mode 100644 index 000000000000..14a69b3d4fb0 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dispc-compat.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2012 Texas Instruments + * Author: Tomi Valkeinen 
<tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __OMAP2_DSS_DISPC_COMPAT_H +#define __OMAP2_DSS_DISPC_COMPAT_H + +void dispc_mgr_enable_sync(enum omap_channel channel); +void dispc_mgr_disable_sync(enum omap_channel channel); + +int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask, + unsigned long timeout); + +int dss_dispc_initialize_irq(void); +void dss_dispc_uninitialize_irq(void); + +#endif diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c new file mode 100644 index 000000000000..6b50476ec669 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -0,0 +1,4234 @@ +/* + * linux/drivers/video/omap2/dss/dispc.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "DISPC" + +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> +#include <linux/export.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/jiffies.h> +#include <linux/seq_file.h> +#include <linux/delay.h> +#include <linux/workqueue.h> +#include <linux/hardirq.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/sizes.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/of.h> +#include <linux/component.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" +#include "dispc.h" + +/* DISPC */ +#define DISPC_SZ_REGS SZ_4K + +enum omap_burst_size { + BURST_SIZE_X2 = 0, + BURST_SIZE_X4 = 1, + BURST_SIZE_X8 = 2, +}; + +#define REG_GET(idx, start, end) \ + FLD_GET(dispc_read_reg(idx), start, end) + +#define REG_FLD_MOD(idx, val, start, end) \ + dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end)) + +struct dispc_features { + u8 sw_start; + u8 fp_start; + u8 bp_start; + u16 sw_max; + u16 vp_max; + u16 hp_max; + u8 mgr_width_start; + u8 mgr_height_start; + u16 mgr_width_max; + u16 mgr_height_max; + unsigned long max_lcd_pclk; + unsigned long max_tv_pclk; + int (*calc_scaling) (unsigned long pclk, unsigned long lclk, + const struct omap_video_timings *mgr_timings, + u16 width, u16 height, u16 out_width, u16 out_height, + enum omap_color_mode color_mode, bool *five_taps, + int *x_predecim, int *y_predecim, int *decim_x, int *decim_y, + u16 pos_x, unsigned long *core_clk, bool mem_to_mem); + unsigned long (*calc_core_clk) (unsigned long pclk, + u16 width, u16 height, u16 out_width, u16 out_height, + bool mem_to_mem); + u8 num_fifos; + + /* swap GFX & WB fifos */ + bool gfx_fifo_workaround:1; + + /* no DISPC_IRQ_FRAMEDONETV on this SoC */ + bool no_framedone_tv:1; + + /* revert to the OMAP4 mechanism of DISPC Smart Standby operation */ + bool mstandby_workaround:1; + + bool set_max_preload:1; + + /* PIXEL_INC is not added to the last pixel of a line */ + bool last_pixel_inc_missing:1; + + /* POL_FREQ has ALIGN bit */ + bool supports_sync_align:1; + + bool has_writeback:1; +}; + +#define DISPC_MAX_NR_FIFOS 5 + +static struct { + struct platform_device *pdev; + void __iomem *base; + + int irq; + irq_handler_t user_handler; + void *user_data; + + unsigned long core_clk_rate; + unsigned long tv_pclk_rate; + + u32 fifo_size[DISPC_MAX_NR_FIFOS]; + /* maps which plane is using a fifo. 
fifo-id -> plane-id */ + int fifo_assignment[DISPC_MAX_NR_FIFOS]; + + bool ctx_valid; + u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; + + const struct dispc_features *feat; + + bool is_enabled; + + struct regmap *syscon_pol; + u32 syscon_pol_offset; + + /* DISPC_CONTROL & DISPC_CONFIG lock*/ + spinlock_t control_lock; +} dispc; + +enum omap_color_component { + /* used for all color formats for OMAP3 and earlier + * and for RGB and Y color component on OMAP4 + */ + DISPC_COLOR_COMPONENT_RGB_Y = 1 << 0, + /* used for UV component for + * OMAP_DSS_COLOR_YUV2, OMAP_DSS_COLOR_UYVY, OMAP_DSS_COLOR_NV12 + * color formats on OMAP4 + */ + DISPC_COLOR_COMPONENT_UV = 1 << 1, +}; + +enum mgr_reg_fields { + DISPC_MGR_FLD_ENABLE, + DISPC_MGR_FLD_STNTFT, + DISPC_MGR_FLD_GO, + DISPC_MGR_FLD_TFTDATALINES, + DISPC_MGR_FLD_STALLMODE, + DISPC_MGR_FLD_TCKENABLE, + DISPC_MGR_FLD_TCKSELECTION, + DISPC_MGR_FLD_CPR, + DISPC_MGR_FLD_FIFOHANDCHECK, + /* used to maintain a count of the above fields */ + DISPC_MGR_FLD_NUM, +}; + +struct dispc_reg_field { + u16 reg; + u8 high; + u8 low; +}; + +static const struct { + const char *name; + u32 vsync_irq; + u32 framedone_irq; + u32 sync_lost_irq; + struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM]; +} mgr_desc[] = { + [OMAP_DSS_CHANNEL_LCD] = { + .name = "LCD", + .vsync_irq = DISPC_IRQ_VSYNC, + .framedone_irq = DISPC_IRQ_FRAMEDONE, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_DIGIT] = { + .name = "DIGIT", + .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, + .framedone_irq = DISPC_IRQ_FRAMEDONETV, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, + [DISPC_MGR_FLD_STNTFT] = { }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 6, 6 }, + [DISPC_MGR_FLD_TFTDATALINES] = { }, + [DISPC_MGR_FLD_STALLMODE] = { }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 12, 12 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 13, 13 }, + [DISPC_MGR_FLD_CPR] = { }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD2] = { + .name = "LCD2", + .vsync_irq = DISPC_IRQ_VSYNC2, + .framedone_irq = DISPC_IRQ_FRAMEDONE2, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL2, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL2, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL2, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG2, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG2, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG2, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG2, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD3] = { + .name = "LCD3", + .vsync_irq = DISPC_IRQ_VSYNC3, + .framedone_irq = DISPC_IRQ_FRAMEDONE3, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, + [DISPC_MGR_FLD_GO] = 
{ DISPC_CONTROL3, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL3, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL3, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG3, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG3, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG3, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG3, 16, 16 }, + }, + }, +}; + +struct color_conv_coef { + int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb; + int full_range; +}; + +static unsigned long dispc_fclk_rate(void); +static unsigned long dispc_core_clk_rate(void); +static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); +static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel); + +static unsigned long dispc_plane_pclk_rate(enum omap_plane plane); +static unsigned long dispc_plane_lclk_rate(enum omap_plane plane); + +static inline void dispc_write_reg(const u16 idx, u32 val) +{ + __raw_writel(val, dispc.base + idx); +} + +static inline u32 dispc_read_reg(const u16 idx) +{ + return __raw_readl(dispc.base + idx); +} + +static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld) +{ + const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + return REG_GET(rfld.reg, rfld.high, rfld.low); +} + +static void mgr_fld_write(enum omap_channel channel, + enum mgr_reg_fields regfld, int val) { + const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG; + unsigned long flags; + + if (need_lock) + spin_lock_irqsave(&dispc.control_lock, flags); + + REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low); + + if (need_lock) + spin_unlock_irqrestore(&dispc.control_lock, flags); +} + +#define SR(reg) \ + dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) +#define RR(reg) \ + dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)]) + +static void dispc_save_context(void) +{ + int i, j; + + DSSDBG("dispc_save_context\n"); + + SR(IRQENABLE); + SR(CONTROL); + SR(CONFIG); + SR(LINE_NUMBER); + if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) || + dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) + SR(GLOBAL_ALPHA); + if (dss_has_feature(FEAT_MGR_LCD2)) { + SR(CONTROL2); + SR(CONFIG2); + } + if (dss_has_feature(FEAT_MGR_LCD3)) { + SR(CONTROL3); + SR(CONFIG3); + } + + for (i = 0; i < dss_feat_get_num_mgrs(); i++) { + SR(DEFAULT_COLOR(i)); + SR(TRANS_COLOR(i)); + SR(SIZE_MGR(i)); + if (i == OMAP_DSS_CHANNEL_DIGIT) + continue; + SR(TIMING_H(i)); + SR(TIMING_V(i)); + SR(POL_FREQ(i)); + SR(DIVISORo(i)); + + SR(DATA_CYCLE1(i)); + SR(DATA_CYCLE2(i)); + SR(DATA_CYCLE3(i)); + + if (dss_has_feature(FEAT_CPR)) { + SR(CPR_COEF_R(i)); + SR(CPR_COEF_G(i)); + SR(CPR_COEF_B(i)); + } + } + + for (i = 0; i < dss_feat_get_num_ovls(); i++) { + SR(OVL_BA0(i)); + SR(OVL_BA1(i)); + SR(OVL_POSITION(i)); + SR(OVL_SIZE(i)); + SR(OVL_ATTRIBUTES(i)); + SR(OVL_FIFO_THRESHOLD(i)); + SR(OVL_ROW_INC(i)); + SR(OVL_PIXEL_INC(i)); + if (dss_has_feature(FEAT_PRELOAD)) + SR(OVL_PRELOAD(i)); + if (i == OMAP_DSS_GFX) { + SR(OVL_WINDOW_SKIP(i)); + SR(OVL_TABLE_BA(i)); + continue; + } + SR(OVL_FIR(i)); + SR(OVL_PICTURE_SIZE(i)); + SR(OVL_ACCU0(i)); + SR(OVL_ACCU1(i)); + + for (j = 0; j < 8; j++) + SR(OVL_FIR_COEF_H(i, j)); + + for (j = 0; j < 8; j++) + SR(OVL_FIR_COEF_HV(i, j)); + + for (j = 0; j < 5; j++) + SR(OVL_CONV_COEF(i, j)); + + if (dss_has_feature(FEAT_FIR_COEF_V)) { + for (j = 0; j < 8; j++) + SR(OVL_FIR_COEF_V(i, j)); + } + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + 
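/* registers for the separate UV color component (NV12/YUV2/UYVY on OMAP4): base addresses, FIR and accumulators */ +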
SR(OVL_BA0_UV(i)); + SR(OVL_BA1_UV(i)); + SR(OVL_FIR2(i)); + SR(OVL_ACCU2_0(i)); + SR(OVL_ACCU2_1(i)); + + for (j = 0; j < 8; j++) + SR(OVL_FIR_COEF_H2(i, j)); + + for (j = 0; j < 8; j++) + SR(OVL_FIR_COEF_HV2(i, j)); + + for (j = 0; j < 8; j++) + SR(OVL_FIR_COEF_V2(i, j)); + } + if (dss_has_feature(FEAT_ATTR2)) + SR(OVL_ATTRIBUTES2(i)); + } + + if (dss_has_feature(FEAT_CORE_CLK_DIV)) + SR(DIVISOR); + + dispc.ctx_valid = true; + + DSSDBG("context saved\n"); +} + +static void dispc_restore_context(void) +{ + int i, j; + + DSSDBG("dispc_restore_context\n"); + + if (!dispc.ctx_valid) + return; + + /*RR(IRQENABLE);*/ + /*RR(CONTROL);*/ + RR(CONFIG); + RR(LINE_NUMBER); + if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) || + dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) + RR(GLOBAL_ALPHA); + if (dss_has_feature(FEAT_MGR_LCD2)) + RR(CONFIG2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONFIG3); + + for (i = 0; i < dss_feat_get_num_mgrs(); i++) { + RR(DEFAULT_COLOR(i)); + RR(TRANS_COLOR(i)); + RR(SIZE_MGR(i)); + if (i == OMAP_DSS_CHANNEL_DIGIT) + continue; + RR(TIMING_H(i)); + RR(TIMING_V(i)); + RR(POL_FREQ(i)); + RR(DIVISORo(i)); + + RR(DATA_CYCLE1(i)); + RR(DATA_CYCLE2(i)); + RR(DATA_CYCLE3(i)); + + if (dss_has_feature(FEAT_CPR)) { + RR(CPR_COEF_R(i)); + RR(CPR_COEF_G(i)); + RR(CPR_COEF_B(i)); + } + } + + for (i = 0; i < dss_feat_get_num_ovls(); i++) { + RR(OVL_BA0(i)); + RR(OVL_BA1(i)); + RR(OVL_POSITION(i)); + RR(OVL_SIZE(i)); + RR(OVL_ATTRIBUTES(i)); + RR(OVL_FIFO_THRESHOLD(i)); + RR(OVL_ROW_INC(i)); + RR(OVL_PIXEL_INC(i)); + if (dss_has_feature(FEAT_PRELOAD)) + RR(OVL_PRELOAD(i)); + if (i == OMAP_DSS_GFX) { + RR(OVL_WINDOW_SKIP(i)); + RR(OVL_TABLE_BA(i)); + continue; + } + RR(OVL_FIR(i)); + RR(OVL_PICTURE_SIZE(i)); + RR(OVL_ACCU0(i)); + RR(OVL_ACCU1(i)); + + for (j = 0; j < 8; j++) + RR(OVL_FIR_COEF_H(i, j)); + + for (j = 0; j < 8; j++) + RR(OVL_FIR_COEF_HV(i, j)); + + for (j = 0; j < 5; j++) + RR(OVL_CONV_COEF(i, j)); + + if (dss_has_feature(FEAT_FIR_COEF_V)) { + for (j = 0; j < 8; j++) + RR(OVL_FIR_COEF_V(i, j)); + } + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + RR(OVL_BA0_UV(i)); + RR(OVL_BA1_UV(i)); + RR(OVL_FIR2(i)); + RR(OVL_ACCU2_0(i)); + RR(OVL_ACCU2_1(i)); + + for (j = 0; j < 8; j++) + RR(OVL_FIR_COEF_H2(i, j)); + + for (j = 0; j < 8; j++) + RR(OVL_FIR_COEF_HV2(i, j)); + + for (j = 0; j < 8; j++) + RR(OVL_FIR_COEF_V2(i, j)); + } + if (dss_has_feature(FEAT_ATTR2)) + RR(OVL_ATTRIBUTES2(i)); + } + + if (dss_has_feature(FEAT_CORE_CLK_DIV)) + RR(DIVISOR); + + /* enable last, because LCD & DIGIT enable are here */ + RR(CONTROL); + if (dss_has_feature(FEAT_MGR_LCD2)) + RR(CONTROL2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONTROL3); + /* clear spurious SYNC_LOST_DIGIT interrupts */ + dispc_clear_irqstatus(DISPC_IRQ_SYNC_LOST_DIGIT); + + /* + * enable last so IRQs won't trigger before + * the context is fully restored + */ + RR(IRQENABLE); + + DSSDBG("context restored\n"); +} + +#undef SR +#undef RR + +int dispc_runtime_get(void) +{ + int r; + + DSSDBG("dispc_runtime_get\n"); + + r = pm_runtime_get_sync(&dispc.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? 
r : 0; +} +EXPORT_SYMBOL(dispc_runtime_get); + +void dispc_runtime_put(void) +{ + int r; + + DSSDBG("dispc_runtime_put\n"); + + r = pm_runtime_put_sync(&dispc.pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} +EXPORT_SYMBOL(dispc_runtime_put); + +u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) +{ + return mgr_desc[channel].vsync_irq; +} +EXPORT_SYMBOL(dispc_mgr_get_vsync_irq); + +u32 dispc_mgr_get_framedone_irq(enum omap_channel channel) +{ + if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc.feat->no_framedone_tv) + return 0; + + return mgr_desc[channel].framedone_irq; +} +EXPORT_SYMBOL(dispc_mgr_get_framedone_irq); + +u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel) +{ + return mgr_desc[channel].sync_lost_irq; +} +EXPORT_SYMBOL(dispc_mgr_get_sync_lost_irq); + +u32 dispc_wb_get_framedone_irq(void) +{ + return DISPC_IRQ_FRAMEDONEWB; +} + +bool dispc_mgr_go_busy(enum omap_channel channel) +{ + return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; +} +EXPORT_SYMBOL(dispc_mgr_go_busy); + +void dispc_mgr_go(enum omap_channel channel) +{ + WARN_ON(!dispc_mgr_is_enabled(channel)); + WARN_ON(dispc_mgr_go_busy(channel)); + + DSSDBG("GO %s\n", mgr_desc[channel].name); + + mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1); +} +EXPORT_SYMBOL(dispc_mgr_go); + +bool dispc_wb_go_busy(void) +{ + return REG_GET(DISPC_CONTROL2, 6, 6) == 1; +} + +void dispc_wb_go(void) +{ + enum omap_plane plane = OMAP_DSS_WB; + bool enable, go; + + enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1; + + if (!enable) + return; + + go = REG_GET(DISPC_CONTROL2, 6, 6) == 1; + if (go) { + DSSERR("GO bit not down for WB\n"); + return; + } + + REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6); +} + +static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value) +{ + dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value); +} + +static void dispc_ovl_write_firhv_reg(enum omap_plane plane, int reg, u32 value) +{ + dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value); +} + +static void dispc_ovl_write_firv_reg(enum omap_plane plane, int reg, u32 value) +{ + dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value); +} + +static void dispc_ovl_write_firh2_reg(enum omap_plane plane, int reg, u32 value) +{ + BUG_ON(plane == OMAP_DSS_GFX); + + dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value); +} + +static void dispc_ovl_write_firhv2_reg(enum omap_plane plane, int reg, + u32 value) +{ + BUG_ON(plane == OMAP_DSS_GFX); + + dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value); +} + +static void dispc_ovl_write_firv2_reg(enum omap_plane plane, int reg, u32 value) +{ + BUG_ON(plane == OMAP_DSS_GFX); + + dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value); +} + +static void dispc_ovl_set_scale_coef(enum omap_plane plane, int fir_hinc, + int fir_vinc, int five_taps, + enum omap_color_component color_comp) +{ + const struct dispc_coef *h_coef, *v_coef; + int i; + + h_coef = dispc_ovl_get_scale_coef(fir_hinc, true); + v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps); + + for (i = 0; i < 8; i++) { + u32 h, hv; + + h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0) + | FLD_VAL(h_coef[i].hc1_vc0, 15, 8) + | FLD_VAL(h_coef[i].hc2_vc1, 23, 16) + | FLD_VAL(h_coef[i].hc3_vc2, 31, 24); + hv = FLD_VAL(h_coef[i].hc4_vc22, 7, 0) + | FLD_VAL(v_coef[i].hc1_vc0, 15, 8) + | FLD_VAL(v_coef[i].hc2_vc1, 23, 16) + | FLD_VAL(v_coef[i].hc3_vc2, 31, 24); + + if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) { + dispc_ovl_write_firh_reg(plane, i, h); + dispc_ovl_write_firhv_reg(plane, i, hv); + } else { + dispc_ovl_write_firh2_reg(plane, 
i, h); + dispc_ovl_write_firhv2_reg(plane, i, hv); + } + + } + + if (five_taps) { + for (i = 0; i < 8; i++) { + u32 v; + v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0) + | FLD_VAL(v_coef[i].hc4_vc22, 15, 8); + if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) + dispc_ovl_write_firv_reg(plane, i, v); + else + dispc_ovl_write_firv2_reg(plane, i, v); + } + } +} + + +static void dispc_ovl_write_color_conv_coef(enum omap_plane plane, + const struct color_conv_coef *ct) +{ +#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0)) + + dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry)); + dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb)); + dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr)); + dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by)); + dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb)); + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11); + +#undef CVAL +} + +static void dispc_setup_color_conv_coef(void) +{ + int i; + int num_ovl = dss_feat_get_num_ovls(); + const struct color_conv_coef ctbl_bt601_5_ovl = { + /* YUV -> RGB */ + 298, 409, 0, 298, -208, -100, 298, 0, 517, 0, + }; + const struct color_conv_coef ctbl_bt601_5_wb = { + /* RGB -> YUV */ + 66, 129, 25, 112, -94, -18, -38, -74, 112, 0, + }; + + for (i = 1; i < num_ovl; i++) + dispc_ovl_write_color_conv_coef(i, &ctbl_bt601_5_ovl); + + if (dispc.feat->has_writeback) + dispc_ovl_write_color_conv_coef(OMAP_DSS_WB, &ctbl_bt601_5_wb); +} + +static void dispc_ovl_set_ba0(enum omap_plane plane, u32 paddr) +{ + dispc_write_reg(DISPC_OVL_BA0(plane), paddr); +} + +static void dispc_ovl_set_ba1(enum omap_plane plane, u32 paddr) +{ + dispc_write_reg(DISPC_OVL_BA1(plane), paddr); +} + +static void dispc_ovl_set_ba0_uv(enum omap_plane plane, u32 paddr) +{ + dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr); +} + +static void dispc_ovl_set_ba1_uv(enum omap_plane plane, u32 paddr) +{ + dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr); +} + +static void dispc_ovl_set_pos(enum omap_plane plane, + enum omap_overlay_caps caps, int x, int y) +{ + u32 val; + + if ((caps & OMAP_DSS_OVL_CAP_POS) == 0) + return; + + val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0); + + dispc_write_reg(DISPC_OVL_POSITION(plane), val); +} + +static void dispc_ovl_set_input_size(enum omap_plane plane, int width, + int height) +{ + u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); + + if (plane == OMAP_DSS_GFX || plane == OMAP_DSS_WB) + dispc_write_reg(DISPC_OVL_SIZE(plane), val); + else + dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val); +} + +static void dispc_ovl_set_output_size(enum omap_plane plane, int width, + int height) +{ + u32 val; + + BUG_ON(plane == OMAP_DSS_GFX); + + val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); + + if (plane == OMAP_DSS_WB) + dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val); + else + dispc_write_reg(DISPC_OVL_SIZE(plane), val); +} + +static void dispc_ovl_set_zorder(enum omap_plane plane, + enum omap_overlay_caps caps, u8 zorder) +{ + if ((caps & OMAP_DSS_OVL_CAP_ZORDER) == 0) + return; + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26); +} + +static void dispc_ovl_enable_zorder_planes(void) +{ + int i; + + if (!dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) + return; + + for (i = 0; i < dss_feat_get_num_ovls(); i++) + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25); +} + +static void dispc_ovl_set_pre_mult_alpha(enum omap_plane plane, + enum omap_overlay_caps caps, bool enable) +{ + if ((caps & 
OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0) + return; + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28); +} + +static void dispc_ovl_setup_global_alpha(enum omap_plane plane, + enum omap_overlay_caps caps, u8 global_alpha) +{ + static const unsigned shifts[] = { 0, 8, 16, 24, }; + int shift; + + if ((caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0) + return; + + shift = shifts[plane]; + REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift); +} + +static void dispc_ovl_set_pix_inc(enum omap_plane plane, s32 inc) +{ + dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc); +} + +static void dispc_ovl_set_row_inc(enum omap_plane plane, s32 inc) +{ + dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc); +} + +static void dispc_ovl_set_color_mode(enum omap_plane plane, + enum omap_color_mode color_mode) +{ + u32 m = 0; + if (plane != OMAP_DSS_GFX) { + switch (color_mode) { + case OMAP_DSS_COLOR_NV12: + m = 0x0; break; + case OMAP_DSS_COLOR_RGBX16: + m = 0x1; break; + case OMAP_DSS_COLOR_RGBA16: + m = 0x2; break; + case OMAP_DSS_COLOR_RGB12U: + m = 0x4; break; + case OMAP_DSS_COLOR_ARGB16: + m = 0x5; break; + case OMAP_DSS_COLOR_RGB16: + m = 0x6; break; + case OMAP_DSS_COLOR_ARGB16_1555: + m = 0x7; break; + case OMAP_DSS_COLOR_RGB24U: + m = 0x8; break; + case OMAP_DSS_COLOR_RGB24P: + m = 0x9; break; + case OMAP_DSS_COLOR_YUV2: + m = 0xa; break; + case OMAP_DSS_COLOR_UYVY: + m = 0xb; break; + case OMAP_DSS_COLOR_ARGB32: + m = 0xc; break; + case OMAP_DSS_COLOR_RGBA32: + m = 0xd; break; + case OMAP_DSS_COLOR_RGBX32: + m = 0xe; break; + case OMAP_DSS_COLOR_XRGB16_1555: + m = 0xf; break; + default: + BUG(); return; + } + } else { + switch (color_mode) { + case OMAP_DSS_COLOR_CLUT1: + m = 0x0; break; + case OMAP_DSS_COLOR_CLUT2: + m = 0x1; break; + case OMAP_DSS_COLOR_CLUT4: + m = 0x2; break; + case OMAP_DSS_COLOR_CLUT8: + m = 0x3; break; + case OMAP_DSS_COLOR_RGB12U: + m = 0x4; break; + case OMAP_DSS_COLOR_ARGB16: + m = 0x5; break; + case OMAP_DSS_COLOR_RGB16: + m = 0x6; break; + case OMAP_DSS_COLOR_ARGB16_1555: + m = 0x7; break; + case OMAP_DSS_COLOR_RGB24U: + m = 0x8; break; + case OMAP_DSS_COLOR_RGB24P: + m = 0x9; break; + case OMAP_DSS_COLOR_RGBX16: + m = 0xa; break; + case OMAP_DSS_COLOR_RGBA16: + m = 0xb; break; + case OMAP_DSS_COLOR_ARGB32: + m = 0xc; break; + case OMAP_DSS_COLOR_RGBA32: + m = 0xd; break; + case OMAP_DSS_COLOR_RGBX32: + m = 0xe; break; + case OMAP_DSS_COLOR_XRGB16_1555: + m = 0xf; break; + default: + BUG(); return; + } + } + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); +} + +static void dispc_ovl_configure_burst_type(enum omap_plane plane, + enum omap_dss_rotation_type rotation_type) +{ + if (dss_has_feature(FEAT_BURST_2D) == 0) + return; + + if (rotation_type == OMAP_DSS_ROT_TILER) + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29); + else + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29); +} + +void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) +{ + int shift; + u32 val; + int chan = 0, chan2 = 0; + + switch (plane) { + case OMAP_DSS_GFX: + shift = 8; + break; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + case OMAP_DSS_VIDEO3: + shift = 16; + break; + default: + BUG(); + return; + } + + val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); + if (dss_has_feature(FEAT_MGR_LCD2)) { + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + chan = 0; + chan2 = 0; + break; + case OMAP_DSS_CHANNEL_DIGIT: + chan = 1; + chan2 = 0; + break; + case OMAP_DSS_CHANNEL_LCD2: + chan = 0; + chan2 = 1; + break; + case OMAP_DSS_CHANNEL_LCD3: + if 
(dss_has_feature(FEAT_MGR_LCD3)) { + chan = 0; + chan2 = 2; + } else { + BUG(); + return; + } + break; + case OMAP_DSS_CHANNEL_WB: + chan = 0; + chan2 = 3; + break; + default: + BUG(); + return; + } + + val = FLD_MOD(val, chan, shift, shift); + val = FLD_MOD(val, chan2, 31, 30); + } else { + val = FLD_MOD(val, channel, shift, shift); + } + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); +} +EXPORT_SYMBOL(dispc_ovl_set_channel_out); + +static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane) +{ + int shift; + u32 val; + + switch (plane) { + case OMAP_DSS_GFX: + shift = 8; + break; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + case OMAP_DSS_VIDEO3: + shift = 16; + break; + default: + BUG(); + return 0; + } + + val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); + + if (FLD_GET(val, shift, shift) == 1) + return OMAP_DSS_CHANNEL_DIGIT; + + if (!dss_has_feature(FEAT_MGR_LCD2)) + return OMAP_DSS_CHANNEL_LCD; + + switch (FLD_GET(val, 31, 30)) { + case 0: + default: + return OMAP_DSS_CHANNEL_LCD; + case 1: + return OMAP_DSS_CHANNEL_LCD2; + case 2: + return OMAP_DSS_CHANNEL_LCD3; + case 3: + return OMAP_DSS_CHANNEL_WB; + } +} + +void dispc_wb_set_channel_in(enum dss_writeback_channel channel) +{ + enum omap_plane plane = OMAP_DSS_WB; + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16); +} + +static void dispc_ovl_set_burst_size(enum omap_plane plane, + enum omap_burst_size burst_size) +{ + static const unsigned shifts[] = { 6, 14, 14, 14, 14, }; + int shift; + + shift = shifts[plane]; + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift); +} + +static void dispc_configure_burst_sizes(void) +{ + int i; + const int burst_size = BURST_SIZE_X8; + + /* Configure burst size always to maximum size */ + for (i = 0; i < dss_feat_get_num_ovls(); ++i) + dispc_ovl_set_burst_size(i, burst_size); + if (dispc.feat->has_writeback) + dispc_ovl_set_burst_size(OMAP_DSS_WB, burst_size); +} + +static u32 dispc_ovl_get_burst_size(enum omap_plane plane) +{ + unsigned unit = dss_feat_get_burst_size_unit(); + /* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */ + return unit * 8; +} + +void dispc_enable_gamma_table(bool enable) +{ + /* + * This is partially implemented to support only disabling of + * the gamma table. 
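+ *
+ * In practice dispc_enable_gamma_table(false) clears DISPC_CONFIG
+ * bit 9 as expected, while dispc_enable_gamma_table(true) only prints
+ * the warning below and returns without touching the register.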
+ */ + if (enable) { + DSSWARN("Gamma table enabling for TV not yet supported"); + return; + } + + REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); +} + +static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) +{ + if (channel == OMAP_DSS_CHANNEL_DIGIT) + return; + + mgr_fld_write(channel, DISPC_MGR_FLD_CPR, enable); +} + +static void dispc_mgr_set_cpr_coef(enum omap_channel channel, + const struct omap_dss_cpr_coefs *coefs) +{ + u32 coef_r, coef_g, coef_b; + + if (!dss_mgr_is_lcd(channel)) + return; + + coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) | + FLD_VAL(coefs->rb, 9, 0); + coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) | + FLD_VAL(coefs->gb, 9, 0); + coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) | + FLD_VAL(coefs->bb, 9, 0); + + dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r); + dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g); + dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b); +} + +static void dispc_ovl_set_vid_color_conv(enum omap_plane plane, bool enable) +{ + u32 val; + + BUG_ON(plane == OMAP_DSS_GFX); + + val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); + val = FLD_MOD(val, enable, 9, 9); + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); +} + +static void dispc_ovl_enable_replication(enum omap_plane plane, + enum omap_overlay_caps caps, bool enable) +{ + static const unsigned shifts[] = { 5, 10, 10, 10 }; + int shift; + + if ((caps & OMAP_DSS_OVL_CAP_REPLICATION) == 0) + return; + + shift = shifts[plane]; + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift); +} + +static void dispc_mgr_set_size(enum omap_channel channel, u16 width, + u16 height) +{ + u32 val; + + val = FLD_VAL(height - 1, dispc.feat->mgr_height_start, 16) | + FLD_VAL(width - 1, dispc.feat->mgr_width_start, 0); + + dispc_write_reg(DISPC_SIZE_MGR(channel), val); +} + +static void dispc_init_fifos(void) +{ + u32 size; + int fifo; + u8 start, end; + u32 unit; + int i; + + unit = dss_feat_get_buffer_size_unit(); + + dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); + + for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) { + size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(fifo), start, end); + size *= unit; + dispc.fifo_size[fifo] = size; + + /* + * By default fifos are mapped directly to overlays, fifo 0 to + * ovl 0, fifo 1 to ovl 1, etc. + */ + dispc.fifo_assignment[fifo] = fifo; + } + + /* + * The GFX fifo on OMAP4 is smaller than the other fifos. The small fifo + * causes problems with certain use cases, like using the tiler in 2D + * mode. The below hack swaps the fifos of GFX and WB planes, thus + * giving GFX plane a larger fifo. WB but should work fine with a + * smaller fifo. + */ + if (dispc.feat->gfx_fifo_workaround) { + u32 v; + + v = dispc_read_reg(DISPC_GLOBAL_BUFFER); + + v = FLD_MOD(v, 4, 2, 0); /* GFX BUF top to WB */ + v = FLD_MOD(v, 4, 5, 3); /* GFX BUF bottom to WB */ + v = FLD_MOD(v, 0, 26, 24); /* WB BUF top to GFX */ + v = FLD_MOD(v, 0, 29, 27); /* WB BUF bottom to GFX */ + + dispc_write_reg(DISPC_GLOBAL_BUFFER, v); + + dispc.fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB; + dispc.fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX; + } + + /* + * Setup default fifo thresholds. 
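+ * The loop below calls dispc_ovl_compute_fifo_thresholds() with both
+ * fifomerge and manual update disabled, so each plane (and writeback,
+ * if present) gets thresholds derived from its own FIFO size only.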
+ */ + for (i = 0; i < dss_feat_get_num_ovls(); ++i) { + u32 low, high; + const bool use_fifomerge = false; + const bool manual_update = false; + + dispc_ovl_compute_fifo_thresholds(i, &low, &high, + use_fifomerge, manual_update); + + dispc_ovl_set_fifo_threshold(i, low, high); + } + + if (dispc.feat->has_writeback) { + u32 low, high; + const bool use_fifomerge = false; + const bool manual_update = false; + + dispc_ovl_compute_fifo_thresholds(OMAP_DSS_WB, &low, &high, + use_fifomerge, manual_update); + + dispc_ovl_set_fifo_threshold(OMAP_DSS_WB, low, high); + } +} + +static u32 dispc_ovl_get_fifo_size(enum omap_plane plane) +{ + int fifo; + u32 size = 0; + + for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) { + if (dispc.fifo_assignment[fifo] == plane) + size += dispc.fifo_size[fifo]; + } + + return size; +} + +void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high) +{ + u8 hi_start, hi_end, lo_start, lo_end; + u32 unit; + + unit = dss_feat_get_buffer_size_unit(); + + WARN_ON(low % unit != 0); + WARN_ON(high % unit != 0); + + low /= unit; + high /= unit; + + dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); + dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end); + + DSSDBG("fifo(%d) threshold (bytes), old %u/%u, new %u/%u\n", + plane, + REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), + lo_start, lo_end) * unit, + REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), + hi_start, hi_end) * unit, + low * unit, high * unit); + + dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane), + FLD_VAL(high, hi_start, hi_end) | + FLD_VAL(low, lo_start, lo_end)); + + /* + * configure the preload to the pipeline's high threhold, if HT it's too + * large for the preload field, set the threshold to the maximum value + * that can be held by the preload register + */ + if (dss_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload && + plane != OMAP_DSS_WB) + dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu)); +} + +void dispc_enable_fifomerge(bool enable) +{ + if (!dss_has_feature(FEAT_FIFO_MERGE)) { + WARN_ON(enable); + return; + } + + DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); + REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14); +} + +void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, + u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, + bool manual_update) +{ + /* + * All sizes are in bytes. Both the buffer and burst are made of + * buffer_units, and the fifo thresholds must be buffer_unit aligned. 
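+ *
+ * A worked example with made-up numbers: if the buffer size unit is
+ * 16 bytes, dispc_ovl_get_burst_size() returns 16 * 8 = 128 bytes, so
+ * a 1024-byte FIFO in the plain case (no fifomerge, not WB, no manual
+ * update workaround) ends up with *fifo_low = 1024 - 128 = 896 and
+ * *fifo_high = 1024 - 16 = 1008.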
+ */ + + unsigned buf_unit = dss_feat_get_buffer_size_unit(); + unsigned ovl_fifo_size, total_fifo_size, burst_size; + int i; + + burst_size = dispc_ovl_get_burst_size(plane); + ovl_fifo_size = dispc_ovl_get_fifo_size(plane); + + if (use_fifomerge) { + total_fifo_size = 0; + for (i = 0; i < dss_feat_get_num_ovls(); ++i) + total_fifo_size += dispc_ovl_get_fifo_size(i); + } else { + total_fifo_size = ovl_fifo_size; + } + + /* + * We use the same low threshold for both fifomerge and non-fifomerge + * cases, but for fifomerge we calculate the high threshold using the + * combined fifo size + */ + + if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) { + *fifo_low = ovl_fifo_size - burst_size * 2; + *fifo_high = total_fifo_size - burst_size; + } else if (plane == OMAP_DSS_WB) { + /* + * Most optimal configuration for writeback is to push out data + * to the interconnect the moment writeback pushes enough pixels + * in the FIFO to form a burst + */ + *fifo_low = 0; + *fifo_high = burst_size; + } else { + *fifo_low = ovl_fifo_size - burst_size; + *fifo_high = total_fifo_size - buf_unit; + } +} + +static void dispc_ovl_set_mflag(enum omap_plane plane, bool enable) +{ + int bit; + + if (plane == OMAP_DSS_GFX) + bit = 14; + else + bit = 23; + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit); +} + +static void dispc_ovl_set_mflag_threshold(enum omap_plane plane, + int low, int high) +{ + dispc_write_reg(DISPC_OVL_MFLAG_THRESHOLD(plane), + FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0)); +} + +static void dispc_init_mflag(void) +{ + int i; + + /* + * HACK: NV12 color format and MFLAG seem to have problems working + * together: using two displays, and having an NV12 overlay on one of + * the displays will cause underflows/synclosts when MFLAG_CTRL=2. + * Changing MFLAG thresholds and PRELOAD to certain values seem to + * remove the errors, but there doesn't seem to be a clear logic on + * which values work and which not. + * + * As a work-around, set force MFLAG to always on. 
+	 */
+	dispc_write_reg(DISPC_GLOBAL_MFLAG_ATTRIBUTE,
+		(1 << 0) | /* MFLAG_CTRL = force always on */
+		(0 << 2)); /* MFLAG_START = disable */
+
+	for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
+		u32 size = dispc_ovl_get_fifo_size(i);
+		u32 unit = dss_feat_get_buffer_size_unit();
+		u32 low, high;
+
+		dispc_ovl_set_mflag(i, true);
+
+		/*
+		 * The simulation team suggests the following thresholds:
+		 * HT = fifosize * 5 / 8;
+		 * LT = fifosize * 4 / 8;
+		 */
+
+		low = size * 4 / 8 / unit;
+		high = size * 5 / 8 / unit;
+
+		dispc_ovl_set_mflag_threshold(i, low, high);
+	}
+
+	if (dispc.feat->has_writeback) {
+		u32 size = dispc_ovl_get_fifo_size(OMAP_DSS_WB);
+		u32 unit = dss_feat_get_buffer_size_unit();
+		u32 low, high;
+
+		dispc_ovl_set_mflag(OMAP_DSS_WB, true);
+
+		/*
+		 * The simulation team suggests the following thresholds:
+		 * HT = fifosize * 5 / 8;
+		 * LT = fifosize * 4 / 8;
+		 */
+
+		low = size * 4 / 8 / unit;
+		high = size * 5 / 8 / unit;
+
+		dispc_ovl_set_mflag_threshold(OMAP_DSS_WB, low, high);
+	}
+}
+
+static void dispc_ovl_set_fir(enum omap_plane plane,
+		int hinc, int vinc,
+		enum omap_color_component color_comp)
+{
+	u32 val;
+
+	if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
+		u8 hinc_start, hinc_end, vinc_start, vinc_end;
+
+		dss_feat_get_reg_field(FEAT_REG_FIRHINC,
+			&hinc_start, &hinc_end);
+		dss_feat_get_reg_field(FEAT_REG_FIRVINC,
+			&vinc_start, &vinc_end);
+		val = FLD_VAL(vinc, vinc_start, vinc_end) |
+			FLD_VAL(hinc, hinc_start, hinc_end);
+
+		dispc_write_reg(DISPC_OVL_FIR(plane), val);
+	} else {
+		val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
+		dispc_write_reg(DISPC_OVL_FIR2(plane), val);
+	}
+}
+
+static void dispc_ovl_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
+{
+	u32 val;
+	u8 hor_start, hor_end, vert_start, vert_end;
+
+	dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+	dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+
+	val = FLD_VAL(vaccu, vert_start, vert_end) |
+		FLD_VAL(haccu, hor_start, hor_end);
+
+	dispc_write_reg(DISPC_OVL_ACCU0(plane), val);
+}
+
+static void dispc_ovl_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
+{
+	u32 val;
+	u8 hor_start, hor_end, vert_start, vert_end;
+
+	dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+	dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+
+	val = FLD_VAL(vaccu, vert_start, vert_end) |
+		FLD_VAL(haccu, hor_start, hor_end);
+
+	dispc_write_reg(DISPC_OVL_ACCU1(plane), val);
+}
+
+static void dispc_ovl_set_vid_accu2_0(enum omap_plane plane, int haccu,
+		int vaccu)
+{
+	u32 val;
+
+	val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
+	dispc_write_reg(DISPC_OVL_ACCU2_0(plane), val);
+}
+
+static void dispc_ovl_set_vid_accu2_1(enum omap_plane plane, int haccu,
+		int vaccu)
+{
+	u32 val;
+
+	val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
+	dispc_write_reg(DISPC_OVL_ACCU2_1(plane), val);
+}
+
+static void dispc_ovl_set_scale_param(enum omap_plane plane,
+		u16 orig_width, u16 orig_height,
+		u16 out_width, u16 out_height,
+		bool five_taps, u8 rotation,
+		enum omap_color_component color_comp)
+{
+	int fir_hinc, fir_vinc;
+
+	fir_hinc = 1024 * orig_width / out_width;
+	fir_vinc = 1024 * orig_height / out_height;
+
+	dispc_ovl_set_scale_coef(plane, fir_hinc, fir_vinc, five_taps,
+		color_comp);
+	dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
+}
+
+static void dispc_ovl_set_accu_uv(enum omap_plane plane,
+		u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
+		bool ilace, enum omap_color_mode color_mode,
u8 rotation) +{ + int h_accu2_0, h_accu2_1; + int v_accu2_0, v_accu2_1; + int chroma_hinc, chroma_vinc; + int idx; + + struct accu { + s8 h0_m, h0_n; + s8 h1_m, h1_n; + s8 v0_m, v0_n; + s8 v1_m, v1_n; + }; + + const struct accu *accu_table; + const struct accu *accu_val; + + static const struct accu accu_nv12[4] = { + { 0, 1, 0, 1 , -1, 2, 0, 1 }, + { 1, 2, -3, 4 , 0, 1, 0, 1 }, + { -1, 1, 0, 1 , -1, 2, 0, 1 }, + { -1, 2, -1, 2 , -1, 1, 0, 1 }, + }; + + static const struct accu accu_nv12_ilace[4] = { + { 0, 1, 0, 1 , -3, 4, -1, 4 }, + { -1, 4, -3, 4 , 0, 1, 0, 1 }, + { -1, 1, 0, 1 , -1, 4, -3, 4 }, + { -3, 4, -3, 4 , -1, 1, 0, 1 }, + }; + + static const struct accu accu_yuv[4] = { + { 0, 1, 0, 1, 0, 1, 0, 1 }, + { 0, 1, 0, 1, 0, 1, 0, 1 }, + { -1, 1, 0, 1, 0, 1, 0, 1 }, + { 0, 1, 0, 1, -1, 1, 0, 1 }, + }; + + switch (rotation) { + case OMAP_DSS_ROT_0: + idx = 0; + break; + case OMAP_DSS_ROT_90: + idx = 1; + break; + case OMAP_DSS_ROT_180: + idx = 2; + break; + case OMAP_DSS_ROT_270: + idx = 3; + break; + default: + BUG(); + return; + } + + switch (color_mode) { + case OMAP_DSS_COLOR_NV12: + if (ilace) + accu_table = accu_nv12_ilace; + else + accu_table = accu_nv12; + break; + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + accu_table = accu_yuv; + break; + default: + BUG(); + return; + } + + accu_val = &accu_table[idx]; + + chroma_hinc = 1024 * orig_width / out_width; + chroma_vinc = 1024 * orig_height / out_height; + + h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024; + h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024; + v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024; + v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024; + + dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0); + dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1); +} + +static void dispc_ovl_set_scaling_common(enum omap_plane plane, + u16 orig_width, u16 orig_height, + u16 out_width, u16 out_height, + bool ilace, bool five_taps, + bool fieldmode, enum omap_color_mode color_mode, + u8 rotation) +{ + int accu0 = 0; + int accu1 = 0; + u32 l; + + dispc_ovl_set_scale_param(plane, orig_width, orig_height, + out_width, out_height, five_taps, + rotation, DISPC_COLOR_COMPONENT_RGB_Y); + l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); + + /* RESIZEENABLE and VERTICALTAPS */ + l &= ~((0x3 << 5) | (0x1 << 21)); + l |= (orig_width != out_width) ? (1 << 5) : 0; + l |= (orig_height != out_height) ? (1 << 6) : 0; + l |= five_taps ? (1 << 21) : 0; + + /* VRESIZECONF and HRESIZECONF */ + if (dss_has_feature(FEAT_RESIZECONF)) { + l &= ~(0x3 << 7); + l |= (orig_width <= out_width) ? 0 : (1 << 7); + l |= (orig_height <= out_height) ? 0 : (1 << 8); + } + + /* LINEBUFFERSPLIT */ + if (dss_has_feature(FEAT_LINEBUFFERSPLIT)) { + l &= ~(0x1 << 22); + l |= five_taps ? 
(1 << 22) : 0; + } + + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l); + + /* + * field 0 = even field = bottom field + * field 1 = odd field = top field + */ + if (ilace && !fieldmode) { + accu1 = 0; + accu0 = ((1024 * orig_height / out_height) / 2) & 0x3ff; + if (accu0 >= 1024/2) { + accu1 = 1024/2; + accu0 -= accu1; + } + } + + dispc_ovl_set_vid_accu0(plane, 0, accu0); + dispc_ovl_set_vid_accu1(plane, 0, accu1); +} + +static void dispc_ovl_set_scaling_uv(enum omap_plane plane, + u16 orig_width, u16 orig_height, + u16 out_width, u16 out_height, + bool ilace, bool five_taps, + bool fieldmode, enum omap_color_mode color_mode, + u8 rotation) +{ + int scale_x = out_width != orig_width; + int scale_y = out_height != orig_height; + bool chroma_upscale = plane != OMAP_DSS_WB ? true : false; + + if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) + return; + if ((color_mode != OMAP_DSS_COLOR_YUV2 && + color_mode != OMAP_DSS_COLOR_UYVY && + color_mode != OMAP_DSS_COLOR_NV12)) { + /* reset chroma resampling for RGB formats */ + if (plane != OMAP_DSS_WB) + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8); + return; + } + + dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width, + out_height, ilace, color_mode, rotation); + + switch (color_mode) { + case OMAP_DSS_COLOR_NV12: + if (chroma_upscale) { + /* UV is subsampled by 2 horizontally and vertically */ + orig_height >>= 1; + orig_width >>= 1; + } else { + /* UV is downsampled by 2 horizontally and vertically */ + orig_height <<= 1; + orig_width <<= 1; + } + + break; + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + /* For YUV422 with 90/270 rotation, we don't upsample chroma */ + if (rotation == OMAP_DSS_ROT_0 || + rotation == OMAP_DSS_ROT_180) { + if (chroma_upscale) + /* UV is subsampled by 2 horizontally */ + orig_width >>= 1; + else + /* UV is downsampled by 2 horizontally */ + orig_width <<= 1; + } + + /* must use FIR for YUV422 if rotated */ + if (rotation != OMAP_DSS_ROT_0) + scale_x = scale_y = true; + + break; + default: + BUG(); + return; + } + + if (out_width != orig_width) + scale_x = true; + if (out_height != orig_height) + scale_y = true; + + dispc_ovl_set_scale_param(plane, orig_width, orig_height, + out_width, out_height, five_taps, + rotation, DISPC_COLOR_COMPONENT_UV); + + if (plane != OMAP_DSS_WB) + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), + (scale_x || scale_y) ? 1 : 0, 8, 8); + + /* set H scaling */ + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5); + /* set V scaling */ + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 
1 : 0, 6, 6); +} + +static void dispc_ovl_set_scaling(enum omap_plane plane, + u16 orig_width, u16 orig_height, + u16 out_width, u16 out_height, + bool ilace, bool five_taps, + bool fieldmode, enum omap_color_mode color_mode, + u8 rotation) +{ + BUG_ON(plane == OMAP_DSS_GFX); + + dispc_ovl_set_scaling_common(plane, + orig_width, orig_height, + out_width, out_height, + ilace, five_taps, + fieldmode, color_mode, + rotation); + + dispc_ovl_set_scaling_uv(plane, + orig_width, orig_height, + out_width, out_height, + ilace, five_taps, + fieldmode, color_mode, + rotation); +} + +static void dispc_ovl_set_rotation_attrs(enum omap_plane plane, u8 rotation, + enum omap_dss_rotation_type rotation_type, + bool mirroring, enum omap_color_mode color_mode) +{ + bool row_repeat = false; + int vidrot = 0; + + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) { + + if (mirroring) { + switch (rotation) { + case OMAP_DSS_ROT_0: + vidrot = 2; + break; + case OMAP_DSS_ROT_90: + vidrot = 1; + break; + case OMAP_DSS_ROT_180: + vidrot = 0; + break; + case OMAP_DSS_ROT_270: + vidrot = 3; + break; + } + } else { + switch (rotation) { + case OMAP_DSS_ROT_0: + vidrot = 0; + break; + case OMAP_DSS_ROT_90: + vidrot = 1; + break; + case OMAP_DSS_ROT_180: + vidrot = 2; + break; + case OMAP_DSS_ROT_270: + vidrot = 3; + break; + } + } + + if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270) + row_repeat = true; + else + row_repeat = false; + } + + /* + * OMAP4/5 Errata i631: + * NV12 in 1D mode must use ROTATION=1. Otherwise DSS will fetch extra + * rows beyond the framebuffer, which may cause OCP error. + */ + if (color_mode == OMAP_DSS_COLOR_NV12 && + rotation_type != OMAP_DSS_ROT_TILER) + vidrot = 1; + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12); + if (dss_has_feature(FEAT_ROWREPEATENABLE)) + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), + row_repeat ? 
1 : 0, 18, 18); + + if (color_mode == OMAP_DSS_COLOR_NV12) { + bool doublestride = (rotation_type == OMAP_DSS_ROT_TILER) && + (rotation == OMAP_DSS_ROT_0 || + rotation == OMAP_DSS_ROT_180); + /* DOUBLESTRIDE */ + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22); + } + +} + +static int color_mode_to_bpp(enum omap_color_mode color_mode) +{ + switch (color_mode) { + case OMAP_DSS_COLOR_CLUT1: + return 1; + case OMAP_DSS_COLOR_CLUT2: + return 2; + case OMAP_DSS_COLOR_CLUT4: + return 4; + case OMAP_DSS_COLOR_CLUT8: + case OMAP_DSS_COLOR_NV12: + return 8; + case OMAP_DSS_COLOR_RGB12U: + case OMAP_DSS_COLOR_RGB16: + case OMAP_DSS_COLOR_ARGB16: + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + case OMAP_DSS_COLOR_RGBA16: + case OMAP_DSS_COLOR_RGBX16: + case OMAP_DSS_COLOR_ARGB16_1555: + case OMAP_DSS_COLOR_XRGB16_1555: + return 16; + case OMAP_DSS_COLOR_RGB24P: + return 24; + case OMAP_DSS_COLOR_RGB24U: + case OMAP_DSS_COLOR_ARGB32: + case OMAP_DSS_COLOR_RGBA32: + case OMAP_DSS_COLOR_RGBX32: + return 32; + default: + BUG(); + return 0; + } +} + +static s32 pixinc(int pixels, u8 ps) +{ + if (pixels == 1) + return 1; + else if (pixels > 1) + return 1 + (pixels - 1) * ps; + else if (pixels < 0) + return 1 - (-pixels + 1) * ps; + else + BUG(); + return 0; +} + +static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, + u16 screen_width, + u16 width, u16 height, + enum omap_color_mode color_mode, bool fieldmode, + unsigned int field_offset, + unsigned *offset0, unsigned *offset1, + s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim) +{ + u8 ps; + + /* FIXME CLUT formats */ + switch (color_mode) { + case OMAP_DSS_COLOR_CLUT1: + case OMAP_DSS_COLOR_CLUT2: + case OMAP_DSS_COLOR_CLUT4: + case OMAP_DSS_COLOR_CLUT8: + BUG(); + return; + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + ps = 4; + break; + default: + ps = color_mode_to_bpp(color_mode) / 8; + break; + } + + DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width, + width, height); + + /* + * field 0 = even field = bottom field + * field 1 = odd field = top field + */ + switch (rotation + mirror * 4) { + case OMAP_DSS_ROT_0: + case OMAP_DSS_ROT_180: + /* + * If the pixel format is YUV or UYVY divide the width + * of the image by 2 for 0 and 180 degree rotation. + */ + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + width = width >> 1; + case OMAP_DSS_ROT_90: + case OMAP_DSS_ROT_270: + *offset1 = 0; + if (field_offset) + *offset0 = field_offset * screen_width * ps; + else + *offset0 = 0; + + *row_inc = pixinc(1 + + (y_predecim * screen_width - x_predecim * width) + + (fieldmode ? screen_width : 0), ps); + *pix_inc = pixinc(x_predecim, ps); + break; + + case OMAP_DSS_ROT_0 + 4: + case OMAP_DSS_ROT_180 + 4: + /* If the pixel format is YUV or UYVY divide the width + * of the image by 2 for 0 degree and 180 degree + */ + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + width = width >> 1; + case OMAP_DSS_ROT_90 + 4: + case OMAP_DSS_ROT_270 + 4: + *offset1 = 0; + if (field_offset) + *offset0 = field_offset * screen_width * ps; + else + *offset0 = 0; + *row_inc = pixinc(1 - + (y_predecim * screen_width + x_predecim * width) - + (fieldmode ? 
screen_width : 0), ps); + *pix_inc = pixinc(x_predecim, ps); + break; + + default: + BUG(); + return; + } +} + +static void calc_dma_rotation_offset(u8 rotation, bool mirror, + u16 screen_width, + u16 width, u16 height, + enum omap_color_mode color_mode, bool fieldmode, + unsigned int field_offset, + unsigned *offset0, unsigned *offset1, + s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim) +{ + u8 ps; + u16 fbw, fbh; + + /* FIXME CLUT formats */ + switch (color_mode) { + case OMAP_DSS_COLOR_CLUT1: + case OMAP_DSS_COLOR_CLUT2: + case OMAP_DSS_COLOR_CLUT4: + case OMAP_DSS_COLOR_CLUT8: + BUG(); + return; + default: + ps = color_mode_to_bpp(color_mode) / 8; + break; + } + + DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width, + width, height); + + /* width & height are overlay sizes, convert to fb sizes */ + + if (rotation == OMAP_DSS_ROT_0 || rotation == OMAP_DSS_ROT_180) { + fbw = width; + fbh = height; + } else { + fbw = height; + fbh = width; + } + + /* + * field 0 = even field = bottom field + * field 1 = odd field = top field + */ + switch (rotation + mirror * 4) { + case OMAP_DSS_ROT_0: + *offset1 = 0; + if (field_offset) + *offset0 = *offset1 + field_offset * screen_width * ps; + else + *offset0 = *offset1; + *row_inc = pixinc(1 + + (y_predecim * screen_width - fbw * x_predecim) + + (fieldmode ? screen_width : 0), ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(x_predecim, 2 * ps); + else + *pix_inc = pixinc(x_predecim, ps); + break; + case OMAP_DSS_ROT_90: + *offset1 = screen_width * (fbh - 1) * ps; + if (field_offset) + *offset0 = *offset1 + field_offset * ps; + else + *offset0 = *offset1; + *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) + + y_predecim + (fieldmode ? 1 : 0), ps); + *pix_inc = pixinc(-x_predecim * screen_width, ps); + break; + case OMAP_DSS_ROT_180: + *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps; + if (field_offset) + *offset0 = *offset1 - field_offset * screen_width * ps; + else + *offset0 = *offset1; + *row_inc = pixinc(-1 - + (y_predecim * screen_width - fbw * x_predecim) - + (fieldmode ? screen_width : 0), ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(-x_predecim, 2 * ps); + else + *pix_inc = pixinc(-x_predecim, ps); + break; + case OMAP_DSS_ROT_270: + *offset1 = (fbw - 1) * ps; + if (field_offset) + *offset0 = *offset1 - field_offset * ps; + else + *offset0 = *offset1; + *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) - + y_predecim - (fieldmode ? 1 : 0), ps); + *pix_inc = pixinc(x_predecim * screen_width, ps); + break; + + /* mirroring */ + case OMAP_DSS_ROT_0 + 4: + *offset1 = (fbw - 1) * ps; + if (field_offset) + *offset0 = *offset1 + field_offset * screen_width * ps; + else + *offset0 = *offset1; + *row_inc = pixinc(y_predecim * screen_width * 2 - 1 + + (fieldmode ? screen_width : 0), + ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(-x_predecim, 2 * ps); + else + *pix_inc = pixinc(-x_predecim, ps); + break; + + case OMAP_DSS_ROT_90 + 4: + *offset1 = 0; + if (field_offset) + *offset0 = *offset1 + field_offset * ps; + else + *offset0 = *offset1; + *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) + + y_predecim + (fieldmode ? 
1 : 0),
+				ps);
+		*pix_inc = pixinc(x_predecim * screen_width, ps);
+		break;
+
+	case OMAP_DSS_ROT_180 + 4:
+		*offset1 = screen_width * (fbh - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 - field_offset * screen_width * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(1 - y_predecim * screen_width * 2 -
+				(fieldmode ? screen_width : 0),
+				ps);
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
+			*pix_inc = pixinc(x_predecim, 2 * ps);
+		else
+			*pix_inc = pixinc(x_predecim, ps);
+		break;
+
+	case OMAP_DSS_ROT_270 + 4:
+		*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
+		if (field_offset)
+			*offset0 = *offset1 - field_offset * ps;
+		else
+			*offset0 = *offset1;
+		*row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
+				y_predecim - (fieldmode ? 1 : 0),
+				ps);
+		*pix_inc = pixinc(-x_predecim * screen_width, ps);
+		break;
+
+	default:
+		BUG();
+		return;
+	}
+}
+
+static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
+		enum omap_color_mode color_mode, bool fieldmode,
+		unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+		s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
+{
+	u8 ps;
+
+	switch (color_mode) {
+	case OMAP_DSS_COLOR_CLUT1:
+	case OMAP_DSS_COLOR_CLUT2:
+	case OMAP_DSS_COLOR_CLUT4:
+	case OMAP_DSS_COLOR_CLUT8:
+		BUG();
+		return;
+	default:
+		ps = color_mode_to_bpp(color_mode) / 8;
+		break;
+	}
+
+	DSSDBG("scrw %d, width %d\n", screen_width, width);
+
+	/*
+	 * field 0 = even field = bottom field
+	 * field 1 = odd field = top field
+	 */
+	*offset1 = 0;
+	if (field_offset)
+		*offset0 = *offset1 + field_offset * screen_width * ps;
+	else
+		*offset0 = *offset1;
+	*row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
+			(fieldmode ? screen_width : 0), ps);
+	if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+		color_mode == OMAP_DSS_COLOR_UYVY)
+		*pix_inc = pixinc(x_predecim, 2 * ps);
+	else
+		*pix_inc = pixinc(x_predecim, ps);
+}
+
+/*
+ * This function is used to avoid sync lost errors on OMAP3, caused by some
+ * undocumented horizontal position and timing related limitations.
+ */
+static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
+		const struct omap_video_timings *t, u16 pos_x,
+		u16 width, u16 height, u16 out_width, u16 out_height,
+		bool five_taps)
+{
+	const int ds = DIV_ROUND_UP(height, out_height);
+	unsigned long nonactive;
+	static const u8 limits[3] = { 8, 10, 20 };
+	u64 val, blank;
+	int i;
+
+	nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+
+	i = 0;
+	if (out_height < height)
+		i++;
+	if (out_width < width)
+		i++;
+	blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+	DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
+	if (blank <= limits[i])
+		return -EINVAL;
+
+	/* FIXME add checks for 3-tap filter once the limitations are known */
+	if (!five_taps)
+		return 0;
+
+	/*
+	 * Pixel data should be prepared before the visible display point
+	 * starts. So, at least DS-2 lines must have already been fetched by
+	 * DISPC during the nonactive - pos_x period.
+	 */
+	val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
+	DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
+		val, max(0, ds - 2) * width);
+	if (val < max(0, ds - 2) * width)
+		return -EINVAL;
+
+	/*
+	 * All lines need to be refilled during the nonactive period, as only
+	 * one line can be loaded during the active period. So, at least
+	 * DS - 1 lines should be loaded during the nonactive period.
+ */ + val = div_u64((u64)nonactive * lclk, pclk); + DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n", + val, max(0, ds - 1) * width); + if (val < max(0, ds - 1) * width) + return -EINVAL; + + return 0; +} + +static unsigned long calc_core_clk_five_taps(unsigned long pclk, + const struct omap_video_timings *mgr_timings, u16 width, + u16 height, u16 out_width, u16 out_height, + enum omap_color_mode color_mode) +{ + u32 core_clk = 0; + u64 tmp; + + if (height <= out_height && width <= out_width) + return (unsigned long) pclk; + + if (height > out_height) { + unsigned int ppl = mgr_timings->x_res; + + tmp = (u64)pclk * height * out_width; + do_div(tmp, 2 * out_height * ppl); + core_clk = tmp; + + if (height > 2 * out_height) { + if (ppl == out_width) + return 0; + + tmp = (u64)pclk * (height - 2 * out_height) * out_width; + do_div(tmp, 2 * out_height * (ppl - out_width)); + core_clk = max_t(u32, core_clk, tmp); + } + } + + if (width > out_width) { + tmp = (u64)pclk * width; + do_div(tmp, out_width); + core_clk = max_t(u32, core_clk, tmp); + + if (color_mode == OMAP_DSS_COLOR_RGB24U) + core_clk <<= 1; + } + + return core_clk; +} + +static unsigned long calc_core_clk_24xx(unsigned long pclk, u16 width, + u16 height, u16 out_width, u16 out_height, bool mem_to_mem) +{ + if (height > out_height && width > out_width) + return pclk * 4; + else + return pclk * 2; +} + +static unsigned long calc_core_clk_34xx(unsigned long pclk, u16 width, + u16 height, u16 out_width, u16 out_height, bool mem_to_mem) +{ + unsigned int hf, vf; + + /* + * FIXME how to determine the 'A' factor + * for the no downscaling case ? + */ + + if (width > 3 * out_width) + hf = 4; + else if (width > 2 * out_width) + hf = 3; + else if (width > out_width) + hf = 2; + else + hf = 1; + if (height > out_height) + vf = 2; + else + vf = 1; + + return pclk * vf * hf; +} + +static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width, + u16 height, u16 out_width, u16 out_height, bool mem_to_mem) +{ + /* + * If the overlay/writeback is in mem to mem mode, there are no + * downscaling limitations with respect to pixel clock, return 1 as + * required core clock to represent that we have sufficient enough + * core clock to do maximum downscaling + */ + if (mem_to_mem) + return 1; + + if (width > out_width) + return DIV_ROUND_UP(pclk, out_width) * width; + else + return pclk; +} + +static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk, + const struct omap_video_timings *mgr_timings, + u16 width, u16 height, u16 out_width, u16 out_height, + enum omap_color_mode color_mode, bool *five_taps, + int *x_predecim, int *y_predecim, int *decim_x, int *decim_y, + u16 pos_x, unsigned long *core_clk, bool mem_to_mem) +{ + int error; + u16 in_width, in_height; + int min_factor = min(*decim_x, *decim_y); + const int maxsinglelinewidth = + dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); + + *five_taps = false; + + do { + in_height = height / *decim_y; + in_width = width / *decim_x; + *core_clk = dispc.feat->calc_core_clk(pclk, in_width, + in_height, out_width, out_height, mem_to_mem); + error = (in_width > maxsinglelinewidth || !*core_clk || + *core_clk > dispc_core_clk_rate()); + if (error) { + if (*decim_x == *decim_y) { + *decim_x = min_factor; + ++*decim_y; + } else { + swap(*decim_x, *decim_y); + if (*decim_x < *decim_y) + ++*decim_x; + } + } + } while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error); + + if (error) { + DSSERR("failed to find scaling settings\n"); + return -EINVAL; + } + + if 
(in_width > maxsinglelinewidth) { + DSSERR("Cannot scale max input width exceeded"); + return -EINVAL; + } + return 0; +} + +static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk, + const struct omap_video_timings *mgr_timings, + u16 width, u16 height, u16 out_width, u16 out_height, + enum omap_color_mode color_mode, bool *five_taps, + int *x_predecim, int *y_predecim, int *decim_x, int *decim_y, + u16 pos_x, unsigned long *core_clk, bool mem_to_mem) +{ + int error; + u16 in_width, in_height; + const int maxsinglelinewidth = + dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); + + do { + in_height = height / *decim_y; + in_width = width / *decim_x; + *five_taps = in_height > out_height; + + if (in_width > maxsinglelinewidth) + if (in_height > out_height && + in_height < out_height * 2) + *five_taps = false; +again: + if (*five_taps) + *core_clk = calc_core_clk_five_taps(pclk, mgr_timings, + in_width, in_height, out_width, + out_height, color_mode); + else + *core_clk = dispc.feat->calc_core_clk(pclk, in_width, + in_height, out_width, out_height, + mem_to_mem); + + error = check_horiz_timing_omap3(pclk, lclk, mgr_timings, + pos_x, in_width, in_height, out_width, + out_height, *five_taps); + if (error && *five_taps) { + *five_taps = false; + goto again; + } + + error = (error || in_width > maxsinglelinewidth * 2 || + (in_width > maxsinglelinewidth && *five_taps) || + !*core_clk || *core_clk > dispc_core_clk_rate()); + + if (!error) { + /* verify that we're inside the limits of scaler */ + if (in_width / 4 > out_width) + error = 1; + + if (*five_taps) { + if (in_height / 4 > out_height) + error = 1; + } else { + if (in_height / 2 > out_height) + error = 1; + } + } + + if (error) + ++*decim_y; + } while (*decim_x <= *x_predecim && *decim_y <= *y_predecim && error); + + if (error) { + DSSERR("failed to find scaling settings\n"); + return -EINVAL; + } + + if (check_horiz_timing_omap3(pclk, lclk, mgr_timings, pos_x, in_width, + in_height, out_width, out_height, *five_taps)) { + DSSERR("horizontal timing too tight\n"); + return -EINVAL; + } + + if (in_width > (maxsinglelinewidth * 2)) { + DSSERR("Cannot setup scaling"); + DSSERR("width exceeds maximum width possible"); + return -EINVAL; + } + + if (in_width > maxsinglelinewidth && *five_taps) { + DSSERR("cannot setup scaling with five taps"); + return -EINVAL; + } + return 0; +} + +static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, + const struct omap_video_timings *mgr_timings, + u16 width, u16 height, u16 out_width, u16 out_height, + enum omap_color_mode color_mode, bool *five_taps, + int *x_predecim, int *y_predecim, int *decim_x, int *decim_y, + u16 pos_x, unsigned long *core_clk, bool mem_to_mem) +{ + u16 in_width, in_width_max; + int decim_x_min = *decim_x; + u16 in_height = height / *decim_y; + const int maxsinglelinewidth = + dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); + const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); + + if (mem_to_mem) { + in_width_max = out_width * maxdownscale; + } else { + in_width_max = dispc_core_clk_rate() / + DIV_ROUND_UP(pclk, out_width); + } + + *decim_x = DIV_ROUND_UP(width, in_width_max); + + *decim_x = *decim_x > decim_x_min ? 
*decim_x : decim_x_min; + if (*decim_x > *x_predecim) + return -EINVAL; + + do { + in_width = width / *decim_x; + } while (*decim_x <= *x_predecim && + in_width > maxsinglelinewidth && ++*decim_x); + + if (in_width > maxsinglelinewidth) { + DSSERR("Cannot scale width exceeds max line width"); + return -EINVAL; + } + + *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height, + out_width, out_height, mem_to_mem); + return 0; +} + +#define DIV_FRAC(dividend, divisor) \ + ((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100)) + +static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk, + enum omap_overlay_caps caps, + const struct omap_video_timings *mgr_timings, + u16 width, u16 height, u16 out_width, u16 out_height, + enum omap_color_mode color_mode, bool *five_taps, + int *x_predecim, int *y_predecim, u16 pos_x, + enum omap_dss_rotation_type rotation_type, bool mem_to_mem) +{ + const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); + const int max_decim_limit = 16; + unsigned long core_clk = 0; + int decim_x, decim_y, ret; + + if (width == out_width && height == out_height) + return 0; + + if (!mem_to_mem && (pclk == 0 || mgr_timings->pixelclock == 0)) { + DSSERR("cannot calculate scaling settings: pclk is zero\n"); + return -EINVAL; + } + + if ((caps & OMAP_DSS_OVL_CAP_SCALE) == 0) + return -EINVAL; + + if (mem_to_mem) { + *x_predecim = *y_predecim = 1; + } else { + *x_predecim = max_decim_limit; + *y_predecim = (rotation_type == OMAP_DSS_ROT_TILER && + dss_has_feature(FEAT_BURST_2D)) ? + 2 : max_decim_limit; + } + + if (color_mode == OMAP_DSS_COLOR_CLUT1 || + color_mode == OMAP_DSS_COLOR_CLUT2 || + color_mode == OMAP_DSS_COLOR_CLUT4 || + color_mode == OMAP_DSS_COLOR_CLUT8) { + *x_predecim = 1; + *y_predecim = 1; + *five_taps = false; + return 0; + } + + decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale); + decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale); + + if (decim_x > *x_predecim || out_width > width * 8) + return -EINVAL; + + if (decim_y > *y_predecim || out_height > height * 8) + return -EINVAL; + + ret = dispc.feat->calc_scaling(pclk, lclk, mgr_timings, width, height, + out_width, out_height, color_mode, five_taps, + x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk, + mem_to_mem); + if (ret) + return ret; + + DSSDBG("%dx%d -> %dx%d (%d.%02d x %d.%02d), decim %dx%d %dx%d (%d.%02d x %d.%02d), taps %d, req clk %lu, cur clk %lu\n", + width, height, + out_width, out_height, + out_width / width, DIV_FRAC(out_width, width), + out_height / height, DIV_FRAC(out_height, height), + + decim_x, decim_y, + width / decim_x, height / decim_y, + out_width / (width / decim_x), DIV_FRAC(out_width, width / decim_x), + out_height / (height / decim_y), DIV_FRAC(out_height, height / decim_y), + + *five_taps ? 
5 : 3, + core_clk, dispc_core_clk_rate()); + + if (!core_clk || core_clk > dispc_core_clk_rate()) { + DSSERR("failed to set up scaling, " + "required core clk rate = %lu Hz, " + "current core clk rate = %lu Hz\n", + core_clk, dispc_core_clk_rate()); + return -EINVAL; + } + + *x_predecim = decim_x; + *y_predecim = decim_y; + return 0; +} + +int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel, + const struct omap_overlay_info *oi, + const struct omap_video_timings *timings, + int *x_predecim, int *y_predecim) +{ + enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane); + bool five_taps = true; + bool fieldmode = false; + u16 in_height = oi->height; + u16 in_width = oi->width; + bool ilace = timings->interlace; + u16 out_width, out_height; + int pos_x = oi->pos_x; + unsigned long pclk = dispc_mgr_pclk_rate(channel); + unsigned long lclk = dispc_mgr_lclk_rate(channel); + + out_width = oi->out_width == 0 ? oi->width : oi->out_width; + out_height = oi->out_height == 0 ? oi->height : oi->out_height; + + if (ilace && oi->height == out_height) + fieldmode = true; + + if (ilace) { + if (fieldmode) + in_height /= 2; + out_height /= 2; + + DSSDBG("adjusting for ilace: height %d, out_height %d\n", + in_height, out_height); + } + + if (!dss_feat_color_mode_supported(plane, oi->color_mode)) + return -EINVAL; + + return dispc_ovl_calc_scaling(pclk, lclk, caps, timings, in_width, + in_height, out_width, out_height, oi->color_mode, + &five_taps, x_predecim, y_predecim, pos_x, + oi->rotation_type, false); +} +EXPORT_SYMBOL(dispc_ovl_check); + +static int dispc_ovl_setup_common(enum omap_plane plane, + enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr, + u16 screen_width, int pos_x, int pos_y, u16 width, u16 height, + u16 out_width, u16 out_height, enum omap_color_mode color_mode, + u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha, + u8 global_alpha, enum omap_dss_rotation_type rotation_type, + bool replication, const struct omap_video_timings *mgr_timings, + bool mem_to_mem) +{ + bool five_taps = true; + bool fieldmode = false; + int r, cconv = 0; + unsigned offset0, offset1; + s32 row_inc; + s32 pix_inc; + u16 frame_width, frame_height; + unsigned int field_offset = 0; + u16 in_height = height; + u16 in_width = width; + int x_predecim = 1, y_predecim = 1; + bool ilace = mgr_timings->interlace; + unsigned long pclk = dispc_plane_pclk_rate(plane); + unsigned long lclk = dispc_plane_lclk_rate(plane); + + if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER) + return -EINVAL; + + switch (color_mode) { + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + case OMAP_DSS_COLOR_NV12: + if (in_width & 1) { + DSSERR("input width %d is not even for YUV format\n", + in_width); + return -EINVAL; + } + break; + + default: + break; + } + + out_width = out_width == 0 ? width : out_width; + out_height = out_height == 0 ? 
height : out_height; + + if (ilace && height == out_height) + fieldmode = true; + + if (ilace) { + if (fieldmode) + in_height /= 2; + pos_y /= 2; + out_height /= 2; + + DSSDBG("adjusting for ilace: height %d, pos_y %d, " + "out_height %d\n", in_height, pos_y, + out_height); + } + + if (!dss_feat_color_mode_supported(plane, color_mode)) + return -EINVAL; + + r = dispc_ovl_calc_scaling(pclk, lclk, caps, mgr_timings, in_width, + in_height, out_width, out_height, color_mode, + &five_taps, &x_predecim, &y_predecim, pos_x, + rotation_type, mem_to_mem); + if (r) + return r; + + in_width = in_width / x_predecim; + in_height = in_height / y_predecim; + + if (x_predecim > 1 || y_predecim > 1) + DSSDBG("predecimation %d x %x, new input size %d x %d\n", + x_predecim, y_predecim, in_width, in_height); + + switch (color_mode) { + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + case OMAP_DSS_COLOR_NV12: + if (in_width & 1) { + DSSDBG("predecimated input width is not even for YUV format\n"); + DSSDBG("adjusting input width %d -> %d\n", + in_width, in_width & ~1); + + in_width &= ~1; + } + break; + + default: + break; + } + + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY || + color_mode == OMAP_DSS_COLOR_NV12) + cconv = 1; + + if (ilace && !fieldmode) { + /* + * when downscaling the bottom field may have to start several + * source lines below the top field. Unfortunately ACCUI + * registers will only hold the fractional part of the offset + * so the integer part must be added to the base address of the + * bottom field. + */ + if (!in_height || in_height == out_height) + field_offset = 0; + else + field_offset = in_height / out_height / 2; + } + + /* Fields are independent but interleaved in memory. */ + if (fieldmode) + field_offset = 1; + + offset0 = 0; + offset1 = 0; + row_inc = 0; + pix_inc = 0; + + if (plane == OMAP_DSS_WB) { + frame_width = out_width; + frame_height = out_height; + } else { + frame_width = in_width; + frame_height = height; + } + + if (rotation_type == OMAP_DSS_ROT_TILER) + calc_tiler_rotation_offset(screen_width, frame_width, + color_mode, fieldmode, field_offset, + &offset0, &offset1, &row_inc, &pix_inc, + x_predecim, y_predecim); + else if (rotation_type == OMAP_DSS_ROT_DMA) + calc_dma_rotation_offset(rotation, mirror, screen_width, + frame_width, frame_height, + color_mode, fieldmode, field_offset, + &offset0, &offset1, &row_inc, &pix_inc, + x_predecim, y_predecim); + else + calc_vrfb_rotation_offset(rotation, mirror, + screen_width, frame_width, frame_height, + color_mode, fieldmode, field_offset, + &offset0, &offset1, &row_inc, &pix_inc, + x_predecim, y_predecim); + + DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n", + offset0, offset1, row_inc, pix_inc); + + dispc_ovl_set_color_mode(plane, color_mode); + + dispc_ovl_configure_burst_type(plane, rotation_type); + + dispc_ovl_set_ba0(plane, paddr + offset0); + dispc_ovl_set_ba1(plane, paddr + offset1); + + if (OMAP_DSS_COLOR_NV12 == color_mode) { + dispc_ovl_set_ba0_uv(plane, p_uv_addr + offset0); + dispc_ovl_set_ba1_uv(plane, p_uv_addr + offset1); + } + + if (dispc.feat->last_pixel_inc_missing) + row_inc += pix_inc - 1; + + dispc_ovl_set_row_inc(plane, row_inc); + dispc_ovl_set_pix_inc(plane, pix_inc); + + DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, in_width, + in_height, out_width, out_height); + + dispc_ovl_set_pos(plane, caps, pos_x, pos_y); + + dispc_ovl_set_input_size(plane, in_width, in_height); + + if (caps & OMAP_DSS_OVL_CAP_SCALE) { + dispc_ovl_set_scaling(plane, 
in_width, in_height, out_width, + out_height, ilace, five_taps, fieldmode, + color_mode, rotation); + dispc_ovl_set_output_size(plane, out_width, out_height); + dispc_ovl_set_vid_color_conv(plane, cconv); + } + + dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, mirror, + color_mode); + + dispc_ovl_set_zorder(plane, caps, zorder); + dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha); + dispc_ovl_setup_global_alpha(plane, caps, global_alpha); + + dispc_ovl_enable_replication(plane, caps, replication); + + return 0; +} + +int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi, + bool replication, const struct omap_video_timings *mgr_timings, + bool mem_to_mem) +{ + int r; + enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane); + enum omap_channel channel; + + channel = dispc_ovl_get_channel_out(plane); + + DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->" + " %dx%d, cmode %x, rot %d, mir %d, chan %d repl %d\n", + plane, &oi->paddr, &oi->p_uv_addr, oi->screen_width, oi->pos_x, + oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height, + oi->color_mode, oi->rotation, oi->mirror, channel, replication); + + r = dispc_ovl_setup_common(plane, caps, oi->paddr, oi->p_uv_addr, + oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height, + oi->out_width, oi->out_height, oi->color_mode, oi->rotation, + oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha, + oi->rotation_type, replication, mgr_timings, mem_to_mem); + + return r; +} +EXPORT_SYMBOL(dispc_ovl_setup); + +int dispc_wb_setup(const struct omap_dss_writeback_info *wi, + bool mem_to_mem, const struct omap_video_timings *mgr_timings) +{ + int r; + u32 l; + enum omap_plane plane = OMAP_DSS_WB; + const int pos_x = 0, pos_y = 0; + const u8 zorder = 0, global_alpha = 0; + const bool replication = false; + bool truncation; + int in_width = mgr_timings->x_res; + int in_height = mgr_timings->y_res; + enum omap_overlay_caps caps = + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA; + + DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, " + "rot %d, mir %d\n", wi->paddr, wi->p_uv_addr, in_width, + in_height, wi->width, wi->height, wi->color_mode, wi->rotation, + wi->mirror); + + r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr, + wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width, + wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder, + wi->pre_mult_alpha, global_alpha, wi->rotation_type, + replication, mgr_timings, mem_to_mem); + + switch (wi->color_mode) { + case OMAP_DSS_COLOR_RGB16: + case OMAP_DSS_COLOR_RGB24P: + case OMAP_DSS_COLOR_ARGB16: + case OMAP_DSS_COLOR_RGBA16: + case OMAP_DSS_COLOR_RGB12U: + case OMAP_DSS_COLOR_ARGB16_1555: + case OMAP_DSS_COLOR_XRGB16_1555: + case OMAP_DSS_COLOR_RGBX16: + truncation = true; + break; + default: + truncation = false; + break; + } + + /* setup extra DISPC_WB_ATTRIBUTES */ + l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); + l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */ + l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */ + if (mem_to_mem) + l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */ + else + l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */ + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l); + + if (mem_to_mem) { + /* WBDELAYCOUNT */ + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0); + } else { + int wbdelay; + + wbdelay = min(mgr_timings->vfp + mgr_timings->vsw + + mgr_timings->vbp, 255); + + /* WBDELAYCOUNT */ + 
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0); + } + + return r; +} + +int dispc_ovl_enable(enum omap_plane plane, bool enable) +{ + DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0); + + return 0; +} +EXPORT_SYMBOL(dispc_ovl_enable); + +bool dispc_ovl_enabled(enum omap_plane plane) +{ + return REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0); +} +EXPORT_SYMBOL(dispc_ovl_enabled); + +void dispc_mgr_enable(enum omap_channel channel, bool enable) +{ + mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); + /* flush posted write */ + mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); +} +EXPORT_SYMBOL(dispc_mgr_enable); + +bool dispc_mgr_is_enabled(enum omap_channel channel) +{ + return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); +} +EXPORT_SYMBOL(dispc_mgr_is_enabled); + +void dispc_wb_enable(bool enable) +{ + dispc_ovl_enable(OMAP_DSS_WB, enable); +} + +bool dispc_wb_is_enabled(void) +{ + return dispc_ovl_enabled(OMAP_DSS_WB); +} + +static void dispc_lcd_enable_signal_polarity(bool act_high) +{ + if (!dss_has_feature(FEAT_LCDENABLEPOL)) + return; + + REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29); +} + +void dispc_lcd_enable_signal(bool enable) +{ + if (!dss_has_feature(FEAT_LCDENABLESIGNAL)) + return; + + REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28); +} + +void dispc_pck_free_enable(bool enable) +{ + if (!dss_has_feature(FEAT_PCKFREEENABLE)) + return; + + REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27); +} + +static void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable) +{ + mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable); +} + + +static void dispc_mgr_set_lcd_type_tft(enum omap_channel channel) +{ + mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1); +} + +static void dispc_set_loadmode(enum omap_dss_load_mode mode) +{ + REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1); +} + + +static void dispc_mgr_set_default_color(enum omap_channel channel, u32 color) +{ + dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color); +} + +static void dispc_mgr_set_trans_key(enum omap_channel ch, + enum omap_dss_trans_key_type type, + u32 trans_key) +{ + mgr_fld_write(ch, DISPC_MGR_FLD_TCKSELECTION, type); + + dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); +} + +static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable) +{ + mgr_fld_write(ch, DISPC_MGR_FLD_TCKENABLE, enable); +} + +static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, + bool enable) +{ + if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) + return; + + if (ch == OMAP_DSS_CHANNEL_LCD) + REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18); + else if (ch == OMAP_DSS_CHANNEL_DIGIT) + REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19); +} + +void dispc_mgr_setup(enum omap_channel channel, + const struct omap_overlay_manager_info *info) +{ + dispc_mgr_set_default_color(channel, info->default_color); + dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key); + dispc_mgr_enable_trans_key(channel, info->trans_enabled); + dispc_mgr_enable_alpha_fixed_zorder(channel, + info->partial_alpha_enabled); + if (dss_has_feature(FEAT_CPR)) { + dispc_mgr_enable_cpr(channel, info->cpr_enable); + dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs); + } +} +EXPORT_SYMBOL(dispc_mgr_setup); + +static void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines) +{ + int code; + + switch (data_lines) { + case 12: + code = 0; + break; + case 16: + code = 1; + break; + case 18: + code = 2; + break; + case 24: + code = 3; + 
break; + default: + BUG(); + return; + } + + mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code); +} + +static void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode) +{ + u32 l; + int gpout0, gpout1; + + switch (mode) { + case DSS_IO_PAD_MODE_RESET: + gpout0 = 0; + gpout1 = 0; + break; + case DSS_IO_PAD_MODE_RFBI: + gpout0 = 1; + gpout1 = 0; + break; + case DSS_IO_PAD_MODE_BYPASS: + gpout0 = 1; + gpout1 = 1; + break; + default: + BUG(); + return; + } + + l = dispc_read_reg(DISPC_CONTROL); + l = FLD_MOD(l, gpout0, 15, 15); + l = FLD_MOD(l, gpout1, 16, 16); + dispc_write_reg(DISPC_CONTROL, l); +} + +static void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable) +{ + mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable); +} + +void dispc_mgr_set_lcd_config(enum omap_channel channel, + const struct dss_lcd_mgr_config *config) +{ + dispc_mgr_set_io_pad_mode(config->io_pad_mode); + + dispc_mgr_enable_stallmode(channel, config->stallmode); + dispc_mgr_enable_fifohandcheck(channel, config->fifohandcheck); + + dispc_mgr_set_clock_div(channel, &config->clock_info); + + dispc_mgr_set_tft_data_lines(channel, config->video_port_width); + + dispc_lcd_enable_signal_polarity(config->lcden_sig_polarity); + + dispc_mgr_set_lcd_type_tft(channel); +} +EXPORT_SYMBOL(dispc_mgr_set_lcd_config); + +static bool _dispc_mgr_size_ok(u16 width, u16 height) +{ + return width <= dispc.feat->mgr_width_max && + height <= dispc.feat->mgr_height_max; +} + +static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, + int vsw, int vfp, int vbp) +{ + if (hsw < 1 || hsw > dispc.feat->sw_max || + hfp < 1 || hfp > dispc.feat->hp_max || + hbp < 1 || hbp > dispc.feat->hp_max || + vsw < 1 || vsw > dispc.feat->sw_max || + vfp < 0 || vfp > dispc.feat->vp_max || + vbp < 0 || vbp > dispc.feat->vp_max) + return false; + return true; +} + +static bool _dispc_mgr_pclk_ok(enum omap_channel channel, + unsigned long pclk) +{ + if (dss_mgr_is_lcd(channel)) + return pclk <= dispc.feat->max_lcd_pclk ? true : false; + else + return pclk <= dispc.feat->max_tv_pclk ? 
true : false; +} + +bool dispc_mgr_timings_ok(enum omap_channel channel, + const struct omap_video_timings *timings) +{ + if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res)) + return false; + + if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock)) + return false; + + if (dss_mgr_is_lcd(channel)) { + /* TODO: OMAP4+ supports interlace for LCD outputs */ + if (timings->interlace) + return false; + + if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp, + timings->hbp, timings->vsw, timings->vfp, + timings->vbp)) + return false; + } + + return true; +} + +static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, + int hfp, int hbp, int vsw, int vfp, int vbp, + enum omap_dss_signal_level vsync_level, + enum omap_dss_signal_level hsync_level, + enum omap_dss_signal_edge data_pclk_edge, + enum omap_dss_signal_level de_level, + enum omap_dss_signal_edge sync_pclk_edge) + +{ + u32 timing_h, timing_v, l; + bool onoff, rf, ipc, vs, hs, de; + + timing_h = FLD_VAL(hsw-1, dispc.feat->sw_start, 0) | + FLD_VAL(hfp-1, dispc.feat->fp_start, 8) | + FLD_VAL(hbp-1, dispc.feat->bp_start, 20); + timing_v = FLD_VAL(vsw-1, dispc.feat->sw_start, 0) | + FLD_VAL(vfp, dispc.feat->fp_start, 8) | + FLD_VAL(vbp, dispc.feat->bp_start, 20); + + dispc_write_reg(DISPC_TIMING_H(channel), timing_h); + dispc_write_reg(DISPC_TIMING_V(channel), timing_v); + + switch (vsync_level) { + case OMAPDSS_SIG_ACTIVE_LOW: + vs = true; + break; + case OMAPDSS_SIG_ACTIVE_HIGH: + vs = false; + break; + default: + BUG(); + } + + switch (hsync_level) { + case OMAPDSS_SIG_ACTIVE_LOW: + hs = true; + break; + case OMAPDSS_SIG_ACTIVE_HIGH: + hs = false; + break; + default: + BUG(); + } + + switch (de_level) { + case OMAPDSS_SIG_ACTIVE_LOW: + de = true; + break; + case OMAPDSS_SIG_ACTIVE_HIGH: + de = false; + break; + default: + BUG(); + } + + switch (data_pclk_edge) { + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + ipc = false; + break; + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + ipc = true; + break; + default: + BUG(); + } + + /* always use the 'rf' setting */ + onoff = true; + + switch (sync_pclk_edge) { + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + rf = false; + break; + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + rf = true; + break; + default: + BUG(); + } + + l = FLD_VAL(onoff, 17, 17) | + FLD_VAL(rf, 16, 16) | + FLD_VAL(de, 15, 15) | + FLD_VAL(ipc, 14, 14) | + FLD_VAL(hs, 13, 13) | + FLD_VAL(vs, 12, 12); + + /* always set ALIGN bit when available */ + if (dispc.feat->supports_sync_align) + l |= (1 << 18); + + dispc_write_reg(DISPC_POL_FREQ(channel), l); + + if (dispc.syscon_pol) { + const int shifts[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 1, + [OMAP_DSS_CHANNEL_LCD3] = 2, + }; + + u32 mask, val; + + mask = (1 << 0) | (1 << 3) | (1 << 6); + val = (rf << 0) | (ipc << 3) | (onoff << 6); + + mask <<= 16 + shifts[channel]; + val <<= 16 + shifts[channel]; + + regmap_update_bits(dispc.syscon_pol, dispc.syscon_pol_offset, + mask, val); + } +} + +/* change name to mode? 
*/ +void dispc_mgr_set_timings(enum omap_channel channel, + const struct omap_video_timings *timings) +{ + unsigned xtot, ytot; + unsigned long ht, vt; + struct omap_video_timings t = *timings; + + DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res); + + if (!dispc_mgr_timings_ok(channel, &t)) { + BUG(); + return; + } + + if (dss_mgr_is_lcd(channel)) { + _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw, + t.vfp, t.vbp, t.vsync_level, t.hsync_level, + t.data_pclk_edge, t.de_level, t.sync_pclk_edge); + + xtot = t.x_res + t.hfp + t.hsw + t.hbp; + ytot = t.y_res + t.vfp + t.vsw + t.vbp; + + ht = timings->pixelclock / xtot; + vt = timings->pixelclock / xtot / ytot; + + DSSDBG("pck %u\n", timings->pixelclock); + DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", + t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp); + DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n", + t.vsync_level, t.hsync_level, t.data_pclk_edge, + t.de_level, t.sync_pclk_edge); + + DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); + } else { + if (t.interlace) + t.y_res /= 2; + } + + dispc_mgr_set_size(channel, t.x_res, t.y_res); +} +EXPORT_SYMBOL(dispc_mgr_set_timings); + +static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div, + u16 pck_div) +{ + BUG_ON(lck_div < 1); + BUG_ON(pck_div < 1); + + dispc_write_reg(DISPC_DIVISORo(channel), + FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0)); + + if (!dss_has_feature(FEAT_CORE_CLK_DIV) && + channel == OMAP_DSS_CHANNEL_LCD) + dispc.core_clk_rate = dispc_fclk_rate() / lck_div; +} + +static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div, + int *pck_div) +{ + u32 l; + l = dispc_read_reg(DISPC_DIVISORo(channel)); + *lck_div = FLD_GET(l, 23, 16); + *pck_div = FLD_GET(l, 7, 0); +} + +static unsigned long dispc_fclk_rate(void) +{ + struct dss_pll *pll; + unsigned long r = 0; + + switch (dss_get_dispc_clk_source()) { + case OMAP_DSS_CLK_SRC_FCK: + r = dss_get_dispc_clk_rate(); + break; + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + pll = dss_pll_find("dsi0"); + if (!pll) + pll = dss_pll_find("video0"); + + r = pll->cinfo.clkout[0]; + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + pll = dss_pll_find("dsi1"); + if (!pll) + pll = dss_pll_find("video1"); + + r = pll->cinfo.clkout[0]; + break; + default: + BUG(); + return 0; + } + + return r; +} + +static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) +{ + struct dss_pll *pll; + int lcd; + unsigned long r; + u32 l; + + if (dss_mgr_is_lcd(channel)) { + l = dispc_read_reg(DISPC_DIVISORo(channel)); + + lcd = FLD_GET(l, 23, 16); + + switch (dss_get_lcd_clk_source(channel)) { + case OMAP_DSS_CLK_SRC_FCK: + r = dss_get_dispc_clk_rate(); + break; + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + pll = dss_pll_find("dsi0"); + if (!pll) + pll = dss_pll_find("video0"); + + r = pll->cinfo.clkout[0]; + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + pll = dss_pll_find("dsi1"); + if (!pll) + pll = dss_pll_find("video1"); + + r = pll->cinfo.clkout[0]; + break; + default: + BUG(); + return 0; + } + + return r / lcd; + } else { + return dispc_fclk_rate(); + } +} + +static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) +{ + unsigned long r; + + if (dss_mgr_is_lcd(channel)) { + int pcd; + u32 l; + + l = dispc_read_reg(DISPC_DIVISORo(channel)); + + pcd = FLD_GET(l, 7, 0); + + r = dispc_mgr_lclk_rate(channel); + + return r / pcd; + } else { + return dispc.tv_pclk_rate; + } +} + +void dispc_set_tv_pclk(unsigned long pclk) 
+{ + dispc.tv_pclk_rate = pclk; +} + +static unsigned long dispc_core_clk_rate(void) +{ + return dispc.core_clk_rate; +} + +static unsigned long dispc_plane_pclk_rate(enum omap_plane plane) +{ + enum omap_channel channel; + + if (plane == OMAP_DSS_WB) + return 0; + + channel = dispc_ovl_get_channel_out(plane); + + return dispc_mgr_pclk_rate(channel); +} + +static unsigned long dispc_plane_lclk_rate(enum omap_plane plane) +{ + enum omap_channel channel; + + if (plane == OMAP_DSS_WB) + return 0; + + channel = dispc_ovl_get_channel_out(plane); + + return dispc_mgr_lclk_rate(channel); +} + +static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) +{ + int lcd, pcd; + enum omap_dss_clk_source lcd_clk_src; + + seq_printf(s, "- %s -\n", mgr_desc[channel].name); + + lcd_clk_src = dss_get_lcd_clk_source(channel); + + seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, + dss_get_generic_clk_source_name(lcd_clk_src), + dss_feat_get_clk_source_name(lcd_clk_src)); + + dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); + + seq_printf(s, "lck\t\t%-16lulck div\t%u\n", + dispc_mgr_lclk_rate(channel), lcd); + seq_printf(s, "pck\t\t%-16lupck div\t%u\n", + dispc_mgr_pclk_rate(channel), pcd); +} + +void dispc_dump_clocks(struct seq_file *s) +{ + int lcd; + u32 l; + enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); + + if (dispc_runtime_get()) + return; + + seq_printf(s, "- DISPC -\n"); + + seq_printf(s, "dispc fclk source = %s (%s)\n", + dss_get_generic_clk_source_name(dispc_clk_src), + dss_feat_get_clk_source_name(dispc_clk_src)); + + seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); + + if (dss_has_feature(FEAT_CORE_CLK_DIV)) { + seq_printf(s, "- DISPC-CORE-CLK -\n"); + l = dispc_read_reg(DISPC_DIVISOR); + lcd = FLD_GET(l, 23, 16); + + seq_printf(s, "lck\t\t%-16lulck div\t%u\n", + (dispc_fclk_rate()/lcd), lcd); + } + + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD); + + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2); + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3); + + dispc_runtime_put(); +} + +static void dispc_dump_regs(struct seq_file *s) +{ + int i, j; + const char *mgr_names[] = { + [OMAP_DSS_CHANNEL_LCD] = "LCD", + [OMAP_DSS_CHANNEL_DIGIT] = "TV", + [OMAP_DSS_CHANNEL_LCD2] = "LCD2", + [OMAP_DSS_CHANNEL_LCD3] = "LCD3", + }; + const char *ovl_names[] = { + [OMAP_DSS_GFX] = "GFX", + [OMAP_DSS_VIDEO1] = "VID1", + [OMAP_DSS_VIDEO2] = "VID2", + [OMAP_DSS_VIDEO3] = "VID3", + [OMAP_DSS_WB] = "WB", + }; + const char **p_names; + +#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r)) + + if (dispc_runtime_get()) + return; + + /* DISPC common registers */ + DUMPREG(DISPC_REVISION); + DUMPREG(DISPC_SYSCONFIG); + DUMPREG(DISPC_SYSSTATUS); + DUMPREG(DISPC_IRQSTATUS); + DUMPREG(DISPC_IRQENABLE); + DUMPREG(DISPC_CONTROL); + DUMPREG(DISPC_CONFIG); + DUMPREG(DISPC_CAPABLE); + DUMPREG(DISPC_LINE_STATUS); + DUMPREG(DISPC_LINE_NUMBER); + if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) || + dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) + DUMPREG(DISPC_GLOBAL_ALPHA); + if (dss_has_feature(FEAT_MGR_LCD2)) { + DUMPREG(DISPC_CONTROL2); + DUMPREG(DISPC_CONFIG2); + } + if (dss_has_feature(FEAT_MGR_LCD3)) { + DUMPREG(DISPC_CONTROL3); + DUMPREG(DISPC_CONFIG3); + } + if (dss_has_feature(FEAT_MFLAG)) + DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE); + +#undef DUMPREG + +#define DISPC_REG(i, name) name(i) +#define DUMPREG(i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \ + 
(int)(48 - strlen(#r) - strlen(p_names[i])), " ", \ + dispc_read_reg(DISPC_REG(i, r))) + + p_names = mgr_names; + + /* DISPC channel specific registers */ + for (i = 0; i < dss_feat_get_num_mgrs(); i++) { + DUMPREG(i, DISPC_DEFAULT_COLOR); + DUMPREG(i, DISPC_TRANS_COLOR); + DUMPREG(i, DISPC_SIZE_MGR); + + if (i == OMAP_DSS_CHANNEL_DIGIT) + continue; + + DUMPREG(i, DISPC_TIMING_H); + DUMPREG(i, DISPC_TIMING_V); + DUMPREG(i, DISPC_POL_FREQ); + DUMPREG(i, DISPC_DIVISORo); + + DUMPREG(i, DISPC_DATA_CYCLE1); + DUMPREG(i, DISPC_DATA_CYCLE2); + DUMPREG(i, DISPC_DATA_CYCLE3); + + if (dss_has_feature(FEAT_CPR)) { + DUMPREG(i, DISPC_CPR_COEF_R); + DUMPREG(i, DISPC_CPR_COEF_G); + DUMPREG(i, DISPC_CPR_COEF_B); + } + } + + p_names = ovl_names; + + for (i = 0; i < dss_feat_get_num_ovls(); i++) { + DUMPREG(i, DISPC_OVL_BA0); + DUMPREG(i, DISPC_OVL_BA1); + DUMPREG(i, DISPC_OVL_POSITION); + DUMPREG(i, DISPC_OVL_SIZE); + DUMPREG(i, DISPC_OVL_ATTRIBUTES); + DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD); + DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS); + DUMPREG(i, DISPC_OVL_ROW_INC); + DUMPREG(i, DISPC_OVL_PIXEL_INC); + + if (dss_has_feature(FEAT_PRELOAD)) + DUMPREG(i, DISPC_OVL_PRELOAD); + if (dss_has_feature(FEAT_MFLAG)) + DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD); + + if (i == OMAP_DSS_GFX) { + DUMPREG(i, DISPC_OVL_WINDOW_SKIP); + DUMPREG(i, DISPC_OVL_TABLE_BA); + continue; + } + + DUMPREG(i, DISPC_OVL_FIR); + DUMPREG(i, DISPC_OVL_PICTURE_SIZE); + DUMPREG(i, DISPC_OVL_ACCU0); + DUMPREG(i, DISPC_OVL_ACCU1); + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + DUMPREG(i, DISPC_OVL_BA0_UV); + DUMPREG(i, DISPC_OVL_BA1_UV); + DUMPREG(i, DISPC_OVL_FIR2); + DUMPREG(i, DISPC_OVL_ACCU2_0); + DUMPREG(i, DISPC_OVL_ACCU2_1); + } + if (dss_has_feature(FEAT_ATTR2)) + DUMPREG(i, DISPC_OVL_ATTRIBUTES2); + } + + if (dispc.feat->has_writeback) { + i = OMAP_DSS_WB; + DUMPREG(i, DISPC_OVL_BA0); + DUMPREG(i, DISPC_OVL_BA1); + DUMPREG(i, DISPC_OVL_SIZE); + DUMPREG(i, DISPC_OVL_ATTRIBUTES); + DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD); + DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS); + DUMPREG(i, DISPC_OVL_ROW_INC); + DUMPREG(i, DISPC_OVL_PIXEL_INC); + + if (dss_has_feature(FEAT_MFLAG)) + DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD); + + DUMPREG(i, DISPC_OVL_FIR); + DUMPREG(i, DISPC_OVL_PICTURE_SIZE); + DUMPREG(i, DISPC_OVL_ACCU0); + DUMPREG(i, DISPC_OVL_ACCU1); + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + DUMPREG(i, DISPC_OVL_BA0_UV); + DUMPREG(i, DISPC_OVL_BA1_UV); + DUMPREG(i, DISPC_OVL_FIR2); + DUMPREG(i, DISPC_OVL_ACCU2_0); + DUMPREG(i, DISPC_OVL_ACCU2_1); + } + if (dss_has_feature(FEAT_ATTR2)) + DUMPREG(i, DISPC_OVL_ATTRIBUTES2); + } + +#undef DISPC_REG +#undef DUMPREG + +#define DISPC_REG(plane, name, i) name(plane, i) +#define DUMPREG(plane, name, i) \ + seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \ + (int)(46 - strlen(#name) - strlen(p_names[plane])), " ", \ + dispc_read_reg(DISPC_REG(plane, name, i))) + + /* Video pipeline coefficient registers */ + + /* start from OMAP_DSS_VIDEO1 */ + for (i = 1; i < dss_feat_get_num_ovls(); i++) { + for (j = 0; j < 8; j++) + DUMPREG(i, DISPC_OVL_FIR_COEF_H, j); + + for (j = 0; j < 8; j++) + DUMPREG(i, DISPC_OVL_FIR_COEF_HV, j); + + for (j = 0; j < 5; j++) + DUMPREG(i, DISPC_OVL_CONV_COEF, j); + + if (dss_has_feature(FEAT_FIR_COEF_V)) { + for (j = 0; j < 8; j++) + DUMPREG(i, DISPC_OVL_FIR_COEF_V, j); + } + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + for (j = 0; j < 8; j++) + DUMPREG(i, DISPC_OVL_FIR_COEF_H2, j); + + for (j = 0; j < 8; j++) + DUMPREG(i, DISPC_OVL_FIR_COEF_HV2, j); + + for (j 
= 0; j < 8; j++) + DUMPREG(i, DISPC_OVL_FIR_COEF_V2, j); + } + } + + dispc_runtime_put(); + +#undef DISPC_REG +#undef DUMPREG +} + +/* calculate clock rates using dividers in cinfo */ +int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, + struct dispc_clock_info *cinfo) +{ + if (cinfo->lck_div > 255 || cinfo->lck_div == 0) + return -EINVAL; + if (cinfo->pck_div < 1 || cinfo->pck_div > 255) + return -EINVAL; + + cinfo->lck = dispc_fclk_rate / cinfo->lck_div; + cinfo->pck = cinfo->lck / cinfo->pck_div; + + return 0; +} + +bool dispc_div_calc(unsigned long dispc, + unsigned long pck_min, unsigned long pck_max, + dispc_div_calc_func func, void *data) +{ + int lckd, lckd_start, lckd_stop; + int pckd, pckd_start, pckd_stop; + unsigned long pck, lck; + unsigned long lck_max; + unsigned long pckd_hw_min, pckd_hw_max; + unsigned min_fck_per_pck; + unsigned long fck; + +#ifdef CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK + min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK; +#else + min_fck_per_pck = 0; +#endif + + pckd_hw_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD); + pckd_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD); + + lck_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); + + pck_min = pck_min ? pck_min : 1; + pck_max = pck_max ? pck_max : ULONG_MAX; + + lckd_start = max(DIV_ROUND_UP(dispc, lck_max), 1ul); + lckd_stop = min(dispc / pck_min, 255ul); + + for (lckd = lckd_start; lckd <= lckd_stop; ++lckd) { + lck = dispc / lckd; + + pckd_start = max(DIV_ROUND_UP(lck, pck_max), pckd_hw_min); + pckd_stop = min(lck / pck_min, pckd_hw_max); + + for (pckd = pckd_start; pckd <= pckd_stop; ++pckd) { + pck = lck / pckd; + + /* + * For OMAP2/3 the DISPC fclk is the same as LCD's logic + * clock, which means we're configuring DISPC fclk here + * also. Thus we need to use the calculated lck. For + * OMAP4+ the DISPC fclk is a separate clock. 
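+			 *
+			 * Illustrative numbers only: with a 170 MHz DISPC fclk
+			 * and a panel asking for a pixel clock of roughly 42 MHz,
+			 * the surrounding lckd/pckd search tries lckd = 1 and
+			 * pckd = 4, i.e. lck = 170 MHz and pck = 42.5 MHz, and
+			 * hands that pair to the callback, which is free to
+			 * accept or reject it.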
+ */ + if (dss_has_feature(FEAT_CORE_CLK_DIV)) + fck = dispc_core_clk_rate(); + else + fck = lck; + + if (fck < pck * min_fck_per_pck) + continue; + + if (func(lckd, pckd, lck, pck, data)) + return true; + } + } + + return false; +} + +void dispc_mgr_set_clock_div(enum omap_channel channel, + const struct dispc_clock_info *cinfo) +{ + DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div); + DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div); + + dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div); +} + +int dispc_mgr_get_clock_div(enum omap_channel channel, + struct dispc_clock_info *cinfo) +{ + unsigned long fck; + + fck = dispc_fclk_rate(); + + cinfo->lck_div = REG_GET(DISPC_DIVISORo(channel), 23, 16); + cinfo->pck_div = REG_GET(DISPC_DIVISORo(channel), 7, 0); + + cinfo->lck = fck / cinfo->lck_div; + cinfo->pck = cinfo->lck / cinfo->pck_div; + + return 0; +} + +u32 dispc_read_irqstatus(void) +{ + return dispc_read_reg(DISPC_IRQSTATUS); +} +EXPORT_SYMBOL(dispc_read_irqstatus); + +void dispc_clear_irqstatus(u32 mask) +{ + dispc_write_reg(DISPC_IRQSTATUS, mask); +} +EXPORT_SYMBOL(dispc_clear_irqstatus); + +u32 dispc_read_irqenable(void) +{ + return dispc_read_reg(DISPC_IRQENABLE); +} +EXPORT_SYMBOL(dispc_read_irqenable); + +void dispc_write_irqenable(u32 mask) +{ + u32 old_mask = dispc_read_reg(DISPC_IRQENABLE); + + /* clear the irqstatus for newly enabled irqs */ + dispc_clear_irqstatus((mask ^ old_mask) & mask); + + dispc_write_reg(DISPC_IRQENABLE, mask); +} +EXPORT_SYMBOL(dispc_write_irqenable); + +void dispc_enable_sidle(void) +{ + REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3); /* SIDLEMODE: smart idle */ +} + +void dispc_disable_sidle(void) +{ + REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ +} + +static void _omap_dispc_initial_config(void) +{ + u32 l; + + /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */ + if (dss_has_feature(FEAT_CORE_CLK_DIV)) { + l = dispc_read_reg(DISPC_DIVISOR); + /* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */ + l = FLD_MOD(l, 1, 0, 0); + l = FLD_MOD(l, 1, 23, 16); + dispc_write_reg(DISPC_DIVISOR, l); + + dispc.core_clk_rate = dispc_fclk_rate(); + } + + /* FUNCGATED */ + if (dss_has_feature(FEAT_FUNCGATED)) + REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); + + dispc_setup_color_conv_coef(); + + dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY); + + dispc_init_fifos(); + + dispc_configure_burst_sizes(); + + dispc_ovl_enable_zorder_planes(); + + if (dispc.feat->mstandby_workaround) + REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0); + + if (dss_has_feature(FEAT_MFLAG)) + dispc_init_mflag(); +} + +static const struct dispc_features omap24xx_dispc_feats = { + .sw_start = 5, + .fp_start = 15, + .bp_start = 27, + .sw_max = 64, + .vp_max = 255, + .hp_max = 256, + .mgr_width_start = 10, + .mgr_height_start = 26, + .mgr_width_max = 2048, + .mgr_height_max = 2048, + .max_lcd_pclk = 66500000, + .calc_scaling = dispc_ovl_calc_scaling_24xx, + .calc_core_clk = calc_core_clk_24xx, + .num_fifos = 3, + .no_framedone_tv = true, + .set_max_preload = false, + .last_pixel_inc_missing = true, +}; + +static const struct dispc_features omap34xx_rev1_0_dispc_feats = { + .sw_start = 5, + .fp_start = 15, + .bp_start = 27, + .sw_max = 64, + .vp_max = 255, + .hp_max = 256, + .mgr_width_start = 10, + .mgr_height_start = 26, + .mgr_width_max = 2048, + .mgr_height_max = 2048, + .max_lcd_pclk = 173000000, + .max_tv_pclk = 59000000, + .calc_scaling = dispc_ovl_calc_scaling_34xx, + .calc_core_clk = calc_core_clk_34xx, + .num_fifos = 3, + .no_framedone_tv = true, + 
.set_max_preload = false, + .last_pixel_inc_missing = true, +}; + +static const struct dispc_features omap34xx_rev3_0_dispc_feats = { + .sw_start = 7, + .fp_start = 19, + .bp_start = 31, + .sw_max = 256, + .vp_max = 4095, + .hp_max = 4096, + .mgr_width_start = 10, + .mgr_height_start = 26, + .mgr_width_max = 2048, + .mgr_height_max = 2048, + .max_lcd_pclk = 173000000, + .max_tv_pclk = 59000000, + .calc_scaling = dispc_ovl_calc_scaling_34xx, + .calc_core_clk = calc_core_clk_34xx, + .num_fifos = 3, + .no_framedone_tv = true, + .set_max_preload = false, + .last_pixel_inc_missing = true, +}; + +static const struct dispc_features omap44xx_dispc_feats = { + .sw_start = 7, + .fp_start = 19, + .bp_start = 31, + .sw_max = 256, + .vp_max = 4095, + .hp_max = 4096, + .mgr_width_start = 10, + .mgr_height_start = 26, + .mgr_width_max = 2048, + .mgr_height_max = 2048, + .max_lcd_pclk = 170000000, + .max_tv_pclk = 185625000, + .calc_scaling = dispc_ovl_calc_scaling_44xx, + .calc_core_clk = calc_core_clk_44xx, + .num_fifos = 5, + .gfx_fifo_workaround = true, + .set_max_preload = true, + .supports_sync_align = true, + .has_writeback = true, +}; + +static const struct dispc_features omap54xx_dispc_feats = { + .sw_start = 7, + .fp_start = 19, + .bp_start = 31, + .sw_max = 256, + .vp_max = 4095, + .hp_max = 4096, + .mgr_width_start = 11, + .mgr_height_start = 27, + .mgr_width_max = 4096, + .mgr_height_max = 4096, + .max_lcd_pclk = 170000000, + .max_tv_pclk = 186000000, + .calc_scaling = dispc_ovl_calc_scaling_44xx, + .calc_core_clk = calc_core_clk_44xx, + .num_fifos = 5, + .gfx_fifo_workaround = true, + .mstandby_workaround = true, + .set_max_preload = true, + .supports_sync_align = true, + .has_writeback = true, +}; + +static int dispc_init_features(struct platform_device *pdev) +{ + const struct dispc_features *src; + struct dispc_features *dst; + + dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); + if (!dst) { + dev_err(&pdev->dev, "Failed to allocate DISPC Features\n"); + return -ENOMEM; + } + + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP24xx: + src = &omap24xx_dispc_feats; + break; + + case OMAPDSS_VER_OMAP34xx_ES1: + src = &omap34xx_rev1_0_dispc_feats; + break; + + case OMAPDSS_VER_OMAP34xx_ES3: + case OMAPDSS_VER_OMAP3630: + case OMAPDSS_VER_AM35xx: + case OMAPDSS_VER_AM43xx: + src = &omap34xx_rev3_0_dispc_feats; + break; + + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + src = &omap44xx_dispc_feats; + break; + + case OMAPDSS_VER_OMAP5: + case OMAPDSS_VER_DRA7xx: + src = &omap54xx_dispc_feats; + break; + + default: + return -ENODEV; + } + + memcpy(dst, src, sizeof(*dst)); + dispc.feat = dst; + + return 0; +} + +static irqreturn_t dispc_irq_handler(int irq, void *arg) +{ + if (!dispc.is_enabled) + return IRQ_NONE; + + return dispc.user_handler(irq, dispc.user_data); +} + +int dispc_request_irq(irq_handler_t handler, void *dev_id) +{ + int r; + + if (dispc.user_handler != NULL) + return -EBUSY; + + dispc.user_handler = handler; + dispc.user_data = dev_id; + + /* ensure the dispc_irq_handler sees the values above */ + smp_wmb(); + + r = devm_request_irq(&dispc.pdev->dev, dispc.irq, dispc_irq_handler, + IRQF_SHARED, "OMAP DISPC", &dispc); + if (r) { + dispc.user_handler = NULL; + dispc.user_data = NULL; + } + + return r; +} +EXPORT_SYMBOL(dispc_request_irq); + +void dispc_free_irq(void *dev_id) +{ + devm_free_irq(&dispc.pdev->dev, dispc.irq, &dispc); + + dispc.user_handler = NULL; + dispc.user_data = NULL; +} +EXPORT_SYMBOL(dispc_free_irq); 
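+
+/*
+ * Typical use of the interrupt helpers exported above, as seen from a
+ * DISPC client such as omapdrm. This is only an illustrative sketch:
+ * the handler name, the private data pointer and the choice of the
+ * DISPC_IRQ_VSYNC bit (assumed to come from <video/omapdss.h>) are
+ * examples, not something defined in this file.
+ *
+ *	static irqreturn_t my_dispc_isr(int irq, void *data)
+ *	{
+ *		u32 stat = dispc_read_irqstatus();
+ *
+ *		dispc_clear_irqstatus(stat);
+ *		if (stat & DISPC_IRQ_VSYNC)
+ *			handle_vblank(data);	// client-specific work
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	r = dispc_request_irq(my_dispc_isr, priv);
+ *	if (!r)
+ *		dispc_write_irqenable(dispc_read_irqenable() | DISPC_IRQ_VSYNC);
+ */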
+ +/* DISPC HW IP initialisation */ +static int dispc_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + u32 rev; + int r = 0; + struct resource *dispc_mem; + struct device_node *np = pdev->dev.of_node; + + dispc.pdev = pdev; + + spin_lock_init(&dispc.control_lock); + + r = dispc_init_features(dispc.pdev); + if (r) + return r; + + dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0); + if (!dispc_mem) { + DSSERR("can't get IORESOURCE_MEM DISPC\n"); + return -EINVAL; + } + + dispc.base = devm_ioremap(&pdev->dev, dispc_mem->start, + resource_size(dispc_mem)); + if (!dispc.base) { + DSSERR("can't ioremap DISPC\n"); + return -ENOMEM; + } + + dispc.irq = platform_get_irq(dispc.pdev, 0); + if (dispc.irq < 0) { + DSSERR("platform_get_irq failed\n"); + return -ENODEV; + } + + if (np && of_property_read_bool(np, "syscon-pol")) { + dispc.syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol"); + if (IS_ERR(dispc.syscon_pol)) { + dev_err(&pdev->dev, "failed to get syscon-pol regmap\n"); + return PTR_ERR(dispc.syscon_pol); + } + + if (of_property_read_u32_index(np, "syscon-pol", 1, + &dispc.syscon_pol_offset)) { + dev_err(&pdev->dev, "failed to get syscon-pol offset\n"); + return -EINVAL; + } + } + + pm_runtime_enable(&pdev->dev); + + r = dispc_runtime_get(); + if (r) + goto err_runtime_get; + + _omap_dispc_initial_config(); + + rev = dispc_read_reg(DISPC_REVISION); + dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n", + FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + dispc_runtime_put(); + + dss_init_overlay_managers(); + + dss_debugfs_create_file("dispc", dispc_dump_regs); + + return 0; + +err_runtime_get: + pm_runtime_disable(&pdev->dev); + return r; +} + +static void dispc_unbind(struct device *dev, struct device *master, + void *data) +{ + pm_runtime_disable(dev); + + dss_uninit_overlay_managers(); +} + +static const struct component_ops dispc_component_ops = { + .bind = dispc_bind, + .unbind = dispc_unbind, +}; + +static int dispc_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dispc_component_ops); +} + +static int dispc_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dispc_component_ops); + return 0; +} + +static int dispc_runtime_suspend(struct device *dev) +{ + dispc.is_enabled = false; + /* ensure the dispc_irq_handler sees the is_enabled value */ + smp_wmb(); + /* wait for current handler to finish before turning the DISPC off */ + synchronize_irq(dispc.irq); + + dispc_save_context(); + + return 0; +} + +static int dispc_runtime_resume(struct device *dev) +{ + /* + * The reset value for load mode is 0 (OMAP_DSS_LOAD_CLUT_AND_FRAME) + * but we always initialize it to 2 (OMAP_DSS_LOAD_FRAME_ONLY) in + * _omap_dispc_initial_config(). We can thus use it to detect if + * we have lost register context. 
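+	 *
+	 * Put differently, the REG_GET(DISPC_CONFIG, 2, 1) test below reads
+	 * the two-bit load-mode field: if it still reports the value we
+	 * programmed (OMAP_DSS_LOAD_FRAME_ONLY) the register context
+	 * survived the off period, otherwise DISPC was reset and both the
+	 * initial configuration and the saved context must be reapplied.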
+ */ + if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) { + _omap_dispc_initial_config(); + + dispc_restore_context(); + } + + dispc.is_enabled = true; + /* ensure the dispc_irq_handler sees the is_enabled value */ + smp_wmb(); + + return 0; +} + +static const struct dev_pm_ops dispc_pm_ops = { + .runtime_suspend = dispc_runtime_suspend, + .runtime_resume = dispc_runtime_resume, +}; + +static const struct of_device_id dispc_of_match[] = { + { .compatible = "ti,omap2-dispc", }, + { .compatible = "ti,omap3-dispc", }, + { .compatible = "ti,omap4-dispc", }, + { .compatible = "ti,omap5-dispc", }, + { .compatible = "ti,dra7-dispc", }, + {}, +}; + +static struct platform_driver omap_dispchw_driver = { + .probe = dispc_probe, + .remove = dispc_remove, + .driver = { + .name = "omapdss_dispc", + .pm = &dispc_pm_ops, + .of_match_table = dispc_of_match, + .suppress_bind_attrs = true, + }, +}; + +int __init dispc_init_platform_driver(void) +{ + return platform_driver_register(&omap_dispchw_driver); +} + +void dispc_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_dispchw_driver); +} diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h new file mode 100644 index 000000000000..483744223dd1 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dispc.h @@ -0,0 +1,918 @@ +/* + * linux/drivers/video/omap2/dss/dispc.h + * + * Copyright (C) 2011 Texas Instruments + * Author: Archit Taneja <archit@ti.com> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __OMAP2_DISPC_REG_H +#define __OMAP2_DISPC_REG_H + +/* DISPC common registers */ +#define DISPC_REVISION 0x0000 +#define DISPC_SYSCONFIG 0x0010 +#define DISPC_SYSSTATUS 0x0014 +#define DISPC_IRQSTATUS 0x0018 +#define DISPC_IRQENABLE 0x001C +#define DISPC_CONTROL 0x0040 +#define DISPC_CONFIG 0x0044 +#define DISPC_CAPABLE 0x0048 +#define DISPC_LINE_STATUS 0x005C +#define DISPC_LINE_NUMBER 0x0060 +#define DISPC_GLOBAL_ALPHA 0x0074 +#define DISPC_CONTROL2 0x0238 +#define DISPC_CONFIG2 0x0620 +#define DISPC_DIVISOR 0x0804 +#define DISPC_GLOBAL_BUFFER 0x0800 +#define DISPC_CONTROL3 0x0848 +#define DISPC_CONFIG3 0x084C +#define DISPC_MSTANDBY_CTRL 0x0858 +#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C + +/* DISPC overlay registers */ +#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA0_OFFSET(n)) +#define DISPC_OVL_BA1(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA1_OFFSET(n)) +#define DISPC_OVL_BA0_UV(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA0_UV_OFFSET(n)) +#define DISPC_OVL_BA1_UV(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA1_UV_OFFSET(n)) +#define DISPC_OVL_POSITION(n) (DISPC_OVL_BASE(n) + \ + DISPC_POS_OFFSET(n)) +#define DISPC_OVL_SIZE(n) (DISPC_OVL_BASE(n) + \ + DISPC_SIZE_OFFSET(n)) +#define DISPC_OVL_ATTRIBUTES(n) (DISPC_OVL_BASE(n) + \ + DISPC_ATTR_OFFSET(n)) +#define DISPC_OVL_ATTRIBUTES2(n) (DISPC_OVL_BASE(n) + \ + DISPC_ATTR2_OFFSET(n)) +#define DISPC_OVL_FIFO_THRESHOLD(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIFO_THRESH_OFFSET(n)) +#define DISPC_OVL_FIFO_SIZE_STATUS(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIFO_SIZE_STATUS_OFFSET(n)) +#define DISPC_OVL_ROW_INC(n) (DISPC_OVL_BASE(n) + \ + DISPC_ROW_INC_OFFSET(n)) +#define DISPC_OVL_PIXEL_INC(n) (DISPC_OVL_BASE(n) + \ + DISPC_PIX_INC_OFFSET(n)) +#define DISPC_OVL_WINDOW_SKIP(n) (DISPC_OVL_BASE(n) + \ + DISPC_WINDOW_SKIP_OFFSET(n)) +#define DISPC_OVL_TABLE_BA(n) (DISPC_OVL_BASE(n) + \ + DISPC_TABLE_BA_OFFSET(n)) +#define DISPC_OVL_FIR(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_OFFSET(n)) +#define DISPC_OVL_FIR2(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIR2_OFFSET(n)) +#define DISPC_OVL_PICTURE_SIZE(n) (DISPC_OVL_BASE(n) + \ + DISPC_PIC_SIZE_OFFSET(n)) +#define DISPC_OVL_ACCU0(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU0_OFFSET(n)) +#define DISPC_OVL_ACCU1(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU1_OFFSET(n)) +#define DISPC_OVL_ACCU2_0(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU2_0_OFFSET(n)) +#define DISPC_OVL_ACCU2_1(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU2_1_OFFSET(n)) +#define DISPC_OVL_FIR_COEF_H(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_H_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_HV(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_HV_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_H2(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_H2_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_HV2(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_HV2_OFFSET(n, i)) +#define DISPC_OVL_CONV_COEF(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_CONV_COEF_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_V(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_V_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_V2(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_V2_OFFSET(n, i)) +#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \ + DISPC_PRELOAD_OFFSET(n)) +#define DISPC_OVL_MFLAG_THRESHOLD(n) DISPC_MFLAG_THRESHOLD_OFFSET(n) + +/* DISPC up/downsampling FIR filter coefficient structure */ +struct dispc_coef { + s8 hc4_vc22; + s8 hc3_vc2; + u8 hc2_vc1; + s8 hc1_vc0; + s8 hc0_vc00; +}; + +const struct dispc_coef *dispc_ovl_get_scale_coef(int inc, int five_taps); + +/* DISPC manager/channel specific registers */ +static inline u16 
DISPC_DEFAULT_COLOR(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x004C; + case OMAP_DSS_CHANNEL_DIGIT: + return 0x0050; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03AC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0814; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0054; + case OMAP_DSS_CHANNEL_DIGIT: + return 0x0058; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03B0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0818; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_TIMING_H(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0064; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x0400; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0840; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_TIMING_V(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0068; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x0404; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0844; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_POL_FREQ(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x006C; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x0408; + case OMAP_DSS_CHANNEL_LCD3: + return 0x083C; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_DIVISORo(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0070; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x040C; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0838; + default: + BUG(); + return 0; + } +} + +/* Named as DISPC_SIZE_LCD, DISPC_SIZE_DIGIT and DISPC_SIZE_LCD2 in TRM */ +static inline u16 DISPC_SIZE_MGR(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x007C; + case OMAP_DSS_CHANNEL_DIGIT: + return 0x0078; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03CC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0834; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x01D4; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03C0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0828; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x01D8; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03C4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x082C; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x01DC; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03C8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0830; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0220; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03BC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0824; + default: + BUG(); + return 0; + 
} +} + +static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0224; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03B8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0820; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0228; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03B4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x081C; + default: + BUG(); + return 0; + } +} + +/* DISPC overlay register base addresses */ +static inline u16 DISPC_OVL_BASE(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0080; + case OMAP_DSS_VIDEO1: + return 0x00BC; + case OMAP_DSS_VIDEO2: + return 0x014C; + case OMAP_DSS_VIDEO3: + return 0x0300; + case OMAP_DSS_WB: + return 0x0500; + default: + BUG(); + return 0; + } +} + +/* DISPC overlay register offsets */ +static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0000; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0008; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0004; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x000C; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0544; + case OMAP_DSS_VIDEO2: + return 0x04BC; + case OMAP_DSS_VIDEO3: + return 0x0310; + case OMAP_DSS_WB: + return 0x0118; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0548; + case OMAP_DSS_VIDEO2: + return 0x04C0; + case OMAP_DSS_VIDEO3: + return 0x0314; + case OMAP_DSS_WB: + return 0x011C; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_POS_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0008; + case OMAP_DSS_VIDEO3: + return 0x009C; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x000C; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x00A8; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0020; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0010; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0070; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0568; + case OMAP_DSS_VIDEO2: + return 0x04DC; + case OMAP_DSS_VIDEO3: + return 0x032C; + case OMAP_DSS_WB: + return 0x0310; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0024; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + 
return 0x0014; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x008C; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0028; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0018; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0088; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x002C; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x001C; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x00A4; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0030; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0020; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0098; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0034; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + case OMAP_DSS_VIDEO3: + BUG(); + return 0; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0038; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + case OMAP_DSS_VIDEO3: + BUG(); + return 0; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0024; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0090; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0580; + case OMAP_DSS_VIDEO2: + return 0x055C; + case OMAP_DSS_VIDEO3: + return 0x0424; + case OMAP_DSS_WB: + return 0x290; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0028; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0094; + default: + BUG(); + return 0; + } +} + + +static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x002C; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0000; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0584; + case OMAP_DSS_VIDEO2: + return 0x0560; + case OMAP_DSS_VIDEO3: + return 0x0428; + case OMAP_DSS_WB: + return 0x0294; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0030; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0004; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0588; + case OMAP_DSS_VIDEO2: + return 0x0564; + case 
OMAP_DSS_VIDEO3: + return 0x042C; + case OMAP_DSS_WB: + return 0x0298; + default: + BUG(); + return 0; + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0034 + i * 0x8; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0010 + i * 0x8; + default: + BUG(); + return 0; + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x058C + i * 0x8; + case OMAP_DSS_VIDEO2: + return 0x0568 + i * 0x8; + case OMAP_DSS_VIDEO3: + return 0x0430 + i * 0x8; + case OMAP_DSS_WB: + return 0x02A0 + i * 0x8; + default: + BUG(); + return 0; + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0038 + i * 0x8; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0014 + i * 0x8; + default: + BUG(); + return 0; + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0590 + i * 8; + case OMAP_DSS_VIDEO2: + return 0x056C + i * 0x8; + case OMAP_DSS_VIDEO3: + return 0x0434 + i * 0x8; + case OMAP_DSS_WB: + return 0x02A4 + i * 0x8; + default: + BUG(); + return 0; + } +} + +/* coef index i = {0, 1, 2, 3, 4,} */ +static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0074 + i * 0x4; + default: + BUG(); + return 0; + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x0124 + i * 0x4; + case OMAP_DSS_VIDEO2: + return 0x00B4 + i * 0x4; + case OMAP_DSS_VIDEO3: + case OMAP_DSS_WB: + return 0x0050 + i * 0x4; + default: + BUG(); + return 0; + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + return 0; + case OMAP_DSS_VIDEO1: + return 0x05CC + i * 0x4; + case OMAP_DSS_VIDEO2: + return 0x05A8 + i * 0x4; + case OMAP_DSS_VIDEO3: + return 0x0470 + i * 0x4; + case OMAP_DSS_WB: + return 0x02E0 + i * 0x4; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x01AC; + case OMAP_DSS_VIDEO1: + return 0x0174; + case OMAP_DSS_VIDEO2: + return 0x00E8; + case OMAP_DSS_VIDEO3: + return 0x00A0; + default: + BUG(); + return 0; + } +} + +static inline u16 DISPC_MFLAG_THRESHOLD_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0860; + case OMAP_DSS_VIDEO1: + return 0x0864; + case OMAP_DSS_VIDEO2: + return 0x0868; + case OMAP_DSS_VIDEO3: + return 0x086c; + case OMAP_DSS_WB: + return 0x0870; + default: + BUG(); + return 0; + } +} +#endif diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c 
b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c new file mode 100644 index 000000000000..038c15b04215 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c @@ -0,0 +1,325 @@ +/* + * linux/drivers/video/omap2/dss/dispc_coefs.c + * + * Copyright (C) 2011 Texas Instruments + * Author: Chandrabhanu Mahapatra <cmahapatra@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kernel.h> +#include <video/omapdss.h> + +#include "dispc.h" + +static const struct dispc_coef coef3_M8[8] = { + { 0, 0, 128, 0, 0 }, + { 0, -4, 123, 9, 0 }, + { 0, -4, 108, 24, 0 }, + { 0, -2, 87, 43, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 43, 87, -2, 0 }, + { 0, 24, 108, -4, 0 }, + { 0, 9, 123, -4, 0 }, +}; + +static const struct dispc_coef coef3_M9[8] = { + { 0, 6, 116, 6, 0 }, + { 0, 0, 112, 16, 0 }, + { 0, -2, 100, 30, 0 }, + { 0, -2, 83, 47, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 47, 83, -2, 0 }, + { 0, 30, 100, -2, 0 }, + { 0, 16, 112, 0, 0 }, +}; + +static const struct dispc_coef coef3_M10[8] = { + { 0, 10, 108, 10, 0 }, + { 0, 3, 104, 21, 0 }, + { 0, 0, 94, 34, 0 }, + { 0, -1, 80, 49, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 49, 80, -1, 0 }, + { 0, 34, 94, 0, 0 }, + { 0, 21, 104, 3, 0 }, +}; + +static const struct dispc_coef coef3_M11[8] = { + { 0, 14, 100, 14, 0 }, + { 0, 6, 98, 24, 0 }, + { 0, 2, 90, 36, 0 }, + { 0, 0, 78, 50, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 50, 78, 0, 0 }, + { 0, 36, 90, 2, 0 }, + { 0, 24, 98, 6, 0 }, +}; + +static const struct dispc_coef coef3_M12[8] = { + { 0, 16, 96, 16, 0 }, + { 0, 9, 93, 26, 0 }, + { 0, 4, 86, 38, 0 }, + { 0, 1, 76, 51, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 51, 76, 1, 0 }, + { 0, 38, 86, 4, 0 }, + { 0, 26, 93, 9, 0 }, +}; + +static const struct dispc_coef coef3_M13[8] = { + { 0, 18, 92, 18, 0 }, + { 0, 10, 90, 28, 0 }, + { 0, 5, 83, 40, 0 }, + { 0, 1, 75, 52, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 52, 75, 1, 0 }, + { 0, 40, 83, 5, 0 }, + { 0, 28, 90, 10, 0 }, +}; + +static const struct dispc_coef coef3_M14[8] = { + { 0, 20, 88, 20, 0 }, + { 0, 12, 86, 30, 0 }, + { 0, 6, 81, 41, 0 }, + { 0, 2, 74, 52, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 52, 74, 2, 0 }, + { 0, 41, 81, 6, 0 }, + { 0, 30, 86, 12, 0 }, +}; + +static const struct dispc_coef coef3_M16[8] = { + { 0, 22, 84, 22, 0 }, + { 0, 14, 82, 32, 0 }, + { 0, 8, 78, 42, 0 }, + { 0, 3, 72, 53, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 53, 72, 3, 0 }, + { 0, 42, 78, 8, 0 }, + { 0, 32, 82, 14, 0 }, +}; + +static const struct dispc_coef coef3_M19[8] = { + { 0, 24, 80, 24, 0 }, + { 0, 16, 79, 33, 0 }, + { 0, 9, 76, 43, 0 }, + { 0, 4, 70, 54, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 54, 70, 4, 0 }, + { 0, 43, 76, 9, 0 }, + { 0, 33, 79, 16, 0 }, +}; + +static const struct dispc_coef coef3_M22[8] = { + { 0, 25, 78, 25, 0 }, + { 0, 17, 77, 34, 0 }, + { 0, 10, 74, 44, 0 }, + { 0, 5, 69, 54, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 54, 69, 5, 0 }, + { 0, 44, 74, 10, 0 }, + { 0, 34, 77, 17, 0 }, +}; + +static const struct dispc_coef coef3_M26[8] = { + { 0, 26, 76, 26, 0 }, + { 0, 19, 74, 35, 0 }, + { 0, 11, 72, 
45, 0 }, + { 0, 5, 69, 54, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 54, 69, 5, 0 }, + { 0, 45, 72, 11, 0 }, + { 0, 35, 74, 19, 0 }, +}; + +static const struct dispc_coef coef3_M32[8] = { + { 0, 27, 74, 27, 0 }, + { 0, 19, 73, 36, 0 }, + { 0, 12, 71, 45, 0 }, + { 0, 6, 68, 54, 0 }, + { 0, 64, 64, 0, 0 }, + { 0, 54, 68, 6, 0 }, + { 0, 45, 71, 12, 0 }, + { 0, 36, 73, 19, 0 }, +}; + +static const struct dispc_coef coef5_M8[8] = { + { 0, 0, 128, 0, 0 }, + { -2, 14, 125, -10, 1 }, + { -6, 33, 114, -15, 2 }, + { -10, 55, 98, -16, 1 }, + { 0, -14, 78, 78, -14 }, + { 1, -16, 98, 55, -10 }, + { 2, -15, 114, 33, -6 }, + { 1, -10, 125, 14, -2 }, +}; + +static const struct dispc_coef coef5_M9[8] = { + { -3, 10, 114, 10, -3 }, + { -6, 24, 111, 0, -1 }, + { -8, 40, 103, -7, 0 }, + { -11, 58, 91, -11, 1 }, + { 0, -12, 76, 76, -12 }, + { 1, -11, 91, 58, -11 }, + { 0, -7, 103, 40, -8 }, + { -1, 0, 111, 24, -6 }, +}; + +static const struct dispc_coef coef5_M10[8] = { + { -4, 18, 100, 18, -4 }, + { -6, 30, 99, 8, -3 }, + { -8, 44, 93, 0, -1 }, + { -9, 58, 84, -5, 0 }, + { 0, -8, 72, 72, -8 }, + { 0, -5, 84, 58, -9 }, + { -1, 0, 93, 44, -8 }, + { -3, 8, 99, 30, -6 }, +}; + +static const struct dispc_coef coef5_M11[8] = { + { -5, 23, 92, 23, -5 }, + { -6, 34, 90, 13, -3 }, + { -6, 45, 85, 6, -2 }, + { -6, 57, 78, 0, -1 }, + { 0, -4, 68, 68, -4 }, + { -1, 0, 78, 57, -6 }, + { -2, 6, 85, 45, -6 }, + { -3, 13, 90, 34, -6 }, +}; + +static const struct dispc_coef coef5_M12[8] = { + { -4, 26, 84, 26, -4 }, + { -5, 36, 82, 18, -3 }, + { -4, 46, 78, 10, -2 }, + { -3, 55, 72, 5, -1 }, + { 0, 0, 64, 64, 0 }, + { -1, 5, 72, 55, -3 }, + { -2, 10, 78, 46, -4 }, + { -3, 18, 82, 36, -5 }, +}; + +static const struct dispc_coef coef5_M13[8] = { + { -3, 28, 78, 28, -3 }, + { -3, 37, 76, 21, -3 }, + { -2, 45, 73, 14, -2 }, + { 0, 53, 68, 8, -1 }, + { 0, 3, 61, 61, 3 }, + { -1, 8, 68, 53, 0 }, + { -2, 14, 73, 45, -2 }, + { -3, 21, 76, 37, -3 }, +}; + +static const struct dispc_coef coef5_M14[8] = { + { -2, 30, 72, 30, -2 }, + { -1, 37, 71, 23, -2 }, + { 0, 45, 69, 16, -2 }, + { 3, 52, 64, 10, -1 }, + { 0, 6, 58, 58, 6 }, + { -1, 10, 64, 52, 3 }, + { -2, 16, 69, 45, 0 }, + { -2, 23, 71, 37, -1 }, +}; + +static const struct dispc_coef coef5_M16[8] = { + { 0, 31, 66, 31, 0 }, + { 1, 38, 65, 25, -1 }, + { 3, 44, 62, 20, -1 }, + { 6, 49, 59, 14, 0 }, + { 0, 10, 54, 54, 10 }, + { 0, 14, 59, 49, 6 }, + { -1, 20, 62, 44, 3 }, + { -1, 25, 65, 38, 1 }, +}; + +static const struct dispc_coef coef5_M19[8] = { + { 3, 32, 58, 32, 3 }, + { 4, 38, 58, 27, 1 }, + { 7, 42, 55, 23, 1 }, + { 10, 46, 54, 18, 0 }, + { 0, 14, 50, 50, 14 }, + { 0, 18, 54, 46, 10 }, + { 1, 23, 55, 42, 7 }, + { 1, 27, 58, 38, 4 }, +}; + +static const struct dispc_coef coef5_M22[8] = { + { 4, 33, 54, 33, 4 }, + { 6, 37, 54, 28, 3 }, + { 9, 41, 53, 24, 1 }, + { 12, 45, 51, 20, 0 }, + { 0, 16, 48, 48, 16 }, + { 0, 20, 51, 45, 12 }, + { 1, 24, 53, 41, 9 }, + { 3, 28, 54, 37, 6 }, +}; + +static const struct dispc_coef coef5_M26[8] = { + { 6, 33, 50, 33, 6 }, + { 8, 36, 51, 29, 4 }, + { 11, 40, 50, 25, 2 }, + { 14, 43, 48, 22, 1 }, + { 0, 18, 46, 46, 18 }, + { 1, 22, 48, 43, 14 }, + { 2, 25, 50, 40, 11 }, + { 4, 29, 51, 36, 8 }, +}; + +static const struct dispc_coef coef5_M32[8] = { + { 7, 33, 48, 33, 7 }, + { 10, 36, 48, 29, 5 }, + { 13, 39, 47, 26, 3 }, + { 16, 42, 46, 23, 1 }, + { 0, 19, 45, 45, 19 }, + { 1, 23, 46, 42, 16 }, + { 3, 26, 47, 39, 13 }, + { 5, 29, 48, 36, 10 }, +}; + +const struct dispc_coef *dispc_ovl_get_scale_coef(int inc, int five_taps) +{ + int i; + static const 
struct { + int Mmin; + int Mmax; + const struct dispc_coef *coef_3; + const struct dispc_coef *coef_5; + } coefs[] = { + { 27, 32, coef3_M32, coef5_M32 }, + { 23, 26, coef3_M26, coef5_M26 }, + { 20, 22, coef3_M22, coef5_M22 }, + { 17, 19, coef3_M19, coef5_M19 }, + { 15, 16, coef3_M16, coef5_M16 }, + { 14, 14, coef3_M14, coef5_M14 }, + { 13, 13, coef3_M13, coef5_M13 }, + { 12, 12, coef3_M12, coef5_M12 }, + { 11, 11, coef3_M11, coef5_M11 }, + { 10, 10, coef3_M10, coef5_M10 }, + { 9, 9, coef3_M9, coef5_M9 }, + { 4, 8, coef3_M8, coef5_M8 }, + /* + * When upscaling more than two times, blockiness and outlines + * around the image are observed when M8 tables are used. M11, + * M16 and M19 tables are used to prevent this. + */ + { 3, 3, coef3_M11, coef5_M11 }, + { 2, 2, coef3_M16, coef5_M16 }, + { 0, 1, coef3_M19, coef5_M19 }, + }; + + inc /= 128; + for (i = 0; i < ARRAY_SIZE(coefs); ++i) + if (inc >= coefs[i].Mmin && inc <= coefs[i].Mmax) + return five_taps ? coefs[i].coef_5 : coefs[i].coef_3; + return NULL; +} diff --git a/drivers/gpu/drm/omapdrm/dss/display-sysfs.c b/drivers/gpu/drm/omapdrm/dss/display-sysfs.c new file mode 100644 index 000000000000..6ad0991f8259 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/display-sysfs.c @@ -0,0 +1,356 @@ +/* + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "DISPLAY" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sysfs.h> + +#include <video/omapdss.h> +#include "dss.h" + +static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", + dssdev->name ? + dssdev->name : ""); +} + +static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + omapdss_device_is_enabled(dssdev)); +} + +static ssize_t display_enabled_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) +{ + int r; + bool enable; + + r = strtobool(buf, &enable); + if (r) + return r; + + if (enable == omapdss_device_is_enabled(dssdev)) + return size; + + if (omapdss_device_is_connected(dssdev) == false) + return -ENODEV; + + if (enable) { + r = dssdev->driver->enable(dssdev); + if (r) + return r; + } else { + dssdev->driver->disable(dssdev); + } + + return size; +} + +static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + dssdev->driver->get_te ? 
+ dssdev->driver->get_te(dssdev) : 0); +} + +static ssize_t display_tear_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) +{ + int r; + bool te; + + if (!dssdev->driver->enable_te || !dssdev->driver->get_te) + return -ENOENT; + + r = strtobool(buf, &te); + if (r) + return r; + + r = dssdev->driver->enable_te(dssdev, te); + if (r) + return r; + + return size; +} + +static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) +{ + struct omap_video_timings t; + + if (!dssdev->driver->get_timings) + return -ENOENT; + + dssdev->driver->get_timings(dssdev, &t); + + return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n", + t.pixelclock, + t.x_res, t.hfp, t.hbp, t.hsw, + t.y_res, t.vfp, t.vbp, t.vsw); +} + +static ssize_t display_timings_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) +{ + struct omap_video_timings t = dssdev->panel.timings; + int r, found; + + if (!dssdev->driver->set_timings || !dssdev->driver->check_timings) + return -ENOENT; + + found = 0; +#ifdef CONFIG_OMAP2_DSS_VENC + if (strncmp("pal", buf, 3) == 0) { + t = omap_dss_pal_timings; + found = 1; + } else if (strncmp("ntsc", buf, 4) == 0) { + t = omap_dss_ntsc_timings; + found = 1; + } +#endif + if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu", + &t.pixelclock, + &t.x_res, &t.hfp, &t.hbp, &t.hsw, + &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9) + return -EINVAL; + + r = dssdev->driver->check_timings(dssdev, &t); + if (r) + return r; + + dssdev->driver->disable(dssdev); + dssdev->driver->set_timings(dssdev, &t); + r = dssdev->driver->enable(dssdev); + if (r) + return r; + + return size; +} + +static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) +{ + int rotate; + if (!dssdev->driver->get_rotate) + return -ENOENT; + rotate = dssdev->driver->get_rotate(dssdev); + return snprintf(buf, PAGE_SIZE, "%u\n", rotate); +} + +static ssize_t display_rotate_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) +{ + int rot, r; + + if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) + return -ENOENT; + + r = kstrtoint(buf, 0, &rot); + if (r) + return r; + + r = dssdev->driver->set_rotate(dssdev, rot); + if (r) + return r; + + return size; +} + +static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) +{ + int mirror; + if (!dssdev->driver->get_mirror) + return -ENOENT; + mirror = dssdev->driver->get_mirror(dssdev); + return snprintf(buf, PAGE_SIZE, "%u\n", mirror); +} + +static ssize_t display_mirror_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) +{ + int r; + bool mirror; + + if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror) + return -ENOENT; + + r = strtobool(buf, &mirror); + if (r) + return r; + + r = dssdev->driver->set_mirror(dssdev, mirror); + if (r) + return r; + + return size; +} + +static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) +{ + unsigned int wss; + + if (!dssdev->driver->get_wss) + return -ENOENT; + + wss = dssdev->driver->get_wss(dssdev); + + return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); +} + +static ssize_t display_wss_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) +{ + u32 wss; + int r; + + if (!dssdev->driver->get_wss || !dssdev->driver->set_wss) + return -ENOENT; + + r = kstrtou32(buf, 0, &wss); + if (r) + return r; + + if (wss > 0xfffff) + return -EINVAL; + + r = dssdev->driver->set_wss(dssdev, wss); + if (r) + return r; + + return size; +} + +struct display_attribute { + struct 
attribute attr; + ssize_t (*show)(struct omap_dss_device *, char *); + ssize_t (*store)(struct omap_dss_device *, const char *, size_t); +}; + +#define DISPLAY_ATTR(_name, _mode, _show, _store) \ + struct display_attribute display_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); +static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); +static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, + display_enabled_show, display_enabled_store); +static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, + display_tear_show, display_tear_store); +static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, + display_timings_show, display_timings_store); +static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, + display_rotate_show, display_rotate_store); +static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, + display_mirror_show, display_mirror_store); +static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, + display_wss_show, display_wss_store); + +static struct attribute *display_sysfs_attrs[] = { + &display_attr_name.attr, + &display_attr_display_name.attr, + &display_attr_enabled.attr, + &display_attr_tear_elim.attr, + &display_attr_timings.attr, + &display_attr_rotate.attr, + &display_attr_mirror.attr, + &display_attr_wss.attr, + NULL +}; + +static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct omap_dss_device *dssdev; + struct display_attribute *display_attr; + + dssdev = container_of(kobj, struct omap_dss_device, kobj); + display_attr = container_of(attr, struct display_attribute, attr); + + if (!display_attr->show) + return -ENOENT; + + return display_attr->show(dssdev, buf); +} + +static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t size) +{ + struct omap_dss_device *dssdev; + struct display_attribute *display_attr; + + dssdev = container_of(kobj, struct omap_dss_device, kobj); + display_attr = container_of(attr, struct display_attribute, attr); + + if (!display_attr->store) + return -ENOENT; + + return display_attr->store(dssdev, buf, size); +} + +static const struct sysfs_ops display_sysfs_ops = { + .show = display_attr_show, + .store = display_attr_store, +}; + +static struct kobj_type display_ktype = { + .sysfs_ops = &display_sysfs_ops, + .default_attrs = display_sysfs_attrs, +}; + +int display_init_sysfs(struct platform_device *pdev) +{ + struct omap_dss_device *dssdev = NULL; + int r; + + for_each_dss_dev(dssdev) { + r = kobject_init_and_add(&dssdev->kobj, &display_ktype, + &pdev->dev.kobj, "%s", dssdev->alias); + if (r) { + DSSERR("failed to create sysfs files\n"); + omap_dss_put_device(dssdev); + goto err; + } + } + + return 0; + +err: + display_uninit_sysfs(pdev); + + return r; +} + +void display_uninit_sysfs(struct platform_device *pdev) +{ + struct omap_dss_device *dssdev = NULL; + + for_each_dss_dev(dssdev) { + if (kobject_name(&dssdev->kobj) == NULL) + continue; + + kobject_del(&dssdev->kobj); + kobject_put(&dssdev->kobj); + + memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); + } +} diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c new file mode 100644 index 000000000000..ef5b9027985d --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -0,0 +1,338 @@ +/* + * linux/drivers/video/omap2/dss/display.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "DISPLAY" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/jiffies.h> +#include <linux/platform_device.h> +#include <linux/of.h> + +#include <video/omapdss.h> +#include "dss.h" +#include "dss_features.h" + +void omapdss_default_get_resolution(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres) +{ + *xres = dssdev->panel.timings.x_res; + *yres = dssdev->panel.timings.y_res; +} +EXPORT_SYMBOL(omapdss_default_get_resolution); + +int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev) +{ + switch (dssdev->type) { + case OMAP_DISPLAY_TYPE_DPI: + if (dssdev->phy.dpi.data_lines == 24) + return 24; + else + return 16; + + case OMAP_DISPLAY_TYPE_DBI: + if (dssdev->ctrl.pixel_size == 24) + return 24; + else + return 16; + case OMAP_DISPLAY_TYPE_DSI: + if (dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) > 16) + return 24; + else + return 16; + case OMAP_DISPLAY_TYPE_VENC: + case OMAP_DISPLAY_TYPE_SDI: + case OMAP_DISPLAY_TYPE_HDMI: + case OMAP_DISPLAY_TYPE_DVI: + return 24; + default: + BUG(); + return 0; + } +} +EXPORT_SYMBOL(omapdss_default_get_recommended_bpp); + +void omapdss_default_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + *timings = dssdev->panel.timings; +} +EXPORT_SYMBOL(omapdss_default_get_timings); + +int dss_suspend_all_devices(void) +{ + struct omap_dss_device *dssdev = NULL; + + for_each_dss_dev(dssdev) { + if (!dssdev->driver) + continue; + + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { + dssdev->driver->disable(dssdev); + dssdev->activate_after_resume = true; + } else { + dssdev->activate_after_resume = false; + } + } + + return 0; +} + +int dss_resume_all_devices(void) +{ + struct omap_dss_device *dssdev = NULL; + + for_each_dss_dev(dssdev) { + if (!dssdev->driver) + continue; + + if (dssdev->activate_after_resume) { + dssdev->driver->enable(dssdev); + dssdev->activate_after_resume = false; + } + } + + return 0; +} + +void dss_disable_all_devices(void) +{ + struct omap_dss_device *dssdev = NULL; + + for_each_dss_dev(dssdev) { + if (!dssdev->driver) + continue; + + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) + dssdev->driver->disable(dssdev); + } +} + +static LIST_HEAD(panel_list); +static DEFINE_MUTEX(panel_list_mutex); +static int disp_num_counter; + +int omapdss_register_display(struct omap_dss_device *dssdev) +{ + struct omap_dss_driver *drv = dssdev->driver; + int id; + + /* + * Note: this presumes all the displays are either using DT or non-DT, + * which normally should be the case. This also presumes that all + * displays either have an DT alias, or none has. 
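+	 *
+	 * For example (an illustrative device tree snippet, not something
+	 * this code requires), a board providing
+	 *
+	 *	aliases {
+	 *		display0 = &lcd0;
+	 *		display1 = &hdmi0;
+	 *	};
+	 *
+	 * gets stable "display0"/"display1" names via of_alias_get_id(),
+	 * while boards without display aliases simply get numbers handed
+	 * out from disp_num_counter in registration order.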
+ */ + + if (dssdev->dev->of_node) { + id = of_alias_get_id(dssdev->dev->of_node, "display"); + + if (id < 0) + id = disp_num_counter++; + } else { + id = disp_num_counter++; + } + + snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id); + + /* Use 'label' property for name, if it exists */ + if (dssdev->dev->of_node) + of_property_read_string(dssdev->dev->of_node, "label", + &dssdev->name); + + if (dssdev->name == NULL) + dssdev->name = dssdev->alias; + + if (drv && drv->get_resolution == NULL) + drv->get_resolution = omapdss_default_get_resolution; + if (drv && drv->get_recommended_bpp == NULL) + drv->get_recommended_bpp = omapdss_default_get_recommended_bpp; + if (drv && drv->get_timings == NULL) + drv->get_timings = omapdss_default_get_timings; + + mutex_lock(&panel_list_mutex); + list_add_tail(&dssdev->panel_list, &panel_list); + mutex_unlock(&panel_list_mutex); + return 0; +} +EXPORT_SYMBOL(omapdss_register_display); + +void omapdss_unregister_display(struct omap_dss_device *dssdev) +{ + mutex_lock(&panel_list_mutex); + list_del(&dssdev->panel_list); + mutex_unlock(&panel_list_mutex); +} +EXPORT_SYMBOL(omapdss_unregister_display); + +struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev) +{ + if (!try_module_get(dssdev->owner)) + return NULL; + + if (get_device(dssdev->dev) == NULL) { + module_put(dssdev->owner); + return NULL; + } + + return dssdev; +} +EXPORT_SYMBOL(omap_dss_get_device); + +void omap_dss_put_device(struct omap_dss_device *dssdev) +{ + put_device(dssdev->dev); + module_put(dssdev->owner); +} +EXPORT_SYMBOL(omap_dss_put_device); + +/* + * ref count of the found device is incremented. + * ref count of from-device is decremented. + */ +struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from) +{ + struct list_head *l; + struct omap_dss_device *dssdev; + + mutex_lock(&panel_list_mutex); + + if (list_empty(&panel_list)) { + dssdev = NULL; + goto out; + } + + if (from == NULL) { + dssdev = list_first_entry(&panel_list, struct omap_dss_device, + panel_list); + omap_dss_get_device(dssdev); + goto out; + } + + omap_dss_put_device(from); + + list_for_each(l, &panel_list) { + dssdev = list_entry(l, struct omap_dss_device, panel_list); + if (dssdev == from) { + if (list_is_last(l, &panel_list)) { + dssdev = NULL; + goto out; + } + + dssdev = list_entry(l->next, struct omap_dss_device, + panel_list); + omap_dss_get_device(dssdev); + goto out; + } + } + + WARN(1, "'from' dssdev not found\n"); + + dssdev = NULL; +out: + mutex_unlock(&panel_list_mutex); + return dssdev; +} +EXPORT_SYMBOL(omap_dss_get_next_device); + +struct omap_dss_device *omap_dss_find_device(void *data, + int (*match)(struct omap_dss_device *dssdev, void *data)) +{ + struct omap_dss_device *dssdev = NULL; + + while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) { + if (match(dssdev, data)) + return dssdev; + } + + return NULL; +} +EXPORT_SYMBOL(omap_dss_find_device); + +void videomode_to_omap_video_timings(const struct videomode *vm, + struct omap_video_timings *ovt) +{ + memset(ovt, 0, sizeof(*ovt)); + + ovt->pixelclock = vm->pixelclock; + ovt->x_res = vm->hactive; + ovt->hbp = vm->hback_porch; + ovt->hfp = vm->hfront_porch; + ovt->hsw = vm->hsync_len; + ovt->y_res = vm->vactive; + ovt->vbp = vm->vback_porch; + ovt->vfp = vm->vfront_porch; + ovt->vsw = vm->vsync_len; + + ovt->vsync_level = vm->flags & DISPLAY_FLAGS_VSYNC_HIGH ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + ovt->hsync_level = vm->flags & DISPLAY_FLAGS_HSYNC_HIGH ? 
+ OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + ovt->de_level = vm->flags & DISPLAY_FLAGS_DE_HIGH ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + ovt->data_pclk_edge = vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE ? + OMAPDSS_DRIVE_SIG_RISING_EDGE : + OMAPDSS_DRIVE_SIG_FALLING_EDGE; + + ovt->sync_pclk_edge = ovt->data_pclk_edge; +} +EXPORT_SYMBOL(videomode_to_omap_video_timings); + +void omap_video_timings_to_videomode(const struct omap_video_timings *ovt, + struct videomode *vm) +{ + memset(vm, 0, sizeof(*vm)); + + vm->pixelclock = ovt->pixelclock; + + vm->hactive = ovt->x_res; + vm->hback_porch = ovt->hbp; + vm->hfront_porch = ovt->hfp; + vm->hsync_len = ovt->hsw; + vm->vactive = ovt->y_res; + vm->vback_porch = ovt->vbp; + vm->vfront_porch = ovt->vfp; + vm->vsync_len = ovt->vsw; + + if (ovt->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH) + vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH; + else + vm->flags |= DISPLAY_FLAGS_HSYNC_LOW; + + if (ovt->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH) + vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH; + else + vm->flags |= DISPLAY_FLAGS_VSYNC_LOW; + + if (ovt->de_level == OMAPDSS_SIG_ACTIVE_HIGH) + vm->flags |= DISPLAY_FLAGS_DE_HIGH; + else + vm->flags |= DISPLAY_FLAGS_DE_LOW; + + if (ovt->data_pclk_edge == OMAPDSS_DRIVE_SIG_RISING_EDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + else + vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; +} +EXPORT_SYMBOL(omap_video_timings_to_videomode); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c new file mode 100644 index 000000000000..7953e6a52346 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -0,0 +1,899 @@ +/* + * linux/drivers/video/omap2/dss/dpi.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "DPI" + +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/string.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/component.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +#define HSDIV_DISPC 0 + +struct dpi_data { + struct platform_device *pdev; + + struct regulator *vdds_dsi_reg; + struct dss_pll *pll; + + struct mutex lock; + + struct omap_video_timings timings; + struct dss_lcd_mgr_config mgr_config; + int data_lines; + + struct omap_dss_device output; + + bool port_initialized; +}; + +static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev) +{ + return container_of(dssdev, struct dpi_data, output); +} + +/* only used in non-DT mode */ +static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static struct dss_pll *dpi_get_pll(enum omap_channel channel) +{ + /* + * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL + * would also be used for DISPC fclk. Meaning, when the DPI output is + * disabled, DISPC clock will be disabled, and TV out will stop. + */ + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP24xx: + case OMAPDSS_VER_OMAP34xx_ES1: + case OMAPDSS_VER_OMAP34xx_ES3: + case OMAPDSS_VER_OMAP3630: + case OMAPDSS_VER_AM35xx: + case OMAPDSS_VER_AM43xx: + return NULL; + + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return dss_pll_find("dsi0"); + case OMAP_DSS_CHANNEL_LCD2: + return dss_pll_find("dsi1"); + default: + return NULL; + } + + case OMAPDSS_VER_OMAP5: + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return dss_pll_find("dsi0"); + case OMAP_DSS_CHANNEL_LCD3: + return dss_pll_find("dsi1"); + default: + return NULL; + } + + case OMAPDSS_VER_DRA7xx: + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + case OMAP_DSS_CHANNEL_LCD2: + return dss_pll_find("video0"); + case OMAP_DSS_CHANNEL_LCD3: + return dss_pll_find("video1"); + default: + return NULL; + } + + default: + return NULL; + } +} + +static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC; + case OMAP_DSS_CHANNEL_LCD2: + return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; + case OMAP_DSS_CHANNEL_LCD3: + return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; + default: + /* this shouldn't happen */ + WARN_ON(1); + return OMAP_DSS_CLK_SRC_FCK; + } +} + +struct dpi_clk_calc_ctx { + struct dss_pll *pll; + + /* inputs */ + + unsigned long pck_min, pck_max; + + /* outputs */ + + struct dss_pll_clock_info dsi_cinfo; + unsigned long fck; + struct dispc_clock_info dispc_cinfo; +}; + +static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck, + unsigned long pck, void *data) +{ + struct dpi_clk_calc_ctx *ctx = data; + + /* + * Odd dividers give us uneven duty cycle, causing problem when level + * shifted. So skip all odd dividers when the pixel clock is on the + * higher side. 
+ */ + if (ctx->pck_min >= 100000000) { + if (lckd > 1 && lckd % 2 != 0) + return false; + + if (pckd > 1 && pckd % 2 != 0) + return false; + } + + ctx->dispc_cinfo.lck_div = lckd; + ctx->dispc_cinfo.pck_div = pckd; + ctx->dispc_cinfo.lck = lck; + ctx->dispc_cinfo.pck = pck; + + return true; +} + + +static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc, + void *data) +{ + struct dpi_clk_calc_ctx *ctx = data; + + /* + * Odd dividers give us uneven duty cycle, causing problem when level + * shifted. So skip all odd dividers when the pixel clock is on the + * higher side. + */ + if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) + return false; + + ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; + ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; + + return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, + dpi_calc_dispc_cb, ctx); +} + + +static bool dpi_calc_pll_cb(int n, int m, unsigned long fint, + unsigned long clkdco, + void *data) +{ + struct dpi_clk_calc_ctx *ctx = data; + + ctx->dsi_cinfo.n = n; + ctx->dsi_cinfo.m = m; + ctx->dsi_cinfo.fint = fint; + ctx->dsi_cinfo.clkdco = clkdco; + + return dss_pll_hsdiv_calc(ctx->pll, clkdco, + ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), + dpi_calc_hsdiv_cb, ctx); +} + +static bool dpi_calc_dss_cb(unsigned long fck, void *data) +{ + struct dpi_clk_calc_ctx *ctx = data; + + ctx->fck = fck; + + return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max, + dpi_calc_dispc_cb, ctx); +} + +static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, + struct dpi_clk_calc_ctx *ctx) +{ + unsigned long clkin; + unsigned long pll_min, pll_max; + + memset(ctx, 0, sizeof(*ctx)); + ctx->pll = dpi->pll; + ctx->pck_min = pck - 1000; + ctx->pck_max = pck + 1000; + + pll_min = 0; + pll_max = 0; + + clkin = clk_get_rate(ctx->pll->clkin); + + return dss_pll_calc(ctx->pll, clkin, + pll_min, pll_max, + dpi_calc_pll_cb, ctx); +} + +static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) +{ + int i; + + /* + * DSS fck gives us very few possibilities, so finding a good pixel + * clock may not be possible. We try multiple times to find the clock, + * each time widening the pixel clock range we look for, up to + * +/- ~15MHz. 
+ */ + + for (i = 0; i < 25; ++i) { + bool ok; + + memset(ctx, 0, sizeof(*ctx)); + if (pck > 1000 * i * i * i) + ctx->pck_min = max(pck - 1000 * i * i * i, 0lu); + else + ctx->pck_min = 0; + ctx->pck_max = pck + 1000 * i * i * i; + + ok = dss_div_calc(pck, ctx->pck_min, dpi_calc_dss_cb, ctx); + if (ok) + return ok; + } + + return false; +} + + + +static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, + unsigned long pck_req, unsigned long *fck, int *lck_div, + int *pck_div) +{ + struct dpi_clk_calc_ctx ctx; + int r; + bool ok; + + ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); + if (!ok) + return -EINVAL; + + r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); + if (r) + return r; + + dss_select_lcd_clk_source(channel, + dpi_get_alt_clk_src(channel)); + + dpi->mgr_config.clock_info = ctx.dispc_cinfo; + + *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + *lck_div = ctx.dispc_cinfo.lck_div; + *pck_div = ctx.dispc_cinfo.pck_div; + + return 0; +} + +static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req, + unsigned long *fck, int *lck_div, int *pck_div) +{ + struct dpi_clk_calc_ctx ctx; + int r; + bool ok; + + ok = dpi_dss_clk_calc(pck_req, &ctx); + if (!ok) + return -EINVAL; + + r = dss_set_fck_rate(ctx.fck); + if (r) + return r; + + dpi->mgr_config.clock_info = ctx.dispc_cinfo; + + *fck = ctx.fck; + *lck_div = ctx.dispc_cinfo.lck_div; + *pck_div = ctx.dispc_cinfo.pck_div; + + return 0; +} + +static int dpi_set_mode(struct dpi_data *dpi) +{ + struct omap_dss_device *out = &dpi->output; + struct omap_overlay_manager *mgr = out->manager; + struct omap_video_timings *t = &dpi->timings; + int lck_div = 0, pck_div = 0; + unsigned long fck = 0; + unsigned long pck; + int r = 0; + + if (dpi->pll) + r = dpi_set_dsi_clk(dpi, mgr->id, t->pixelclock, &fck, + &lck_div, &pck_div); + else + r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck, + &lck_div, &pck_div); + if (r) + return r; + + pck = fck / lck_div / pck_div; + + if (pck != t->pixelclock) { + DSSWARN("Could not find exact pixel clock. 
Requested %d Hz, got %lu Hz\n", + t->pixelclock, pck); + + t->pixelclock = pck; + } + + dss_mgr_set_timings(mgr, t); + + return 0; +} + +static void dpi_config_lcd_manager(struct dpi_data *dpi) +{ + struct omap_dss_device *out = &dpi->output; + struct omap_overlay_manager *mgr = out->manager; + + dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + dpi->mgr_config.stallmode = false; + dpi->mgr_config.fifohandcheck = false; + + dpi->mgr_config.video_port_width = dpi->data_lines; + + dpi->mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(mgr, &dpi->mgr_config); +} + +static int dpi_display_enable(struct omap_dss_device *dssdev) +{ + struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + struct omap_dss_device *out = &dpi->output; + int r; + + mutex_lock(&dpi->lock); + + if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi->vdds_dsi_reg) { + DSSERR("no VDSS_DSI regulator\n"); + r = -ENODEV; + goto err_no_reg; + } + + if (out->manager == NULL) { + DSSERR("failed to enable display: no output/manager\n"); + r = -ENODEV; + goto err_no_out_mgr; + } + + if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) { + r = regulator_enable(dpi->vdds_dsi_reg); + if (r) + goto err_reg_enable; + } + + r = dispc_runtime_get(); + if (r) + goto err_get_dispc; + + r = dss_dpi_select_source(out->port_num, out->manager->id); + if (r) + goto err_src_sel; + + if (dpi->pll) { + r = dss_pll_enable(dpi->pll); + if (r) + goto err_dsi_pll_init; + } + + r = dpi_set_mode(dpi); + if (r) + goto err_set_mode; + + dpi_config_lcd_manager(dpi); + + mdelay(2); + + r = dss_mgr_enable(out->manager); + if (r) + goto err_mgr_enable; + + mutex_unlock(&dpi->lock); + + return 0; + +err_mgr_enable: +err_set_mode: + if (dpi->pll) + dss_pll_disable(dpi->pll); +err_dsi_pll_init: +err_src_sel: + dispc_runtime_put(); +err_get_dispc: + if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) + regulator_disable(dpi->vdds_dsi_reg); +err_reg_enable: +err_no_out_mgr: +err_no_reg: + mutex_unlock(&dpi->lock); + return r; +} + +static void dpi_display_disable(struct omap_dss_device *dssdev) +{ + struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + struct omap_overlay_manager *mgr = dpi->output.manager; + + mutex_lock(&dpi->lock); + + dss_mgr_disable(mgr); + + if (dpi->pll) { + dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK); + dss_pll_disable(dpi->pll); + } + + dispc_runtime_put(); + + if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) + regulator_disable(dpi->vdds_dsi_reg); + + mutex_unlock(&dpi->lock); +} + +static void dpi_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + + DSSDBG("dpi_set_timings\n"); + + mutex_lock(&dpi->lock); + + dpi->timings = *timings; + + mutex_unlock(&dpi->lock); +} + +static void dpi_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + + mutex_lock(&dpi->lock); + + *timings = dpi->timings; + + mutex_unlock(&dpi->lock); +} + +static int dpi_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + struct omap_overlay_manager *mgr = dpi->output.manager; + int lck_div, pck_div; + unsigned long fck; + unsigned long pck; + struct dpi_clk_calc_ctx ctx; + bool ok; + + if (mgr && !dispc_mgr_timings_ok(mgr->id, timings)) + return -EINVAL; + + if (timings->pixelclock == 0) + return -EINVAL; + + if (dpi->pll) { + ok = dpi_dsi_clk_calc(dpi, 
timings->pixelclock, &ctx); + if (!ok) + return -EINVAL; + + fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + } else { + ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); + if (!ok) + return -EINVAL; + + fck = ctx.fck; + } + + lck_div = ctx.dispc_cinfo.lck_div; + pck_div = ctx.dispc_cinfo.pck_div; + + pck = fck / lck_div / pck_div; + + timings->pixelclock = pck; + + return 0; +} + +static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines) +{ + struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + + mutex_lock(&dpi->lock); + + dpi->data_lines = data_lines; + + mutex_unlock(&dpi->lock); +} + +static int dpi_verify_dsi_pll(struct dss_pll *pll) +{ + int r; + + /* do initial setup with the PLL to see if it is operational */ + + r = dss_pll_enable(pll); + if (r) + return r; + + dss_pll_disable(pll); + + return 0; +} + +static int dpi_init_regulator(struct dpi_data *dpi) +{ + struct regulator *vdds_dsi; + + if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) + return 0; + + if (dpi->vdds_dsi_reg) + return 0; + + vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi"); + if (IS_ERR(vdds_dsi)) { + if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) + DSSERR("can't get VDDS_DSI regulator\n"); + return PTR_ERR(vdds_dsi); + } + + dpi->vdds_dsi_reg = vdds_dsi; + + return 0; +} + +static void dpi_init_pll(struct dpi_data *dpi) +{ + struct dss_pll *pll; + + if (dpi->pll) + return; + + pll = dpi_get_pll(dpi->output.dispc_channel); + if (!pll) + return; + + /* On DRA7 we need to set a mux to use the PLL */ + if (omapdss_get_version() == OMAPDSS_VER_DRA7xx) + dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel); + + if (dpi_verify_dsi_pll(pll)) { + DSSWARN("DSI PLL not operational\n"); + return; + } + + dpi->pll = pll; +} + +/* + * Return a hardcoded channel for the DPI output. This should work for + * current use cases, but this can be later expanded to either resolve + * the channel in some more dynamic manner, or get the channel as a user + * parameter. 
+ */ +static enum omap_channel dpi_get_channel(int port_num) +{ + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP24xx: + case OMAPDSS_VER_OMAP34xx_ES1: + case OMAPDSS_VER_OMAP34xx_ES3: + case OMAPDSS_VER_OMAP3630: + case OMAPDSS_VER_AM35xx: + case OMAPDSS_VER_AM43xx: + return OMAP_DSS_CHANNEL_LCD; + + case OMAPDSS_VER_DRA7xx: + switch (port_num) { + case 2: + return OMAP_DSS_CHANNEL_LCD3; + case 1: + return OMAP_DSS_CHANNEL_LCD2; + case 0: + default: + return OMAP_DSS_CHANNEL_LCD; + } + + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + return OMAP_DSS_CHANNEL_LCD2; + + case OMAPDSS_VER_OMAP5: + return OMAP_DSS_CHANNEL_LCD3; + + default: + DSSWARN("unsupported DSS version\n"); + return OMAP_DSS_CHANNEL_LCD; + } +} + +static int dpi_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + struct omap_overlay_manager *mgr; + int r; + + r = dpi_init_regulator(dpi); + if (r) + return r; + + dpi_init_pll(dpi); + + mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); + if (!mgr) + return -ENODEV; + + r = dss_mgr_connect(mgr, dssdev); + if (r) + return r; + + r = omapdss_output_set_device(dssdev, dst); + if (r) { + DSSERR("failed to connect output to new device: %s\n", + dst->name); + dss_mgr_disconnect(mgr, dssdev); + return r; + } + + return 0; +} + +static void dpi_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + WARN_ON(dst != dssdev->dst); + + if (dst != dssdev->dst) + return; + + omapdss_output_unset_device(dssdev); + + if (dssdev->manager) + dss_mgr_disconnect(dssdev->manager, dssdev); +} + +static const struct omapdss_dpi_ops dpi_ops = { + .connect = dpi_connect, + .disconnect = dpi_disconnect, + + .enable = dpi_display_enable, + .disable = dpi_display_disable, + + .check_timings = dpi_check_timings, + .set_timings = dpi_set_timings, + .get_timings = dpi_get_timings, + + .set_data_lines = dpi_set_data_lines, +}; + +static void dpi_init_output(struct platform_device *pdev) +{ + struct dpi_data *dpi = dpi_get_data_from_pdev(pdev); + struct omap_dss_device *out = &dpi->output; + + out->dev = &pdev->dev; + out->id = OMAP_DSS_OUTPUT_DPI; + out->output_type = OMAP_DISPLAY_TYPE_DPI; + out->name = "dpi.0"; + out->dispc_channel = dpi_get_channel(0); + out->ops.dpi = &dpi_ops; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void dpi_uninit_output(struct platform_device *pdev) +{ + struct dpi_data *dpi = dpi_get_data_from_pdev(pdev); + struct omap_dss_device *out = &dpi->output; + + omapdss_unregister_output(out); +} + +static void dpi_init_output_port(struct platform_device *pdev, + struct device_node *port) +{ + struct dpi_data *dpi = port->data; + struct omap_dss_device *out = &dpi->output; + int r; + u32 port_num; + + r = of_property_read_u32(port, "reg", &port_num); + if (r) + port_num = 0; + + switch (port_num) { + case 2: + out->name = "dpi.2"; + break; + case 1: + out->name = "dpi.1"; + break; + case 0: + default: + out->name = "dpi.0"; + break; + } + + out->dev = &pdev->dev; + out->id = OMAP_DSS_OUTPUT_DPI; + out->output_type = OMAP_DISPLAY_TYPE_DPI; + out->dispc_channel = dpi_get_channel(port_num); + out->port_num = port_num; + out->ops.dpi = &dpi_ops; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void dpi_uninit_output_port(struct device_node *port) +{ + struct dpi_data *dpi = port->data; + struct omap_dss_device *out = &dpi->output; + + omapdss_unregister_output(out); 
+} + +static int dpi_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dpi_data *dpi; + + dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL); + if (!dpi) + return -ENOMEM; + + dpi->pdev = pdev; + + dev_set_drvdata(&pdev->dev, dpi); + + mutex_init(&dpi->lock); + + dpi_init_output(pdev); + + return 0; +} + +static void dpi_unbind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + dpi_uninit_output(pdev); +} + +static const struct component_ops dpi_component_ops = { + .bind = dpi_bind, + .unbind = dpi_unbind, +}; + +static int dpi_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dpi_component_ops); +} + +static int dpi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dpi_component_ops); + return 0; +} + +static struct platform_driver omap_dpi_driver = { + .probe = dpi_probe, + .remove = dpi_remove, + .driver = { + .name = "omapdss_dpi", + .suppress_bind_attrs = true, + }, +}; + +int __init dpi_init_platform_driver(void) +{ + return platform_driver_register(&omap_dpi_driver); +} + +void dpi_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_dpi_driver); +} + +int dpi_init_port(struct platform_device *pdev, struct device_node *port) +{ + struct dpi_data *dpi; + struct device_node *ep; + u32 datalines; + int r; + + dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL); + if (!dpi) + return -ENOMEM; + + ep = omapdss_of_get_next_endpoint(port, NULL); + if (!ep) + return 0; + + r = of_property_read_u32(ep, "data-lines", &datalines); + if (r) { + DSSERR("failed to parse datalines\n"); + goto err_datalines; + } + + dpi->data_lines = datalines; + + of_node_put(ep); + + dpi->pdev = pdev; + port->data = dpi; + + mutex_init(&dpi->lock); + + dpi_init_output_port(pdev, port); + + dpi->port_initialized = true; + + return 0; + +err_datalines: + of_node_put(ep); + + return r; +} + +void dpi_uninit_port(struct device_node *port) +{ + struct dpi_data *dpi = port->data; + + if (!dpi->port_initialized) + return; + + dpi_uninit_output_port(port); +} diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c new file mode 100644 index 000000000000..43be4b2a7b05 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -0,0 +1,5607 @@ +/* + * linux/drivers/video/omap2/dss/dsi.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "DSI" + +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/module.h> +#include <linux/semaphore.h> +#include <linux/seq_file.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/wait.h> +#include <linux/workqueue.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/component.h> + +#include <video/omapdss.h> +#include <video/mipi_display.h> + +#include "dss.h" +#include "dss_features.h" + +#define DSI_CATCH_MISSING_TE + +struct dsi_reg { u16 module; u16 idx; }; + +#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx }) + +/* DSI Protocol Engine */ + +#define DSI_PROTO 0 +#define DSI_PROTO_SZ 0x200 + +#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000) +#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010) +#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014) +#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018) +#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C) +#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040) +#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044) +#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048) +#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C) +#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050) +#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054) +#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058) +#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C) +#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060) +#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064) +#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068) +#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C) +#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070) +#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074) +#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078) +#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C) +#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080) +#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084) +#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088) +#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C) +#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090) +#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094) +#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20)) +#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20)) +#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20)) +#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20)) +#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20)) +#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20)) +#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20)) + +/* DSIPHY_SCP */ + +#define DSI_PHY 1 +#define DSI_PHY_OFFSET 0x200 +#define DSI_PHY_SZ 0x40 + +#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000) +#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004) +#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008) +#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014) +#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028) + +/* DSI_PLL_CTRL_SCP */ + +#define DSI_PLL 2 +#define DSI_PLL_OFFSET 0x300 +#define DSI_PLL_SZ 0x20 + +#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000) +#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004) +#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008) +#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 
0x000C) +#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010) + +#define REG_GET(dsidev, idx, start, end) \ + FLD_GET(dsi_read_reg(dsidev, idx), start, end) + +#define REG_FLD_MOD(dsidev, idx, val, start, end) \ + dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end)) + +/* Global interrupts */ +#define DSI_IRQ_VC0 (1 << 0) +#define DSI_IRQ_VC1 (1 << 1) +#define DSI_IRQ_VC2 (1 << 2) +#define DSI_IRQ_VC3 (1 << 3) +#define DSI_IRQ_WAKEUP (1 << 4) +#define DSI_IRQ_RESYNC (1 << 5) +#define DSI_IRQ_PLL_LOCK (1 << 7) +#define DSI_IRQ_PLL_UNLOCK (1 << 8) +#define DSI_IRQ_PLL_RECALL (1 << 9) +#define DSI_IRQ_COMPLEXIO_ERR (1 << 10) +#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14) +#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15) +#define DSI_IRQ_TE_TRIGGER (1 << 16) +#define DSI_IRQ_ACK_TRIGGER (1 << 17) +#define DSI_IRQ_SYNC_LOST (1 << 18) +#define DSI_IRQ_LDO_POWER_GOOD (1 << 19) +#define DSI_IRQ_TA_TIMEOUT (1 << 20) +#define DSI_IRQ_ERROR_MASK \ + (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \ + DSI_IRQ_TA_TIMEOUT) +#define DSI_IRQ_CHANNEL_MASK 0xf + +/* Virtual channel interrupts */ +#define DSI_VC_IRQ_CS (1 << 0) +#define DSI_VC_IRQ_ECC_CORR (1 << 1) +#define DSI_VC_IRQ_PACKET_SENT (1 << 2) +#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3) +#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4) +#define DSI_VC_IRQ_BTA (1 << 5) +#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6) +#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7) +#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8) +#define DSI_VC_IRQ_ERROR_MASK \ + (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \ + DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \ + DSI_VC_IRQ_FIFO_TX_UDF) + +/* ComplexIO interrupts */ +#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) +#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) +#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) +#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3) +#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4) +#define DSI_CIO_IRQ_ERRESC1 (1 << 5) +#define DSI_CIO_IRQ_ERRESC2 (1 << 6) +#define DSI_CIO_IRQ_ERRESC3 (1 << 7) +#define DSI_CIO_IRQ_ERRESC4 (1 << 8) +#define DSI_CIO_IRQ_ERRESC5 (1 << 9) +#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) +#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) +#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) +#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13) +#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14) +#define DSI_CIO_IRQ_STATEULPS1 (1 << 15) +#define DSI_CIO_IRQ_STATEULPS2 (1 << 16) +#define DSI_CIO_IRQ_STATEULPS3 (1 << 17) +#define DSI_CIO_IRQ_STATEULPS4 (1 << 18) +#define DSI_CIO_IRQ_STATEULPS5 (1 << 19) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29) +#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) +#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) +#define DSI_CIO_IRQ_ERROR_MASK \ + (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \ + DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \ + DSI_CIO_IRQ_ERRSYNCESC5 | \ + DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ + DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \ + DSI_CIO_IRQ_ERRESC5 | \ + DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \ + DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \ + 
DSI_CIO_IRQ_ERRCONTROL5 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5) + +typedef void (*omap_dsi_isr_t) (void *arg, u32 mask); + +static int dsi_display_init_dispc(struct platform_device *dsidev, + struct omap_overlay_manager *mgr); +static void dsi_display_uninit_dispc(struct platform_device *dsidev, + struct omap_overlay_manager *mgr); + +static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel); + +/* DSI PLL HSDIV indices */ +#define HSDIV_DISPC 0 +#define HSDIV_DSI 1 + +#define DSI_MAX_NR_ISRS 2 +#define DSI_MAX_NR_LANES 5 + +enum dsi_lane_function { + DSI_LANE_UNUSED = 0, + DSI_LANE_CLK, + DSI_LANE_DATA1, + DSI_LANE_DATA2, + DSI_LANE_DATA3, + DSI_LANE_DATA4, +}; + +struct dsi_lane_config { + enum dsi_lane_function function; + u8 polarity; +}; + +struct dsi_isr_data { + omap_dsi_isr_t isr; + void *arg; + u32 mask; +}; + +enum fifo_size { + DSI_FIFO_SIZE_0 = 0, + DSI_FIFO_SIZE_32 = 1, + DSI_FIFO_SIZE_64 = 2, + DSI_FIFO_SIZE_96 = 3, + DSI_FIFO_SIZE_128 = 4, +}; + +enum dsi_vc_source { + DSI_VC_SOURCE_L4 = 0, + DSI_VC_SOURCE_VP, +}; + +struct dsi_irq_stats { + unsigned long last_reset; + unsigned irq_count; + unsigned dsi_irqs[32]; + unsigned vc_irqs[4][32]; + unsigned cio_irqs[32]; +}; + +struct dsi_isr_tables { + struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS]; + struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS]; + struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; +}; + +struct dsi_clk_calc_ctx { + struct platform_device *dsidev; + struct dss_pll *pll; + + /* inputs */ + + const struct omap_dss_dsi_config *config; + + unsigned long req_pck_min, req_pck_nom, req_pck_max; + + /* outputs */ + + struct dss_pll_clock_info dsi_cinfo; + struct dispc_clock_info dispc_cinfo; + + struct omap_video_timings dispc_vm; + struct omap_dss_dsi_videomode_timings dsi_vm; +}; + +struct dsi_lp_clock_info { + unsigned long lp_clk; + u16 lp_clk_div; +}; + +struct dsi_data { + struct platform_device *pdev; + void __iomem *proto_base; + void __iomem *phy_base; + void __iomem *pll_base; + + int module_id; + + int irq; + + bool is_enabled; + + struct clk *dss_clk; + + struct dispc_clock_info user_dispc_cinfo; + struct dss_pll_clock_info user_dsi_cinfo; + + struct dsi_lp_clock_info user_lp_cinfo; + struct dsi_lp_clock_info current_lp_cinfo; + + struct dss_pll pll; + + bool vdds_dsi_enabled; + struct regulator *vdds_dsi_reg; + + struct { + enum dsi_vc_source source; + struct omap_dss_device *dssdev; + enum fifo_size tx_fifo_size; + enum fifo_size rx_fifo_size; + int vc_id; + } vc[4]; + + struct mutex lock; + struct semaphore bus_lock; + + spinlock_t irq_lock; + struct dsi_isr_tables isr_tables; + /* space for a copy used by the interrupt handler */ + struct dsi_isr_tables isr_tables_copy; + + int update_channel; +#ifdef DSI_PERF_MEASURE + unsigned update_bytes; +#endif + + bool te_enabled; + bool ulps_enabled; + + void (*framedone_callback)(int, void *); + void *framedone_data; + + struct delayed_work framedone_timeout_work; + +#ifdef DSI_CATCH_MISSING_TE + struct timer_list te_timer; +#endif + + unsigned long cache_req_pck; + unsigned long cache_clk_freq; + struct dss_pll_clock_info cache_cinfo; + + u32 errors; + spinlock_t errors_lock; +#ifdef DSI_PERF_MEASURE + ktime_t perf_setup_time; + 
ktime_t perf_start_time; +#endif + int debug_read; + int debug_write; + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + spinlock_t irq_stats_lock; + struct dsi_irq_stats irq_stats; +#endif + + unsigned num_lanes_supported; + unsigned line_buffer_size; + + struct dsi_lane_config lanes[DSI_MAX_NR_LANES]; + unsigned num_lanes_used; + + unsigned scp_clk_refcount; + + struct dss_lcd_mgr_config mgr_config; + struct omap_video_timings timings; + enum omap_dss_dsi_pixel_format pix_fmt; + enum omap_dss_dsi_mode mode; + struct omap_dss_dsi_videomode_timings vm_timings; + + struct omap_dss_device output; +}; + +struct dsi_packet_sent_handler_data { + struct platform_device *dsidev; + struct completion *completion; +}; + +struct dsi_module_id_data { + u32 address; + int id; +}; + +static const struct of_device_id dsi_of_match[]; + +#ifdef DSI_PERF_MEASURE +static bool dsi_perf; +module_param(dsi_perf, bool, 0644); +#endif + +static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev) +{ + return dev_get_drvdata(&dsidev->dev); +} + +static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev) +{ + return to_platform_device(dssdev->dev); +} + +static struct platform_device *dsi_get_dsidev_from_id(int module) +{ + struct omap_dss_device *out; + enum omap_dss_output_id id; + + switch (module) { + case 0: + id = OMAP_DSS_OUTPUT_DSI1; + break; + case 1: + id = OMAP_DSS_OUTPUT_DSI2; + break; + default: + return NULL; + } + + out = omap_dss_get_output(id); + + return out ? to_platform_device(out->dev) : NULL; +} + +static inline void dsi_write_reg(struct platform_device *dsidev, + const struct dsi_reg idx, u32 val) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + void __iomem *base; + + switch(idx.module) { + case DSI_PROTO: base = dsi->proto_base; break; + case DSI_PHY: base = dsi->phy_base; break; + case DSI_PLL: base = dsi->pll_base; break; + default: return; + } + + __raw_writel(val, base + idx.idx); +} + +static inline u32 dsi_read_reg(struct platform_device *dsidev, + const struct dsi_reg idx) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + void __iomem *base; + + switch(idx.module) { + case DSI_PROTO: base = dsi->proto_base; break; + case DSI_PHY: base = dsi->phy_base; break; + case DSI_PLL: base = dsi->pll_base; break; + default: return 0; + } + + return __raw_readl(base + idx.idx); +} + +static void dsi_bus_lock(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + down(&dsi->bus_lock); +} + +static void dsi_bus_unlock(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + up(&dsi->bus_lock); +} + +static bool dsi_bus_is_locked(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->bus_lock.count == 0; +} + +static void dsi_completion_handler(void *data, u32 mask) +{ + complete((struct completion *)data); +} + +static inline int wait_for_bit_change(struct platform_device *dsidev, + const struct dsi_reg idx, int bitnum, int value) +{ + unsigned long timeout; + ktime_t wait; + int t; + + /* first busyloop to see if the bit changes right away */ + t = 100; + while (t-- > 0) { + if (REG_GET(dsidev, idx, bitnum, bitnum) == value) + return value; + } + + /* then loop for 500ms, sleeping for 1ms in between */ + timeout = jiffies + msecs_to_jiffies(500); + while 
(time_before(jiffies, timeout)) { + if (REG_GET(dsidev, idx, bitnum, bitnum) == value) + return value; + + wait = ns_to_ktime(1000 * 1000); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&wait, HRTIMER_MODE_REL); + } + + return !value; +} + +u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt) +{ + switch (fmt) { + case OMAP_DSS_DSI_FMT_RGB888: + case OMAP_DSS_DSI_FMT_RGB666: + return 24; + case OMAP_DSS_DSI_FMT_RGB666_PACKED: + return 18; + case OMAP_DSS_DSI_FMT_RGB565: + return 16; + default: + BUG(); + return 0; + } +} + +#ifdef DSI_PERF_MEASURE +static void dsi_perf_mark_setup(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + dsi->perf_setup_time = ktime_get(); +} + +static void dsi_perf_mark_start(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + dsi->perf_start_time = ktime_get(); +} + +static void dsi_perf_show(struct platform_device *dsidev, const char *name) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + ktime_t t, setup_time, trans_time; + u32 total_bytes; + u32 setup_us, trans_us, total_us; + + if (!dsi_perf) + return; + + t = ktime_get(); + + setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time); + setup_us = (u32)ktime_to_us(setup_time); + if (setup_us == 0) + setup_us = 1; + + trans_time = ktime_sub(t, dsi->perf_start_time); + trans_us = (u32)ktime_to_us(trans_time); + if (trans_us == 0) + trans_us = 1; + + total_us = setup_us + trans_us; + + total_bytes = dsi->update_bytes; + + printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), " + "%u bytes, %u kbytes/sec\n", + name, + setup_us, + trans_us, + total_us, + 1000*1000 / total_us, + total_bytes, + total_bytes * 1000 / total_us); +} +#else +static inline void dsi_perf_mark_setup(struct platform_device *dsidev) +{ +} + +static inline void dsi_perf_mark_start(struct platform_device *dsidev) +{ +} + +static inline void dsi_perf_show(struct platform_device *dsidev, + const char *name) +{ +} +#endif + +static int verbose_irq; + +static void print_irq_status(u32 status) +{ + if (status == 0) + return; + + if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0) + return; + +#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : "" + + pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + status, + verbose_irq ? PIS(VC0) : "", + verbose_irq ? PIS(VC1) : "", + verbose_irq ? PIS(VC2) : "", + verbose_irq ? PIS(VC3) : "", + PIS(WAKEUP), + PIS(RESYNC), + PIS(PLL_LOCK), + PIS(PLL_UNLOCK), + PIS(PLL_RECALL), + PIS(COMPLEXIO_ERR), + PIS(HS_TX_TIMEOUT), + PIS(LP_RX_TIMEOUT), + PIS(TE_TRIGGER), + PIS(ACK_TRIGGER), + PIS(SYNC_LOST), + PIS(LDO_POWER_GOOD), + PIS(TA_TIMEOUT)); +#undef PIS +} + +static void print_irq_status_vc(int channel, u32 status) +{ + if (status == 0) + return; + + if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0) + return; + +#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : "" + + pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n", + channel, + status, + PIS(CS), + PIS(ECC_CORR), + PIS(ECC_NO_CORR), + verbose_irq ? PIS(PACKET_SENT) : "", + PIS(BTA), + PIS(FIFO_TX_OVF), + PIS(FIFO_RX_OVF), + PIS(FIFO_TX_UDF), + PIS(PP_BUSY_CHANGE)); +#undef PIS +} + +static void print_irq_status_cio(u32 status) +{ + if (status == 0) + return; + +#define PIS(x) (status & DSI_CIO_IRQ_##x) ? 
(#x " ") : "" + + pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + status, + PIS(ERRSYNCESC1), + PIS(ERRSYNCESC2), + PIS(ERRSYNCESC3), + PIS(ERRESC1), + PIS(ERRESC2), + PIS(ERRESC3), + PIS(ERRCONTROL1), + PIS(ERRCONTROL2), + PIS(ERRCONTROL3), + PIS(STATEULPS1), + PIS(STATEULPS2), + PIS(STATEULPS3), + PIS(ERRCONTENTIONLP0_1), + PIS(ERRCONTENTIONLP1_1), + PIS(ERRCONTENTIONLP0_2), + PIS(ERRCONTENTIONLP1_2), + PIS(ERRCONTENTIONLP0_3), + PIS(ERRCONTENTIONLP1_3), + PIS(ULPSACTIVENOT_ALL0), + PIS(ULPSACTIVENOT_ALL1)); +#undef PIS +} + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS +static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus, + u32 *vcstatus, u32 ciostatus) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int i; + + spin_lock(&dsi->irq_stats_lock); + + dsi->irq_stats.irq_count++; + dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs); + + for (i = 0; i < 4; ++i) + dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]); + + dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs); + + spin_unlock(&dsi->irq_stats_lock); +} +#else +#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus) +#endif + +static int debug_irq; + +static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus, + u32 *vcstatus, u32 ciostatus) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int i; + + if (irqstatus & DSI_IRQ_ERROR_MASK) { + DSSERR("DSI error, irqstatus %x\n", irqstatus); + print_irq_status(irqstatus); + spin_lock(&dsi->errors_lock); + dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK; + spin_unlock(&dsi->errors_lock); + } else if (debug_irq) { + print_irq_status(irqstatus); + } + + for (i = 0; i < 4; ++i) { + if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) { + DSSERR("DSI VC(%d) error, vc irqstatus %x\n", + i, vcstatus[i]); + print_irq_status_vc(i, vcstatus[i]); + } else if (debug_irq) { + print_irq_status_vc(i, vcstatus[i]); + } + } + + if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) { + DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus); + print_irq_status_cio(ciostatus); + } else if (debug_irq) { + print_irq_status_cio(ciostatus); + } +} + +static void dsi_call_isrs(struct dsi_isr_data *isr_array, + unsigned isr_array_size, u32 irqstatus) +{ + struct dsi_isr_data *isr_data; + int i; + + for (i = 0; i < isr_array_size; i++) { + isr_data = &isr_array[i]; + if (isr_data->isr && isr_data->mask & irqstatus) + isr_data->isr(isr_data->arg, irqstatus); + } +} + +static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables, + u32 irqstatus, u32 *vcstatus, u32 ciostatus) +{ + int i; + + dsi_call_isrs(isr_tables->isr_table, + ARRAY_SIZE(isr_tables->isr_table), + irqstatus); + + for (i = 0; i < 4; ++i) { + if (vcstatus[i] == 0) + continue; + dsi_call_isrs(isr_tables->isr_table_vc[i], + ARRAY_SIZE(isr_tables->isr_table_vc[i]), + vcstatus[i]); + } + + if (ciostatus != 0) + dsi_call_isrs(isr_tables->isr_table_cio, + ARRAY_SIZE(isr_tables->isr_table_cio), + ciostatus); +} + +static irqreturn_t omap_dsi_irq_handler(int irq, void *arg) +{ + struct platform_device *dsidev; + struct dsi_data *dsi; + u32 irqstatus, vcstatus[4], ciostatus; + int i; + + dsidev = (struct platform_device *) arg; + dsi = dsi_get_dsidrv_data(dsidev); + + if (!dsi->is_enabled) + return IRQ_NONE; + + spin_lock(&dsi->irq_lock); + + irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS); + + /* IRQ is not for us */ + if (!irqstatus) { + spin_unlock(&dsi->irq_lock); + return IRQ_NONE; + } + + dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & 
~DSI_IRQ_CHANNEL_MASK); + /* flush posted write */ + dsi_read_reg(dsidev, DSI_IRQSTATUS); + + for (i = 0; i < 4; ++i) { + if ((irqstatus & (1 << i)) == 0) { + vcstatus[i] = 0; + continue; + } + + vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i)); + + dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]); + /* flush posted write */ + dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i)); + } + + if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { + ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS); + + dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus); + /* flush posted write */ + dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS); + } else { + ciostatus = 0; + } + +#ifdef DSI_CATCH_MISSING_TE + if (irqstatus & DSI_IRQ_TE_TRIGGER) + del_timer(&dsi->te_timer); +#endif + + /* make a copy and unlock, so that isrs can unregister + * themselves */ + memcpy(&dsi->isr_tables_copy, &dsi->isr_tables, + sizeof(dsi->isr_tables)); + + spin_unlock(&dsi->irq_lock); + + dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus); + + dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus); + + dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus); + + return IRQ_HANDLED; +} + +/* dsi->irq_lock has to be locked by the caller */ +static void _omap_dsi_configure_irqs(struct platform_device *dsidev, + struct dsi_isr_data *isr_array, + unsigned isr_array_size, u32 default_mask, + const struct dsi_reg enable_reg, + const struct dsi_reg status_reg) +{ + struct dsi_isr_data *isr_data; + u32 mask; + u32 old_mask; + int i; + + mask = default_mask; + + for (i = 0; i < isr_array_size; i++) { + isr_data = &isr_array[i]; + + if (isr_data->isr == NULL) + continue; + + mask |= isr_data->mask; + } + + old_mask = dsi_read_reg(dsidev, enable_reg); + /* clear the irqstatus for newly enabled irqs */ + dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask); + dsi_write_reg(dsidev, enable_reg, mask); + + /* flush posted writes */ + dsi_read_reg(dsidev, enable_reg); + dsi_read_reg(dsidev, status_reg); +} + +/* dsi->irq_lock has to be locked by the caller */ +static void _omap_dsi_set_irqs(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 mask = DSI_IRQ_ERROR_MASK; +#ifdef DSI_CATCH_MISSING_TE + mask |= DSI_IRQ_TE_TRIGGER; +#endif + _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table, + ARRAY_SIZE(dsi->isr_tables.isr_table), mask, + DSI_IRQENABLE, DSI_IRQSTATUS); +} + +/* dsi->irq_lock has to be locked by the caller */ +static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]), + DSI_VC_IRQ_ERROR_MASK, + DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); +} + +/* dsi->irq_lock has to be locked by the caller */ +static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio, + ARRAY_SIZE(dsi->isr_tables.isr_table_cio), + DSI_CIO_IRQ_ERROR_MASK, + DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); +} + +static void _dsi_initialize_irq(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + int vc; + + spin_lock_irqsave(&dsi->irq_lock, flags); + + memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables)); + + _omap_dsi_set_irqs(dsidev); + for (vc = 0; vc < 4; ++vc) + 
_omap_dsi_set_irqs_vc(dsidev, vc); + _omap_dsi_set_irqs_cio(dsidev); + + spin_unlock_irqrestore(&dsi->irq_lock, flags); +} + +static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask, + struct dsi_isr_data *isr_array, unsigned isr_array_size) +{ + struct dsi_isr_data *isr_data; + int free_idx; + int i; + + BUG_ON(isr == NULL); + + /* check for duplicate entry and find a free slot */ + free_idx = -1; + for (i = 0; i < isr_array_size; i++) { + isr_data = &isr_array[i]; + + if (isr_data->isr == isr && isr_data->arg == arg && + isr_data->mask == mask) { + return -EINVAL; + } + + if (isr_data->isr == NULL && free_idx == -1) + free_idx = i; + } + + if (free_idx == -1) + return -EBUSY; + + isr_data = &isr_array[free_idx]; + isr_data->isr = isr; + isr_data->arg = arg; + isr_data->mask = mask; + + return 0; +} + +static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask, + struct dsi_isr_data *isr_array, unsigned isr_array_size) +{ + struct dsi_isr_data *isr_data; + int i; + + for (i = 0; i < isr_array_size; i++) { + isr_data = &isr_array[i]; + if (isr_data->isr != isr || isr_data->arg != arg || + isr_data->mask != mask) + continue; + + isr_data->isr = NULL; + isr_data->arg = NULL; + isr_data->mask = 0; + + return 0; + } + + return -EINVAL; +} + +static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr, + void *arg, u32 mask) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + int r; + + spin_lock_irqsave(&dsi->irq_lock, flags); + + r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table, + ARRAY_SIZE(dsi->isr_tables.isr_table)); + + if (r == 0) + _omap_dsi_set_irqs(dsidev); + + spin_unlock_irqrestore(&dsi->irq_lock, flags); + + return r; +} + +static int dsi_unregister_isr(struct platform_device *dsidev, + omap_dsi_isr_t isr, void *arg, u32 mask) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + int r; + + spin_lock_irqsave(&dsi->irq_lock, flags); + + r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table, + ARRAY_SIZE(dsi->isr_tables.isr_table)); + + if (r == 0) + _omap_dsi_set_irqs(dsidev); + + spin_unlock_irqrestore(&dsi->irq_lock, flags); + + return r; +} + +static int dsi_register_isr_vc(struct platform_device *dsidev, int channel, + omap_dsi_isr_t isr, void *arg, u32 mask) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + int r; + + spin_lock_irqsave(&dsi->irq_lock, flags); + + r = _dsi_register_isr(isr, arg, mask, + dsi->isr_tables.isr_table_vc[channel], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); + + if (r == 0) + _omap_dsi_set_irqs_vc(dsidev, channel); + + spin_unlock_irqrestore(&dsi->irq_lock, flags); + + return r; +} + +static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel, + omap_dsi_isr_t isr, void *arg, u32 mask) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + int r; + + spin_lock_irqsave(&dsi->irq_lock, flags); + + r = _dsi_unregister_isr(isr, arg, mask, + dsi->isr_tables.isr_table_vc[channel], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); + + if (r == 0) + _omap_dsi_set_irqs_vc(dsidev, channel); + + spin_unlock_irqrestore(&dsi->irq_lock, flags); + + return r; +} + +static int dsi_register_isr_cio(struct platform_device *dsidev, + omap_dsi_isr_t isr, void *arg, u32 mask) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + int r; + + spin_lock_irqsave(&dsi->irq_lock, flags); + + r = _dsi_register_isr(isr, arg, mask, 
dsi->isr_tables.isr_table_cio, + ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); + + if (r == 0) + _omap_dsi_set_irqs_cio(dsidev); + + spin_unlock_irqrestore(&dsi->irq_lock, flags); + + return r; +} + +static int dsi_unregister_isr_cio(struct platform_device *dsidev, + omap_dsi_isr_t isr, void *arg, u32 mask) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + int r; + + spin_lock_irqsave(&dsi->irq_lock, flags); + + r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio, + ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); + + if (r == 0) + _omap_dsi_set_irqs_cio(dsidev); + + spin_unlock_irqrestore(&dsi->irq_lock, flags); + + return r; +} + +static u32 dsi_get_errors(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + u32 e; + spin_lock_irqsave(&dsi->errors_lock, flags); + e = dsi->errors; + dsi->errors = 0; + spin_unlock_irqrestore(&dsi->errors_lock, flags); + return e; +} + +static int dsi_runtime_get(struct platform_device *dsidev) +{ + int r; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + DSSDBG("dsi_runtime_get\n"); + + r = pm_runtime_get_sync(&dsi->pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +static void dsi_runtime_put(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r; + + DSSDBG("dsi_runtime_put\n"); + + r = pm_runtime_put_sync(&dsi->pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} + +static int dsi_regulator_init(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct regulator *vdds_dsi; + int r; + + if (dsi->vdds_dsi_reg != NULL) + return 0; + + vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd"); + + if (IS_ERR(vdds_dsi)) { + if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) + DSSERR("can't get DSI VDD regulator\n"); + return PTR_ERR(vdds_dsi); + } + + if (regulator_can_change_voltage(vdds_dsi)) { + r = regulator_set_voltage(vdds_dsi, 1800000, 1800000); + if (r) { + devm_regulator_put(vdds_dsi); + DSSERR("can't set the DSI regulator voltage\n"); + return r; + } + } + + dsi->vdds_dsi_reg = vdds_dsi; + + return 0; +} + +static void _dsi_print_reset_status(struct platform_device *dsidev) +{ + u32 l; + int b0, b1, b2; + + /* A dummy read using the SCP interface to any DSIPHY register is + * required after DSIPHY reset to complete the reset of the DSI complex + * I/O. */ + l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); + + if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) { + b0 = 28; + b1 = 27; + b2 = 26; + } else { + b0 = 24; + b1 = 25; + b2 = 26; + } + +#define DSI_FLD_GET(fld, start, end)\ + FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end) + + pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n", + DSI_FLD_GET(PLL_STATUS, 0, 0), + DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29), + DSI_FLD_GET(DSIPHY_CFG5, b0, b0), + DSI_FLD_GET(DSIPHY_CFG5, b1, b1), + DSI_FLD_GET(DSIPHY_CFG5, b2, b2), + DSI_FLD_GET(DSIPHY_CFG5, 29, 29), + DSI_FLD_GET(DSIPHY_CFG5, 30, 30), + DSI_FLD_GET(DSIPHY_CFG5, 31, 31)); + +#undef DSI_FLD_GET +} + +static inline int dsi_if_enable(struct platform_device *dsidev, bool enable) +{ + DSSDBG("dsi_if_enable(%d)\n", enable); + + enable = enable ? 
1 : 0; + REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */ + + if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) { + DSSERR("Failed to set dsi_if_enable to %d\n", enable); + return -EIO; + } + + return 0; +} + +static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->pll.cinfo.clkout[HSDIV_DISPC]; +} + +static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->pll.cinfo.clkout[HSDIV_DSI]; +} + +static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->pll.cinfo.clkdco / 16; +} + +static unsigned long dsi_fclk_rate(struct platform_device *dsidev) +{ + unsigned long r; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { + /* DSI FCLK source is DSS_CLK_FCK */ + r = clk_get_rate(dsi->dss_clk); + } else { + /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ + r = dsi_get_pll_hsdiv_dsi_rate(dsidev); + } + + return r; +} + +static int dsi_lp_clock_calc(unsigned long dsi_fclk, + unsigned long lp_clk_min, unsigned long lp_clk_max, + struct dsi_lp_clock_info *lp_cinfo) +{ + unsigned lp_clk_div; + unsigned long lp_clk; + + lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2); + lp_clk = dsi_fclk / 2 / lp_clk_div; + + if (lp_clk < lp_clk_min || lp_clk > lp_clk_max) + return -EINVAL; + + lp_cinfo->lp_clk_div = lp_clk_div; + lp_cinfo->lp_clk = lp_clk; + + return 0; +} + +static int dsi_set_lp_clk_divisor(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long dsi_fclk; + unsigned lp_clk_div; + unsigned long lp_clk; + unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); + + + lp_clk_div = dsi->user_lp_cinfo.lp_clk_div; + + if (lp_clk_div == 0 || lp_clk_div > lpdiv_max) + return -EINVAL; + + dsi_fclk = dsi_fclk_rate(dsidev); + + lp_clk = dsi_fclk / 2 / lp_clk_div; + + DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk); + dsi->current_lp_cinfo.lp_clk = lp_clk; + dsi->current_lp_cinfo.lp_clk_div = lp_clk_div; + + /* LP_CLK_DIVISOR */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0); + + /* LP_RX_SYNCHRO_ENABLE */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 
1 : 0, 21, 21); + + return 0; +} + +static void dsi_enable_scp_clk(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->scp_clk_refcount++ == 0) + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */ +} + +static void dsi_disable_scp_clk(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + WARN_ON(dsi->scp_clk_refcount == 0); + if (--dsi->scp_clk_refcount == 0) + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */ +} + +enum dsi_pll_power_state { + DSI_PLL_POWER_OFF = 0x0, + DSI_PLL_POWER_ON_HSCLK = 0x1, + DSI_PLL_POWER_ON_ALL = 0x2, + DSI_PLL_POWER_ON_DIV = 0x3, +}; + +static int dsi_pll_power(struct platform_device *dsidev, + enum dsi_pll_power_state state) +{ + int t = 0; + + /* DSI-PLL power command 0x3 is not working */ + if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) && + state == DSI_PLL_POWER_ON_DIV) + state = DSI_PLL_POWER_ON_ALL; + + /* PLL_PWR_CMD */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30); + + /* PLL_PWR_STATUS */ + while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) { + if (++t > 1000) { + DSSERR("Failed to set DSI PLL power mode to %d\n", + state); + return -ENODEV; + } + udelay(1); + } + + return 0; +} + + +static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo) +{ + unsigned long max_dsi_fck; + + max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK); + + cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck); + cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI]; +} + +static int dsi_pll_enable(struct dss_pll *pll) +{ + struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); + struct platform_device *dsidev = dsi->pdev; + int r = 0; + + DSSDBG("PLL init\n"); + + r = dsi_regulator_init(dsidev); + if (r) + return r; + + r = dsi_runtime_get(dsidev); + if (r) + return r; + + /* + * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. + */ + dsi_enable_scp_clk(dsidev); + + if (!dsi->vdds_dsi_enabled) { + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err0; + dsi->vdds_dsi_enabled = true; + } + + /* XXX PLL does not come out of reset without this... */ + dispc_pck_free_enable(1); + + if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) { + DSSERR("PLL not coming out of reset.\n"); + r = -ENODEV; + dispc_pck_free_enable(0); + goto err1; + } + + /* XXX ... but if left on, we get problems when planes do not + * fill the whole display. 
No idea about this */ + dispc_pck_free_enable(0); + + r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL); + + if (r) + goto err1; + + DSSDBG("PLL init done\n"); + + return 0; +err1: + if (dsi->vdds_dsi_enabled) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } +err0: + dsi_disable_scp_clk(dsidev); + dsi_runtime_put(dsidev); + return r; +} + +static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi_pll_power(dsidev, DSI_PLL_POWER_OFF); + if (disconnect_lanes) { + WARN_ON(!dsi->vdds_dsi_enabled); + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } + + dsi_disable_scp_clk(dsidev); + dsi_runtime_put(dsidev); + + DSSDBG("PLL uninit done\n"); +} + +static void dsi_pll_disable(struct dss_pll *pll) +{ + struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); + struct platform_device *dsidev = dsi->pdev; + + dsi_pll_uninit(dsidev, true); +} + +static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, + struct seq_file *s) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; + enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; + int dsi_module = dsi->module_id; + struct dss_pll *pll = &dsi->pll; + + dispc_clk_src = dss_get_dispc_clk_source(); + dsi_clk_src = dss_get_dsi_clk_source(dsi_module); + + if (dsi_runtime_get(dsidev)) + return; + + seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); + + seq_printf(s, "dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin)); + + seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n); + + seq_printf(s, "CLKIN4DDR\t%-16lum %u\n", + cinfo->clkdco, cinfo->m); + + seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n", + dss_feat_get_clk_source_name(dsi_module == 0 ? + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), + cinfo->clkout[HSDIV_DISPC], + cinfo->mX[HSDIV_DISPC], + dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? + "off" : "on"); + + seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n", + dss_feat_get_clk_source_name(dsi_module == 0 ? + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), + cinfo->clkout[HSDIV_DSI], + cinfo->mX[HSDIV_DSI], + dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? 
+ "off" : "on"); + + seq_printf(s, "- DSI%d -\n", dsi_module + 1); + + seq_printf(s, "dsi fclk source = %s (%s)\n", + dss_get_generic_clk_source_name(dsi_clk_src), + dss_feat_get_clk_source_name(dsi_clk_src)); + + seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); + + seq_printf(s, "DDR_CLK\t\t%lu\n", + cinfo->clkdco / 4); + + seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev)); + + seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk); + + dsi_runtime_put(dsidev); +} + +void dsi_dump_clocks(struct seq_file *s) +{ + struct platform_device *dsidev; + int i; + + for (i = 0; i < MAX_NUM_DSI; i++) { + dsidev = dsi_get_dsidev_from_id(i); + if (dsidev) + dsi_dump_dsidev_clocks(dsidev, s); + } +} + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS +static void dsi_dump_dsidev_irqs(struct platform_device *dsidev, + struct seq_file *s) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned long flags; + struct dsi_irq_stats stats; + + spin_lock_irqsave(&dsi->irq_stats_lock, flags); + + stats = dsi->irq_stats; + memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats)); + dsi->irq_stats.last_reset = jiffies; + + spin_unlock_irqrestore(&dsi->irq_stats_lock, flags); + + seq_printf(s, "period %u ms\n", + jiffies_to_msecs(jiffies - stats.last_reset)); + + seq_printf(s, "irqs %d\n", stats.irq_count); +#define PIS(x) \ + seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); + + seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1); + PIS(VC0); + PIS(VC1); + PIS(VC2); + PIS(VC3); + PIS(WAKEUP); + PIS(RESYNC); + PIS(PLL_LOCK); + PIS(PLL_UNLOCK); + PIS(PLL_RECALL); + PIS(COMPLEXIO_ERR); + PIS(HS_TX_TIMEOUT); + PIS(LP_RX_TIMEOUT); + PIS(TE_TRIGGER); + PIS(ACK_TRIGGER); + PIS(SYNC_LOST); + PIS(LDO_POWER_GOOD); + PIS(TA_TIMEOUT); +#undef PIS + +#define PIS(x) \ + seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \ + stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \ + stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \ + stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \ + stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]); + + seq_printf(s, "-- VC interrupts --\n"); + PIS(CS); + PIS(ECC_CORR); + PIS(PACKET_SENT); + PIS(FIFO_TX_OVF); + PIS(FIFO_RX_OVF); + PIS(BTA); + PIS(ECC_NO_CORR); + PIS(FIFO_TX_UDF); + PIS(PP_BUSY_CHANGE); +#undef PIS + +#define PIS(x) \ + seq_printf(s, "%-20s %10d\n", #x, \ + stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]); + + seq_printf(s, "-- CIO interrupts --\n"); + PIS(ERRSYNCESC1); + PIS(ERRSYNCESC2); + PIS(ERRSYNCESC3); + PIS(ERRESC1); + PIS(ERRESC2); + PIS(ERRESC3); + PIS(ERRCONTROL1); + PIS(ERRCONTROL2); + PIS(ERRCONTROL3); + PIS(STATEULPS1); + PIS(STATEULPS2); + PIS(STATEULPS3); + PIS(ERRCONTENTIONLP0_1); + PIS(ERRCONTENTIONLP1_1); + PIS(ERRCONTENTIONLP0_2); + PIS(ERRCONTENTIONLP1_2); + PIS(ERRCONTENTIONLP0_3); + PIS(ERRCONTENTIONLP1_3); + PIS(ULPSACTIVENOT_ALL0); + PIS(ULPSACTIVENOT_ALL1); +#undef PIS +} + +static void dsi1_dump_irqs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(0); + + dsi_dump_dsidev_irqs(dsidev, s); +} + +static void dsi2_dump_irqs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(1); + + dsi_dump_dsidev_irqs(dsidev, s); +} +#endif + +static void dsi_dump_dsidev_regs(struct platform_device *dsidev, + struct seq_file *s) +{ +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r)) + + if (dsi_runtime_get(dsidev)) + return; + dsi_enable_scp_clk(dsidev); + + DUMPREG(DSI_REVISION); + DUMPREG(DSI_SYSCONFIG); + DUMPREG(DSI_SYSSTATUS); + DUMPREG(DSI_IRQSTATUS); + 
DUMPREG(DSI_IRQENABLE); + DUMPREG(DSI_CTRL); + DUMPREG(DSI_COMPLEXIO_CFG1); + DUMPREG(DSI_COMPLEXIO_IRQ_STATUS); + DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE); + DUMPREG(DSI_CLK_CTRL); + DUMPREG(DSI_TIMING1); + DUMPREG(DSI_TIMING2); + DUMPREG(DSI_VM_TIMING1); + DUMPREG(DSI_VM_TIMING2); + DUMPREG(DSI_VM_TIMING3); + DUMPREG(DSI_CLK_TIMING); + DUMPREG(DSI_TX_FIFO_VC_SIZE); + DUMPREG(DSI_RX_FIFO_VC_SIZE); + DUMPREG(DSI_COMPLEXIO_CFG2); + DUMPREG(DSI_RX_FIFO_VC_FULLNESS); + DUMPREG(DSI_VM_TIMING4); + DUMPREG(DSI_TX_FIFO_VC_EMPTINESS); + DUMPREG(DSI_VM_TIMING5); + DUMPREG(DSI_VM_TIMING6); + DUMPREG(DSI_VM_TIMING7); + DUMPREG(DSI_STOPCLK_TIMING); + + DUMPREG(DSI_VC_CTRL(0)); + DUMPREG(DSI_VC_TE(0)); + DUMPREG(DSI_VC_LONG_PACKET_HEADER(0)); + DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0)); + DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0)); + DUMPREG(DSI_VC_IRQSTATUS(0)); + DUMPREG(DSI_VC_IRQENABLE(0)); + + DUMPREG(DSI_VC_CTRL(1)); + DUMPREG(DSI_VC_TE(1)); + DUMPREG(DSI_VC_LONG_PACKET_HEADER(1)); + DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1)); + DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1)); + DUMPREG(DSI_VC_IRQSTATUS(1)); + DUMPREG(DSI_VC_IRQENABLE(1)); + + DUMPREG(DSI_VC_CTRL(2)); + DUMPREG(DSI_VC_TE(2)); + DUMPREG(DSI_VC_LONG_PACKET_HEADER(2)); + DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2)); + DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2)); + DUMPREG(DSI_VC_IRQSTATUS(2)); + DUMPREG(DSI_VC_IRQENABLE(2)); + + DUMPREG(DSI_VC_CTRL(3)); + DUMPREG(DSI_VC_TE(3)); + DUMPREG(DSI_VC_LONG_PACKET_HEADER(3)); + DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3)); + DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3)); + DUMPREG(DSI_VC_IRQSTATUS(3)); + DUMPREG(DSI_VC_IRQENABLE(3)); + + DUMPREG(DSI_DSIPHY_CFG0); + DUMPREG(DSI_DSIPHY_CFG1); + DUMPREG(DSI_DSIPHY_CFG2); + DUMPREG(DSI_DSIPHY_CFG5); + + DUMPREG(DSI_PLL_CONTROL); + DUMPREG(DSI_PLL_STATUS); + DUMPREG(DSI_PLL_GO); + DUMPREG(DSI_PLL_CONFIGURATION1); + DUMPREG(DSI_PLL_CONFIGURATION2); + + dsi_disable_scp_clk(dsidev); + dsi_runtime_put(dsidev); +#undef DUMPREG +} + +static void dsi1_dump_regs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(0); + + dsi_dump_dsidev_regs(dsidev, s); +} + +static void dsi2_dump_regs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(1); + + dsi_dump_dsidev_regs(dsidev, s); +} + +enum dsi_cio_power_state { + DSI_COMPLEXIO_POWER_OFF = 0x0, + DSI_COMPLEXIO_POWER_ON = 0x1, + DSI_COMPLEXIO_POWER_ULPS = 0x2, +}; + +static int dsi_cio_power(struct platform_device *dsidev, + enum dsi_cio_power_state state) +{ + int t = 0; + + /* PWR_CMD */ + REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27); + + /* PWR_STATUS */ + while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1), + 26, 25) != state) { + if (++t > 1000) { + DSSERR("failed to set complexio power state to " + "%d\n", state); + return -ENODEV; + } + udelay(1); + } + + return 0; +} + +static unsigned dsi_get_line_buf_size(struct platform_device *dsidev) +{ + int val; + + /* line buffer on OMAP3 is 1024 x 24bits */ + /* XXX: for some reason using full buffer size causes + * considerable TX slowdown with update sizes that fill the + * whole buffer */ + if (!dss_has_feature(FEAT_DSI_GNQ)) + return 1023 * 3; + + val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */ + + switch (val) { + case 1: + return 512 * 3; /* 512x24 bits */ + case 2: + return 682 * 3; /* 682x24 bits */ + case 3: + return 853 * 3; /* 853x24 bits */ + case 4: + return 1024 * 3; /* 1024x24 bits */ + case 5: + return 1194 * 3; /* 1194x24 bits */ + case 6: + return 1365 * 3; /* 1365x24 bits */ + case 7: + return 1920 
* 3; /* 1920x24 bits */ + default: + BUG(); + return 0; + } +} + +static int dsi_set_lane_config(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + static const u8 offsets[] = { 0, 4, 8, 12, 16 }; + static const enum dsi_lane_function functions[] = { + DSI_LANE_CLK, + DSI_LANE_DATA1, + DSI_LANE_DATA2, + DSI_LANE_DATA3, + DSI_LANE_DATA4, + }; + u32 r; + int i; + + r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1); + + for (i = 0; i < dsi->num_lanes_used; ++i) { + unsigned offset = offsets[i]; + unsigned polarity, lane_number; + unsigned t; + + for (t = 0; t < dsi->num_lanes_supported; ++t) + if (dsi->lanes[t].function == functions[i]) + break; + + if (t == dsi->num_lanes_supported) + return -EINVAL; + + lane_number = t; + polarity = dsi->lanes[t].polarity; + + r = FLD_MOD(r, lane_number + 1, offset + 2, offset); + r = FLD_MOD(r, polarity, offset + 3, offset + 3); + } + + /* clear the unused lanes */ + for (; i < dsi->num_lanes_supported; ++i) { + unsigned offset = offsets[i]; + + r = FLD_MOD(r, 0, offset + 2, offset); + r = FLD_MOD(r, 0, offset + 3, offset + 3); + } + + dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r); + + return 0; +} + +static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + /* convert time in ns to ddr ticks, rounding up */ + unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4; + return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; +} + +static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4; + return ddr * 1000 * 1000 / (ddr_clk / 1000); +} + +static void dsi_cio_timings(struct platform_device *dsidev) +{ + u32 r; + u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit; + u32 tlpx_half, tclk_trail, tclk_zero; + u32 tclk_prepare; + + /* calculate timings */ + + /* 1 * DDR_CLK = 2 * UI */ + + /* min 40ns + 4*UI max 85ns + 6*UI */ + ths_prepare = ns2ddr(dsidev, 70) + 2; + + /* min 145ns + 10*UI */ + ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2; + + /* min max(8*UI, 60ns+4*UI) */ + ths_trail = ns2ddr(dsidev, 60) + 5; + + /* min 100ns */ + ths_exit = ns2ddr(dsidev, 145); + + /* tlpx min 50n */ + tlpx_half = ns2ddr(dsidev, 25); + + /* min 60ns */ + tclk_trail = ns2ddr(dsidev, 60) + 2; + + /* min 38ns, max 95ns */ + tclk_prepare = ns2ddr(dsidev, 65); + + /* min tclk-prepare + tclk-zero = 300ns */ + tclk_zero = ns2ddr(dsidev, 260); + + DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n", + ths_prepare, ddr2ns(dsidev, ths_prepare), + ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero)); + DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n", + ths_trail, ddr2ns(dsidev, ths_trail), + ths_exit, ddr2ns(dsidev, ths_exit)); + + DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), " + "tclk_zero %u (%uns)\n", + tlpx_half, ddr2ns(dsidev, tlpx_half), + tclk_trail, ddr2ns(dsidev, tclk_trail), + tclk_zero, ddr2ns(dsidev, tclk_zero)); + DSSDBG("tclk_prepare %u (%uns)\n", + tclk_prepare, ddr2ns(dsidev, tclk_prepare)); + + /* program timings */ + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); + r = FLD_MOD(r, ths_prepare, 31, 24); + r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16); + r = FLD_MOD(r, ths_trail, 15, 8); + r = FLD_MOD(r, ths_exit, 7, 0); + dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); + r = FLD_MOD(r, tlpx_half, 20, 16); + r = FLD_MOD(r, tclk_trail, 15, 8); + r = FLD_MOD(r, 
tclk_zero, 7, 0); + + if (dss_has_feature(FEAT_DSI_PHY_DCC)) { + r = FLD_MOD(r, 0, 21, 21); /* DCCEN = disable */ + r = FLD_MOD(r, 1, 22, 22); /* CLKINP_DIVBY2EN = enable */ + r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */ + } + + dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2); + r = FLD_MOD(r, tclk_prepare, 7, 0); + dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r); +} + +/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */ +static void dsi_cio_enable_lane_override(struct platform_device *dsidev, + unsigned mask_p, unsigned mask_n) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int i; + u32 l; + u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26; + + l = 0; + + for (i = 0; i < dsi->num_lanes_supported; ++i) { + unsigned p = dsi->lanes[i].polarity; + + if (mask_p & (1 << i)) + l |= 1 << (i * 2 + (p ? 0 : 1)); + + if (mask_n & (1 << i)) + l |= 1 << (i * 2 + (p ? 1 : 0)); + } + + /* + * Bits in REGLPTXSCPDAT4TO0DXDY: + * 17: DY0 18: DX0 + * 19: DY1 20: DX1 + * 21: DY2 22: DX2 + * 23: DY3 24: DX3 + * 25: DY4 26: DX4 + */ + + /* Set the lane override configuration */ + + /* REGLPTXSCPDAT4TO0DXDY */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17); + + /* Enable lane override */ + + /* ENLPTXSCPDAT */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27); +} + +static void dsi_cio_disable_lane_override(struct platform_device *dsidev) +{ + /* Disable lane override */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */ + /* Reset the lane override configuration */ + /* REGLPTXSCPDAT4TO0DXDY */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17); +} + +static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int t, i; + bool in_use[DSI_MAX_NR_LANES]; + static const u8 offsets_old[] = { 28, 27, 26 }; + static const u8 offsets_new[] = { 24, 25, 26, 27, 28 }; + const u8 *offsets; + + if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) + offsets = offsets_old; + else + offsets = offsets_new; + + for (i = 0; i < dsi->num_lanes_supported; ++i) + in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED; + + t = 100000; + while (true) { + u32 l; + int ok; + + l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); + + ok = 0; + for (i = 0; i < dsi->num_lanes_supported; ++i) { + if (!in_use[i] || (l & (1 << offsets[i]))) + ok++; + } + + if (ok == dsi->num_lanes_supported) + break; + + if (--t == 0) { + for (i = 0; i < dsi->num_lanes_supported; ++i) { + if (!in_use[i] || (l & (1 << offsets[i]))) + continue; + + DSSERR("CIO TXCLKESC%d domain not coming " \ + "out of reset\n", i); + } + return -EIO; + } + } + + return 0; +} + +/* return bitmask of enabled lanes, lane0 being the lsb */ +static unsigned dsi_get_lane_mask(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned mask = 0; + int i; + + for (i = 0; i < dsi->num_lanes_supported; ++i) { + if (dsi->lanes[i].function != DSI_LANE_UNUSED) + mask |= 1 << i; + } + + return mask; +} + +static int dsi_cio_init(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r; + u32 l; + + DSSDBG("DSI CIO init starts"); + + r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev)); + if (r) + return r; + + dsi_enable_scp_clk(dsidev); + + /* A dummy read using the SCP interface to any DSIPHY register is + * required after DSIPHY reset to complete the reset of the DSI complex + * I/O. 
*/ + dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); + + if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) { + DSSERR("CIO SCP Clock domain not coming out of reset.\n"); + r = -EIO; + goto err_scp_clk_dom; + } + + r = dsi_set_lane_config(dsidev); + if (r) + goto err_scp_clk_dom; + + /* set TX STOP MODE timer to maximum for this operation */ + l = dsi_read_reg(dsidev, DSI_TIMING1); + l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ + l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */ + l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */ + l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ + dsi_write_reg(dsidev, DSI_TIMING1, l); + + if (dsi->ulps_enabled) { + unsigned mask_p; + int i; + + DSSDBG("manual ulps exit\n"); + + /* ULPS is exited by Mark-1 state for 1ms, followed by + * stop state. DSS HW cannot do this via the normal + * ULPS exit sequence, as after reset the DSS HW thinks + * that we are not in ULPS mode, and refuses to send the + * sequence. So we need to send the ULPS exit sequence + * manually by setting positive lines high and negative lines + * low for 1ms. + */ + + mask_p = 0; + + for (i = 0; i < dsi->num_lanes_supported; ++i) { + if (dsi->lanes[i].function == DSI_LANE_UNUSED) + continue; + mask_p |= 1 << i; + } + + dsi_cio_enable_lane_override(dsidev, mask_p, 0); + } + + r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON); + if (r) + goto err_cio_pwr; + + if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) { + DSSERR("CIO PWR clock domain not coming out of reset.\n"); + r = -ENODEV; + goto err_cio_pwr_dom; + } + + dsi_if_enable(dsidev, true); + dsi_if_enable(dsidev, false); + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ + + r = dsi_cio_wait_tx_clk_esc_reset(dsidev); + if (r) + goto err_tx_clk_esc_rst; + + if (dsi->ulps_enabled) { + /* Keep Mark-1 state for 1ms (as per DSI spec) */ + ktime_t wait = ns_to_ktime(1000 * 1000); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&wait, HRTIMER_MODE_REL); + + /* Disable the override. 
The lanes should be set to Mark-11 + * state by the HW */ + dsi_cio_disable_lane_override(dsidev); + } + + /* FORCE_TX_STOP_MODE_IO */ + REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15); + + dsi_cio_timings(dsidev); + + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + /* DDR_CLK_ALWAYS_ON */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, + dsi->vm_timings.ddr_clk_always_on, 13, 13); + } + + dsi->ulps_enabled = false; + + DSSDBG("CIO init done\n"); + + return 0; + +err_tx_clk_esc_rst: + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */ +err_cio_pwr_dom: + dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); +err_cio_pwr: + if (dsi->ulps_enabled) + dsi_cio_disable_lane_override(dsidev); +err_scp_clk_dom: + dsi_disable_scp_clk(dsidev); + dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev)); + return r; +} + +static void dsi_cio_uninit(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + /* DDR_CLK_ALWAYS_ON */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13); + + dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); + dsi_disable_scp_clk(dsidev); + dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev)); +} + +static void dsi_config_tx_fifo(struct platform_device *dsidev, + enum fifo_size size1, enum fifo_size size2, + enum fifo_size size3, enum fifo_size size4) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 r = 0; + int add = 0; + int i; + + dsi->vc[0].tx_fifo_size = size1; + dsi->vc[1].tx_fifo_size = size2; + dsi->vc[2].tx_fifo_size = size3; + dsi->vc[3].tx_fifo_size = size4; + + for (i = 0; i < 4; i++) { + u8 v; + int size = dsi->vc[i].tx_fifo_size; + + if (add + size > 4) { + DSSERR("Illegal FIFO configuration\n"); + BUG(); + return; + } + + v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); + r |= v << (8 * i); + /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */ + add += size; + } + + dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r); +} + +static void dsi_config_rx_fifo(struct platform_device *dsidev, + enum fifo_size size1, enum fifo_size size2, + enum fifo_size size3, enum fifo_size size4) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 r = 0; + int add = 0; + int i; + + dsi->vc[0].rx_fifo_size = size1; + dsi->vc[1].rx_fifo_size = size2; + dsi->vc[2].rx_fifo_size = size3; + dsi->vc[3].rx_fifo_size = size4; + + for (i = 0; i < 4; i++) { + u8 v; + int size = dsi->vc[i].rx_fifo_size; + + if (add + size > 4) { + DSSERR("Illegal FIFO configuration\n"); + BUG(); + return; + } + + v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); + r |= v << (8 * i); + /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */ + add += size; + } + + dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r); +} + +static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev) +{ + u32 r; + + r = dsi_read_reg(dsidev, DSI_TIMING1); + r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ + dsi_write_reg(dsidev, DSI_TIMING1, r); + + if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) { + DSSERR("TX_STOP bit not going down\n"); + return -EIO; + } + + return 0; +} + +static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel) +{ + return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0); +} + +static void dsi_packet_sent_handler_vp(void *data, u32 mask) +{ + struct dsi_packet_sent_handler_data *vp_data = + (struct dsi_packet_sent_handler_data *) data; + struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev); + const int channel = dsi->update_channel; + u8 bit = dsi->te_enabled ? 
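/* descriptive note: bit 30 of DSI_VC_TE is used when tearing elimination is enabled, bit 31 otherwise (presumably TE_EN and TE_START); the handler below completes once the selected bit has cleared, i.e. the frame transfer has finished */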
30 : 31; + + if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0) + complete(vp_data->completion); +} + +static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + DECLARE_COMPLETION_ONSTACK(completion); + struct dsi_packet_sent_handler_data vp_data = { + .dsidev = dsidev, + .completion = &completion + }; + int r = 0; + u8 bit; + + bit = dsi->te_enabled ? 30 : 31; + + r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, + &vp_data, DSI_VC_IRQ_PACKET_SENT); + if (r) + goto err0; + + /* Wait for completion only if TE_EN/TE_START is still set */ + if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) { + if (wait_for_completion_timeout(&completion, + msecs_to_jiffies(10)) == 0) { + DSSERR("Failed to complete previous frame transfer\n"); + r = -EIO; + goto err1; + } + } + + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, + &vp_data, DSI_VC_IRQ_PACKET_SENT); + + return 0; +err1: + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, + &vp_data, DSI_VC_IRQ_PACKET_SENT); +err0: + return r; +} + +static void dsi_packet_sent_handler_l4(void *data, u32 mask) +{ + struct dsi_packet_sent_handler_data *l4_data = + (struct dsi_packet_sent_handler_data *) data; + struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev); + const int channel = dsi->update_channel; + + if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0) + complete(l4_data->completion); +} + +static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel) +{ + DECLARE_COMPLETION_ONSTACK(completion); + struct dsi_packet_sent_handler_data l4_data = { + .dsidev = dsidev, + .completion = &completion + }; + int r = 0; + + r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, + &l4_data, DSI_VC_IRQ_PACKET_SENT); + if (r) + goto err0; + + /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */ + if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) { + if (wait_for_completion_timeout(&completion, + msecs_to_jiffies(10)) == 0) { + DSSERR("Failed to complete previous l4 transfer\n"); + r = -EIO; + goto err1; + } + } + + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, + &l4_data, DSI_VC_IRQ_PACKET_SENT); + + return 0; +err1: + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, + &l4_data, DSI_VC_IRQ_PACKET_SENT); +err0: + return r; +} + +static int dsi_sync_vc(struct platform_device *dsidev, int channel) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + WARN_ON(in_interrupt()); + + if (!dsi_vc_is_enabled(dsidev, channel)) + return 0; + + switch (dsi->vc[channel].source) { + case DSI_VC_SOURCE_VP: + return dsi_sync_vc_vp(dsidev, channel); + case DSI_VC_SOURCE_L4: + return dsi_sync_vc_l4(dsidev, channel); + default: + BUG(); + return -EINVAL; + } +} + +static int dsi_vc_enable(struct platform_device *dsidev, int channel, + bool enable) +{ + DSSDBG("dsi_vc_enable channel %d, enable %d\n", + channel, enable); + + enable = enable ? 
1 : 0; + + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0); + + if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), + 0, enable) != enable) { + DSSERR("Failed to set dsi_vc_enable to %d\n", enable); + return -EIO; + } + + return 0; +} + +static void dsi_vc_initial_config(struct platform_device *dsidev, int channel) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 r; + + DSSDBG("Initial config of virtual channel %d", channel); + + r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel)); + + if (FLD_GET(r, 15, 15)) /* VC_BUSY */ + DSSERR("VC(%d) busy when trying to configure it!\n", + channel); + + r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */ + r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */ + r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */ + r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */ + r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */ + r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */ + r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */ + if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH)) + r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */ + + r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ + r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ + + dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r); + + dsi->vc[channel].source = DSI_VC_SOURCE_L4; +} + +static int dsi_vc_config_source(struct platform_device *dsidev, int channel, + enum dsi_vc_source source) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->vc[channel].source == source) + return 0; + + DSSDBG("Source config of virtual channel %d", channel); + + dsi_sync_vc(dsidev, channel); + + dsi_vc_enable(dsidev, channel, 0); + + /* VC_BUSY */ + if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) { + DSSERR("vc(%d) busy when trying to config for VP\n", channel); + return -EIO; + } + + /* SOURCE, 0 = L4, 1 = video port */ + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1); + + /* DCS_CMD_ENABLE */ + if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) { + bool enable = source == DSI_VC_SOURCE_VP; + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30); + } + + dsi_vc_enable(dsidev, channel, 1); + + dsi->vc[channel].source = source; + + return 0; +} + +static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel, + bool enable) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + dsi_vc_enable(dsidev, channel, 0); + dsi_if_enable(dsidev, 0); + + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9); + + dsi_vc_enable(dsidev, channel, 1); + dsi_if_enable(dsidev, 1); + + dsi_force_tx_stop_mode_io(dsidev); + + /* start the DDR clock by sending a NULL packet */ + if (dsi->vm_timings.ddr_clk_always_on && enable) + dsi_vc_send_null(dssdev, channel); +} + +static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel) +{ + while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { + u32 val; + val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); + DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", + (val >> 0) & 0xff, + (val >> 8) & 0xff, + (val >> 16) & 0xff, + (val >> 24) & 0xff); + } +} + +static void dsi_show_rx_ack_with_err(u16 err) +{ + DSSERR("\tACK with ERROR (%#x):\n", err); + if (err & (1 << 0)) + DSSERR("\t\tSoT Error\n"); + if (err & (1 << 1)) + DSSERR("\t\tSoT Sync Error\n"); + if (err & (1 << 2)) + DSSERR("\t\tEoT Sync Error\n"); + if (err & (1 << 3)) + 
DSSERR("\t\tEscape Mode Entry Command Error\n"); + if (err & (1 << 4)) + DSSERR("\t\tLP Transmit Sync Error\n"); + if (err & (1 << 5)) + DSSERR("\t\tHS Receive Timeout Error\n"); + if (err & (1 << 6)) + DSSERR("\t\tFalse Control Error\n"); + if (err & (1 << 7)) + DSSERR("\t\t(reserved7)\n"); + if (err & (1 << 8)) + DSSERR("\t\tECC Error, single-bit (corrected)\n"); + if (err & (1 << 9)) + DSSERR("\t\tECC Error, multi-bit (not corrected)\n"); + if (err & (1 << 10)) + DSSERR("\t\tChecksum Error\n"); + if (err & (1 << 11)) + DSSERR("\t\tData type not recognized\n"); + if (err & (1 << 12)) + DSSERR("\t\tInvalid VC ID\n"); + if (err & (1 << 13)) + DSSERR("\t\tInvalid Transmission Length\n"); + if (err & (1 << 14)) + DSSERR("\t\t(reserved14)\n"); + if (err & (1 << 15)) + DSSERR("\t\tDSI Protocol Violation\n"); +} + +static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev, + int channel) +{ + /* RX_FIFO_NOT_EMPTY */ + while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { + u32 val; + u8 dt; + val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); + DSSERR("\trawval %#08x\n", val); + dt = FLD_GET(val, 5, 0); + if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) { + u16 err = FLD_GET(val, 23, 8); + dsi_show_rx_ack_with_err(err); + } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) { + DSSERR("\tDCS short response, 1 byte: %#x\n", + FLD_GET(val, 23, 8)); + } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) { + DSSERR("\tDCS short response, 2 byte: %#x\n", + FLD_GET(val, 23, 8)); + } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) { + DSSERR("\tDCS long response, len %d\n", + FLD_GET(val, 23, 8)); + dsi_vc_flush_long_data(dsidev, channel); + } else { + DSSERR("\tunknown datatype 0x%02x\n", dt); + } + } + return 0; +} + +static int dsi_vc_send_bta(struct platform_device *dsidev, int channel) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->debug_write || dsi->debug_read) + DSSDBG("dsi_vc_send_bta %d\n", channel); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + /* RX_FIFO_NOT_EMPTY */ + if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { + DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); + dsi_vc_flush_receive_data(dsidev, channel); + } + + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ + + /* flush posted write */ + dsi_read_reg(dsidev, DSI_VC_CTRL(channel)); + + return 0; +} + +static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + DECLARE_COMPLETION_ONSTACK(completion); + int r = 0; + u32 err; + + r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler, + &completion, DSI_VC_IRQ_BTA); + if (r) + goto err0; + + r = dsi_register_isr(dsidev, dsi_completion_handler, &completion, + DSI_IRQ_ERROR_MASK); + if (r) + goto err1; + + r = dsi_vc_send_bta(dsidev, channel); + if (r) + goto err2; + + if (wait_for_completion_timeout(&completion, + msecs_to_jiffies(500)) == 0) { + DSSERR("Failed to receive BTA\n"); + r = -EIO; + goto err2; + } + + err = dsi_get_errors(dsidev); + if (err) { + DSSERR("Error while sending BTA: %x\n", err); + r = -EIO; + goto err2; + } +err2: + dsi_unregister_isr(dsidev, dsi_completion_handler, &completion, + DSI_IRQ_ERROR_MASK); +err1: + dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler, + &completion, DSI_VC_IRQ_BTA); +err0: + return r; +} + +static inline void dsi_vc_write_long_header(struct platform_device *dsidev, + int channel, u8 data_type, u16 len, u8 ecc) +{ + struct 
dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 val; + u8 data_id; + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + data_id = data_type | dsi->vc[channel].vc_id << 6; + + val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | + FLD_VAL(ecc, 31, 24); + + dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val); +} + +static inline void dsi_vc_write_long_payload(struct platform_device *dsidev, + int channel, u8 b1, u8 b2, u8 b3, u8 b4) +{ + u32 val; + + val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0; + +/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", + b1, b2, b3, b4, val); */ + + dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val); +} + +static int dsi_vc_send_long(struct platform_device *dsidev, int channel, + u8 data_type, u8 *data, u16 len, u8 ecc) +{ + /*u32 val; */ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int i; + u8 *p; + int r = 0; + u8 b1, b2, b3, b4; + + if (dsi->debug_write) + DSSDBG("dsi_vc_send_long, %d bytes\n", len); + + /* len + header */ + if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) { + DSSERR("unable to send long packet: packet too long.\n"); + return -EINVAL; + } + + dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4); + + dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc); + + p = data; + for (i = 0; i < len >> 2; i++) { + if (dsi->debug_write) + DSSDBG("\tsending full packet %d\n", i); + + b1 = *p++; + b2 = *p++; + b3 = *p++; + b4 = *p++; + + dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4); + } + + i = len % 4; + if (i) { + b1 = 0; b2 = 0; b3 = 0; + + if (dsi->debug_write) + DSSDBG("\tsending remainder bytes %d\n", i); + + switch (i) { + case 3: + b1 = *p++; + b2 = *p++; + b3 = *p++; + break; + case 2: + b1 = *p++; + b2 = *p++; + break; + case 1: + b1 = *p++; + break; + } + + dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0); + } + + return r; +} + +static int dsi_vc_send_short(struct platform_device *dsidev, int channel, + u8 data_type, u16 data, u8 ecc) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 r; + u8 data_id; + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + if (dsi->debug_write) + DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", + channel, + data_type, data & 0xff, (data >> 8) & 0xff); + + dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4); + + if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) { + DSSERR("ERROR FIFO FULL, aborting transfer\n"); + return -EINVAL; + } + + data_id = data_type | dsi->vc[channel].vc_id << 6; + + r = (data_id << 0) | (data << 8) | (ecc << 24); + + dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r); + + return 0; +} + +static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + + return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL, + 0, 0); +} + +static int dsi_vc_write_nosync_common(struct platform_device *dsidev, + int channel, u8 *data, int len, enum dss_dsi_content_type type) +{ + int r; + + if (len == 0) { + BUG_ON(type == DSS_DSI_CONTENT_DCS); + r = dsi_vc_send_short(dsidev, channel, + MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0); + } else if (len == 1) { + r = dsi_vc_send_short(dsidev, channel, + type == DSS_DSI_CONTENT_GENERIC ? + MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM : + MIPI_DSI_DCS_SHORT_WRITE, data[0], 0); + } else if (len == 2) { + r = dsi_vc_send_short(dsidev, channel, + type == DSS_DSI_CONTENT_GENERIC ? 
+ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM : + MIPI_DSI_DCS_SHORT_WRITE_PARAM, + data[0] | (data[1] << 8), 0); + } else { + r = dsi_vc_send_long(dsidev, channel, + type == DSS_DSI_CONTENT_GENERIC ? + MIPI_DSI_GENERIC_LONG_WRITE : + MIPI_DSI_DCS_LONG_WRITE, data, len, 0); + } + + return r; +} + +static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel, + u8 *data, int len) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + + return dsi_vc_write_nosync_common(dsidev, channel, data, len, + DSS_DSI_CONTENT_DCS); +} + +static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel, + u8 *data, int len) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + + return dsi_vc_write_nosync_common(dsidev, channel, data, len, + DSS_DSI_CONTENT_GENERIC); +} + +static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel, + u8 *data, int len, enum dss_dsi_content_type type) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + int r; + + r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type); + if (r) + goto err; + + r = dsi_vc_send_bta_sync(dssdev, channel); + if (r) + goto err; + + /* RX_FIFO_NOT_EMPTY */ + if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { + DSSERR("rx fifo not empty after write, dumping data:\n"); + dsi_vc_flush_receive_data(dsidev, channel); + r = -EIO; + goto err; + } + + return 0; +err: + DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n", + channel, data[0], len); + return r; +} + +static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data, + int len) +{ + return dsi_vc_write_common(dssdev, channel, data, len, + DSS_DSI_CONTENT_DCS); +} + +static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data, + int len) +{ + return dsi_vc_write_common(dssdev, channel, data, len, + DSS_DSI_CONTENT_GENERIC); +} + +static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev, + int channel, u8 dcs_cmd) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r; + + if (dsi->debug_read) + DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n", + channel, dcs_cmd); + + r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0); + if (r) { + DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)" + " failed\n", channel, dcs_cmd); + return r; + } + + return 0; +} + +static int dsi_vc_generic_send_read_request(struct platform_device *dsidev, + int channel, u8 *reqdata, int reqlen) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u16 data; + u8 data_type; + int r; + + if (dsi->debug_read) + DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n", + channel, reqlen); + + if (reqlen == 0) { + data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM; + data = 0; + } else if (reqlen == 1) { + data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM; + data = reqdata[0]; + } else if (reqlen == 2) { + data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM; + data = reqdata[0] | (reqdata[1] << 8); + } else { + BUG(); + return -EINVAL; + } + + r = dsi_vc_send_short(dsidev, channel, data_type, data, 0); + if (r) { + DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)" + " failed\n", channel, reqlen); + return r; + } + + return 0; +} + +static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel, + u8 *buf, int buflen, enum dss_dsi_content_type type) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 val; + u8 dt; + int r; + + /* RX_FIFO_NOT_EMPTY */ + if 
(REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) { + DSSERR("RX fifo empty when trying to read.\n"); + r = -EIO; + goto err; + } + + val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); + if (dsi->debug_read) + DSSDBG("\theader: %08x\n", val); + dt = FLD_GET(val, 5, 0); + if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) { + u16 err = FLD_GET(val, 23, 8); + dsi_show_rx_ack_with_err(err); + r = -EIO; + goto err; + + } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ? + MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE : + MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) { + u8 data = FLD_GET(val, 15, 8); + if (dsi->debug_read) + DSSDBG("\t%s short response, 1 byte: %02x\n", + type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : + "DCS", data); + + if (buflen < 1) { + r = -EIO; + goto err; + } + + buf[0] = data; + + return 1; + } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ? + MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE : + MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) { + u16 data = FLD_GET(val, 23, 8); + if (dsi->debug_read) + DSSDBG("\t%s short response, 2 byte: %04x\n", + type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : + "DCS", data); + + if (buflen < 2) { + r = -EIO; + goto err; + } + + buf[0] = data & 0xff; + buf[1] = (data >> 8) & 0xff; + + return 2; + } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ? + MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE : + MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) { + int w; + int len = FLD_GET(val, 23, 8); + if (dsi->debug_read) + DSSDBG("\t%s long response, len %d\n", + type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : + "DCS", len); + + if (len > buflen) { + r = -EIO; + goto err; + } + + /* two byte checksum ends the packet, not included in len */ + for (w = 0; w < len + 2;) { + int b; + val = dsi_read_reg(dsidev, + DSI_VC_SHORT_PACKET_HEADER(channel)); + if (dsi->debug_read) + DSSDBG("\t\t%02x %02x %02x %02x\n", + (val >> 0) & 0xff, + (val >> 8) & 0xff, + (val >> 16) & 0xff, + (val >> 24) & 0xff); + + for (b = 0; b < 4; ++b) { + if (w < len) + buf[w] = (val >> (b * 8)) & 0xff; + /* we discard the 2 byte checksum */ + ++w; + } + } + + return len; + } else { + DSSERR("\tunknown datatype 0x%02x\n", dt); + r = -EIO; + goto err; + } + +err: + DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel, + type == DSS_DSI_CONTENT_GENERIC ? 
"GENERIC" : "DCS"); + + return r; +} + +static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 *buf, int buflen) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + int r; + + r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd); + if (r) + goto err; + + r = dsi_vc_send_bta_sync(dssdev, channel); + if (r) + goto err; + + r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen, + DSS_DSI_CONTENT_DCS); + if (r < 0) + goto err; + + if (r != buflen) { + r = -EIO; + goto err; + } + + return 0; +err: + DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd); + return r; +} + +static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel, + u8 *reqdata, int reqlen, u8 *buf, int buflen) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + int r; + + r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen); + if (r) + return r; + + r = dsi_vc_send_bta_sync(dssdev, channel); + if (r) + return r; + + r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen, + DSS_DSI_CONTENT_GENERIC); + if (r < 0) + return r; + + if (r != buflen) { + r = -EIO; + return r; + } + + return 0; +} + +static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel, + u16 len) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + + return dsi_vc_send_short(dsidev, channel, + MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0); +} + +static int dsi_enter_ulps(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + DECLARE_COMPLETION_ONSTACK(completion); + int r, i; + unsigned mask; + + DSSDBG("Entering ULPS"); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + WARN_ON(dsi->ulps_enabled); + + if (dsi->ulps_enabled) + return 0; + + /* DDR_CLK_ALWAYS_ON */ + if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) { + dsi_if_enable(dsidev, 0); + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13); + dsi_if_enable(dsidev, 1); + } + + dsi_sync_vc(dsidev, 0); + dsi_sync_vc(dsidev, 1); + dsi_sync_vc(dsidev, 2); + dsi_sync_vc(dsidev, 3); + + dsi_force_tx_stop_mode_io(dsidev); + + dsi_vc_enable(dsidev, 0, false); + dsi_vc_enable(dsidev, 1, false); + dsi_vc_enable(dsidev, 2, false); + dsi_vc_enable(dsidev, 3, false); + + if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */ + DSSERR("HS busy when enabling ULPS\n"); + return -EIO; + } + + if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */ + DSSERR("LP busy when enabling ULPS\n"); + return -EIO; + } + + r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion, + DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + if (r) + return r; + + mask = 0; + + for (i = 0; i < dsi->num_lanes_supported; ++i) { + if (dsi->lanes[i].function == DSI_LANE_UNUSED) + continue; + mask |= 1 << i; + } + /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */ + /* LANEx_ULPS_SIG2 */ + REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5); + + /* flush posted write and wait for SCP interface to finish the write */ + dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2); + + if (wait_for_completion_timeout(&completion, + msecs_to_jiffies(1000)) == 0) { + DSSERR("ULPS enable timeout\n"); + r = -EIO; + goto err; + } + + dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, + DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + + /* Reset LANEx_ULPS_SIG2 */ + REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5); + + /* flush posted write and wait for SCP interface to finish the write */ + dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2); + + 
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS); + + dsi_if_enable(dsidev, false); + + dsi->ulps_enabled = true; + + return 0; + +err: + dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, + DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + return r; +} + +static void dsi_set_lp_rx_timeout(struct platform_device *dsidev, + unsigned ticks, bool x4, bool x16) +{ + unsigned long fck; + unsigned long total_ticks; + u32 r; + + BUG_ON(ticks > 0x1fff); + + /* ticks in DSI_FCK */ + fck = dsi_fclk_rate(dsidev); + + r = dsi_read_reg(dsidev, DSI_TIMING2); + r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ + r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ + r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ + r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ + dsi_write_reg(dsidev, DSI_TIMING2, r); + + total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); + + DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n", + total_ticks, + ticks, x4 ? " x4" : "", x16 ? " x16" : "", + (total_ticks * 1000) / (fck / 1000 / 1000)); +} + +static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks, + bool x8, bool x16) +{ + unsigned long fck; + unsigned long total_ticks; + u32 r; + + BUG_ON(ticks > 0x1fff); + + /* ticks in DSI_FCK */ + fck = dsi_fclk_rate(dsidev); + + r = dsi_read_reg(dsidev, DSI_TIMING1); + r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ + r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ + r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ + r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ + dsi_write_reg(dsidev, DSI_TIMING1, r); + + total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); + + DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n", + total_ticks, + ticks, x8 ? " x8" : "", x16 ? " x16" : "", + (total_ticks * 1000) / (fck / 1000 / 1000)); +} + +static void dsi_set_stop_state_counter(struct platform_device *dsidev, + unsigned ticks, bool x4, bool x16) +{ + unsigned long fck; + unsigned long total_ticks; + u32 r; + + BUG_ON(ticks > 0x1fff); + + /* ticks in DSI_FCK */ + fck = dsi_fclk_rate(dsidev); + + r = dsi_read_reg(dsidev, DSI_TIMING1); + r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ + r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ + r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ + r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ + dsi_write_reg(dsidev, DSI_TIMING1, r); + + total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); + + DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n", + total_ticks, + ticks, x4 ? " x4" : "", x16 ? " x16" : "", + (total_ticks * 1000) / (fck / 1000 / 1000)); +} + +static void dsi_set_hs_tx_timeout(struct platform_device *dsidev, + unsigned ticks, bool x4, bool x16) +{ + unsigned long fck; + unsigned long total_ticks; + u32 r; + + BUG_ON(ticks > 0x1fff); + + /* ticks in TxByteClkHS */ + fck = dsi_get_txbyteclkhs(dsidev); + + r = dsi_read_reg(dsidev, DSI_TIMING2); + r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ + r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ + r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ + r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ + dsi_write_reg(dsidev, DSI_TIMING2, r); + + total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); + + DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n", + total_ticks, + ticks, x4 ? " x4" : "", x16 ? 
" x16" : "", + (total_ticks * 1000) / (fck / 1000 / 1000)); +} + +static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int num_line_buffers; + + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + int bpp = dsi_get_pixel_size(dsi->pix_fmt); + struct omap_video_timings *timings = &dsi->timings; + /* + * Don't use line buffers if width is greater than the video + * port's line buffer size + */ + if (dsi->line_buffer_size <= timings->x_res * bpp / 8) + num_line_buffers = 0; + else + num_line_buffers = 2; + } else { + /* Use maximum number of line buffers in command mode */ + num_line_buffers = 2; + } + + /* LINE_BUFFER */ + REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12); +} + +static void dsi_config_vp_sync_events(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + bool sync_end; + u32 r; + + if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE) + sync_end = true; + else + sync_end = false; + + r = dsi_read_reg(dsidev, DSI_CTRL); + r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */ + r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */ + r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */ + r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */ + r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */ + r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */ + r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */ + dsi_write_reg(dsidev, DSI_CTRL, r); +} + +static void dsi_config_blanking_modes(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int blanking_mode = dsi->vm_timings.blanking_mode; + int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode; + int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode; + int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode; + u32 r; + + /* + * 0 = TX FIFO packets sent or LPS in corresponding blanking periods + * 1 = Long blanking packets are sent in corresponding blanking periods + */ + r = dsi_read_reg(dsidev, DSI_CTRL); + r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */ + r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */ + r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */ + r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */ + dsi_write_reg(dsidev, DSI_CTRL, r); +} + +/* + * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3 + * results in maximum transition time for data and clock lanes to enter and + * exit HS mode. Hence, this is the scenario where the least amount of command + * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS + * clock cycles that can be used to interleave command mode data in HS so that + * all scenarios are satisfied. + */ +static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs, + int exit_hs, int exiths_clk, int ddr_pre, int ddr_post) +{ + int transition; + + /* + * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition + * time of data lanes only, if it isn't set, we need to consider HS + * transition time of both data and clock lanes. HS transition time + * of Scenario 3 is considered. + */ + if (ddr_alwon) { + transition = enter_hs + exit_hs + max(enter_hs, 2) + 1; + } else { + int trans1, trans2; + trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1; + trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre + + enter_hs + 1; + transition = max(trans1, trans2); + } + + return blank > transition ? 
blank - transition : 0; +} + +/* + * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1 + * results in maximum transition time for data lanes to enter and exit LP mode. + * Hence, this is the scenario where the least amount of command mode data can + * be interleaved. We program the minimum amount of bytes that can be + * interleaved in LP so that all scenarios are satisfied. + */ +static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs, + int lp_clk_div, int tdsi_fclk) +{ + int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */ + int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */ + int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */ + int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */ + int lp_inter; /* cmd mode data that can be interleaved, in bytes */ + + /* maximum LP transition time according to Scenario 1 */ + trans_lp = exit_hs + max(enter_hs, 2) + 1; + + /* CLKIN4DDR = 16 * TXBYTECLKHS */ + tlp_avail = thsbyte_clk * (blank - trans_lp); + + ttxclkesc = tdsi_fclk * lp_clk_div; + + lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - + 26) / 16; + + return max(lp_inter, 0); +} + +static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int blanking_mode; + int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode; + int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div; + int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat; + int tclk_trail, ths_exit, exiths_clk; + bool ddr_alwon; + struct omap_video_timings *timings = &dsi->timings; + int bpp = dsi_get_pixel_size(dsi->pix_fmt); + int ndl = dsi->num_lanes_used - 1; + int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1; + int hsa_interleave_hs = 0, hsa_interleave_lp = 0; + int hfp_interleave_hs = 0, hfp_interleave_lp = 0; + int hbp_interleave_hs = 0, hbp_interleave_lp = 0; + int bl_interleave_hs = 0, bl_interleave_lp = 0; + u32 r; + + r = dsi_read_reg(dsidev, DSI_CTRL); + blanking_mode = FLD_GET(r, 20, 20); + hfp_blanking_mode = FLD_GET(r, 21, 21); + hbp_blanking_mode = FLD_GET(r, 22, 22); + hsa_blanking_mode = FLD_GET(r, 23, 23); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING1); + hbp = FLD_GET(r, 11, 0); + hfp = FLD_GET(r, 23, 12); + hsa = FLD_GET(r, 31, 24); + + r = dsi_read_reg(dsidev, DSI_CLK_TIMING); + ddr_clk_post = FLD_GET(r, 7, 0); + ddr_clk_pre = FLD_GET(r, 15, 8); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING7); + exit_hs_mode_lat = FLD_GET(r, 15, 0); + enter_hs_mode_lat = FLD_GET(r, 31, 16); + + r = dsi_read_reg(dsidev, DSI_CLK_CTRL); + lp_clk_div = FLD_GET(r, 12, 0); + ddr_alwon = FLD_GET(r, 13, 13); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); + ths_exit = FLD_GET(r, 7, 0); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); + tclk_trail = FLD_GET(r, 15, 8); + + exiths_clk = ths_exit + tclk_trail; + + width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8); + bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl); + + if (!hsa_blanking_mode) { + hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + hsa_interleave_lp = dsi_compute_interleave_lp(hsa, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + if (!hfp_blanking_mode) { + hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + hfp_interleave_lp = 
dsi_compute_interleave_lp(hfp, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + if (!hbp_blanking_mode) { + hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + + hbp_interleave_lp = dsi_compute_interleave_lp(hbp, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + if (!blanking_mode) { + bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + + bl_interleave_lp = dsi_compute_interleave_lp(bllp, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n", + hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs, + bl_interleave_hs); + + DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n", + hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp, + bl_interleave_lp); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING4); + r = FLD_MOD(r, hsa_interleave_hs, 23, 16); + r = FLD_MOD(r, hfp_interleave_hs, 15, 8); + r = FLD_MOD(r, hbp_interleave_hs, 7, 0); + dsi_write_reg(dsidev, DSI_VM_TIMING4, r); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING5); + r = FLD_MOD(r, hsa_interleave_lp, 23, 16); + r = FLD_MOD(r, hfp_interleave_lp, 15, 8); + r = FLD_MOD(r, hbp_interleave_lp, 7, 0); + dsi_write_reg(dsidev, DSI_VM_TIMING5, r); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING6); + r = FLD_MOD(r, bl_interleave_hs, 31, 15); + r = FLD_MOD(r, bl_interleave_lp, 16, 0); + dsi_write_reg(dsidev, DSI_VM_TIMING6, r); +} + +static int dsi_proto_config(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u32 r; + int buswidth = 0; + + dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32, + DSI_FIFO_SIZE_32, + DSI_FIFO_SIZE_32, + DSI_FIFO_SIZE_32); + + dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32, + DSI_FIFO_SIZE_32, + DSI_FIFO_SIZE_32, + DSI_FIFO_SIZE_32); + + /* XXX what values for the timeouts? 
*/ + dsi_set_stop_state_counter(dsidev, 0x1000, false, false); + dsi_set_ta_timeout(dsidev, 0x1fff, true, true); + dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true); + dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true); + + switch (dsi_get_pixel_size(dsi->pix_fmt)) { + case 16: + buswidth = 0; + break; + case 18: + buswidth = 1; + break; + case 24: + buswidth = 2; + break; + default: + BUG(); + return -EINVAL; + } + + r = dsi_read_reg(dsidev, DSI_CTRL); + r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */ + r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */ + r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */ + r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/ + r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */ + r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */ + r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */ + r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */ + if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) { + r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */ + /* DCS_CMD_CODE, 1=start, 0=continue */ + r = FLD_MOD(r, 0, 25, 25); + } + + dsi_write_reg(dsidev, DSI_CTRL, r); + + dsi_config_vp_num_line_buffers(dsidev); + + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + dsi_config_vp_sync_events(dsidev); + dsi_config_blanking_modes(dsidev); + dsi_config_cmd_mode_interleaving(dsidev); + } + + dsi_vc_initial_config(dsidev, 0); + dsi_vc_initial_config(dsidev, 1); + dsi_vc_initial_config(dsidev, 2); + dsi_vc_initial_config(dsidev, 3); + + return 0; +} + +static void dsi_proto_timings(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; + unsigned tclk_pre, tclk_post; + unsigned ths_prepare, ths_prepare_ths_zero, ths_zero; + unsigned ths_trail, ths_exit; + unsigned ddr_clk_pre, ddr_clk_post; + unsigned enter_hs_mode_lat, exit_hs_mode_lat; + unsigned ths_eot; + int ndl = dsi->num_lanes_used - 1; + u32 r; + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); + ths_prepare = FLD_GET(r, 31, 24); + ths_prepare_ths_zero = FLD_GET(r, 23, 16); + ths_zero = ths_prepare_ths_zero - ths_prepare; + ths_trail = FLD_GET(r, 15, 8); + ths_exit = FLD_GET(r, 7, 0); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); + tlpx = FLD_GET(r, 20, 16) * 2; + tclk_trail = FLD_GET(r, 15, 8); + tclk_zero = FLD_GET(r, 7, 0); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2); + tclk_prepare = FLD_GET(r, 7, 0); + + /* min 8*UI */ + tclk_pre = 20; + /* min 60ns + 52*UI */ + tclk_post = ns2ddr(dsidev, 60) + 26; + + ths_eot = DIV_ROUND_UP(4, ndl); + + ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare, + 4); + ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot; + + BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); + BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); + + r = dsi_read_reg(dsidev, DSI_CLK_TIMING); + r = FLD_MOD(r, ddr_clk_pre, 15, 8); + r = FLD_MOD(r, ddr_clk_post, 7, 0); + dsi_write_reg(dsidev, DSI_CLK_TIMING, r); + + DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n", + ddr_clk_pre, + ddr_clk_post); + + enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) + + DIV_ROUND_UP(ths_prepare, 4) + + DIV_ROUND_UP(ths_zero + 3, 4); + + exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot; + + r = FLD_VAL(enter_hs_mode_lat, 31, 16) | + FLD_VAL(exit_hs_mode_lat, 15, 0); + dsi_write_reg(dsidev, DSI_VM_TIMING7, r); + + DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", + enter_hs_mode_lat, exit_hs_mode_lat); + + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + /* TODO: Implement a video mode check_timings function */ + int hsa = 
dsi->vm_timings.hsa; + int hfp = dsi->vm_timings.hfp; + int hbp = dsi->vm_timings.hbp; + int vsa = dsi->vm_timings.vsa; + int vfp = dsi->vm_timings.vfp; + int vbp = dsi->vm_timings.vbp; + int window_sync = dsi->vm_timings.window_sync; + bool hsync_end; + struct omap_video_timings *timings = &dsi->timings; + int bpp = dsi_get_pixel_size(dsi->pix_fmt); + int tl, t_he, width_bytes; + + hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE; + t_he = hsync_end ? + ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0; + + width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8); + + /* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */ + tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp + + DIV_ROUND_UP(width_bytes + 6, ndl) + hbp; + + DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp, + hfp, hsync_end ? hsa : 0, tl); + DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp, + vsa, timings->y_res); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING1); + r = FLD_MOD(r, hbp, 11, 0); /* HBP */ + r = FLD_MOD(r, hfp, 23, 12); /* HFP */ + r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */ + dsi_write_reg(dsidev, DSI_VM_TIMING1, r); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING2); + r = FLD_MOD(r, vbp, 7, 0); /* VBP */ + r = FLD_MOD(r, vfp, 15, 8); /* VFP */ + r = FLD_MOD(r, vsa, 23, 16); /* VSA */ + r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */ + dsi_write_reg(dsidev, DSI_VM_TIMING2, r); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING3); + r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */ + r = FLD_MOD(r, tl, 31, 16); /* TL */ + dsi_write_reg(dsidev, DSI_VM_TIMING3, r); + } +} + +static int dsi_configure_pins(struct omap_dss_device *dssdev, + const struct omap_dsi_pin_config *pin_cfg) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int num_pins; + const int *pins; + struct dsi_lane_config lanes[DSI_MAX_NR_LANES]; + int num_lanes; + int i; + + static const enum dsi_lane_function functions[] = { + DSI_LANE_CLK, + DSI_LANE_DATA1, + DSI_LANE_DATA2, + DSI_LANE_DATA3, + DSI_LANE_DATA4, + }; + + num_pins = pin_cfg->num_pins; + pins = pin_cfg->pins; + + if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2 + || num_pins % 2 != 0) + return -EINVAL; + + for (i = 0; i < DSI_MAX_NR_LANES; ++i) + lanes[i].function = DSI_LANE_UNUSED; + + num_lanes = 0; + + for (i = 0; i < num_pins; i += 2) { + u8 lane, pol; + int dx, dy; + + dx = pins[i]; + dy = pins[i + 1]; + + if (dx < 0 || dx >= dsi->num_lanes_supported * 2) + return -EINVAL; + + if (dy < 0 || dy >= dsi->num_lanes_supported * 2) + return -EINVAL; + + if (dx & 1) { + if (dy != dx - 1) + return -EINVAL; + pol = 1; + } else { + if (dy != dx + 1) + return -EINVAL; + pol = 0; + } + + lane = dx / 2; + + lanes[lane].function = functions[i / 2]; + lanes[lane].polarity = pol; + num_lanes++; + } + + memcpy(dsi->lanes, lanes, sizeof(dsi->lanes)); + dsi->num_lanes_used = num_lanes; + + return 0; +} + +static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_overlay_manager *mgr = dsi->output.manager; + int bpp = dsi_get_pixel_size(dsi->pix_fmt); + struct omap_dss_device *out = &dsi->output; + u8 data_type; + u16 word_count; + int r; + + if (out->manager == NULL) { + DSSERR("failed to enable display: no output/manager\n"); + return -ENODEV; + } + + r = 
dsi_display_init_dispc(dsidev, mgr); + if (r) + goto err_init_dispc; + + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + switch (dsi->pix_fmt) { + case OMAP_DSS_DSI_FMT_RGB888: + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; + break; + case OMAP_DSS_DSI_FMT_RGB666: + data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18; + break; + case OMAP_DSS_DSI_FMT_RGB666_PACKED: + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18; + break; + case OMAP_DSS_DSI_FMT_RGB565: + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16; + break; + default: + r = -EINVAL; + goto err_pix_fmt; + } + + dsi_if_enable(dsidev, false); + dsi_vc_enable(dsidev, channel, false); + + /* MODE, 1 = video mode */ + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4); + + word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8); + + dsi_vc_write_long_header(dsidev, channel, data_type, + word_count, 0); + + dsi_vc_enable(dsidev, channel, true); + dsi_if_enable(dsidev, true); + } + + r = dss_mgr_enable(mgr); + if (r) + goto err_mgr_enable; + + return 0; + +err_mgr_enable: + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + dsi_if_enable(dsidev, false); + dsi_vc_enable(dsidev, channel, false); + } +err_pix_fmt: + dsi_display_uninit_dispc(dsidev, mgr); +err_init_dispc: + return r; +} + +static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_overlay_manager *mgr = dsi->output.manager; + + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + dsi_if_enable(dsidev, false); + dsi_vc_enable(dsidev, channel, false); + + /* MODE, 0 = command mode */ + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4); + + dsi_vc_enable(dsidev, channel, true); + dsi_if_enable(dsidev, true); + } + + dss_mgr_disable(mgr); + + dsi_display_uninit_dispc(dsidev, mgr); +} + +static void dsi_update_screen_dispc(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_overlay_manager *mgr = dsi->output.manager; + unsigned bytespp; + unsigned bytespl; + unsigned bytespf; + unsigned total_len; + unsigned packet_payload; + unsigned packet_len; + u32 l; + int r; + const unsigned channel = dsi->update_channel; + const unsigned line_buf_size = dsi->line_buffer_size; + u16 w = dsi->timings.x_res; + u16 h = dsi->timings.y_res; + + DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h); + + dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP); + + bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8; + bytespl = w * bytespp; + bytespf = bytespl * h; + + /* NOTE: packet_payload has to be equal to N * bytespl, where N is + * number of lines in a packet. 
See errata about VP_CLK_RATIO */ + + if (bytespf < line_buf_size) + packet_payload = bytespf; + else + packet_payload = (line_buf_size) / bytespl * bytespl; + + packet_len = packet_payload + 1; /* 1 byte for DCS cmd */ + total_len = (bytespf / packet_payload) * packet_len; + + if (bytespf % packet_payload) + total_len += (bytespf % packet_payload) + 1; + + l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ + dsi_write_reg(dsidev, DSI_VC_TE(channel), l); + + dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE, + packet_len, 0); + + if (dsi->te_enabled) + l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ + else + l = FLD_MOD(l, 1, 31, 31); /* TE_START */ + dsi_write_reg(dsidev, DSI_VC_TE(channel), l); + + /* We put SIDLEMODE to no-idle for the duration of the transfer, + * because DSS interrupts are not capable of waking up the CPU and the + * framedone interrupt could be delayed for quite a long time. I think + * the same goes for any DSS interrupts, but for some reason I have not + * seen the problem anywhere else than here. + */ + dispc_disable_sidle(); + + dsi_perf_mark_start(dsidev); + + r = schedule_delayed_work(&dsi->framedone_timeout_work, + msecs_to_jiffies(250)); + BUG_ON(r == 0); + + dss_mgr_set_timings(mgr, &dsi->timings); + + dss_mgr_start_update(mgr); + + if (dsi->te_enabled) { + /* disable LP_RX_TO, so that we can receive TE. Time to wait + * for TE is longer than the timer allows */ + REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ + + dsi_vc_send_bta(dsidev, channel); + +#ifdef DSI_CATCH_MISSING_TE + mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250)); +#endif + } +} + +#ifdef DSI_CATCH_MISSING_TE +static void dsi_te_timeout(unsigned long arg) +{ + DSSERR("TE not received for 250ms!\n"); +} +#endif + +static void dsi_handle_framedone(struct platform_device *dsidev, int error) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + /* SIDLEMODE back to smart-idle */ + dispc_enable_sidle(); + + if (dsi->te_enabled) { + /* enable LP_RX_TO again after the TE */ + REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ + } + + dsi->framedone_callback(error, dsi->framedone_data); + + if (!error) + dsi_perf_show(dsidev, "DISPC"); +} + +static void dsi_framedone_timeout_work_callback(struct work_struct *work) +{ + struct dsi_data *dsi = container_of(work, struct dsi_data, + framedone_timeout_work.work); + /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after + * 250ms which would conflict with this timeout work. What should be + * done is first cancel the transfer on the HW, and then cancel the + * possibly scheduled framedone work. However, cancelling the transfer + * on the HW is buggy, and would probably require resetting the whole + * DSI */ + + DSSERR("Framedone not received for 250ms!\n"); + + dsi_handle_framedone(dsi->pdev, -ETIMEDOUT); +} + +static void dsi_framedone_irq_callback(void *data) +{ + struct platform_device *dsidev = (struct platform_device *) data; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + /* Note: We get FRAMEDONE when DISPC has finished sending pixels and + * turns itself off. However, DSI still has the pixels in its buffers, + * and is sending the data. 
+ */ + + cancel_delayed_work(&dsi->framedone_timeout_work); + + dsi_handle_framedone(dsidev, 0); +} + +static int dsi_update(struct omap_dss_device *dssdev, int channel, + void (*callback)(int, void *), void *data) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + u16 dw, dh; + + dsi_perf_mark_setup(dsidev); + + dsi->update_channel = channel; + + dsi->framedone_callback = callback; + dsi->framedone_data = data; + + dw = dsi->timings.x_res; + dh = dsi->timings.y_res; + +#ifdef DSI_PERF_MEASURE + dsi->update_bytes = dw * dh * + dsi_get_pixel_size(dsi->pix_fmt) / 8; +#endif + dsi_update_screen_dispc(dsidev); + + return 0; +} + +/* Display funcs */ + +static int dsi_configure_dispc_clocks(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dispc_clock_info dispc_cinfo; + int r; + unsigned long fck; + + fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); + + dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div; + dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div; + + r = dispc_calc_clock_rates(fck, &dispc_cinfo); + if (r) { + DSSERR("Failed to calc dispc clocks\n"); + return r; + } + + dsi->mgr_config.clock_info = dispc_cinfo; + + return 0; +} + +static int dsi_display_init_dispc(struct platform_device *dsidev, + struct omap_overlay_manager *mgr) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r; + + dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ? + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC); + + if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) { + r = dss_mgr_register_framedone_handler(mgr, + dsi_framedone_irq_callback, dsidev); + if (r) { + DSSERR("can't register FRAMEDONE handler\n"); + goto err; + } + + dsi->mgr_config.stallmode = true; + dsi->mgr_config.fifohandcheck = true; + } else { + dsi->mgr_config.stallmode = false; + dsi->mgr_config.fifohandcheck = false; + } + + /* + * override interlace, logic level and edge related parameters in + * omap_video_timings with default values + */ + dsi->timings.interlace = false; + dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH; + dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE; + + dss_mgr_set_timings(mgr, &dsi->timings); + + r = dsi_configure_dispc_clocks(dsidev); + if (r) + goto err1; + + dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + dsi->mgr_config.video_port_width = + dsi_get_pixel_size(dsi->pix_fmt); + dsi->mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(mgr, &dsi->mgr_config); + + return 0; +err1: + if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) + dss_mgr_unregister_framedone_handler(mgr, + dsi_framedone_irq_callback, dsidev); +err: + dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK); + return r; +} + +static void dsi_display_uninit_dispc(struct platform_device *dsidev, + struct omap_overlay_manager *mgr) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) + dss_mgr_unregister_framedone_handler(mgr, + dsi_framedone_irq_callback, dsidev); + + dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK); +} + +static int dsi_configure_dsi_clocks(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dss_pll_clock_info cinfo; + int r; + + cinfo = dsi->user_dsi_cinfo; + + r = 
dss_pll_set_config(&dsi->pll, &cinfo); + if (r) { + DSSERR("Failed to set dsi clocks\n"); + return r; + } + + return 0; +} + +static int dsi_display_init_dsi(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r; + + r = dss_pll_enable(&dsi->pll); + if (r) + goto err0; + + r = dsi_configure_dsi_clocks(dsidev); + if (r) + goto err1; + + dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); + + DSSDBG("PLL OK\n"); + + r = dsi_cio_init(dsidev); + if (r) + goto err2; + + _dsi_print_reset_status(dsidev); + + dsi_proto_timings(dsidev); + dsi_set_lp_clk_divisor(dsidev); + + if (1) + _dsi_print_reset_status(dsidev); + + r = dsi_proto_config(dsidev); + if (r) + goto err3; + + /* enable interface */ + dsi_vc_enable(dsidev, 0, 1); + dsi_vc_enable(dsidev, 1, 1); + dsi_vc_enable(dsidev, 2, 1); + dsi_vc_enable(dsidev, 3, 1); + dsi_if_enable(dsidev, 1); + dsi_force_tx_stop_mode_io(dsidev); + + return 0; +err3: + dsi_cio_uninit(dsidev); +err2: + dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); +err1: + dss_pll_disable(&dsi->pll); +err0: + return r; +} + +static void dsi_display_uninit_dsi(struct platform_device *dsidev, + bool disconnect_lanes, bool enter_ulps) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (enter_ulps && !dsi->ulps_enabled) + dsi_enter_ulps(dsidev); + + /* disable interface */ + dsi_if_enable(dsidev, 0); + dsi_vc_enable(dsidev, 0, 0); + dsi_vc_enable(dsidev, 1, 0); + dsi_vc_enable(dsidev, 2, 0); + dsi_vc_enable(dsidev, 3, 0); + + dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + dsi_cio_uninit(dsidev); + dsi_pll_uninit(dsidev, disconnect_lanes); +} + +static int dsi_display_enable(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r = 0; + + DSSDBG("dsi_display_enable\n"); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + mutex_lock(&dsi->lock); + + r = dsi_runtime_get(dsidev); + if (r) + goto err_get_dsi; + + _dsi_initialize_irq(dsidev); + + r = dsi_display_init_dsi(dsidev); + if (r) + goto err_init_dsi; + + mutex_unlock(&dsi->lock); + + return 0; + +err_init_dsi: + dsi_runtime_put(dsidev); +err_get_dsi: + mutex_unlock(&dsi->lock); + DSSDBG("dsi_display_enable FAILED\n"); + return r; +} + +static void dsi_display_disable(struct omap_dss_device *dssdev, + bool disconnect_lanes, bool enter_ulps) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + DSSDBG("dsi_display_disable\n"); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + mutex_lock(&dsi->lock); + + dsi_sync_vc(dsidev, 0); + dsi_sync_vc(dsidev, 1); + dsi_sync_vc(dsidev, 2); + dsi_sync_vc(dsidev, 3); + + dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps); + + dsi_runtime_put(dsidev); + + mutex_unlock(&dsi->lock); +} + +static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi->te_enabled = enable; + return 0; +} + +#ifdef PRINT_VERBOSE_VM_TIMINGS +static void print_dsi_vm(const char *str, + const struct omap_dss_dsi_videomode_timings *t) +{ + unsigned long byteclk = t->hsclk / 4; + int bl, wc, pps, tot; + + wc = DIV_ROUND_UP(t->hact * t->bitspp, 8); + pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */ + bl = 
t->hss + t->hsa + t->hse + t->hbp + t->hfp; + tot = bl + pps; + +#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk)) + + pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, " + "%u/%u/%u/%u/%u/%u = %u + %u = %u\n", + str, + byteclk, + t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp, + bl, pps, tot, + TO_DSI_T(t->hss), + TO_DSI_T(t->hsa), + TO_DSI_T(t->hse), + TO_DSI_T(t->hbp), + TO_DSI_T(pps), + TO_DSI_T(t->hfp), + + TO_DSI_T(bl), + TO_DSI_T(pps), + + TO_DSI_T(tot)); +#undef TO_DSI_T +} + +static void print_dispc_vm(const char *str, const struct omap_video_timings *t) +{ + unsigned long pck = t->pixelclock; + int hact, bl, tot; + + hact = t->x_res; + bl = t->hsw + t->hbp + t->hfp; + tot = hact + bl; + +#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck)) + + pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, " + "%u/%u/%u/%u = %u + %u = %u\n", + str, + pck, + t->hsw, t->hbp, hact, t->hfp, + bl, hact, tot, + TO_DISPC_T(t->hsw), + TO_DISPC_T(t->hbp), + TO_DISPC_T(hact), + TO_DISPC_T(t->hfp), + TO_DISPC_T(bl), + TO_DISPC_T(hact), + TO_DISPC_T(tot)); +#undef TO_DISPC_T +} + +/* note: this is not quite accurate */ +static void print_dsi_dispc_vm(const char *str, + const struct omap_dss_dsi_videomode_timings *t) +{ + struct omap_video_timings vm = { 0 }; + unsigned long byteclk = t->hsclk / 4; + unsigned long pck; + u64 dsi_tput; + int dsi_hact, dsi_htot; + + dsi_tput = (u64)byteclk * t->ndl * 8; + pck = (u32)div64_u64(dsi_tput, t->bitspp); + dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl); + dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp; + + vm.pixelclock = pck; + vm.hsw = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk); + vm.hbp = div64_u64((u64)t->hbp * pck, byteclk); + vm.hfp = div64_u64((u64)t->hfp * pck, byteclk); + vm.x_res = t->hact; + + print_dispc_vm(str, &vm); +} +#endif /* PRINT_VERBOSE_VM_TIMINGS */ + +static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck, + unsigned long pck, void *data) +{ + struct dsi_clk_calc_ctx *ctx = data; + struct omap_video_timings *t = &ctx->dispc_vm; + + ctx->dispc_cinfo.lck_div = lckd; + ctx->dispc_cinfo.pck_div = pckd; + ctx->dispc_cinfo.lck = lck; + ctx->dispc_cinfo.pck = pck; + + *t = *ctx->config->timings; + t->pixelclock = pck; + t->x_res = ctx->config->timings->x_res; + t->y_res = ctx->config->timings->y_res; + t->hsw = t->hfp = t->hbp = t->vsw = 1; + t->vfp = t->vbp = 0; + + return true; +} + +static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc, + void *data) +{ + struct dsi_clk_calc_ctx *ctx = data; + + ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; + ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; + + return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max, + dsi_cm_calc_dispc_cb, ctx); +} + +static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint, + unsigned long clkdco, void *data) +{ + struct dsi_clk_calc_ctx *ctx = data; + + ctx->dsi_cinfo.n = n; + ctx->dsi_cinfo.m = m; + ctx->dsi_cinfo.fint = fint; + ctx->dsi_cinfo.clkdco = clkdco; + + return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), + dsi_cm_calc_hsdiv_cb, ctx); +} + +static bool dsi_cm_calc(struct dsi_data *dsi, + const struct omap_dss_dsi_config *cfg, + struct dsi_clk_calc_ctx *ctx) +{ + unsigned long clkin; + int bitspp, ndl; + unsigned long pll_min, pll_max; + unsigned long pck, txbyteclk; + + clkin = clk_get_rate(dsi->pll.clkin); + bitspp = dsi_get_pixel_size(cfg->pixel_format); + ndl = dsi->num_lanes_used - 1; + + 
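	/*
	 * Rough illustration of the estimate made below, with example numbers
	 * only (not taken from any real panel): a nominal 40 MHz pixel clock
	 * with bitspp = 24 and ndl = 2 becomes pck = 60 MHz after the 1.5x
	 * margin, so txbyteclk = 60 MHz * 24 / 8 / 2 = 90 MHz and
	 * pll_min = txbyteclk * 4 * 4 = 1.44 GHz, which still fits under the
	 * 1.8 GHz clkdco limit of the DSI PLL tables further down.
	 */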
/* + * Here we should calculate minimum txbyteclk to be able to send the + * frame in time, and also to handle TE. That's not very simple, though, + * especially as we go to LP between each pixel packet due to HW + * "feature". So let's just estimate very roughly and multiply by 1.5. + */ + pck = cfg->timings->pixelclock; + pck = pck * 3 / 2; + txbyteclk = pck * bitspp / 8 / ndl; + + memset(ctx, 0, sizeof(*ctx)); + ctx->dsidev = dsi->pdev; + ctx->pll = &dsi->pll; + ctx->config = cfg; + ctx->req_pck_min = pck; + ctx->req_pck_nom = pck; + ctx->req_pck_max = pck * 3 / 2; + + pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); + pll_max = cfg->hs_clk_max * 4; + + return dss_pll_calc(ctx->pll, clkin, + pll_min, pll_max, + dsi_cm_calc_pll_cb, ctx); +} + +static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev); + const struct omap_dss_dsi_config *cfg = ctx->config; + int bitspp = dsi_get_pixel_size(cfg->pixel_format); + int ndl = dsi->num_lanes_used - 1; + unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4; + unsigned long byteclk = hsclk / 4; + + unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max; + int xres; + int panel_htot, panel_hbl; /* pixels */ + int dispc_htot, dispc_hbl; /* pixels */ + int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */ + int hfp, hsa, hbp; + const struct omap_video_timings *req_vm; + struct omap_video_timings *dispc_vm; + struct omap_dss_dsi_videomode_timings *dsi_vm; + u64 dsi_tput, dispc_tput; + + dsi_tput = (u64)byteclk * ndl * 8; + + req_vm = cfg->timings; + req_pck_min = ctx->req_pck_min; + req_pck_max = ctx->req_pck_max; + req_pck_nom = ctx->req_pck_nom; + + dispc_pck = ctx->dispc_cinfo.pck; + dispc_tput = (u64)dispc_pck * bitspp; + + xres = req_vm->x_res; + + panel_hbl = req_vm->hfp + req_vm->hbp + req_vm->hsw; + panel_htot = xres + panel_hbl; + + dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl); + + /* + * When there are no line buffers, DISPC and DSI must have the + * same tput. Otherwise DISPC tput needs to be higher than DSI's. + */ + if (dsi->line_buffer_size < xres * bitspp / 8) { + if (dispc_tput != dsi_tput) + return false; + } else { + if (dispc_tput < dsi_tput) + return false; + } + + /* DSI tput must be over the min requirement */ + if (dsi_tput < (u64)bitspp * req_pck_min) + return false; + + /* When non-burst mode, DSI tput must be below max requirement. 
*/ + if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) { + if (dsi_tput > (u64)bitspp * req_pck_max) + return false; + } + + hss = DIV_ROUND_UP(4, ndl); + + if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) { + if (ndl == 3 && req_vm->hsw == 0) + hse = 1; + else + hse = DIV_ROUND_UP(4, ndl); + } else { + hse = 0; + } + + /* DSI htot to match the panel's nominal pck */ + dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom); + + /* fail if there would be no time for blanking */ + if (dsi_htot < hss + hse + dsi_hact) + return false; + + /* total DSI blanking needed to achieve panel's TL */ + dsi_hbl = dsi_htot - dsi_hact; + + /* DISPC htot to match the DSI TL */ + dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk); + + /* verify that the DSI and DISPC TLs are the same */ + if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk) + return false; + + dispc_hbl = dispc_htot - xres; + + /* setup DSI videomode */ + + dsi_vm = &ctx->dsi_vm; + memset(dsi_vm, 0, sizeof(*dsi_vm)); + + dsi_vm->hsclk = hsclk; + + dsi_vm->ndl = ndl; + dsi_vm->bitspp = bitspp; + + if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) { + hsa = 0; + } else if (ndl == 3 && req_vm->hsw == 0) { + hsa = 0; + } else { + hsa = div64_u64((u64)req_vm->hsw * byteclk, req_pck_nom); + hsa = max(hsa - hse, 1); + } + + hbp = div64_u64((u64)req_vm->hbp * byteclk, req_pck_nom); + hbp = max(hbp, 1); + + hfp = dsi_hbl - (hss + hsa + hse + hbp); + if (hfp < 1) { + int t; + /* we need to take cycles from hbp */ + + t = 1 - hfp; + hbp = max(hbp - t, 1); + hfp = dsi_hbl - (hss + hsa + hse + hbp); + + if (hfp < 1 && hsa > 0) { + /* we need to take cycles from hsa */ + t = 1 - hfp; + hsa = max(hsa - t, 1); + hfp = dsi_hbl - (hss + hsa + hse + hbp); + } + } + + if (hfp < 1) + return false; + + dsi_vm->hss = hss; + dsi_vm->hsa = hsa; + dsi_vm->hse = hse; + dsi_vm->hbp = hbp; + dsi_vm->hact = xres; + dsi_vm->hfp = hfp; + + dsi_vm->vsa = req_vm->vsw; + dsi_vm->vbp = req_vm->vbp; + dsi_vm->vact = req_vm->y_res; + dsi_vm->vfp = req_vm->vfp; + + dsi_vm->trans_mode = cfg->trans_mode; + + dsi_vm->blanking_mode = 0; + dsi_vm->hsa_blanking_mode = 1; + dsi_vm->hfp_blanking_mode = 1; + dsi_vm->hbp_blanking_mode = 1; + + dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on; + dsi_vm->window_sync = 4; + + /* setup DISPC videomode */ + + dispc_vm = &ctx->dispc_vm; + *dispc_vm = *req_vm; + dispc_vm->pixelclock = dispc_pck; + + if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) { + hsa = div64_u64((u64)req_vm->hsw * dispc_pck, + req_pck_nom); + hsa = max(hsa, 1); + } else { + hsa = 1; + } + + hbp = div64_u64((u64)req_vm->hbp * dispc_pck, req_pck_nom); + hbp = max(hbp, 1); + + hfp = dispc_hbl - hsa - hbp; + if (hfp < 1) { + int t; + /* we need to take cycles from hbp */ + + t = 1 - hfp; + hbp = max(hbp - t, 1); + hfp = dispc_hbl - hsa - hbp; + + if (hfp < 1) { + /* we need to take cycles from hsa */ + t = 1 - hfp; + hsa = max(hsa - t, 1); + hfp = dispc_hbl - hsa - hbp; + } + } + + if (hfp < 1) + return false; + + dispc_vm->hfp = hfp; + dispc_vm->hsw = hsa; + dispc_vm->hbp = hbp; + + return true; +} + + +static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck, + unsigned long pck, void *data) +{ + struct dsi_clk_calc_ctx *ctx = data; + + ctx->dispc_cinfo.lck_div = lckd; + ctx->dispc_cinfo.pck_div = pckd; + ctx->dispc_cinfo.lck = lck; + ctx->dispc_cinfo.pck = pck; + + if (dsi_vm_calc_blanking(ctx) == false) + return false; + +#ifdef PRINT_VERBOSE_VM_TIMINGS + print_dispc_vm("dispc", &ctx->dispc_vm); + print_dsi_vm("dsi ", &ctx->dsi_vm); + 
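	/*
	 * "req" below is the panel's requested timings, "act" an approximate
	 * DISPC-style view reconstructed from the DSI timings just computed
	 * (see print_dsi_dispc_vm); comparing the two shows how far the
	 * calculated blanking ends up from what the panel asked for.
	 */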
print_dispc_vm("req ", ctx->config->timings); + print_dsi_dispc_vm("act ", &ctx->dsi_vm); +#endif + + return true; +} + +static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc, + void *data) +{ + struct dsi_clk_calc_ctx *ctx = data; + unsigned long pck_max; + + ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; + ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; + + /* + * In burst mode we can let the dispc pck be arbitrarily high, but it + * limits our scaling abilities. So for now, don't aim too high. + */ + + if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE) + pck_max = ctx->req_pck_max + 10000000; + else + pck_max = ctx->req_pck_max; + + return dispc_div_calc(dispc, ctx->req_pck_min, pck_max, + dsi_vm_calc_dispc_cb, ctx); +} + +static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint, + unsigned long clkdco, void *data) +{ + struct dsi_clk_calc_ctx *ctx = data; + + ctx->dsi_cinfo.n = n; + ctx->dsi_cinfo.m = m; + ctx->dsi_cinfo.fint = fint; + ctx->dsi_cinfo.clkdco = clkdco; + + return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), + dsi_vm_calc_hsdiv_cb, ctx); +} + +static bool dsi_vm_calc(struct dsi_data *dsi, + const struct omap_dss_dsi_config *cfg, + struct dsi_clk_calc_ctx *ctx) +{ + const struct omap_video_timings *t = cfg->timings; + unsigned long clkin; + unsigned long pll_min; + unsigned long pll_max; + int ndl = dsi->num_lanes_used - 1; + int bitspp = dsi_get_pixel_size(cfg->pixel_format); + unsigned long byteclk_min; + + clkin = clk_get_rate(dsi->pll.clkin); + + memset(ctx, 0, sizeof(*ctx)); + ctx->dsidev = dsi->pdev; + ctx->pll = &dsi->pll; + ctx->config = cfg; + + /* these limits should come from the panel driver */ + ctx->req_pck_min = t->pixelclock - 1000; + ctx->req_pck_nom = t->pixelclock; + ctx->req_pck_max = t->pixelclock + 1000; + + byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8); + pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4); + + if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) { + pll_max = cfg->hs_clk_max * 4; + } else { + unsigned long byteclk_max; + byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp, + ndl * 8); + + pll_max = byteclk_max * 4 * 4; + } + + return dss_pll_calc(ctx->pll, clkin, + pll_min, pll_max, + dsi_vm_calc_pll_cb, ctx); +} + +static int dsi_set_config(struct omap_dss_device *dssdev, + const struct omap_dss_dsi_config *config) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dsi_clk_calc_ctx ctx; + bool ok; + int r; + + mutex_lock(&dsi->lock); + + dsi->pix_fmt = config->pixel_format; + dsi->mode = config->mode; + + if (config->mode == OMAP_DSS_DSI_VIDEO_MODE) + ok = dsi_vm_calc(dsi, config, &ctx); + else + ok = dsi_cm_calc(dsi, config, &ctx); + + if (!ok) { + DSSERR("failed to find suitable DSI clock settings\n"); + r = -EINVAL; + goto err; + } + + dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo); + + r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI], + config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo); + if (r) { + DSSERR("failed to find suitable DSI LP clock settings\n"); + goto err; + } + + dsi->user_dsi_cinfo = ctx.dsi_cinfo; + dsi->user_dispc_cinfo = ctx.dispc_cinfo; + + dsi->timings = ctx.dispc_vm; + dsi->vm_timings = ctx.dsi_vm; + + mutex_unlock(&dsi->lock); + + return 0; +err: + mutex_unlock(&dsi->lock); + + return r; +} + +/* + * Return a hardcoded channel for the DSI output. 
This should work for + * current use cases, but this can be later expanded to either resolve + * the channel in some more dynamic manner, or get the channel as a user + * parameter. + */ +static enum omap_channel dsi_get_channel(int module_id) +{ + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP24xx: + case OMAPDSS_VER_AM43xx: + DSSWARN("DSI not supported\n"); + return OMAP_DSS_CHANNEL_LCD; + + case OMAPDSS_VER_OMAP34xx_ES1: + case OMAPDSS_VER_OMAP34xx_ES3: + case OMAPDSS_VER_OMAP3630: + case OMAPDSS_VER_AM35xx: + return OMAP_DSS_CHANNEL_LCD; + + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + switch (module_id) { + case 0: + return OMAP_DSS_CHANNEL_LCD; + case 1: + return OMAP_DSS_CHANNEL_LCD2; + default: + DSSWARN("unsupported module id\n"); + return OMAP_DSS_CHANNEL_LCD; + } + + case OMAPDSS_VER_OMAP5: + switch (module_id) { + case 0: + return OMAP_DSS_CHANNEL_LCD; + case 1: + return OMAP_DSS_CHANNEL_LCD3; + default: + DSSWARN("unsupported module id\n"); + return OMAP_DSS_CHANNEL_LCD; + } + + default: + DSSWARN("unsupported DSS version\n"); + return OMAP_DSS_CHANNEL_LCD; + } +} + +static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int i; + + for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { + if (!dsi->vc[i].dssdev) { + dsi->vc[i].dssdev = dssdev; + *channel = i; + return 0; + } + } + + DSSERR("cannot get VC for display %s", dssdev->name); + return -ENOSPC; +} + +static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (vc_id < 0 || vc_id > 3) { + DSSERR("VC ID out of range\n"); + return -EINVAL; + } + + if (channel < 0 || channel > 3) { + DSSERR("Virtual Channel out of range\n"); + return -EINVAL; + } + + if (dsi->vc[channel].dssdev != dssdev) { + DSSERR("Virtual Channel not allocated to display %s\n", + dssdev->name); + return -EINVAL; + } + + dsi->vc[channel].vc_id = vc_id; + + return 0; +} + +static void dsi_release_vc(struct omap_dss_device *dssdev, int channel) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if ((channel >= 0 && channel <= 3) && + dsi->vc[channel].dssdev == dssdev) { + dsi->vc[channel].dssdev = NULL; + dsi->vc[channel].vc_id = 0; + } +} + + +static int dsi_get_clocks(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct clk *clk; + + clk = devm_clk_get(&dsidev->dev, "fck"); + if (IS_ERR(clk)) { + DSSERR("can't get fck\n"); + return PTR_ERR(clk); + } + + dsi->dss_clk = clk; + + return 0; +} + +static int dsi_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct omap_overlay_manager *mgr; + int r; + + r = dsi_regulator_init(dsidev); + if (r) + return r; + + mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); + if (!mgr) + return -ENODEV; + + r = dss_mgr_connect(mgr, dssdev); + if (r) + return r; + + r = omapdss_output_set_device(dssdev, dst); + if (r) { + DSSERR("failed to connect output to new device: %s\n", + dssdev->name); + dss_mgr_disconnect(mgr, dssdev); + return r; + } + + return 0; +} + +static void dsi_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) 
+{ + WARN_ON(dst != dssdev->dst); + + if (dst != dssdev->dst) + return; + + omapdss_output_unset_device(dssdev); + + if (dssdev->manager) + dss_mgr_disconnect(dssdev->manager, dssdev); +} + +static const struct omapdss_dsi_ops dsi_ops = { + .connect = dsi_connect, + .disconnect = dsi_disconnect, + + .bus_lock = dsi_bus_lock, + .bus_unlock = dsi_bus_unlock, + + .enable = dsi_display_enable, + .disable = dsi_display_disable, + + .enable_hs = dsi_vc_enable_hs, + + .configure_pins = dsi_configure_pins, + .set_config = dsi_set_config, + + .enable_video_output = dsi_enable_video_output, + .disable_video_output = dsi_disable_video_output, + + .update = dsi_update, + + .enable_te = dsi_enable_te, + + .request_vc = dsi_request_vc, + .set_vc_id = dsi_set_vc_id, + .release_vc = dsi_release_vc, + + .dcs_write = dsi_vc_dcs_write, + .dcs_write_nosync = dsi_vc_dcs_write_nosync, + .dcs_read = dsi_vc_dcs_read, + + .gen_write = dsi_vc_generic_write, + .gen_write_nosync = dsi_vc_generic_write_nosync, + .gen_read = dsi_vc_generic_read, + + .bta_sync = dsi_vc_send_bta_sync, + + .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size, +}; + +static void dsi_init_output(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_dss_device *out = &dsi->output; + + out->dev = &dsidev->dev; + out->id = dsi->module_id == 0 ? + OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; + + out->output_type = OMAP_DISPLAY_TYPE_DSI; + out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1"; + out->dispc_channel = dsi_get_channel(dsi->module_id); + out->ops.dsi = &dsi_ops; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void dsi_uninit_output(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_dss_device *out = &dsi->output; + + omapdss_unregister_output(out); +} + +static int dsi_probe_of(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct dsi_data *dsi = dsi_get_dsidrv_data(pdev); + struct property *prop; + u32 lane_arr[10]; + int len, num_pins; + int r, i; + struct device_node *ep; + struct omap_dsi_pin_config pin_cfg; + + ep = omapdss_of_get_first_endpoint(node); + if (!ep) + return 0; + + prop = of_find_property(ep, "lanes", &len); + if (prop == NULL) { + dev_err(&pdev->dev, "failed to find lane data\n"); + r = -EINVAL; + goto err; + } + + num_pins = len / sizeof(u32); + + if (num_pins < 4 || num_pins % 2 != 0 || + num_pins > dsi->num_lanes_supported * 2) { + dev_err(&pdev->dev, "bad number of lanes\n"); + r = -EINVAL; + goto err; + } + + r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins); + if (r) { + dev_err(&pdev->dev, "failed to read lane data\n"); + goto err; + } + + pin_cfg.num_pins = num_pins; + for (i = 0; i < num_pins; ++i) + pin_cfg.pins[i] = (int)lane_arr[i]; + + r = dsi_configure_pins(&dsi->output, &pin_cfg); + if (r) { + dev_err(&pdev->dev, "failed to configure pins"); + goto err; + } + + of_node_put(ep); + + return 0; + +err: + of_node_put(ep); + return r; +} + +static const struct dss_pll_ops dsi_pll_ops = { + .enable = dsi_pll_enable, + .disable = dsi_pll_disable, + .set_config = dss_pll_write_config_type_a, +}; + +static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { + .n_max = (1 << 7) - 1, + .m_max = (1 << 11) - 1, + .mX_max = (1 << 4) - 1, + .fint_min = 750000, + .fint_max = 2100000, + .clkdco_low = 1000000000, + .clkdco_max = 1800000000, + + .n_msb = 7, + .n_lsb = 1, + .m_msb = 18, + .m_lsb = 8, + + .mX_msb[0] = 22, + .mX_lsb[0] = 19, + 
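	/*
	 * mX[0] and mX[1] are the HSDIV divider fields used elsewhere in this
	 * file as mX[HSDIV_DISPC] and mX[HSDIV_DSI]; the bit positions here
	 * are the OMAP3 layout, the OMAP4/OMAP5 tables below use wider fields.
	 */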
.mX_msb[1] = 26, + .mX_lsb[1] = 23, + + .has_stopmode = true, + .has_freqsel = true, + .has_selfreqdco = false, + .has_refsel = false, +}; + +static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { + .n_max = (1 << 8) - 1, + .m_max = (1 << 12) - 1, + .mX_max = (1 << 5) - 1, + .fint_min = 500000, + .fint_max = 2500000, + .clkdco_low = 1000000000, + .clkdco_max = 1800000000, + + .n_msb = 8, + .n_lsb = 1, + .m_msb = 20, + .m_lsb = 9, + + .mX_msb[0] = 25, + .mX_lsb[0] = 21, + .mX_msb[1] = 30, + .mX_lsb[1] = 26, + + .has_stopmode = true, + .has_freqsel = false, + .has_selfreqdco = false, + .has_refsel = false, +}; + +static const struct dss_pll_hw dss_omap5_dsi_pll_hw = { + .n_max = (1 << 8) - 1, + .m_max = (1 << 12) - 1, + .mX_max = (1 << 5) - 1, + .fint_min = 150000, + .fint_max = 52000000, + .clkdco_low = 1000000000, + .clkdco_max = 1800000000, + + .n_msb = 8, + .n_lsb = 1, + .m_msb = 20, + .m_lsb = 9, + + .mX_msb[0] = 25, + .mX_lsb[0] = 21, + .mX_msb[1] = 30, + .mX_lsb[1] = 26, + + .has_stopmode = true, + .has_freqsel = false, + .has_selfreqdco = true, + .has_refsel = true, +}; + +static int dsi_init_pll_data(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dss_pll *pll = &dsi->pll; + struct clk *clk; + int r; + + clk = devm_clk_get(&dsidev->dev, "sys_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get sys_clk\n"); + return PTR_ERR(clk); + } + + pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1"; + pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2; + pll->clkin = clk; + pll->base = dsi->pll_base; + + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP34xx_ES1: + case OMAPDSS_VER_OMAP34xx_ES3: + case OMAPDSS_VER_OMAP3630: + case OMAPDSS_VER_AM35xx: + pll->hw = &dss_omap3_dsi_pll_hw; + break; + + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + pll->hw = &dss_omap4_dsi_pll_hw; + break; + + case OMAPDSS_VER_OMAP5: + pll->hw = &dss_omap5_dsi_pll_hw; + break; + + default: + return -ENODEV; + } + + pll->ops = &dsi_pll_ops; + + r = dss_pll_register(pll); + if (r) + return r; + + return 0; +} + +/* DSI1 HW IP initialisation */ +static int dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *dsidev = to_platform_device(dev); + u32 rev; + int r, i; + struct dsi_data *dsi; + struct resource *dsi_mem; + struct resource *res; + struct resource temp_res; + + dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL); + if (!dsi) + return -ENOMEM; + + dsi->pdev = dsidev; + dev_set_drvdata(&dsidev->dev, dsi); + + spin_lock_init(&dsi->irq_lock); + spin_lock_init(&dsi->errors_lock); + dsi->errors = 0; + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + spin_lock_init(&dsi->irq_stats_lock); + dsi->irq_stats.last_reset = jiffies; +#endif + + mutex_init(&dsi->lock); + sema_init(&dsi->bus_lock, 1); + + INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work, + dsi_framedone_timeout_work_callback); + +#ifdef DSI_CATCH_MISSING_TE + init_timer(&dsi->te_timer); + dsi->te_timer.function = dsi_te_timeout; + dsi->te_timer.data = 0; +#endif + + res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto"); + if (!res) { + res = platform_get_resource(dsidev, IORESOURCE_MEM, 0); + if (!res) { + DSSERR("can't get IORESOURCE_MEM DSI\n"); + return -EINVAL; + } + + temp_res.start = res->start; + temp_res.end = temp_res.start + DSI_PROTO_SZ - 1; + res = &temp_res; + } + + dsi_mem = res; + + dsi->proto_base = devm_ioremap(&dsidev->dev, res->start, + resource_size(res)); + if (!dsi->proto_base) 
{ + DSSERR("can't ioremap DSI protocol engine\n"); + return -ENOMEM; + } + + res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy"); + if (!res) { + res = platform_get_resource(dsidev, IORESOURCE_MEM, 0); + if (!res) { + DSSERR("can't get IORESOURCE_MEM DSI\n"); + return -EINVAL; + } + + temp_res.start = res->start + DSI_PHY_OFFSET; + temp_res.end = temp_res.start + DSI_PHY_SZ - 1; + res = &temp_res; + } + + dsi->phy_base = devm_ioremap(&dsidev->dev, res->start, + resource_size(res)); + if (!dsi->proto_base) { + DSSERR("can't ioremap DSI PHY\n"); + return -ENOMEM; + } + + res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll"); + if (!res) { + res = platform_get_resource(dsidev, IORESOURCE_MEM, 0); + if (!res) { + DSSERR("can't get IORESOURCE_MEM DSI\n"); + return -EINVAL; + } + + temp_res.start = res->start + DSI_PLL_OFFSET; + temp_res.end = temp_res.start + DSI_PLL_SZ - 1; + res = &temp_res; + } + + dsi->pll_base = devm_ioremap(&dsidev->dev, res->start, + resource_size(res)); + if (!dsi->proto_base) { + DSSERR("can't ioremap DSI PLL\n"); + return -ENOMEM; + } + + dsi->irq = platform_get_irq(dsi->pdev, 0); + if (dsi->irq < 0) { + DSSERR("platform_get_irq failed\n"); + return -ENODEV; + } + + r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler, + IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev); + if (r < 0) { + DSSERR("request_irq failed\n"); + return r; + } + + if (dsidev->dev.of_node) { + const struct of_device_id *match; + const struct dsi_module_id_data *d; + + match = of_match_node(dsi_of_match, dsidev->dev.of_node); + if (!match) { + DSSERR("unsupported DSI module\n"); + return -ENODEV; + } + + d = match->data; + + while (d->address != 0 && d->address != dsi_mem->start) + d++; + + if (d->address == 0) { + DSSERR("unsupported DSI module\n"); + return -ENODEV; + } + + dsi->module_id = d->id; + } else { + dsi->module_id = dsidev->id; + } + + /* DSI VCs initialization */ + for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { + dsi->vc[i].source = DSI_VC_SOURCE_L4; + dsi->vc[i].dssdev = NULL; + dsi->vc[i].vc_id = 0; + } + + r = dsi_get_clocks(dsidev); + if (r) + return r; + + dsi_init_pll_data(dsidev); + + pm_runtime_enable(&dsidev->dev); + + r = dsi_runtime_get(dsidev); + if (r) + goto err_runtime_get; + + rev = dsi_read_reg(dsidev, DSI_REVISION); + dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", + FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + /* DSI on OMAP3 doesn't have register DSI_GNQ, set number + * of data to 3 by default */ + if (dss_has_feature(FEAT_DSI_GNQ)) + /* NB_DATA_LANES */ + dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9); + else + dsi->num_lanes_supported = 3; + + dsi->line_buffer_size = dsi_get_line_buf_size(dsidev); + + dsi_init_output(dsidev); + + if (dsidev->dev.of_node) { + r = dsi_probe_of(dsidev); + if (r) { + DSSERR("Invalid DSI DT data\n"); + goto err_probe_of; + } + + r = of_platform_populate(dsidev->dev.of_node, NULL, NULL, + &dsidev->dev); + if (r) + DSSERR("Failed to populate DSI child devices: %d\n", r); + } + + dsi_runtime_put(dsidev); + + if (dsi->module_id == 0) + dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs); + else if (dsi->module_id == 1) + dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs); + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + if (dsi->module_id == 0) + dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs); + else if (dsi->module_id == 1) + dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs); +#endif + + return 0; + +err_probe_of: + dsi_uninit_output(dsidev); + dsi_runtime_put(dsidev); + 
+err_runtime_get: + pm_runtime_disable(&dsidev->dev); + return r; +} + +static void dsi_unbind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *dsidev = to_platform_device(dev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + of_platform_depopulate(&dsidev->dev); + + WARN_ON(dsi->scp_clk_refcount > 0); + + dss_pll_unregister(&dsi->pll); + + dsi_uninit_output(dsidev); + + pm_runtime_disable(&dsidev->dev); + + if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } +} + +static const struct component_ops dsi_component_ops = { + .bind = dsi_bind, + .unbind = dsi_unbind, +}; + +static int dsi_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dsi_component_ops); +} + +static int dsi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dsi_component_ops); + return 0; +} + +static int dsi_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dsi_data *dsi = dsi_get_dsidrv_data(pdev); + + dsi->is_enabled = false; + /* ensure the irq handler sees the is_enabled value */ + smp_wmb(); + /* wait for current handler to finish before turning the DSI off */ + synchronize_irq(dsi->irq); + + dispc_runtime_put(); + + return 0; +} + +static int dsi_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dsi_data *dsi = dsi_get_dsidrv_data(pdev); + int r; + + r = dispc_runtime_get(); + if (r) + return r; + + dsi->is_enabled = true; + /* ensure the irq handler sees the is_enabled value */ + smp_wmb(); + + return 0; +} + +static const struct dev_pm_ops dsi_pm_ops = { + .runtime_suspend = dsi_runtime_suspend, + .runtime_resume = dsi_runtime_resume, +}; + +static const struct dsi_module_id_data dsi_of_data_omap3[] = { + { .address = 0x4804fc00, .id = 0, }, + { }, +}; + +static const struct dsi_module_id_data dsi_of_data_omap4[] = { + { .address = 0x58004000, .id = 0, }, + { .address = 0x58005000, .id = 1, }, + { }, +}; + +static const struct dsi_module_id_data dsi_of_data_omap5[] = { + { .address = 0x58004000, .id = 0, }, + { .address = 0x58009000, .id = 1, }, + { }, +}; + +static const struct of_device_id dsi_of_match[] = { + { .compatible = "ti,omap3-dsi", .data = dsi_of_data_omap3, }, + { .compatible = "ti,omap4-dsi", .data = dsi_of_data_omap4, }, + { .compatible = "ti,omap5-dsi", .data = dsi_of_data_omap5, }, + {}, +}; + +static struct platform_driver omap_dsihw_driver = { + .probe = dsi_probe, + .remove = dsi_remove, + .driver = { + .name = "omapdss_dsi", + .pm = &dsi_pm_ops, + .of_match_table = dsi_of_match, + .suppress_bind_attrs = true, + }, +}; + +int __init dsi_init_platform_driver(void) +{ + return platform_driver_register(&omap_dsihw_driver); +} + +void dsi_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_dsihw_driver); +} diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c new file mode 100644 index 000000000000..bf407b6ba15c --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2013 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/seq_file.h> + +#include <video/omapdss.h> + +#include "dss.h" + +struct device_node * +omapdss_of_get_next_port(const struct device_node *parent, + struct device_node *prev) +{ + struct device_node *port = NULL; + + if (!parent) + return NULL; + + if (!prev) { + struct device_node *ports; + /* + * It's the first call, we have to find a port subnode + * within this node or within an optional 'ports' node. + */ + ports = of_get_child_by_name(parent, "ports"); + if (ports) + parent = ports; + + port = of_get_child_by_name(parent, "port"); + + /* release the 'ports' node */ + of_node_put(ports); + } else { + struct device_node *ports; + + ports = of_get_parent(prev); + if (!ports) + return NULL; + + do { + port = of_get_next_child(ports, prev); + if (!port) { + of_node_put(ports); + return NULL; + } + prev = port; + } while (of_node_cmp(port->name, "port") != 0); + + of_node_put(ports); + } + + return port; +} +EXPORT_SYMBOL_GPL(omapdss_of_get_next_port); + +struct device_node * +omapdss_of_get_next_endpoint(const struct device_node *parent, + struct device_node *prev) +{ + struct device_node *ep = NULL; + + if (!parent) + return NULL; + + do { + ep = of_get_next_child(parent, prev); + if (!ep) + return NULL; + prev = ep; + } while (of_node_cmp(ep->name, "endpoint") != 0); + + return ep; +} +EXPORT_SYMBOL_GPL(omapdss_of_get_next_endpoint); + +struct device_node *dss_of_port_get_parent_device(struct device_node *port) +{ + struct device_node *np; + int i; + + if (!port) + return NULL; + + np = of_get_parent(port); + + for (i = 0; i < 2 && np; ++i) { + struct property *prop; + + prop = of_find_property(np, "compatible", NULL); + + if (prop) + return np; + + np = of_get_next_parent(np); + } + + return NULL; +} + +u32 dss_of_port_get_port_number(struct device_node *port) +{ + int r; + u32 reg; + + r = of_property_read_u32(port, "reg", ®); + if (r) + reg = 0; + + return reg; +} + +static struct device_node *omapdss_of_get_remote_port(const struct device_node *node) +{ + struct device_node *np; + + np = of_parse_phandle(node, "remote-endpoint", 0); + if (!np) + return NULL; + + np = of_get_next_parent(np); + + return np; +} + +struct device_node * +omapdss_of_get_first_endpoint(const struct device_node *parent) +{ + struct device_node *port, *ep; + + port = omapdss_of_get_next_port(parent, NULL); + + if (!port) + return NULL; + + ep = omapdss_of_get_next_endpoint(port, NULL); + + of_node_put(port); + + return ep; +} +EXPORT_SYMBOL_GPL(omapdss_of_get_first_endpoint); + +struct omap_dss_device * +omapdss_of_find_source_for_first_ep(struct device_node *node) +{ + struct device_node *ep; + struct device_node *src_port; + struct omap_dss_device *src; + + ep = omapdss_of_get_first_endpoint(node); + if (!ep) + return ERR_PTR(-EINVAL); + + src_port = omapdss_of_get_remote_port(ep); + if (!src_port) { + of_node_put(ep); + return ERR_PTR(-EINVAL); + } + + of_node_put(ep); + + src = omap_dss_find_output_by_port_node(src_port); + + of_node_put(src_port); + + return src ? 
src : ERR_PTR(-EPROBE_DEFER); +} +EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c new file mode 100644 index 000000000000..f95ff319e68e --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -0,0 +1,1329 @@ +/* + * linux/drivers/video/omap2/dss/dss.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "DSS" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/export.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/seq_file.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/gfp.h> +#include <linux/sizes.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> +#include <linux/suspend.h> +#include <linux/component.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +#define DSS_SZ_REGS SZ_512 + +struct dss_reg { + u16 idx; +}; + +#define DSS_REG(idx) ((const struct dss_reg) { idx }) + +#define DSS_REVISION DSS_REG(0x0000) +#define DSS_SYSCONFIG DSS_REG(0x0010) +#define DSS_SYSSTATUS DSS_REG(0x0014) +#define DSS_CONTROL DSS_REG(0x0040) +#define DSS_SDI_CONTROL DSS_REG(0x0044) +#define DSS_PLL_CONTROL DSS_REG(0x0048) +#define DSS_SDI_STATUS DSS_REG(0x005C) + +#define REG_GET(idx, start, end) \ + FLD_GET(dss_read_reg(idx), start, end) + +#define REG_FLD_MOD(idx, val, start, end) \ + dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end)) + +struct dss_features { + u8 fck_div_max; + u8 dss_fck_multiplier; + const char *parent_clk_name; + const enum omap_display_type *ports; + int num_ports; + int (*dpi_select_source)(int port, enum omap_channel channel); +}; + +static struct { + struct platform_device *pdev; + void __iomem *base; + struct regmap *syscon_pll_ctrl; + u32 syscon_pll_ctrl_offset; + + struct clk *parent_clk; + struct clk *dss_clk; + unsigned long dss_clk_rate; + + unsigned long cache_req_pck; + unsigned long cache_prate; + struct dispc_clock_info cache_dispc_cinfo; + + enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; + enum omap_dss_clk_source dispc_clk_source; + enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; + + bool ctx_valid; + u32 ctx[DSS_SZ_REGS / sizeof(u32)]; + + const struct dss_features *feat; + + struct dss_pll *video1_pll; + struct dss_pll *video2_pll; +} dss; + +static const char * const dss_generic_clk_source_names[] = { + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", + [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = 
"DSI_PLL2_HSDIV_DSI", +}; + +static bool dss_initialized; + +bool omapdss_is_initialized(void) +{ + return dss_initialized; +} +EXPORT_SYMBOL(omapdss_is_initialized); + +static inline void dss_write_reg(const struct dss_reg idx, u32 val) +{ + __raw_writel(val, dss.base + idx.idx); +} + +static inline u32 dss_read_reg(const struct dss_reg idx) +{ + return __raw_readl(dss.base + idx.idx); +} + +#define SR(reg) \ + dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg) +#define RR(reg) \ + dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)]) + +static void dss_save_context(void) +{ + DSSDBG("dss_save_context\n"); + + SR(CONTROL); + + if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & + OMAP_DISPLAY_TYPE_SDI) { + SR(SDI_CONTROL); + SR(PLL_CONTROL); + } + + dss.ctx_valid = true; + + DSSDBG("context saved\n"); +} + +static void dss_restore_context(void) +{ + DSSDBG("dss_restore_context\n"); + + if (!dss.ctx_valid) + return; + + RR(CONTROL); + + if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & + OMAP_DISPLAY_TYPE_SDI) { + RR(SDI_CONTROL); + RR(PLL_CONTROL); + } + + DSSDBG("context restored\n"); +} + +#undef SR +#undef RR + +void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable) +{ + unsigned shift; + unsigned val; + + if (!dss.syscon_pll_ctrl) + return; + + val = !enable; + + switch (pll_id) { + case DSS_PLL_VIDEO1: + shift = 0; + break; + case DSS_PLL_VIDEO2: + shift = 1; + break; + case DSS_PLL_HDMI: + shift = 2; + break; + default: + DSSERR("illegal DSS PLL ID %d\n", pll_id); + return; + } + + regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, + 1 << shift, val << shift); +} + +void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, + enum omap_channel channel) +{ + unsigned shift, val; + + if (!dss.syscon_pll_ctrl) + return; + + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + shift = 3; + + switch (pll_id) { + case DSS_PLL_VIDEO1: + val = 0; break; + case DSS_PLL_HDMI: + val = 1; break; + default: + DSSERR("error in PLL mux config for LCD\n"); + return; + } + + break; + case OMAP_DSS_CHANNEL_LCD2: + shift = 5; + + switch (pll_id) { + case DSS_PLL_VIDEO1: + val = 0; break; + case DSS_PLL_VIDEO2: + val = 1; break; + case DSS_PLL_HDMI: + val = 2; break; + default: + DSSERR("error in PLL mux config for LCD2\n"); + return; + } + + break; + case OMAP_DSS_CHANNEL_LCD3: + shift = 7; + + switch (pll_id) { + case DSS_PLL_VIDEO1: + val = 1; break; + case DSS_PLL_VIDEO2: + val = 0; break; + case DSS_PLL_HDMI: + val = 2; break; + default: + DSSERR("error in PLL mux config for LCD3\n"); + return; + } + + break; + default: + DSSERR("error in PLL mux config\n"); + return; + } + + regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, + 0x3 << shift, val << shift); +} + +void dss_sdi_init(int datapairs) +{ + u32 l; + + BUG_ON(datapairs > 3 || datapairs < 1); + + l = dss_read_reg(DSS_SDI_CONTROL); + l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */ + l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */ + l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */ + dss_write_reg(DSS_SDI_CONTROL, l); + + l = dss_read_reg(DSS_PLL_CONTROL); + l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */ + l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */ + l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */ + dss_write_reg(DSS_PLL_CONTROL, l); +} + +int dss_sdi_enable(void) +{ + unsigned long timeout; + + dispc_pck_free_enable(1); + + /* Reset SDI PLL */ + REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */ + udelay(1); /* wait 2x PCLK */ + + /* Lock SDI PLL */ + 
REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */ + + /* Waiting for PLL lock request to complete */ + timeout = jiffies + msecs_to_jiffies(500); + while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) { + if (time_after_eq(jiffies, timeout)) { + DSSERR("PLL lock request timed out\n"); + goto err1; + } + } + + /* Clearing PLL_GO bit */ + REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28); + + /* Waiting for PLL to lock */ + timeout = jiffies + msecs_to_jiffies(500); + while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) { + if (time_after_eq(jiffies, timeout)) { + DSSERR("PLL lock timed out\n"); + goto err1; + } + } + + dispc_lcd_enable_signal(1); + + /* Waiting for SDI reset to complete */ + timeout = jiffies + msecs_to_jiffies(500); + while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) { + if (time_after_eq(jiffies, timeout)) { + DSSERR("SDI reset timed out\n"); + goto err2; + } + } + + return 0; + + err2: + dispc_lcd_enable_signal(0); + err1: + /* Reset SDI PLL */ + REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ + + dispc_pck_free_enable(0); + + return -ETIMEDOUT; +} + +void dss_sdi_disable(void) +{ + dispc_lcd_enable_signal(0); + + dispc_pck_free_enable(0); + + /* Reset SDI PLL */ + REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ +} + +const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) +{ + return dss_generic_clk_source_names[clk_src]; +} + +void dss_dump_clocks(struct seq_file *s) +{ + const char *fclk_name, *fclk_real_name; + unsigned long fclk_rate; + + if (dss_runtime_get()) + return; + + seq_printf(s, "- DSS -\n"); + + fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); + fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); + fclk_rate = clk_get_rate(dss.dss_clk); + + seq_printf(s, "%s (%s) = %lu\n", + fclk_name, fclk_real_name, + fclk_rate); + + dss_runtime_put(); +} + +static void dss_dump_regs(struct seq_file *s) +{ +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) + + if (dss_runtime_get()) + return; + + DUMPREG(DSS_REVISION); + DUMPREG(DSS_SYSCONFIG); + DUMPREG(DSS_SYSSTATUS); + DUMPREG(DSS_CONTROL); + + if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & + OMAP_DISPLAY_TYPE_SDI) { + DUMPREG(DSS_SDI_CONTROL); + DUMPREG(DSS_PLL_CONTROL); + DUMPREG(DSS_SDI_STATUS); + } + + dss_runtime_put(); +#undef DUMPREG +} + +static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) +{ + int b; + u8 start, end; + + switch (clk_src) { + case OMAP_DSS_CLK_SRC_FCK: + b = 0; + break; + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + b = 1; + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + b = 2; + break; + default: + BUG(); + return; + } + + dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end); + + REG_FLD_MOD(DSS_CONTROL, b, start, end); /* DISPC_CLK_SWITCH */ + + dss.dispc_clk_source = clk_src; +} + +void dss_select_dsi_clk_source(int dsi_module, + enum omap_dss_clk_source clk_src) +{ + int b, pos; + + switch (clk_src) { + case OMAP_DSS_CLK_SRC_FCK: + b = 0; + break; + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: + BUG_ON(dsi_module != 0); + b = 1; + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: + BUG_ON(dsi_module != 1); + b = 1; + break; + default: + BUG(); + return; + } + + pos = dsi_module == 0 ? 
1 : 10; + REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */ + + dss.dsi_clk_source[dsi_module] = clk_src; +} + +void dss_select_lcd_clk_source(enum omap_channel channel, + enum omap_dss_clk_source clk_src) +{ + int b, ix, pos; + + if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { + dss_select_dispc_clk_source(clk_src); + return; + } + + switch (clk_src) { + case OMAP_DSS_CLK_SRC_FCK: + b = 0; + break; + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); + b = 1; + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && + channel != OMAP_DSS_CHANNEL_LCD3); + b = 1; + break; + default: + BUG(); + return; + } + + pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19); + REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */ + + ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); + dss.lcd_clk_source[ix] = clk_src; +} + +enum omap_dss_clk_source dss_get_dispc_clk_source(void) +{ + return dss.dispc_clk_source; +} + +enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) +{ + return dss.dsi_clk_source[dsi_module]; +} + +enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) +{ + if (dss_has_feature(FEAT_LCD_CLK_SRC)) { + int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); + return dss.lcd_clk_source[ix]; + } else { + /* LCD_CLK source is the same as DISPC_FCLK source for + * OMAP2 and OMAP3 */ + return dss.dispc_clk_source; + } +} + +bool dss_div_calc(unsigned long pck, unsigned long fck_min, + dss_div_calc_func func, void *data) +{ + int fckd, fckd_start, fckd_stop; + unsigned long fck; + unsigned long fck_hw_max; + unsigned long fckd_hw_max; + unsigned long prate; + unsigned m; + + fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); + + if (dss.parent_clk == NULL) { + unsigned pckd; + + pckd = fck_hw_max / pck; + + fck = pck * pckd; + + fck = clk_round_rate(dss.dss_clk, fck); + + return func(fck, data); + } + + fckd_hw_max = dss.feat->fck_div_max; + + m = dss.feat->dss_fck_multiplier; + prate = clk_get_rate(dss.parent_clk); + + fck_min = fck_min ? 
fck_min : 1; + + fckd_start = min(prate * m / fck_min, fckd_hw_max); + fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul); + + for (fckd = fckd_start; fckd >= fckd_stop; --fckd) { + fck = DIV_ROUND_UP(prate, fckd) * m; + + if (func(fck, data)) + return true; + } + + return false; +} + +int dss_set_fck_rate(unsigned long rate) +{ + int r; + + DSSDBG("set fck to %lu\n", rate); + + r = clk_set_rate(dss.dss_clk, rate); + if (r) + return r; + + dss.dss_clk_rate = clk_get_rate(dss.dss_clk); + + WARN_ONCE(dss.dss_clk_rate != rate, + "clk rate mismatch: %lu != %lu", dss.dss_clk_rate, + rate); + + return 0; +} + +unsigned long dss_get_dispc_clk_rate(void) +{ + return dss.dss_clk_rate; +} + +static int dss_setup_default_clock(void) +{ + unsigned long max_dss_fck, prate; + unsigned long fck; + unsigned fck_div; + int r; + + max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); + + if (dss.parent_clk == NULL) { + fck = clk_round_rate(dss.dss_clk, max_dss_fck); + } else { + prate = clk_get_rate(dss.parent_clk); + + fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier, + max_dss_fck); + fck = DIV_ROUND_UP(prate, fck_div) * dss.feat->dss_fck_multiplier; + } + + r = dss_set_fck_rate(fck); + if (r) + return r; + + return 0; +} + +void dss_set_venc_output(enum omap_dss_venc_type type) +{ + int l = 0; + + if (type == OMAP_DSS_VENC_TYPE_COMPOSITE) + l = 0; + else if (type == OMAP_DSS_VENC_TYPE_SVIDEO) + l = 1; + else + BUG(); + + /* venc out selection. 0 = comp, 1 = svideo */ + REG_FLD_MOD(DSS_CONTROL, l, 6, 6); +} + +void dss_set_dac_pwrdn_bgz(bool enable) +{ + REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */ +} + +void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src) +{ + enum omap_display_type dp; + dp = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT); + + /* Complain about invalid selections */ + WARN_ON((src == DSS_VENC_TV_CLK) && !(dp & OMAP_DISPLAY_TYPE_VENC)); + WARN_ON((src == DSS_HDMI_M_PCLK) && !(dp & OMAP_DISPLAY_TYPE_HDMI)); + + /* Select only if we have options */ + if ((dp & OMAP_DISPLAY_TYPE_VENC) && (dp & OMAP_DISPLAY_TYPE_HDMI)) + REG_FLD_MOD(DSS_CONTROL, src, 15, 15); /* VENC_HDMI_SWITCH */ +} + +enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void) +{ + enum omap_display_type displays; + + displays = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT); + if ((displays & OMAP_DISPLAY_TYPE_HDMI) == 0) + return DSS_VENC_TV_CLK; + + if ((displays & OMAP_DISPLAY_TYPE_VENC) == 0) + return DSS_HDMI_M_PCLK; + + return REG_GET(DSS_CONTROL, 15, 15); +} + +static int dss_dpi_select_source_omap2_omap3(int port, enum omap_channel channel) +{ + if (channel != OMAP_DSS_CHANNEL_LCD) + return -EINVAL; + + return 0; +} + +static int dss_dpi_select_source_omap4(int port, enum omap_channel channel) +{ + int val; + + switch (channel) { + case OMAP_DSS_CHANNEL_LCD2: + val = 0; + break; + case OMAP_DSS_CHANNEL_DIGIT: + val = 1; + break; + default: + return -EINVAL; + } + + REG_FLD_MOD(DSS_CONTROL, val, 17, 17); + + return 0; +} + +static int dss_dpi_select_source_omap5(int port, enum omap_channel channel) +{ + int val; + + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + val = 1; + break; + case OMAP_DSS_CHANNEL_LCD2: + val = 2; + break; + case OMAP_DSS_CHANNEL_LCD3: + val = 3; + break; + case OMAP_DSS_CHANNEL_DIGIT: + val = 0; + break; + default: + return -EINVAL; + } + + REG_FLD_MOD(DSS_CONTROL, val, 17, 16); + + return 0; +} + +static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel) +{ + switch 
(port) { + case 0: + return dss_dpi_select_source_omap5(port, channel); + case 1: + if (channel != OMAP_DSS_CHANNEL_LCD2) + return -EINVAL; + break; + case 2: + if (channel != OMAP_DSS_CHANNEL_LCD3) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +int dss_dpi_select_source(int port, enum omap_channel channel) +{ + return dss.feat->dpi_select_source(port, channel); +} + +static int dss_get_clocks(void) +{ + struct clk *clk; + + clk = devm_clk_get(&dss.pdev->dev, "fck"); + if (IS_ERR(clk)) { + DSSERR("can't get clock fck\n"); + return PTR_ERR(clk); + } + + dss.dss_clk = clk; + + if (dss.feat->parent_clk_name) { + clk = clk_get(NULL, dss.feat->parent_clk_name); + if (IS_ERR(clk)) { + DSSERR("Failed to get %s\n", dss.feat->parent_clk_name); + return PTR_ERR(clk); + } + } else { + clk = NULL; + } + + dss.parent_clk = clk; + + return 0; +} + +static void dss_put_clocks(void) +{ + if (dss.parent_clk) + clk_put(dss.parent_clk); +} + +int dss_runtime_get(void) +{ + int r; + + DSSDBG("dss_runtime_get\n"); + + r = pm_runtime_get_sync(&dss.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +void dss_runtime_put(void) +{ + int r; + + DSSDBG("dss_runtime_put\n"); + + r = pm_runtime_put_sync(&dss.pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY); +} + +/* DEBUGFS */ +#if defined(CONFIG_OMAP2_DSS_DEBUGFS) +void dss_debug_dump_clocks(struct seq_file *s) +{ + dss_dump_clocks(s); + dispc_dump_clocks(s); +#ifdef CONFIG_OMAP2_DSS_DSI + dsi_dump_clocks(s); +#endif +} +#endif + + +static const enum omap_display_type omap2plus_ports[] = { + OMAP_DISPLAY_TYPE_DPI, +}; + +static const enum omap_display_type omap34xx_ports[] = { + OMAP_DISPLAY_TYPE_DPI, + OMAP_DISPLAY_TYPE_SDI, +}; + +static const enum omap_display_type dra7xx_ports[] = { + OMAP_DISPLAY_TYPE_DPI, + OMAP_DISPLAY_TYPE_DPI, + OMAP_DISPLAY_TYPE_DPI, +}; + +static const struct dss_features omap24xx_dss_feats = { + /* + * fck div max is really 16, but the divider range has gaps. The range + * from 1 to 6 has no gaps, so let's use that as a max. 
+ */ + .fck_div_max = 6, + .dss_fck_multiplier = 2, + .parent_clk_name = "core_ck", + .dpi_select_source = &dss_dpi_select_source_omap2_omap3, + .ports = omap2plus_ports, + .num_ports = ARRAY_SIZE(omap2plus_ports), +}; + +static const struct dss_features omap34xx_dss_feats = { + .fck_div_max = 16, + .dss_fck_multiplier = 2, + .parent_clk_name = "dpll4_ck", + .dpi_select_source = &dss_dpi_select_source_omap2_omap3, + .ports = omap34xx_ports, + .num_ports = ARRAY_SIZE(omap34xx_ports), +}; + +static const struct dss_features omap3630_dss_feats = { + .fck_div_max = 32, + .dss_fck_multiplier = 1, + .parent_clk_name = "dpll4_ck", + .dpi_select_source = &dss_dpi_select_source_omap2_omap3, + .ports = omap2plus_ports, + .num_ports = ARRAY_SIZE(omap2plus_ports), +}; + +static const struct dss_features omap44xx_dss_feats = { + .fck_div_max = 32, + .dss_fck_multiplier = 1, + .parent_clk_name = "dpll_per_x2_ck", + .dpi_select_source = &dss_dpi_select_source_omap4, + .ports = omap2plus_ports, + .num_ports = ARRAY_SIZE(omap2plus_ports), +}; + +static const struct dss_features omap54xx_dss_feats = { + .fck_div_max = 64, + .dss_fck_multiplier = 1, + .parent_clk_name = "dpll_per_x2_ck", + .dpi_select_source = &dss_dpi_select_source_omap5, + .ports = omap2plus_ports, + .num_ports = ARRAY_SIZE(omap2plus_ports), +}; + +static const struct dss_features am43xx_dss_feats = { + .fck_div_max = 0, + .dss_fck_multiplier = 0, + .parent_clk_name = NULL, + .dpi_select_source = &dss_dpi_select_source_omap2_omap3, + .ports = omap2plus_ports, + .num_ports = ARRAY_SIZE(omap2plus_ports), +}; + +static const struct dss_features dra7xx_dss_feats = { + .fck_div_max = 64, + .dss_fck_multiplier = 1, + .parent_clk_name = "dpll_per_x2_ck", + .dpi_select_source = &dss_dpi_select_source_dra7xx, + .ports = dra7xx_ports, + .num_ports = ARRAY_SIZE(dra7xx_ports), +}; + +static int dss_init_features(struct platform_device *pdev) +{ + const struct dss_features *src; + struct dss_features *dst; + + dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); + if (!dst) { + dev_err(&pdev->dev, "Failed to allocate local DSS Features\n"); + return -ENOMEM; + } + + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP24xx: + src = &omap24xx_dss_feats; + break; + + case OMAPDSS_VER_OMAP34xx_ES1: + case OMAPDSS_VER_OMAP34xx_ES3: + case OMAPDSS_VER_AM35xx: + src = &omap34xx_dss_feats; + break; + + case OMAPDSS_VER_OMAP3630: + src = &omap3630_dss_feats; + break; + + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + src = &omap44xx_dss_feats; + break; + + case OMAPDSS_VER_OMAP5: + src = &omap54xx_dss_feats; + break; + + case OMAPDSS_VER_AM43xx: + src = &am43xx_dss_feats; + break; + + case OMAPDSS_VER_DRA7xx: + src = &dra7xx_dss_feats; + break; + + default: + return -ENODEV; + } + + memcpy(dst, src, sizeof(*dst)); + dss.feat = dst; + + return 0; +} + +static int dss_init_ports(struct platform_device *pdev) +{ + struct device_node *parent = pdev->dev.of_node; + struct device_node *port; + int r; + + if (parent == NULL) + return 0; + + port = omapdss_of_get_next_port(parent, NULL); + if (!port) + return 0; + + if (dss.feat->num_ports == 0) + return 0; + + do { + enum omap_display_type port_type; + u32 reg; + + r = of_property_read_u32(port, "reg", ®); + if (r) + reg = 0; + + if (reg >= dss.feat->num_ports) + continue; + + port_type = dss.feat->ports[reg]; + + switch (port_type) { + case OMAP_DISPLAY_TYPE_DPI: + dpi_init_port(pdev, port); + break; + case OMAP_DISPLAY_TYPE_SDI: + sdi_init_port(pdev, port); + 
break; + default: + break; + } + } while ((port = omapdss_of_get_next_port(parent, port)) != NULL); + + return 0; +} + +static void dss_uninit_ports(struct platform_device *pdev) +{ + struct device_node *parent = pdev->dev.of_node; + struct device_node *port; + + if (parent == NULL) + return; + + port = omapdss_of_get_next_port(parent, NULL); + if (!port) + return; + + if (dss.feat->num_ports == 0) + return; + + do { + enum omap_display_type port_type; + u32 reg; + int r; + + r = of_property_read_u32(port, "reg", ®); + if (r) + reg = 0; + + if (reg >= dss.feat->num_ports) + continue; + + port_type = dss.feat->ports[reg]; + + switch (port_type) { + case OMAP_DISPLAY_TYPE_DPI: + dpi_uninit_port(port); + break; + case OMAP_DISPLAY_TYPE_SDI: + sdi_uninit_port(port); + break; + default: + break; + } + } while ((port = omapdss_of_get_next_port(parent, port)) != NULL); +} + +static int dss_video_pll_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct regulator *pll_regulator; + int r; + + if (!np) + return 0; + + if (of_property_read_bool(np, "syscon-pll-ctrl")) { + dss.syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np, + "syscon-pll-ctrl"); + if (IS_ERR(dss.syscon_pll_ctrl)) { + dev_err(&pdev->dev, + "failed to get syscon-pll-ctrl regmap\n"); + return PTR_ERR(dss.syscon_pll_ctrl); + } + + if (of_property_read_u32_index(np, "syscon-pll-ctrl", 1, + &dss.syscon_pll_ctrl_offset)) { + dev_err(&pdev->dev, + "failed to get syscon-pll-ctrl offset\n"); + return -EINVAL; + } + } + + pll_regulator = devm_regulator_get(&pdev->dev, "vdda_video"); + if (IS_ERR(pll_regulator)) { + r = PTR_ERR(pll_regulator); + + switch (r) { + case -ENOENT: + pll_regulator = NULL; + break; + + case -EPROBE_DEFER: + return -EPROBE_DEFER; + + default: + DSSERR("can't get DPLL VDDA regulator\n"); + return r; + } + } + + if (of_property_match_string(np, "reg-names", "pll1") >= 0) { + dss.video1_pll = dss_video_pll_init(pdev, 0, pll_regulator); + if (IS_ERR(dss.video1_pll)) + return PTR_ERR(dss.video1_pll); + } + + if (of_property_match_string(np, "reg-names", "pll2") >= 0) { + dss.video2_pll = dss_video_pll_init(pdev, 1, pll_regulator); + if (IS_ERR(dss.video2_pll)) { + dss_video_pll_uninit(dss.video1_pll); + return PTR_ERR(dss.video2_pll); + } + } + + return 0; +} + +/* DSS HW IP initialisation */ +static int dss_bind(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct resource *dss_mem; + u32 rev; + int r; + + dss.pdev = pdev; + + r = dss_init_features(dss.pdev); + if (r) + return r; + + dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); + if (!dss_mem) { + DSSERR("can't get IORESOURCE_MEM DSS\n"); + return -EINVAL; + } + + dss.base = devm_ioremap(&pdev->dev, dss_mem->start, + resource_size(dss_mem)); + if (!dss.base) { + DSSERR("can't ioremap DSS\n"); + return -ENOMEM; + } + + r = dss_get_clocks(); + if (r) + return r; + + r = dss_setup_default_clock(); + if (r) + goto err_setup_clocks; + + r = dss_video_pll_probe(pdev); + if (r) + goto err_pll_init; + + r = dss_init_ports(pdev); + if (r) + goto err_init_ports; + + pm_runtime_enable(&pdev->dev); + + r = dss_runtime_get(); + if (r) + goto err_runtime_get; + + dss.dss_clk_rate = clk_get_rate(dss.dss_clk); + + /* Select DPLL */ + REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); + + dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); + +#ifdef CONFIG_OMAP2_DSS_VENC + REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ + REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ + 
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ +#endif + dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; + dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; + + rev = dss_read_reg(DSS_REVISION); + printk(KERN_INFO "OMAP DSS rev %d.%d\n", + FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + dss_runtime_put(); + + r = component_bind_all(&pdev->dev, NULL); + if (r) + goto err_component; + + dss_debugfs_create_file("dss", dss_dump_regs); + + pm_set_vt_switch(0); + + dss_initialized = true; + + return 0; + +err_component: +err_runtime_get: + pm_runtime_disable(&pdev->dev); + dss_uninit_ports(pdev); +err_init_ports: + if (dss.video1_pll) + dss_video_pll_uninit(dss.video1_pll); + + if (dss.video2_pll) + dss_video_pll_uninit(dss.video2_pll); +err_pll_init: +err_setup_clocks: + dss_put_clocks(); + return r; +} + +static void dss_unbind(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + dss_initialized = false; + + component_unbind_all(&pdev->dev, NULL); + + if (dss.video1_pll) + dss_video_pll_uninit(dss.video1_pll); + + if (dss.video2_pll) + dss_video_pll_uninit(dss.video2_pll); + + dss_uninit_ports(pdev); + + pm_runtime_disable(&pdev->dev); + + dss_put_clocks(); +} + +static const struct component_master_ops dss_component_ops = { + .bind = dss_bind, + .unbind = dss_unbind, +}; + +static int dss_component_compare(struct device *dev, void *data) +{ + struct device *child = data; + return dev == child; +} + +static int dss_add_child_component(struct device *dev, void *data) +{ + struct component_match **match = data; + + /* + * HACK + * We don't have a working driver for rfbi, so skip it here always. + * Otherwise dss will never get probed successfully, as it will wait + * for rfbi to get probed. + */ + if (strstr(dev_name(dev), "rfbi")) + return 0; + + component_match_add(dev->parent, match, dss_component_compare, dev); + + return 0; +} + +static int dss_probe(struct platform_device *pdev) +{ + struct component_match *match = NULL; + int r; + + /* add all the child devices as components */ + device_for_each_child(&pdev->dev, &match, dss_add_child_component); + + r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); + if (r) + return r; + + return 0; +} + +static int dss_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &dss_component_ops); + return 0; +} + +static int dss_runtime_suspend(struct device *dev) +{ + dss_save_context(); + dss_set_min_bus_tput(dev, 0); + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int dss_runtime_resume(struct device *dev) +{ + int r; + + pinctrl_pm_select_default_state(dev); + + /* + * Set an arbitrarily high tput request to ensure OPP100. + * What we should really do is to make a request to stay in OPP100, + * without any tput requirements, but that is not currently possible + * via the PM layer. 
+ */ + + r = dss_set_min_bus_tput(dev, 1000000000); + if (r) + return r; + + dss_restore_context(); + return 0; +} + +static const struct dev_pm_ops dss_pm_ops = { + .runtime_suspend = dss_runtime_suspend, + .runtime_resume = dss_runtime_resume, +}; + +static const struct of_device_id dss_of_match[] = { + { .compatible = "ti,omap2-dss", }, + { .compatible = "ti,omap3-dss", }, + { .compatible = "ti,omap4-dss", }, + { .compatible = "ti,omap5-dss", }, + { .compatible = "ti,dra7-dss", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, dss_of_match); + +static struct platform_driver omap_dsshw_driver = { + .probe = dss_probe, + .remove = dss_remove, + .driver = { + .name = "omapdss_dss", + .pm = &dss_pm_ops, + .of_match_table = dss_of_match, + .suppress_bind_attrs = true, + }, +}; + +int __init dss_init_platform_driver(void) +{ + return platform_driver_register(&omap_dsshw_driver); +} + +void dss_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_dsshw_driver); +} diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h new file mode 100644 index 000000000000..9a6453235585 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -0,0 +1,468 @@ +/* + * linux/drivers/video/omap2/dss/dss.h + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __OMAP2_DSS_H +#define __OMAP2_DSS_H + +#include <linux/interrupt.h> + +#ifdef pr_fmt +#undef pr_fmt +#endif + +#ifdef DSS_SUBSYS_NAME +#define pr_fmt(fmt) DSS_SUBSYS_NAME ": " fmt +#else +#define pr_fmt(fmt) fmt +#endif + +#define DSSDBG(format, ...) \ + pr_debug(format, ## __VA_ARGS__) + +#ifdef DSS_SUBSYS_NAME +#define DSSERR(format, ...) \ + printk(KERN_ERR "omapdss " DSS_SUBSYS_NAME " error: " format, \ + ## __VA_ARGS__) +#else +#define DSSERR(format, ...) \ + printk(KERN_ERR "omapdss error: " format, ## __VA_ARGS__) +#endif + +#ifdef DSS_SUBSYS_NAME +#define DSSINFO(format, ...) \ + printk(KERN_INFO "omapdss " DSS_SUBSYS_NAME ": " format, \ + ## __VA_ARGS__) +#else +#define DSSINFO(format, ...) \ + printk(KERN_INFO "omapdss: " format, ## __VA_ARGS__) +#endif + +#ifdef DSS_SUBSYS_NAME +#define DSSWARN(format, ...) \ + printk(KERN_WARNING "omapdss " DSS_SUBSYS_NAME ": " format, \ + ## __VA_ARGS__) +#else +#define DSSWARN(format, ...) \ + printk(KERN_WARNING "omapdss: " format, ## __VA_ARGS__) +#endif + +/* OMAP TRM gives bitfields as start:end, where start is the higher bit + number. 
For example 7:0 */ +#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) +#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) +#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end)) +#define FLD_MOD(orig, val, start, end) \ + (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) + +enum dss_io_pad_mode { + DSS_IO_PAD_MODE_RESET, + DSS_IO_PAD_MODE_RFBI, + DSS_IO_PAD_MODE_BYPASS, +}; + +enum dss_hdmi_venc_clk_source_select { + DSS_VENC_TV_CLK = 0, + DSS_HDMI_M_PCLK = 1, +}; + +enum dss_dsi_content_type { + DSS_DSI_CONTENT_DCS, + DSS_DSI_CONTENT_GENERIC, +}; + +enum dss_writeback_channel { + DSS_WB_LCD1_MGR = 0, + DSS_WB_LCD2_MGR = 1, + DSS_WB_TV_MGR = 2, + DSS_WB_OVL0 = 3, + DSS_WB_OVL1 = 4, + DSS_WB_OVL2 = 5, + DSS_WB_OVL3 = 6, + DSS_WB_LCD3_MGR = 7, +}; + +enum dss_pll_id { + DSS_PLL_DSI1, + DSS_PLL_DSI2, + DSS_PLL_HDMI, + DSS_PLL_VIDEO1, + DSS_PLL_VIDEO2, +}; + +struct dss_pll; + +#define DSS_PLL_MAX_HSDIVS 4 + +/* + * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7. + * Type-B PLLs: clkout[0] refers to m2. + */ +struct dss_pll_clock_info { + /* rates that we get with dividers below */ + unsigned long fint; + unsigned long clkdco; + unsigned long clkout[DSS_PLL_MAX_HSDIVS]; + + /* dividers */ + u16 n; + u16 m; + u32 mf; + u16 mX[DSS_PLL_MAX_HSDIVS]; + u16 sd; +}; + +struct dss_pll_ops { + int (*enable)(struct dss_pll *pll); + void (*disable)(struct dss_pll *pll); + int (*set_config)(struct dss_pll *pll, + const struct dss_pll_clock_info *cinfo); +}; + +struct dss_pll_hw { + unsigned n_max; + unsigned m_min; + unsigned m_max; + unsigned mX_max; + + unsigned long fint_min, fint_max; + unsigned long clkdco_min, clkdco_low, clkdco_max; + + u8 n_msb, n_lsb; + u8 m_msb, m_lsb; + u8 mX_msb[DSS_PLL_MAX_HSDIVS], mX_lsb[DSS_PLL_MAX_HSDIVS]; + + bool has_stopmode; + bool has_freqsel; + bool has_selfreqdco; + bool has_refsel; +}; + +struct dss_pll { + const char *name; + enum dss_pll_id id; + + struct clk *clkin; + struct regulator *regulator; + + void __iomem *base; + + const struct dss_pll_hw *hw; + + const struct dss_pll_ops *ops; + + struct dss_pll_clock_info cinfo; +}; + +struct dispc_clock_info { + /* rates that we get with dividers below */ + unsigned long lck; + unsigned long pck; + + /* dividers */ + u16 lck_div; + u16 pck_div; +}; + +struct dss_lcd_mgr_config { + enum dss_io_pad_mode io_pad_mode; + + bool stallmode; + bool fifohandcheck; + + struct dispc_clock_info clock_info; + + int video_port_width; + + int lcden_sig_polarity; +}; + +struct seq_file; +struct platform_device; + +/* core */ +struct platform_device *dss_get_core_pdev(void); +int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask); +void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask); +int dss_set_min_bus_tput(struct device *dev, unsigned long tput); +int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)); + +/* display */ +int dss_suspend_all_devices(void); +int dss_resume_all_devices(void); +void dss_disable_all_devices(void); + +int display_init_sysfs(struct platform_device *pdev); +void display_uninit_sysfs(struct platform_device *pdev); + +/* manager */ +int dss_init_overlay_managers(void); +void dss_uninit_overlay_managers(void); +int dss_init_overlay_managers_sysfs(struct platform_device *pdev); +void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev); +int dss_mgr_simple_check(struct omap_overlay_manager *mgr, + const struct omap_overlay_manager_info *info); +int 
dss_mgr_check_timings(struct omap_overlay_manager *mgr, + const struct omap_video_timings *timings); +int dss_mgr_check(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info, + const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *config, + struct omap_overlay_info **overlay_infos); + +static inline bool dss_mgr_is_lcd(enum omap_channel id) +{ + if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 || + id == OMAP_DSS_CHANNEL_LCD3) + return true; + else + return false; +} + +int dss_manager_kobj_init(struct omap_overlay_manager *mgr, + struct platform_device *pdev); +void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr); + +/* overlay */ +void dss_init_overlays(struct platform_device *pdev); +void dss_uninit_overlays(struct platform_device *pdev); +void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr); +int dss_ovl_simple_check(struct omap_overlay *ovl, + const struct omap_overlay_info *info); +int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, + const struct omap_video_timings *mgr_timings); +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode); +int dss_overlay_kobj_init(struct omap_overlay *ovl, + struct platform_device *pdev); +void dss_overlay_kobj_uninit(struct omap_overlay *ovl); + +/* DSS */ +int dss_init_platform_driver(void) __init; +void dss_uninit_platform_driver(void); + +int dss_runtime_get(void); +void dss_runtime_put(void); + +unsigned long dss_get_dispc_clk_rate(void); +int dss_dpi_select_source(int port, enum omap_channel channel); +void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); +enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); +const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); +void dss_dump_clocks(struct seq_file *s); + +/* DSS VIDEO PLL */ +struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id, + struct regulator *regulator); +void dss_video_pll_uninit(struct dss_pll *pll); + +/* dss-of */ +struct device_node *dss_of_port_get_parent_device(struct device_node *port); +u32 dss_of_port_get_port_number(struct device_node *port); + +#if defined(CONFIG_OMAP2_DSS_DEBUGFS) +void dss_debug_dump_clocks(struct seq_file *s); +#endif + +void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable); +void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, + enum omap_channel channel); + +void dss_sdi_init(int datapairs); +int dss_sdi_enable(void); +void dss_sdi_disable(void); + +void dss_select_dsi_clk_source(int dsi_module, + enum omap_dss_clk_source clk_src); +void dss_select_lcd_clk_source(enum omap_channel channel, + enum omap_dss_clk_source clk_src); +enum omap_dss_clk_source dss_get_dispc_clk_source(void); +enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module); +enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); + +void dss_set_venc_output(enum omap_dss_venc_type type); +void dss_set_dac_pwrdn_bgz(bool enable); + +int dss_set_fck_rate(unsigned long rate); + +typedef bool (*dss_div_calc_func)(unsigned long fck, void *data); +bool dss_div_calc(unsigned long pck, unsigned long fck_min, + dss_div_calc_func func, void *data); + +/* SDI */ +int sdi_init_platform_driver(void) __init; +void sdi_uninit_platform_driver(void); + +#ifdef CONFIG_OMAP2_DSS_SDI +int sdi_init_port(struct platform_device *pdev, struct device_node *port); +void sdi_uninit_port(struct device_node *port); +#else +static inline int 
sdi_init_port(struct platform_device *pdev, + struct device_node *port) +{ + return 0; +} +static inline void sdi_uninit_port(struct device_node *port) +{ +} +#endif + +/* DSI */ + +#ifdef CONFIG_OMAP2_DSS_DSI + +struct dentry; +struct file_operations; + +int dsi_init_platform_driver(void) __init; +void dsi_uninit_platform_driver(void); + +void dsi_dump_clocks(struct seq_file *s); + +void dsi_irq_handler(void); +u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); + +#else +static inline u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt) +{ + WARN(1, "%s: DSI not compiled in, returning pixel_size as 0\n", + __func__); + return 0; +} +#endif + +/* DPI */ +int dpi_init_platform_driver(void) __init; +void dpi_uninit_platform_driver(void); + +#ifdef CONFIG_OMAP2_DSS_DPI +int dpi_init_port(struct platform_device *pdev, struct device_node *port); +void dpi_uninit_port(struct device_node *port); +#else +static inline int dpi_init_port(struct platform_device *pdev, + struct device_node *port) +{ + return 0; +} +static inline void dpi_uninit_port(struct device_node *port) +{ +} +#endif + +/* DISPC */ +int dispc_init_platform_driver(void) __init; +void dispc_uninit_platform_driver(void); +void dispc_dump_clocks(struct seq_file *s); + +void dispc_enable_sidle(void); +void dispc_disable_sidle(void); + +void dispc_lcd_enable_signal(bool enable); +void dispc_pck_free_enable(bool enable); +void dispc_enable_fifomerge(bool enable); +void dispc_enable_gamma_table(bool enable); + +typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck, + unsigned long pck, void *data); +bool dispc_div_calc(unsigned long dispc, + unsigned long pck_min, unsigned long pck_max, + dispc_div_calc_func func, void *data); + +bool dispc_mgr_timings_ok(enum omap_channel channel, + const struct omap_video_timings *timings); +int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, + struct dispc_clock_info *cinfo); + + +void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high); +void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, + u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, + bool manual_update); + +void dispc_mgr_set_clock_div(enum omap_channel channel, + const struct dispc_clock_info *cinfo); +int dispc_mgr_get_clock_div(enum omap_channel channel, + struct dispc_clock_info *cinfo); +void dispc_set_tv_pclk(unsigned long pclk); + +u32 dispc_wb_get_framedone_irq(void); +bool dispc_wb_go_busy(void); +void dispc_wb_go(void); +void dispc_wb_enable(bool enable); +bool dispc_wb_is_enabled(void); +void dispc_wb_set_channel_in(enum dss_writeback_channel channel); +int dispc_wb_setup(const struct omap_dss_writeback_info *wi, + bool mem_to_mem, const struct omap_video_timings *timings); + +/* VENC */ +int venc_init_platform_driver(void) __init; +void venc_uninit_platform_driver(void); + +/* HDMI */ +int hdmi4_init_platform_driver(void) __init; +void hdmi4_uninit_platform_driver(void); + +int hdmi5_init_platform_driver(void) __init; +void hdmi5_uninit_platform_driver(void); + +/* RFBI */ +int rfbi_init_platform_driver(void) __init; +void rfbi_uninit_platform_driver(void); + + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS +static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr) +{ + int b; + for (b = 0; b < 32; ++b) { + if (irqstatus & (1 << b)) + irq_arr[b]++; + } +} +#endif + +/* PLL */ +typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint, + unsigned long clkdco, void *data); +typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc, 
+ void *data); + +int dss_pll_register(struct dss_pll *pll); +void dss_pll_unregister(struct dss_pll *pll); +struct dss_pll *dss_pll_find(const char *name); +int dss_pll_enable(struct dss_pll *pll); +void dss_pll_disable(struct dss_pll *pll); +int dss_pll_set_config(struct dss_pll *pll, + const struct dss_pll_clock_info *cinfo); + +bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, + unsigned long out_min, unsigned long out_max, + dss_hsdiv_calc_func func, void *data); +bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, + unsigned long pll_min, unsigned long pll_max, + dss_pll_calc_func func, void *data); +int dss_pll_write_config_type_a(struct dss_pll *pll, + const struct dss_pll_clock_info *cinfo); +int dss_pll_write_config_type_b(struct dss_pll *pll, + const struct dss_pll_clock_info *cinfo); +int dss_pll_wait_reset_done(struct dss_pll *pll); + +#endif diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c new file mode 100644 index 000000000000..c886a2927f73 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c @@ -0,0 +1,951 @@ +/* + * linux/drivers/video/omap2/dss/dss_features.c + * + * Copyright (C) 2010 Texas Instruments + * Author: Archit Taneja <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/slab.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +/* Defines a generic omap register field */ +struct dss_reg_field { + u8 start, end; +}; + +struct dss_param_range { + int min, max; +}; + +struct omap_dss_features { + const struct dss_reg_field *reg_fields; + const int num_reg_fields; + + const enum dss_feat_id *features; + const int num_features; + + const int num_mgrs; + const int num_ovls; + const enum omap_display_type *supported_displays; + const enum omap_dss_output_id *supported_outputs; + const enum omap_color_mode *supported_color_modes; + const enum omap_overlay_caps *overlay_caps; + const char * const *clksrc_names; + const struct dss_param_range *dss_params; + + const enum omap_dss_rotation_type supported_rotation_types; + + const u32 buffer_size_unit; + const u32 burst_size_unit; +}; + +/* This struct is assigned to one of the below during initialization */ +static const struct omap_dss_features *omap_current_dss_features; + +static const struct dss_reg_field omap2_dss_reg_fields[] = { + [FEAT_REG_FIRHINC] = { 11, 0 }, + [FEAT_REG_FIRVINC] = { 27, 16 }, + [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 }, + [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 }, + [FEAT_REG_FIFOSIZE] = { 8, 0 }, + [FEAT_REG_HORIZONTALACCU] = { 9, 0 }, + [FEAT_REG_VERTICALACCU] = { 25, 16 }, + [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 }, +}; + +static const struct dss_reg_field omap3_dss_reg_fields[] = { + [FEAT_REG_FIRHINC] = { 12, 0 }, + [FEAT_REG_FIRVINC] = { 28, 16 }, + [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 }, + [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 }, + [FEAT_REG_FIFOSIZE] = { 10, 0 }, + [FEAT_REG_HORIZONTALACCU] = { 9, 0 }, + [FEAT_REG_VERTICALACCU] = { 25, 16 }, + [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 }, +}; + +static const struct dss_reg_field am43xx_dss_reg_fields[] = { + [FEAT_REG_FIRHINC] = { 12, 0 }, + [FEAT_REG_FIRVINC] = { 28, 16 }, + [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 }, + [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 }, + [FEAT_REG_FIFOSIZE] = { 10, 0 }, + [FEAT_REG_HORIZONTALACCU] = { 9, 0 }, + [FEAT_REG_VERTICALACCU] = { 25, 16 }, + [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 }, +}; + +static const struct dss_reg_field omap4_dss_reg_fields[] = { + [FEAT_REG_FIRHINC] = { 12, 0 }, + [FEAT_REG_FIRVINC] = { 28, 16 }, + [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 }, + [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 }, + [FEAT_REG_FIFOSIZE] = { 15, 0 }, + [FEAT_REG_HORIZONTALACCU] = { 10, 0 }, + [FEAT_REG_VERTICALACCU] = { 26, 16 }, + [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 }, +}; + +static const struct dss_reg_field omap5_dss_reg_fields[] = { + [FEAT_REG_FIRHINC] = { 12, 0 }, + [FEAT_REG_FIRVINC] = { 28, 16 }, + [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 }, + [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 }, + [FEAT_REG_FIFOSIZE] = { 15, 0 }, + [FEAT_REG_HORIZONTALACCU] = { 10, 0 }, + [FEAT_REG_VERTICALACCU] = { 26, 16 }, + [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 7 }, +}; + +static const enum omap_display_type omap2_dss_supported_displays[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DISPLAY_TYPE_VENC, +}; + +static const enum omap_display_type omap3430_dss_supported_displays[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI | + OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DISPLAY_TYPE_VENC, +}; + +static const enum omap_display_type 
omap3630_dss_supported_displays[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI | + OMAP_DISPLAY_TYPE_DSI, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DISPLAY_TYPE_VENC, +}; + +static const enum omap_display_type am43xx_dss_supported_displays[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI, +}; + +static const enum omap_display_type omap4_dss_supported_displays[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DISPLAY_TYPE_VENC | OMAP_DISPLAY_TYPE_HDMI, + + /* OMAP_DSS_CHANNEL_LCD2 */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI | + OMAP_DISPLAY_TYPE_DSI, +}; + +static const enum omap_display_type omap5_dss_supported_displays[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI | + OMAP_DISPLAY_TYPE_DSI, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DISPLAY_TYPE_HDMI | OMAP_DISPLAY_TYPE_DPI, + + /* OMAP_DSS_CHANNEL_LCD2 */ + OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI | + OMAP_DISPLAY_TYPE_DSI, +}; + +static const enum omap_dss_output_id omap2_dss_supported_outputs[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DSS_OUTPUT_VENC, +}; + +static const enum omap_dss_output_id omap3430_dss_supported_outputs[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | + OMAP_DSS_OUTPUT_SDI | OMAP_DSS_OUTPUT_DSI1, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DSS_OUTPUT_VENC, +}; + +static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | + OMAP_DSS_OUTPUT_DSI1, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DSS_OUTPUT_VENC, +}; + +static const enum omap_dss_output_id am43xx_dss_supported_outputs[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI, +}; + +static const enum omap_dss_output_id omap4_dss_supported_outputs[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI, + + /* OMAP_DSS_CHANNEL_LCD2 */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | + OMAP_DSS_OUTPUT_DSI2, +}; + +static const enum omap_dss_output_id omap5_dss_supported_outputs[] = { + /* OMAP_DSS_CHANNEL_LCD */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | + OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2, + + /* OMAP_DSS_CHANNEL_DIGIT */ + OMAP_DSS_OUTPUT_HDMI, + + /* OMAP_DSS_CHANNEL_LCD2 */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | + OMAP_DSS_OUTPUT_DSI1, + + /* OMAP_DSS_CHANNEL_LCD3 */ + OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | + OMAP_DSS_OUTPUT_DSI2, +}; + +static const enum omap_color_mode omap2_dss_supported_color_modes[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | + OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 | + OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 | + OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 | + OMAP_DSS_COLOR_UYVY, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 | + OMAP_DSS_COLOR_UYVY, +}; + +static const enum omap_color_mode omap3_dss_supported_color_modes[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | + OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 | + OMAP_DSS_COLOR_RGB12U | 
OMAP_DSS_COLOR_ARGB16 | + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P | + OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 | + OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 | + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 | + OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32, +}; + +static const enum omap_color_mode omap4_dss_supported_color_modes[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | + OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 | + OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 | + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32 | + OMAP_DSS_COLOR_ARGB16_1555 | OMAP_DSS_COLOR_RGBX16 | + OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_XRGB16_1555, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U | + OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 | + OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY | + OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 | + OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 | + OMAP_DSS_COLOR_RGBX32, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U | + OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 | + OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY | + OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 | + OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 | + OMAP_DSS_COLOR_RGBX32, + + /* OMAP_DSS_VIDEO3 */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U | + OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 | + OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY | + OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 | + OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 | + OMAP_DSS_COLOR_RGBX32, + + /* OMAP_DSS_WB */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U | + OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 | + OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY | + OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 | + OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 | + OMAP_DSS_COLOR_RGBX32, +}; + +static const enum omap_overlay_caps omap2_dss_overlay_caps[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS | + OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS | + OMAP_DSS_OVL_CAP_REPLICATION, +}; + +static const enum omap_overlay_caps omap3430_dss_overlay_caps[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_POS | + OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS | + OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | + OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, +}; + +static const enum 
omap_overlay_caps omap3630_dss_overlay_caps[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | + OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS | + OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | + OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_POS | + OMAP_DSS_OVL_CAP_REPLICATION, +}; + +static const enum omap_overlay_caps omap4_dss_overlay_caps[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | + OMAP_DSS_OVL_CAP_ZORDER | OMAP_DSS_OVL_CAP_POS | + OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | + OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER | + OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | + OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER | + OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, + + /* OMAP_DSS_VIDEO3 */ + OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | + OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER | + OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, +}; + +static const char * const omap2_dss_clk_source_names[] = { + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", + [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1", +}; + +static const char * const omap3_dss_clk_source_names[] = { + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", + [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", +}; + +static const char * const omap4_dss_clk_source_names[] = { + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2", + [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2", +}; + +static const char * const omap5_dss_clk_source_names[] = { + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2", + [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2", +}; + +static const struct dss_param_range omap2_dss_param_range[] = { + [FEAT_PARAM_DSS_FCK] = { 0, 133000000 }, + [FEAT_PARAM_DSS_PCD] = { 2, 255 }, + [FEAT_PARAM_DOWNSCALE] = { 1, 2 }, + /* + * Assuming the line width buffer to be 768 pixels as OMAP2 DISPC + * scaler cannot scale a image with width more than 768. 
+ */ + [FEAT_PARAM_LINEWIDTH] = { 1, 768 }, +}; + +static const struct dss_param_range omap3_dss_param_range[] = { + [FEAT_PARAM_DSS_FCK] = { 0, 173000000 }, + [FEAT_PARAM_DSS_PCD] = { 1, 255 }, + [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1}, + [FEAT_PARAM_DSI_FCK] = { 0, 173000000 }, + [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, + [FEAT_PARAM_LINEWIDTH] = { 1, 1024 }, +}; + +static const struct dss_param_range am43xx_dss_param_range[] = { + [FEAT_PARAM_DSS_FCK] = { 0, 200000000 }, + [FEAT_PARAM_DSS_PCD] = { 1, 255 }, + [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, + [FEAT_PARAM_LINEWIDTH] = { 1, 1024 }, +}; + +static const struct dss_param_range omap4_dss_param_range[] = { + [FEAT_PARAM_DSS_FCK] = { 0, 186000000 }, + [FEAT_PARAM_DSS_PCD] = { 1, 255 }, + [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, + [FEAT_PARAM_DSI_FCK] = { 0, 170000000 }, + [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, + [FEAT_PARAM_LINEWIDTH] = { 1, 2048 }, +}; + +static const struct dss_param_range omap5_dss_param_range[] = { + [FEAT_PARAM_DSS_FCK] = { 0, 209250000 }, + [FEAT_PARAM_DSS_PCD] = { 1, 255 }, + [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, + [FEAT_PARAM_DSI_FCK] = { 0, 209250000 }, + [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, + [FEAT_PARAM_LINEWIDTH] = { 1, 2048 }, +}; + +static const enum dss_feat_id omap2_dss_feat_list[] = { + FEAT_LCDENABLEPOL, + FEAT_LCDENABLESIGNAL, + FEAT_PCKFREEENABLE, + FEAT_FUNCGATED, + FEAT_ROWREPEATENABLE, + FEAT_RESIZECONF, +}; + +static const enum dss_feat_id omap3430_dss_feat_list[] = { + FEAT_LCDENABLEPOL, + FEAT_LCDENABLESIGNAL, + FEAT_PCKFREEENABLE, + FEAT_FUNCGATED, + FEAT_LINEBUFFERSPLIT, + FEAT_ROWREPEATENABLE, + FEAT_RESIZECONF, + FEAT_DSI_REVERSE_TXCLKESC, + FEAT_VENC_REQUIRES_TV_DAC_CLK, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FIXED_ZORDER, + FEAT_FIFO_MERGE, + FEAT_OMAP3_DSI_FIFO_BUG, + FEAT_DPI_USES_VDDS_DSI, +}; + +static const enum dss_feat_id am35xx_dss_feat_list[] = { + FEAT_LCDENABLEPOL, + FEAT_LCDENABLESIGNAL, + FEAT_PCKFREEENABLE, + FEAT_FUNCGATED, + FEAT_LINEBUFFERSPLIT, + FEAT_ROWREPEATENABLE, + FEAT_RESIZECONF, + FEAT_DSI_REVERSE_TXCLKESC, + FEAT_VENC_REQUIRES_TV_DAC_CLK, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FIXED_ZORDER, + FEAT_FIFO_MERGE, + FEAT_OMAP3_DSI_FIFO_BUG, +}; + +static const enum dss_feat_id am43xx_dss_feat_list[] = { + FEAT_LCDENABLEPOL, + FEAT_LCDENABLESIGNAL, + FEAT_PCKFREEENABLE, + FEAT_FUNCGATED, + FEAT_LINEBUFFERSPLIT, + FEAT_ROWREPEATENABLE, + FEAT_RESIZECONF, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FIXED_ZORDER, + FEAT_FIFO_MERGE, +}; + +static const enum dss_feat_id omap3630_dss_feat_list[] = { + FEAT_LCDENABLEPOL, + FEAT_LCDENABLESIGNAL, + FEAT_PCKFREEENABLE, + FEAT_FUNCGATED, + FEAT_LINEBUFFERSPLIT, + FEAT_ROWREPEATENABLE, + FEAT_RESIZECONF, + FEAT_DSI_PLL_PWR_BUG, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FIXED_ZORDER, + FEAT_FIFO_MERGE, + FEAT_OMAP3_DSI_FIFO_BUG, + FEAT_DPI_USES_VDDS_DSI, +}; + +static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = { + FEAT_MGR_LCD2, + FEAT_CORE_CLK_DIV, + FEAT_LCD_CLK_SRC, + FEAT_DSI_DCS_CMD_CONFIG_VC, + FEAT_DSI_VC_OCP_WIDTH, + FEAT_DSI_GNQ, + FEAT_HANDLE_UV_SEPARATE, + FEAT_ATTR2, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FREE_ZORDER, + FEAT_FIFO_MERGE, + FEAT_BURST_2D, +}; + +static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = { + FEAT_MGR_LCD2, + FEAT_CORE_CLK_DIV, + FEAT_LCD_CLK_SRC, + FEAT_DSI_DCS_CMD_CONFIG_VC, + FEAT_DSI_VC_OCP_WIDTH, + FEAT_DSI_GNQ, + FEAT_HDMI_CTS_SWMODE, + 
FEAT_HANDLE_UV_SEPARATE, + FEAT_ATTR2, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FREE_ZORDER, + FEAT_FIFO_MERGE, + FEAT_BURST_2D, +}; + +static const enum dss_feat_id omap4_dss_feat_list[] = { + FEAT_MGR_LCD2, + FEAT_CORE_CLK_DIV, + FEAT_LCD_CLK_SRC, + FEAT_DSI_DCS_CMD_CONFIG_VC, + FEAT_DSI_VC_OCP_WIDTH, + FEAT_DSI_GNQ, + FEAT_HDMI_CTS_SWMODE, + FEAT_HDMI_AUDIO_USE_MCLK, + FEAT_HANDLE_UV_SEPARATE, + FEAT_ATTR2, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FREE_ZORDER, + FEAT_FIFO_MERGE, + FEAT_BURST_2D, +}; + +static const enum dss_feat_id omap5_dss_feat_list[] = { + FEAT_MGR_LCD2, + FEAT_MGR_LCD3, + FEAT_CORE_CLK_DIV, + FEAT_LCD_CLK_SRC, + FEAT_DSI_DCS_CMD_CONFIG_VC, + FEAT_DSI_VC_OCP_WIDTH, + FEAT_DSI_GNQ, + FEAT_HDMI_CTS_SWMODE, + FEAT_HDMI_AUDIO_USE_MCLK, + FEAT_HANDLE_UV_SEPARATE, + FEAT_ATTR2, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FREE_ZORDER, + FEAT_FIFO_MERGE, + FEAT_BURST_2D, + FEAT_DSI_PHY_DCC, + FEAT_MFLAG, +}; + +/* OMAP2 DSS Features */ +static const struct omap_dss_features omap2_dss_features = { + .reg_fields = omap2_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields), + + .features = omap2_dss_feat_list, + .num_features = ARRAY_SIZE(omap2_dss_feat_list), + + .num_mgrs = 2, + .num_ovls = 3, + .supported_displays = omap2_dss_supported_displays, + .supported_outputs = omap2_dss_supported_outputs, + .supported_color_modes = omap2_dss_supported_color_modes, + .overlay_caps = omap2_dss_overlay_caps, + .clksrc_names = omap2_dss_clk_source_names, + .dss_params = omap2_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, + .buffer_size_unit = 1, + .burst_size_unit = 8, +}; + +/* OMAP3 DSS Features */ +static const struct omap_dss_features omap3430_dss_features = { + .reg_fields = omap3_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), + + .features = omap3430_dss_feat_list, + .num_features = ARRAY_SIZE(omap3430_dss_feat_list), + + .num_mgrs = 2, + .num_ovls = 3, + .supported_displays = omap3430_dss_supported_displays, + .supported_outputs = omap3430_dss_supported_outputs, + .supported_color_modes = omap3_dss_supported_color_modes, + .overlay_caps = omap3430_dss_overlay_caps, + .clksrc_names = omap3_dss_clk_source_names, + .dss_params = omap3_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, + .buffer_size_unit = 1, + .burst_size_unit = 8, +}; + +/* + * AM35xx DSS Features. This is basically OMAP3 DSS Features without the + * vdds_dsi regulator. 
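+ * The feature list is omap3430_dss_feat_list minus FEAT_DPI_USES_VDDS_DSI.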
+ */ +static const struct omap_dss_features am35xx_dss_features = { + .reg_fields = omap3_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), + + .features = am35xx_dss_feat_list, + .num_features = ARRAY_SIZE(am35xx_dss_feat_list), + + .num_mgrs = 2, + .num_ovls = 3, + .supported_displays = omap3430_dss_supported_displays, + .supported_outputs = omap3430_dss_supported_outputs, + .supported_color_modes = omap3_dss_supported_color_modes, + .overlay_caps = omap3430_dss_overlay_caps, + .clksrc_names = omap3_dss_clk_source_names, + .dss_params = omap3_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, + .buffer_size_unit = 1, + .burst_size_unit = 8, +}; + +static const struct omap_dss_features am43xx_dss_features = { + .reg_fields = am43xx_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(am43xx_dss_reg_fields), + + .features = am43xx_dss_feat_list, + .num_features = ARRAY_SIZE(am43xx_dss_feat_list), + + .num_mgrs = 1, + .num_ovls = 3, + .supported_displays = am43xx_dss_supported_displays, + .supported_outputs = am43xx_dss_supported_outputs, + .supported_color_modes = omap3_dss_supported_color_modes, + .overlay_caps = omap3430_dss_overlay_caps, + .clksrc_names = omap2_dss_clk_source_names, + .dss_params = am43xx_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA, + .buffer_size_unit = 1, + .burst_size_unit = 8, +}; + +static const struct omap_dss_features omap3630_dss_features = { + .reg_fields = omap3_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), + + .features = omap3630_dss_feat_list, + .num_features = ARRAY_SIZE(omap3630_dss_feat_list), + + .num_mgrs = 2, + .num_ovls = 3, + .supported_displays = omap3630_dss_supported_displays, + .supported_outputs = omap3630_dss_supported_outputs, + .supported_color_modes = omap3_dss_supported_color_modes, + .overlay_caps = omap3630_dss_overlay_caps, + .clksrc_names = omap3_dss_clk_source_names, + .dss_params = omap3_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, + .buffer_size_unit = 1, + .burst_size_unit = 8, +}; + +/* OMAP4 DSS Features */ +/* For OMAP4430 ES 1.0 revision */ +static const struct omap_dss_features omap4430_es1_0_dss_features = { + .reg_fields = omap4_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields), + + .features = omap4430_es1_0_dss_feat_list, + .num_features = ARRAY_SIZE(omap4430_es1_0_dss_feat_list), + + .num_mgrs = 3, + .num_ovls = 4, + .supported_displays = omap4_dss_supported_displays, + .supported_outputs = omap4_dss_supported_outputs, + .supported_color_modes = omap4_dss_supported_color_modes, + .overlay_caps = omap4_dss_overlay_caps, + .clksrc_names = omap4_dss_clk_source_names, + .dss_params = omap4_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, + .buffer_size_unit = 16, + .burst_size_unit = 16, +}; + +/* For OMAP4430 ES 2.0, 2.1 and 2.2 revisions */ +static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = { + .reg_fields = omap4_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields), + + .features = omap4430_es2_0_1_2_dss_feat_list, + .num_features = ARRAY_SIZE(omap4430_es2_0_1_2_dss_feat_list), + + .num_mgrs = 3, + .num_ovls = 4, + .supported_displays = omap4_dss_supported_displays, + .supported_outputs = omap4_dss_supported_outputs, + .supported_color_modes = omap4_dss_supported_color_modes, + .overlay_caps = omap4_dss_overlay_caps, + .clksrc_names = omap4_dss_clk_source_names, + .dss_params = omap4_dss_param_range, 
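+	/* OMAP4 supports both DMA and TILER rotation; buffer and burst sizes are in 16-byte units */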
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, + .buffer_size_unit = 16, + .burst_size_unit = 16, +}; + +/* For all the other OMAP4 versions */ +static const struct omap_dss_features omap4_dss_features = { + .reg_fields = omap4_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields), + + .features = omap4_dss_feat_list, + .num_features = ARRAY_SIZE(omap4_dss_feat_list), + + .num_mgrs = 3, + .num_ovls = 4, + .supported_displays = omap4_dss_supported_displays, + .supported_outputs = omap4_dss_supported_outputs, + .supported_color_modes = omap4_dss_supported_color_modes, + .overlay_caps = omap4_dss_overlay_caps, + .clksrc_names = omap4_dss_clk_source_names, + .dss_params = omap4_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, + .buffer_size_unit = 16, + .burst_size_unit = 16, +}; + +/* OMAP5 DSS Features */ +static const struct omap_dss_features omap5_dss_features = { + .reg_fields = omap5_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap5_dss_reg_fields), + + .features = omap5_dss_feat_list, + .num_features = ARRAY_SIZE(omap5_dss_feat_list), + + .num_mgrs = 4, + .num_ovls = 4, + .supported_displays = omap5_dss_supported_displays, + .supported_outputs = omap5_dss_supported_outputs, + .supported_color_modes = omap4_dss_supported_color_modes, + .overlay_caps = omap4_dss_overlay_caps, + .clksrc_names = omap5_dss_clk_source_names, + .dss_params = omap5_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, + .buffer_size_unit = 16, + .burst_size_unit = 16, +}; + +/* Functions returning values related to a DSS feature */ +int dss_feat_get_num_mgrs(void) +{ + return omap_current_dss_features->num_mgrs; +} +EXPORT_SYMBOL(dss_feat_get_num_mgrs); + +int dss_feat_get_num_ovls(void) +{ + return omap_current_dss_features->num_ovls; +} +EXPORT_SYMBOL(dss_feat_get_num_ovls); + +unsigned long dss_feat_get_param_min(enum dss_range_param param) +{ + return omap_current_dss_features->dss_params[param].min; +} + +unsigned long dss_feat_get_param_max(enum dss_range_param param) +{ + return omap_current_dss_features->dss_params[param].max; +} + +enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel) +{ + return omap_current_dss_features->supported_displays[channel]; +} + +enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel) +{ + return omap_current_dss_features->supported_outputs[channel]; +} + +enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane) +{ + return omap_current_dss_features->supported_color_modes[plane]; +} +EXPORT_SYMBOL(dss_feat_get_supported_color_modes); + +enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane) +{ + return omap_current_dss_features->overlay_caps[plane]; +} + +bool dss_feat_color_mode_supported(enum omap_plane plane, + enum omap_color_mode color_mode) +{ + return omap_current_dss_features->supported_color_modes[plane] & + color_mode; +} + +const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id) +{ + return omap_current_dss_features->clksrc_names[id]; +} + +u32 dss_feat_get_buffer_size_unit(void) +{ + return omap_current_dss_features->buffer_size_unit; +} + +u32 dss_feat_get_burst_size_unit(void) +{ + return omap_current_dss_features->burst_size_unit; +} + +/* DSS has_feature check */ +bool dss_has_feature(enum dss_feat_id id) +{ + int i; + const enum dss_feat_id *features = omap_current_dss_features->features; + const int num_features = 
omap_current_dss_features->num_features; + + for (i = 0; i < num_features; i++) { + if (features[i] == id) + return true; + } + + return false; +} + +void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end) +{ + if (id >= omap_current_dss_features->num_reg_fields) + BUG(); + + *start = omap_current_dss_features->reg_fields[id].start; + *end = omap_current_dss_features->reg_fields[id].end; +} + +bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type) +{ + return omap_current_dss_features->supported_rotation_types & rot_type; +} + +void dss_features_init(enum omapdss_version version) +{ + switch (version) { + case OMAPDSS_VER_OMAP24xx: + omap_current_dss_features = &omap2_dss_features; + break; + + case OMAPDSS_VER_OMAP34xx_ES1: + case OMAPDSS_VER_OMAP34xx_ES3: + omap_current_dss_features = &omap3430_dss_features; + break; + + case OMAPDSS_VER_OMAP3630: + omap_current_dss_features = &omap3630_dss_features; + break; + + case OMAPDSS_VER_OMAP4430_ES1: + omap_current_dss_features = &omap4430_es1_0_dss_features; + break; + + case OMAPDSS_VER_OMAP4430_ES2: + omap_current_dss_features = &omap4430_es2_0_1_2_dss_features; + break; + + case OMAPDSS_VER_OMAP4: + omap_current_dss_features = &omap4_dss_features; + break; + + case OMAPDSS_VER_OMAP5: + case OMAPDSS_VER_DRA7xx: + omap_current_dss_features = &omap5_dss_features; + break; + + case OMAPDSS_VER_AM35xx: + omap_current_dss_features = &am35xx_dss_features; + break; + + case OMAPDSS_VER_AM43xx: + omap_current_dss_features = &am43xx_dss_features; + break; + + default: + DSSWARN("Unsupported OMAP version"); + break; + } +} diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h new file mode 100644 index 000000000000..3d67d39f192f --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h @@ -0,0 +1,108 @@ +/* + * linux/drivers/video/omap2/dss/dss_features.h + * + * Copyright (C) 2010 Texas Instruments + * Author: Archit Taneja <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __OMAP2_DSS_FEATURES_H +#define __OMAP2_DSS_FEATURES_H + +#define MAX_DSS_MANAGERS 4 +#define MAX_DSS_OVERLAYS 4 +#define MAX_DSS_LCD_MANAGERS 3 +#define MAX_NUM_DSI 2 + +/* DSS has feature id */ +enum dss_feat_id { + FEAT_LCDENABLEPOL, + FEAT_LCDENABLESIGNAL, + FEAT_PCKFREEENABLE, + FEAT_FUNCGATED, + FEAT_MGR_LCD2, + FEAT_MGR_LCD3, + FEAT_LINEBUFFERSPLIT, + FEAT_ROWREPEATENABLE, + FEAT_RESIZECONF, + /* Independent core clk divider */ + FEAT_CORE_CLK_DIV, + FEAT_LCD_CLK_SRC, + /* DSI-PLL power command 0x3 is not working */ + FEAT_DSI_PLL_PWR_BUG, + FEAT_DSI_DCS_CMD_CONFIG_VC, + FEAT_DSI_VC_OCP_WIDTH, + FEAT_DSI_REVERSE_TXCLKESC, + FEAT_DSI_GNQ, + FEAT_DPI_USES_VDDS_DSI, + FEAT_HDMI_CTS_SWMODE, + FEAT_HDMI_AUDIO_USE_MCLK, + FEAT_HANDLE_UV_SEPARATE, + FEAT_ATTR2, + FEAT_VENC_REQUIRES_TV_DAC_CLK, + FEAT_CPR, + FEAT_PRELOAD, + FEAT_FIR_COEF_V, + FEAT_ALPHA_FIXED_ZORDER, + FEAT_ALPHA_FREE_ZORDER, + FEAT_FIFO_MERGE, + /* An unknown HW bug causing the normal FIFO thresholds not to work */ + FEAT_OMAP3_DSI_FIFO_BUG, + FEAT_BURST_2D, + FEAT_DSI_PHY_DCC, + FEAT_MFLAG, +}; + +/* DSS register field id */ +enum dss_feat_reg_field { + FEAT_REG_FIRHINC, + FEAT_REG_FIRVINC, + FEAT_REG_FIFOHIGHTHRESHOLD, + FEAT_REG_FIFOLOWTHRESHOLD, + FEAT_REG_FIFOSIZE, + FEAT_REG_HORIZONTALACCU, + FEAT_REG_VERTICALACCU, + FEAT_REG_DISPC_CLK_SWITCH, +}; + +enum dss_range_param { + FEAT_PARAM_DSS_FCK, + FEAT_PARAM_DSS_PCD, + FEAT_PARAM_DSIPLL_LPDIV, + FEAT_PARAM_DSI_FCK, + FEAT_PARAM_DOWNSCALE, + FEAT_PARAM_LINEWIDTH, +}; + +/* DSS Feature Functions */ +unsigned long dss_feat_get_param_min(enum dss_range_param param); +unsigned long dss_feat_get_param_max(enum dss_range_param param); +enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane); +bool dss_feat_color_mode_supported(enum omap_plane plane, + enum omap_color_mode color_mode); +const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); + +u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ +u32 dss_feat_get_burst_size_unit(void); /* in bytes */ + +bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type); + +bool dss_has_feature(enum dss_feat_id id); +void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); +void dss_features_init(enum omapdss_version version); + +enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel); +enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel); + +#endif diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h new file mode 100644 index 000000000000..53616b02b613 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -0,0 +1,370 @@ +/* + * HDMI driver definition for TI OMAP4 Processor. + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _HDMI_H +#define _HDMI_H + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/hdmi.h> +#include <video/omapdss.h> + +#include "dss.h" + +/* HDMI Wrapper */ + +#define HDMI_WP_REVISION 0x0 +#define HDMI_WP_SYSCONFIG 0x10 +#define HDMI_WP_IRQSTATUS_RAW 0x24 +#define HDMI_WP_IRQSTATUS 0x28 +#define HDMI_WP_IRQENABLE_SET 0x2C +#define HDMI_WP_IRQENABLE_CLR 0x30 +#define HDMI_WP_IRQWAKEEN 0x34 +#define HDMI_WP_PWR_CTRL 0x40 +#define HDMI_WP_DEBOUNCE 0x44 +#define HDMI_WP_VIDEO_CFG 0x50 +#define HDMI_WP_VIDEO_SIZE 0x60 +#define HDMI_WP_VIDEO_TIMING_H 0x68 +#define HDMI_WP_VIDEO_TIMING_V 0x6C +#define HDMI_WP_CLK 0x70 +#define HDMI_WP_AUDIO_CFG 0x80 +#define HDMI_WP_AUDIO_CFG2 0x84 +#define HDMI_WP_AUDIO_CTRL 0x88 +#define HDMI_WP_AUDIO_DATA 0x8C + +/* HDMI WP IRQ flags */ +#define HDMI_IRQ_CORE (1 << 0) +#define HDMI_IRQ_OCP_TIMEOUT (1 << 4) +#define HDMI_IRQ_AUDIO_FIFO_UNDERFLOW (1 << 8) +#define HDMI_IRQ_AUDIO_FIFO_OVERFLOW (1 << 9) +#define HDMI_IRQ_AUDIO_FIFO_SAMPLE_REQ (1 << 10) +#define HDMI_IRQ_VIDEO_VSYNC (1 << 16) +#define HDMI_IRQ_VIDEO_FRAME_DONE (1 << 17) +#define HDMI_IRQ_PHY_LINE5V_ASSERT (1 << 24) +#define HDMI_IRQ_LINK_CONNECT (1 << 25) +#define HDMI_IRQ_LINK_DISCONNECT (1 << 26) +#define HDMI_IRQ_PLL_LOCK (1 << 29) +#define HDMI_IRQ_PLL_UNLOCK (1 << 30) +#define HDMI_IRQ_PLL_RECAL (1 << 31) + +/* HDMI PLL */ + +#define PLLCTRL_PLL_CONTROL 0x0 +#define PLLCTRL_PLL_STATUS 0x4 +#define PLLCTRL_PLL_GO 0x8 +#define PLLCTRL_CFG1 0xC +#define PLLCTRL_CFG2 0x10 +#define PLLCTRL_CFG3 0x14 +#define PLLCTRL_SSC_CFG1 0x18 +#define PLLCTRL_SSC_CFG2 0x1C +#define PLLCTRL_CFG4 0x20 + +/* HDMI PHY */ + +#define HDMI_TXPHY_TX_CTRL 0x0 +#define HDMI_TXPHY_DIGITAL_CTRL 0x4 +#define HDMI_TXPHY_POWER_CTRL 0x8 +#define HDMI_TXPHY_PAD_CFG_CTRL 0xC +#define HDMI_TXPHY_BIST_CONTROL 0x1C + +enum hdmi_pll_pwr { + HDMI_PLLPWRCMD_ALLOFF = 0, + HDMI_PLLPWRCMD_PLLONLY = 1, + HDMI_PLLPWRCMD_BOTHON_ALLCLKS = 2, + HDMI_PLLPWRCMD_BOTHON_NOPHYCLK = 3 +}; + +enum hdmi_phy_pwr { + HDMI_PHYPWRCMD_OFF = 0, + HDMI_PHYPWRCMD_LDOON = 1, + HDMI_PHYPWRCMD_TXON = 2 +}; + +enum hdmi_core_hdmi_dvi { + HDMI_DVI = 0, + HDMI_HDMI = 1 +}; + +enum hdmi_packing_mode { + HDMI_PACK_10b_RGB_YUV444 = 0, + HDMI_PACK_24b_RGB_YUV444_YUV422 = 1, + HDMI_PACK_20b_YUV422 = 2, + HDMI_PACK_ALREADYPACKED = 7 +}; + +enum hdmi_stereo_channels { + HDMI_AUDIO_STEREO_NOCHANNELS = 0, + HDMI_AUDIO_STEREO_ONECHANNEL = 1, + HDMI_AUDIO_STEREO_TWOCHANNELS = 2, + HDMI_AUDIO_STEREO_THREECHANNELS = 3, + HDMI_AUDIO_STEREO_FOURCHANNELS = 4 +}; + +enum hdmi_audio_type { + HDMI_AUDIO_TYPE_LPCM = 0, + HDMI_AUDIO_TYPE_IEC = 1 +}; + +enum hdmi_audio_justify { + HDMI_AUDIO_JUSTIFY_LEFT = 0, + HDMI_AUDIO_JUSTIFY_RIGHT = 1 +}; + +enum hdmi_audio_sample_order { + HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0, + HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1 +}; + +enum hdmi_audio_samples_perword { + HDMI_AUDIO_ONEWORD_ONESAMPLE = 0, + HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1 +}; + +enum hdmi_audio_sample_size_omap { + HDMI_AUDIO_SAMPLE_16BITS = 0, + HDMI_AUDIO_SAMPLE_24BITS = 1 +}; + +enum hdmi_audio_transf_mode { + HDMI_AUDIO_TRANSF_DMA = 0, + HDMI_AUDIO_TRANSF_IRQ = 1 +}; + +enum hdmi_audio_blk_strt_end_sig { + HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0, + HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1 +}; + +enum hdmi_core_audio_layout { + HDMI_AUDIO_LAYOUT_2CH = 0, + HDMI_AUDIO_LAYOUT_8CH = 1, + HDMI_AUDIO_LAYOUT_6CH = 2 +}; + +enum hdmi_core_cts_mode { + HDMI_AUDIO_CTS_MODE_HW = 0, + HDMI_AUDIO_CTS_MODE_SW = 1 +}; + +enum hdmi_audio_mclk_mode { + 
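+	/* MCLK rate as a multiple of the audio sample rate (fs); programmed as the ACR clock divisor */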
HDMI_AUDIO_MCLK_128FS = 0, + HDMI_AUDIO_MCLK_256FS = 1, + HDMI_AUDIO_MCLK_384FS = 2, + HDMI_AUDIO_MCLK_512FS = 3, + HDMI_AUDIO_MCLK_768FS = 4, + HDMI_AUDIO_MCLK_1024FS = 5, + HDMI_AUDIO_MCLK_1152FS = 6, + HDMI_AUDIO_MCLK_192FS = 7 +}; + +struct hdmi_video_format { + enum hdmi_packing_mode packing_mode; + u32 y_res; /* Line per panel */ + u32 x_res; /* pixel per line */ +}; + +struct hdmi_config { + struct omap_video_timings timings; + struct hdmi_avi_infoframe infoframe; + enum hdmi_core_hdmi_dvi hdmi_dvi_mode; +}; + +struct hdmi_audio_format { + enum hdmi_stereo_channels stereo_channels; + u8 active_chnnls_msk; + enum hdmi_audio_type type; + enum hdmi_audio_justify justification; + enum hdmi_audio_sample_order sample_order; + enum hdmi_audio_samples_perword samples_per_word; + enum hdmi_audio_sample_size_omap sample_size; + enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end; +}; + +struct hdmi_audio_dma { + u8 transfer_size; + u8 block_size; + enum hdmi_audio_transf_mode mode; + u16 fifo_threshold; +}; + +struct hdmi_core_audio_i2s_config { + u8 in_length_bits; + u8 justification; + u8 sck_edge_mode; + u8 vbit; + u8 direction; + u8 shift; + u8 active_sds; +}; + +struct hdmi_core_audio_config { + struct hdmi_core_audio_i2s_config i2s_cfg; + struct snd_aes_iec958 *iec60958_cfg; + bool fs_override; + u32 n; + u32 cts; + u32 aud_par_busclk; + enum hdmi_core_audio_layout layout; + enum hdmi_core_cts_mode cts_mode; + bool use_mclk; + enum hdmi_audio_mclk_mode mclk_mode; + bool en_acr_pkt; + bool en_dsd_audio; + bool en_parallel_aud_input; + bool en_spdif; +}; + +struct hdmi_wp_data { + void __iomem *base; + phys_addr_t phys_base; +}; + +struct hdmi_pll_data { + struct dss_pll pll; + + void __iomem *base; + + struct hdmi_wp_data *wp; +}; + +struct hdmi_phy_data { + void __iomem *base; + + u8 lane_function[4]; + u8 lane_polarity[4]; +}; + +struct hdmi_core_data { + void __iomem *base; +}; + +static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx, + u32 val) +{ + __raw_writel(val, base_addr + idx); +} + +static inline u32 hdmi_read_reg(void __iomem *base_addr, const u32 idx) +{ + return __raw_readl(base_addr + idx); +} + +#define REG_FLD_MOD(base, idx, val, start, end) \ + hdmi_write_reg(base, idx, FLD_MOD(hdmi_read_reg(base, idx),\ + val, start, end)) +#define REG_GET(base, idx, start, end) \ + FLD_GET(hdmi_read_reg(base, idx), start, end) + +static inline int hdmi_wait_for_bit_change(void __iomem *base_addr, + const u32 idx, int b2, int b1, u32 val) +{ + u32 t = 0, v; + while (val != (v = REG_GET(base_addr, idx, b2, b1))) { + if (t++ > 10000) + return v; + udelay(1); + } + return v; +} + +/* HDMI wrapper funcs */ +int hdmi_wp_video_start(struct hdmi_wp_data *wp); +void hdmi_wp_video_stop(struct hdmi_wp_data *wp); +void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s); +u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp); +void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus); +void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask); +void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask); +int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val); +int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val); +void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, + struct hdmi_video_format *video_fmt); +void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, + struct omap_video_timings *timings); +void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, + struct omap_video_timings *timings); +void 
hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, + struct omap_video_timings *timings, struct hdmi_config *param); +int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp); +phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp); + +/* HDMI PLL funcs */ +void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s); +void hdmi_pll_compute(struct hdmi_pll_data *pll, + unsigned long target_tmds, struct dss_pll_clock_info *pi); +int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, + struct hdmi_wp_data *wp); +void hdmi_pll_uninit(struct hdmi_pll_data *hpll); + +/* HDMI PHY funcs */ +int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk, + unsigned long lfbitclk); +void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s); +int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy); +int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes); + +/* HDMI common funcs */ +int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, + struct hdmi_phy_data *phy); + +/* Audio funcs */ +int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts); +int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable); +int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable); +void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp, + struct hdmi_audio_format *aud_fmt); +void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp, + struct hdmi_audio_dma *aud_dma); +static inline bool hdmi_mode_has_audio(struct hdmi_config *cfg) +{ + return cfg->hdmi_dvi_mode == HDMI_HDMI ? true : false; +} + +/* HDMI DRV data */ +struct omap_hdmi { + struct mutex lock; + struct platform_device *pdev; + + struct hdmi_wp_data wp; + struct hdmi_pll_data pll; + struct hdmi_phy_data phy; + struct hdmi_core_data core; + + struct hdmi_config cfg; + + struct regulator *vdda_reg; + + bool core_enabled; + + struct omap_dss_device output; + + struct platform_device *audio_pdev; + void (*audio_abort_cb)(struct device *dev); + int wp_idlemode; + + bool audio_configured; + struct omap_dss_audio audio_config; + + /* This lock should be taken when booleans bellow are touched. */ + spinlock_t audio_playing_lock; + bool audio_playing; + bool display_enabled; +}; + +#endif diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c new file mode 100644 index 000000000000..7103c659a534 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -0,0 +1,839 @@ +/* + * HDMI interface DSS driver for TI's OMAP4 family of SoCs. + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Authors: Yong Zhi + * Mythri pk <mythripk@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "HDMI" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/clk.h> +#include <linux/gpio.h> +#include <linux/regulator/consumer.h> +#include <linux/component.h> +#include <video/omapdss.h> +#include <sound/omap-hdmi-audio.h> + +#include "hdmi4_core.h" +#include "dss.h" +#include "dss_features.h" +#include "hdmi.h" + +static struct omap_hdmi hdmi; + +static int hdmi_runtime_get(void) +{ + int r; + + DSSDBG("hdmi_runtime_get\n"); + + r = pm_runtime_get_sync(&hdmi.pdev->dev); + WARN_ON(r < 0); + if (r < 0) + return r; + + return 0; +} + +static void hdmi_runtime_put(void) +{ + int r; + + DSSDBG("hdmi_runtime_put\n"); + + r = pm_runtime_put_sync(&hdmi.pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} + +static irqreturn_t hdmi_irq_handler(int irq, void *data) +{ + struct hdmi_wp_data *wp = data; + u32 irqstatus; + + irqstatus = hdmi_wp_get_irqstatus(wp); + hdmi_wp_set_irqstatus(wp, irqstatus); + + if ((irqstatus & HDMI_IRQ_LINK_CONNECT) && + irqstatus & HDMI_IRQ_LINK_DISCONNECT) { + /* + * If we get both connect and disconnect interrupts at the same + * time, turn off the PHY, clear interrupts, and restart, which + * raises connect interrupt if a cable is connected, or nothing + * if cable is not connected. + */ + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF); + + hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT | + HDMI_IRQ_LINK_DISCONNECT); + + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); + } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) { + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON); + } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) { + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); + } + + return IRQ_HANDLED; +} + +static int hdmi_init_regulator(void) +{ + int r; + struct regulator *reg; + + if (hdmi.vdda_reg != NULL) + return 0; + + reg = devm_regulator_get(&hdmi.pdev->dev, "vdda"); + + if (IS_ERR(reg)) { + if (PTR_ERR(reg) != -EPROBE_DEFER) + DSSERR("can't get VDDA regulator\n"); + return PTR_ERR(reg); + } + + if (regulator_can_change_voltage(reg)) { + r = regulator_set_voltage(reg, 1800000, 1800000); + if (r) { + devm_regulator_put(reg); + DSSWARN("can't set the regulator voltage\n"); + return r; + } + } + + hdmi.vdda_reg = reg; + + return 0; +} + +static int hdmi_power_on_core(struct omap_dss_device *dssdev) +{ + int r; + + r = regulator_enable(hdmi.vdda_reg); + if (r) + return r; + + r = hdmi_runtime_get(); + if (r) + goto err_runtime_get; + + /* Make selection of HDMI in DSS */ + dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK); + + hdmi.core_enabled = true; + + return 0; + +err_runtime_get: + regulator_disable(hdmi.vdda_reg); + + return r; +} + +static void hdmi_power_off_core(struct omap_dss_device *dssdev) +{ + hdmi.core_enabled = false; + + hdmi_runtime_put(); + regulator_disable(hdmi.vdda_reg); +} + +static int hdmi_power_on_full(struct omap_dss_device *dssdev) +{ + int r; + struct omap_video_timings *p; + struct omap_overlay_manager *mgr = hdmi.output.manager; + struct hdmi_wp_data *wp = &hdmi.wp; + struct dss_pll_clock_info hdmi_cinfo = { 0 }; + + r = hdmi_power_on_core(dssdev); + if (r) + return r; + + /* disable and clear irqs */ + hdmi_wp_clear_irqenable(wp, 0xffffffff); + hdmi_wp_set_irqstatus(wp, 0xffffffff); + + p = &hdmi.cfg.timings; + + DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, 
p->y_res); + + hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo); + + r = dss_pll_enable(&hdmi.pll.pll); + if (r) { + DSSERR("Failed to enable PLL\n"); + goto err_pll_enable; + } + + r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo); + if (r) { + DSSERR("Failed to configure PLL\n"); + goto err_pll_cfg; + } + + r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco, + hdmi_cinfo.clkout[0]); + if (r) { + DSSDBG("Failed to configure PHY\n"); + goto err_phy_cfg; + } + + r = hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); + if (r) + goto err_phy_pwr; + + hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); + + /* bypass TV gamma table */ + dispc_enable_gamma_table(0); + + /* tv size */ + dss_mgr_set_timings(mgr, p); + + r = hdmi_wp_video_start(&hdmi.wp); + if (r) + goto err_vid_enable; + + r = dss_mgr_enable(mgr); + if (r) + goto err_mgr_enable; + + hdmi_wp_set_irqenable(wp, + HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT); + + return 0; + +err_mgr_enable: + hdmi_wp_video_stop(&hdmi.wp); +err_vid_enable: + hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); +err_phy_pwr: +err_phy_cfg: +err_pll_cfg: + dss_pll_disable(&hdmi.pll.pll); +err_pll_enable: + hdmi_power_off_core(dssdev); + return -EIO; +} + +static void hdmi_power_off_full(struct omap_dss_device *dssdev) +{ + struct omap_overlay_manager *mgr = hdmi.output.manager; + + hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); + + dss_mgr_disable(mgr); + + hdmi_wp_video_stop(&hdmi.wp); + + hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); + + dss_pll_disable(&hdmi.pll.pll); + + hdmi_power_off_core(dssdev); +} + +static int hdmi_display_check_timing(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct omap_dss_device *out = &hdmi.output; + + if (!dispc_mgr_timings_ok(out->dispc_channel, timings)) + return -EINVAL; + + return 0; +} + +static void hdmi_display_set_timing(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + mutex_lock(&hdmi.lock); + + hdmi.cfg.timings = *timings; + + dispc_set_tv_pclk(timings->pixelclock); + + mutex_unlock(&hdmi.lock); +} + +static void hdmi_display_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + *timings = hdmi.cfg.timings; +} + +static void hdmi_dump_regs(struct seq_file *s) +{ + mutex_lock(&hdmi.lock); + + if (hdmi_runtime_get()) { + mutex_unlock(&hdmi.lock); + return; + } + + hdmi_wp_dump(&hdmi.wp, s); + hdmi_pll_dump(&hdmi.pll, s); + hdmi_phy_dump(&hdmi.phy, s); + hdmi4_core_dump(&hdmi.core, s); + + hdmi_runtime_put(); + mutex_unlock(&hdmi.lock); +} + +static int read_edid(u8 *buf, int len) +{ + int r; + + mutex_lock(&hdmi.lock); + + r = hdmi_runtime_get(); + BUG_ON(r); + + r = hdmi4_read_edid(&hdmi.core, buf, len); + + hdmi_runtime_put(); + mutex_unlock(&hdmi.lock); + + return r; +} + +static void hdmi_start_audio_stream(struct omap_hdmi *hd) +{ + hdmi_wp_audio_enable(&hd->wp, true); + hdmi4_audio_start(&hd->core, &hd->wp); +} + +static void hdmi_stop_audio_stream(struct omap_hdmi *hd) +{ + hdmi4_audio_stop(&hd->core, &hd->wp); + hdmi_wp_audio_enable(&hd->wp, false); +} + +static int hdmi_display_enable(struct omap_dss_device *dssdev) +{ + struct omap_dss_device *out = &hdmi.output; + unsigned long flags; + int r = 0; + + DSSDBG("ENTER hdmi_display_enable\n"); + + mutex_lock(&hdmi.lock); + + if (out->manager == NULL) { + DSSERR("failed to enable display: no output/manager\n"); + r = -ENODEV; + goto err0; + } + + r = hdmi_power_on_full(dssdev); + if (r) { + DSSERR("failed to power on device\n"); + goto err0; + } + + if 
(hdmi.audio_configured) { + r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config, + hdmi.cfg.timings.pixelclock); + if (r) { + DSSERR("Error restoring audio configuration: %d", r); + hdmi.audio_abort_cb(&hdmi.pdev->dev); + hdmi.audio_configured = false; + } + } + + spin_lock_irqsave(&hdmi.audio_playing_lock, flags); + if (hdmi.audio_configured && hdmi.audio_playing) + hdmi_start_audio_stream(&hdmi); + hdmi.display_enabled = true; + spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags); + + mutex_unlock(&hdmi.lock); + return 0; + +err0: + mutex_unlock(&hdmi.lock); + return r; +} + +static void hdmi_display_disable(struct omap_dss_device *dssdev) +{ + unsigned long flags; + + DSSDBG("Enter hdmi_display_disable\n"); + + mutex_lock(&hdmi.lock); + + spin_lock_irqsave(&hdmi.audio_playing_lock, flags); + hdmi_stop_audio_stream(&hdmi); + hdmi.display_enabled = false; + spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags); + + hdmi_power_off_full(dssdev); + + mutex_unlock(&hdmi.lock); +} + +static int hdmi_core_enable(struct omap_dss_device *dssdev) +{ + int r = 0; + + DSSDBG("ENTER omapdss_hdmi_core_enable\n"); + + mutex_lock(&hdmi.lock); + + r = hdmi_power_on_core(dssdev); + if (r) { + DSSERR("failed to power on device\n"); + goto err0; + } + + mutex_unlock(&hdmi.lock); + return 0; + +err0: + mutex_unlock(&hdmi.lock); + return r; +} + +static void hdmi_core_disable(struct omap_dss_device *dssdev) +{ + DSSDBG("Enter omapdss_hdmi_core_disable\n"); + + mutex_lock(&hdmi.lock); + + hdmi_power_off_core(dssdev); + + mutex_unlock(&hdmi.lock); +} + +static int hdmi_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct omap_overlay_manager *mgr; + int r; + + r = hdmi_init_regulator(); + if (r) + return r; + + mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); + if (!mgr) + return -ENODEV; + + r = dss_mgr_connect(mgr, dssdev); + if (r) + return r; + + r = omapdss_output_set_device(dssdev, dst); + if (r) { + DSSERR("failed to connect output to new device: %s\n", + dst->name); + dss_mgr_disconnect(mgr, dssdev); + return r; + } + + return 0; +} + +static void hdmi_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + WARN_ON(dst != dssdev->dst); + + if (dst != dssdev->dst) + return; + + omapdss_output_unset_device(dssdev); + + if (dssdev->manager) + dss_mgr_disconnect(dssdev->manager, dssdev); +} + +static int hdmi_read_edid(struct omap_dss_device *dssdev, + u8 *edid, int len) +{ + bool need_enable; + int r; + + need_enable = hdmi.core_enabled == false; + + if (need_enable) { + r = hdmi_core_enable(dssdev); + if (r) + return r; + } + + r = read_edid(edid, len); + + if (need_enable) + hdmi_core_disable(dssdev); + + return r; +} + +static int hdmi_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + hdmi.cfg.infoframe = *avi; + return 0; +} + +static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, + bool hdmi_mode) +{ + hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? 
HDMI_HDMI : HDMI_DVI; + return 0; +} + +static const struct omapdss_hdmi_ops hdmi_ops = { + .connect = hdmi_connect, + .disconnect = hdmi_disconnect, + + .enable = hdmi_display_enable, + .disable = hdmi_display_disable, + + .check_timings = hdmi_display_check_timing, + .set_timings = hdmi_display_set_timing, + .get_timings = hdmi_display_get_timings, + + .read_edid = hdmi_read_edid, + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, +}; + +static void hdmi_init_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &hdmi.output; + + out->dev = &pdev->dev; + out->id = OMAP_DSS_OUTPUT_HDMI; + out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->name = "hdmi.0"; + out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; + out->ops.hdmi = &hdmi_ops; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void hdmi_uninit_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &hdmi.output; + + omapdss_unregister_output(out); +} + +static int hdmi_probe_of(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *ep; + int r; + + ep = omapdss_of_get_first_endpoint(node); + if (!ep) + return 0; + + r = hdmi_parse_lanes_of(pdev, ep, &hdmi.phy); + if (r) + goto err; + + of_node_put(ep); + return 0; + +err: + of_node_put(ep); + return r; +} + +/* Audio callbacks */ +static int hdmi_audio_startup(struct device *dev, + void (*abort_cb)(struct device *dev)) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + int ret = 0; + + mutex_lock(&hd->lock); + + if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { + ret = -EPERM; + goto out; + } + + hd->audio_abort_cb = abort_cb; + +out: + mutex_unlock(&hd->lock); + + return ret; +} + +static int hdmi_audio_shutdown(struct device *dev) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + + mutex_lock(&hd->lock); + hd->audio_abort_cb = NULL; + hd->audio_configured = false; + hd->audio_playing = false; + mutex_unlock(&hd->lock); + + return 0; +} + +static int hdmi_audio_start(struct device *dev) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + unsigned long flags; + + WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); + + spin_lock_irqsave(&hd->audio_playing_lock, flags); + + if (hd->display_enabled) + hdmi_start_audio_stream(hd); + hd->audio_playing = true; + + spin_unlock_irqrestore(&hd->audio_playing_lock, flags); + return 0; +} + +static void hdmi_audio_stop(struct device *dev) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + unsigned long flags; + + WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); + + spin_lock_irqsave(&hd->audio_playing_lock, flags); + + if (hd->display_enabled) + hdmi_stop_audio_stream(hd); + hd->audio_playing = false; + + spin_unlock_irqrestore(&hd->audio_playing_lock, flags); +} + +static int hdmi_audio_config(struct device *dev, + struct omap_dss_audio *dss_audio) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + int ret; + + mutex_lock(&hd->lock); + + if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { + ret = -EPERM; + goto out; + } + + ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio, + hd->cfg.timings.pixelclock); + if (!ret) { + hd->audio_configured = true; + hd->audio_config = *dss_audio; + } +out: + mutex_unlock(&hd->lock); + + return ret; +} + +static const struct omap_hdmi_audio_ops hdmi_audio_ops = { + .audio_startup = hdmi_audio_startup, + .audio_shutdown = hdmi_audio_shutdown, + .audio_start = hdmi_audio_start, + .audio_stop = hdmi_audio_stop, + .audio_config = hdmi_audio_config, +}; + +static int 
hdmi_audio_register(struct device *dev) +{ + struct omap_hdmi_audio_pdata pdata = { + .dev = dev, + .dss_version = omapdss_get_version(), + .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp), + .ops = &hdmi_audio_ops, + }; + + hdmi.audio_pdev = platform_device_register_data( + dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO, + &pdata, sizeof(pdata)); + + if (IS_ERR(hdmi.audio_pdev)) + return PTR_ERR(hdmi.audio_pdev); + + return 0; +} + +/* HDMI HW IP initialisation */ +static int hdmi4_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + int r; + int irq; + + hdmi.pdev = pdev; + dev_set_drvdata(&pdev->dev, &hdmi); + + mutex_init(&hdmi.lock); + spin_lock_init(&hdmi.audio_playing_lock); + + if (pdev->dev.of_node) { + r = hdmi_probe_of(pdev); + if (r) + return r; + } + + r = hdmi_wp_init(pdev, &hdmi.wp); + if (r) + return r; + + r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp); + if (r) + return r; + + r = hdmi_phy_init(pdev, &hdmi.phy); + if (r) + goto err; + + r = hdmi4_core_init(pdev, &hdmi.core); + if (r) + goto err; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + DSSERR("platform_get_irq failed\n"); + r = -ENODEV; + goto err; + } + + r = devm_request_threaded_irq(&pdev->dev, irq, + NULL, hdmi_irq_handler, + IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp); + if (r) { + DSSERR("HDMI IRQ request failed\n"); + goto err; + } + + pm_runtime_enable(&pdev->dev); + + hdmi_init_output(pdev); + + r = hdmi_audio_register(&pdev->dev); + if (r) { + DSSERR("Registering HDMI audio failed\n"); + hdmi_uninit_output(pdev); + pm_runtime_disable(&pdev->dev); + return r; + } + + dss_debugfs_create_file("hdmi", hdmi_dump_regs); + + return 0; +err: + hdmi_pll_uninit(&hdmi.pll); + return r; +} + +static void hdmi4_unbind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + if (hdmi.audio_pdev) + platform_device_unregister(hdmi.audio_pdev); + + hdmi_uninit_output(pdev); + + hdmi_pll_uninit(&hdmi.pll); + + pm_runtime_disable(&pdev->dev); +} + +static const struct component_ops hdmi4_component_ops = { + .bind = hdmi4_bind, + .unbind = hdmi4_unbind, +}; + +static int hdmi4_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &hdmi4_component_ops); +} + +static int hdmi4_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &hdmi4_component_ops); + return 0; +} + +static int hdmi_runtime_suspend(struct device *dev) +{ + dispc_runtime_put(); + + return 0; +} + +static int hdmi_runtime_resume(struct device *dev) +{ + int r; + + r = dispc_runtime_get(); + if (r < 0) + return r; + + return 0; +} + +static const struct dev_pm_ops hdmi_pm_ops = { + .runtime_suspend = hdmi_runtime_suspend, + .runtime_resume = hdmi_runtime_resume, +}; + +static const struct of_device_id hdmi_of_match[] = { + { .compatible = "ti,omap4-hdmi", }, + {}, +}; + +static struct platform_driver omapdss_hdmihw_driver = { + .probe = hdmi4_probe, + .remove = hdmi4_remove, + .driver = { + .name = "omapdss_hdmi", + .pm = &hdmi_pm_ops, + .of_match_table = hdmi_of_match, + .suppress_bind_attrs = true, + }, +}; + +int __init hdmi4_init_platform_driver(void) +{ + return platform_driver_register(&omapdss_hdmihw_driver); +} + +void hdmi4_uninit_platform_driver(void) +{ + platform_driver_unregister(&omapdss_hdmihw_driver); +} diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c new file mode 100644 index 000000000000..fa72e735dad2 --- /dev/null +++ 
b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -0,0 +1,904 @@ +/* + * ti_hdmi_4xxx_ip.c + * + * HDMI TI81xx, TI38xx, TI OMAP4 etc IP driver Library + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Authors: Yong Zhi + * Mythri pk <mythripk@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "HDMICORE" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/seq_file.h> +#include <sound/asound.h> +#include <sound/asoundef.h> + +#include "hdmi4_core.h" +#include "dss_features.h" + +#define HDMI_CORE_AV 0x500 + +static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core) +{ + return core->base + HDMI_CORE_AV; +} + +static int hdmi_core_ddc_init(struct hdmi_core_data *core) +{ + void __iomem *base = core->base; + + /* Turn on CLK for DDC */ + REG_FLD_MOD(base, HDMI_CORE_AV_DPD, 0x7, 2, 0); + + /* IN_PROG */ + if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 1) { + /* Abort transaction */ + REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xf, 3, 0); + /* IN_PROG */ + if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS, + 4, 4, 0) != 0) { + DSSERR("Timeout aborting DDC transaction\n"); + return -ETIMEDOUT; + } + } + + /* Clk SCL Devices */ + REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0xA, 3, 0); + + /* HDMI_CORE_DDC_STATUS_IN_PROG */ + if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS, + 4, 4, 0) != 0) { + DSSERR("Timeout starting SCL clock\n"); + return -ETIMEDOUT; + } + + /* Clear FIFO */ + REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x9, 3, 0); + + /* HDMI_CORE_DDC_STATUS_IN_PROG */ + if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS, + 4, 4, 0) != 0) { + DSSERR("Timeout clearing DDC fifo\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int hdmi_core_ddc_edid(struct hdmi_core_data *core, + u8 *pedid, int ext) +{ + void __iomem *base = core->base; + u32 i; + char checksum; + u32 offset = 0; + + /* HDMI_CORE_DDC_STATUS_IN_PROG */ + if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS, + 4, 4, 0) != 0) { + DSSERR("Timeout waiting DDC to be ready\n"); + return -ETIMEDOUT; + } + + if (ext % 2 != 0) + offset = 0x80; + + /* Load Segment Address Register */ + REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0); + + /* Load Slave Address Register */ + REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1); + + /* Load Offset Address Register */ + REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0); + + /* Load Byte Count */ + REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0); + + /* Set DDC_CMD */ + if (ext) + REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0); + else + REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0); + + /* HDMI_CORE_DDC_STATUS_BUS_LOW */ + if (REG_GET(base, HDMI_CORE_DDC_STATUS, 6, 6) == 1) { + DSSERR("I2C 
Bus Low?\n"); + return -EIO; + } + /* HDMI_CORE_DDC_STATUS_NO_ACK */ + if (REG_GET(base, HDMI_CORE_DDC_STATUS, 5, 5) == 1) { + DSSERR("I2C No Ack\n"); + return -EIO; + } + + for (i = 0; i < 0x80; ++i) { + int t; + + /* IN_PROG */ + if (REG_GET(base, HDMI_CORE_DDC_STATUS, 4, 4) == 0) { + DSSERR("operation stopped when reading edid\n"); + return -EIO; + } + + t = 0; + /* FIFO_EMPTY */ + while (REG_GET(base, HDMI_CORE_DDC_STATUS, 2, 2) == 1) { + if (t++ > 10000) { + DSSERR("timeout reading edid\n"); + return -ETIMEDOUT; + } + udelay(1); + } + + pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0); + } + + checksum = 0; + for (i = 0; i < 0x80; ++i) + checksum += pedid[i]; + + if (checksum != 0) { + DSSERR("E-EDID checksum failed!!\n"); + return -EIO; + } + + return 0; +} + +int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len) +{ + int r, l; + + if (len < 128) + return -EINVAL; + + r = hdmi_core_ddc_init(core); + if (r) + return r; + + r = hdmi_core_ddc_edid(core, edid, 0); + if (r) + return r; + + l = 128; + + if (len >= 128 * 2 && edid[0x7e] > 0) { + r = hdmi_core_ddc_edid(core, edid + 0x80, 1); + if (r) + return r; + l += 128; + } + + return l; +} + +static void hdmi_core_init(struct hdmi_core_video_config *video_cfg) +{ + DSSDBG("Enter hdmi_core_init\n"); + + /* video core */ + video_cfg->ip_bus_width = HDMI_INPUT_8BIT; + video_cfg->op_dither_truc = HDMI_OUTPUTTRUNCATION_8BIT; + video_cfg->deep_color_pkt = HDMI_DEEPCOLORPACKECTDISABLE; + video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE; + video_cfg->hdmi_dvi = HDMI_DVI; + video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK; +} + +static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) +{ + DSSDBG("Enter hdmi_core_powerdown_disable\n"); + REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0); +} + +static void hdmi_core_swreset_release(struct hdmi_core_data *core) +{ + DSSDBG("Enter hdmi_core_swreset_release\n"); + REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x0, 0, 0); +} + +static void hdmi_core_swreset_assert(struct hdmi_core_data *core) +{ + DSSDBG("Enter hdmi_core_swreset_assert\n"); + REG_FLD_MOD(core->base, HDMI_CORE_SYS_SRST, 0x1, 0, 0); +} + +/* HDMI_CORE_VIDEO_CONFIG */ +static void hdmi_core_video_config(struct hdmi_core_data *core, + struct hdmi_core_video_config *cfg) +{ + u32 r = 0; + void __iomem *core_sys_base = core->base; + void __iomem *core_av_base = hdmi_av_base(core); + + /* sys_ctrl1 default configuration not tunable */ + r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1); + r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC, 5, 5); + r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC, 4, 4); + r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS, 2, 2); + r = FLD_MOD(r, HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE, 1, 1); + hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_SYS_CTRL1, r); + + REG_FLD_MOD(core_sys_base, + HDMI_CORE_SYS_VID_ACEN, cfg->ip_bus_width, 7, 6); + + /* Vid_Mode */ + r = hdmi_read_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE); + + /* dither truncation configuration */ + if (cfg->op_dither_truc > HDMI_OUTPUTTRUNCATION_12BIT) { + r = FLD_MOD(r, cfg->op_dither_truc - 3, 7, 6); + r = FLD_MOD(r, 1, 5, 5); + } else { + r = FLD_MOD(r, cfg->op_dither_truc, 7, 6); + r = FLD_MOD(r, 0, 5, 5); + } + hdmi_write_reg(core_sys_base, HDMI_CORE_SYS_VID_MODE, r); + + /* HDMI_Ctrl */ + r = hdmi_read_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL); + r = FLD_MOD(r, cfg->deep_color_pkt, 6, 6); + r = FLD_MOD(r, cfg->pkt_mode, 5, 3); + r = FLD_MOD(r, cfg->hdmi_dvi, 0, 0); + 
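+	/* write back the updated HDMI_CTRL value (read-modify-write via FLD_MOD) */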
hdmi_write_reg(core_av_base, HDMI_CORE_AV_HDMI_CTRL, r); + + /* TMDS_CTRL */ + REG_FLD_MOD(core_sys_base, + HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5); +} + +static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, + struct hdmi_avi_infoframe *frame) +{ + void __iomem *av_base = hdmi_av_base(core); + u8 data[HDMI_INFOFRAME_SIZE(AVI)]; + int i; + + hdmi_avi_infoframe_pack(frame, data, sizeof(data)); + + print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data, + HDMI_INFOFRAME_SIZE(AVI), false); + + for (i = 0; i < sizeof(data); ++i) { + hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_BASE + i * 4, + data[i]); + } +} + +static void hdmi_core_av_packet_config(struct hdmi_core_data *core, + struct hdmi_core_packet_enable_repeat repeat_cfg) +{ + /* enable/repeat the infoframe */ + hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL1, + (repeat_cfg.audio_pkt << 5) | + (repeat_cfg.audio_pkt_repeat << 4) | + (repeat_cfg.avi_infoframe << 1) | + (repeat_cfg.avi_infoframe_repeat)); + + /* enable/repeat the packet */ + hdmi_write_reg(hdmi_av_base(core), HDMI_CORE_AV_PB_CTRL2, + (repeat_cfg.gen_cntrl_pkt << 3) | + (repeat_cfg.gen_cntrl_pkt_repeat << 2) | + (repeat_cfg.generic_pkt << 1) | + (repeat_cfg.generic_pkt_repeat)); +} + +void hdmi4_configure(struct hdmi_core_data *core, + struct hdmi_wp_data *wp, struct hdmi_config *cfg) +{ + /* HDMI */ + struct omap_video_timings video_timing; + struct hdmi_video_format video_format; + /* HDMI core */ + struct hdmi_core_video_config v_core_cfg; + struct hdmi_core_packet_enable_repeat repeat_cfg = { 0 }; + + hdmi_core_init(&v_core_cfg); + + hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg); + + hdmi_wp_video_config_timing(wp, &video_timing); + + /* video config */ + video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422; + + hdmi_wp_video_config_format(wp, &video_format); + + hdmi_wp_video_config_interface(wp, &video_timing); + + /* + * configure core video part + * set software reset in the core + */ + hdmi_core_swreset_assert(core); + + /* power down off */ + hdmi_core_powerdown_disable(core); + + v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL; + v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode; + + hdmi_core_video_config(core, &v_core_cfg); + + /* release software reset in the core */ + hdmi_core_swreset_release(core); + + if (cfg->hdmi_dvi_mode == HDMI_HDMI) { + hdmi_core_write_avi_infoframe(core, &cfg->infoframe); + + /* enable/repeat the infoframe */ + repeat_cfg.avi_infoframe = HDMI_PACKETENABLE; + repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON; + /* wakeup */ + repeat_cfg.audio_pkt = HDMI_PACKETENABLE; + repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON; + } + + hdmi_core_av_packet_config(core, repeat_cfg); +} + +void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s) +{ + int i; + +#define CORE_REG(i, name) name(i) +#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\ + hdmi_read_reg(core->base, r)) +#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\ + hdmi_read_reg(hdmi_av_base(core), r)) +#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \ + (i < 10) ? 
32 - (int)strlen(#r) : 31 - (int)strlen(#r), " ", \ + hdmi_read_reg(hdmi_av_base(core), CORE_REG(i, r))) + + DUMPCORE(HDMI_CORE_SYS_VND_IDL); + DUMPCORE(HDMI_CORE_SYS_DEV_IDL); + DUMPCORE(HDMI_CORE_SYS_DEV_IDH); + DUMPCORE(HDMI_CORE_SYS_DEV_REV); + DUMPCORE(HDMI_CORE_SYS_SRST); + DUMPCORE(HDMI_CORE_SYS_SYS_CTRL1); + DUMPCORE(HDMI_CORE_SYS_SYS_STAT); + DUMPCORE(HDMI_CORE_SYS_SYS_CTRL3); + DUMPCORE(HDMI_CORE_SYS_DE_DLY); + DUMPCORE(HDMI_CORE_SYS_DE_CTRL); + DUMPCORE(HDMI_CORE_SYS_DE_TOP); + DUMPCORE(HDMI_CORE_SYS_DE_CNTL); + DUMPCORE(HDMI_CORE_SYS_DE_CNTH); + DUMPCORE(HDMI_CORE_SYS_DE_LINL); + DUMPCORE(HDMI_CORE_SYS_DE_LINH_1); + DUMPCORE(HDMI_CORE_SYS_HRES_L); + DUMPCORE(HDMI_CORE_SYS_HRES_H); + DUMPCORE(HDMI_CORE_SYS_VRES_L); + DUMPCORE(HDMI_CORE_SYS_VRES_H); + DUMPCORE(HDMI_CORE_SYS_IADJUST); + DUMPCORE(HDMI_CORE_SYS_POLDETECT); + DUMPCORE(HDMI_CORE_SYS_HWIDTH1); + DUMPCORE(HDMI_CORE_SYS_HWIDTH2); + DUMPCORE(HDMI_CORE_SYS_VWIDTH); + DUMPCORE(HDMI_CORE_SYS_VID_CTRL); + DUMPCORE(HDMI_CORE_SYS_VID_ACEN); + DUMPCORE(HDMI_CORE_SYS_VID_MODE); + DUMPCORE(HDMI_CORE_SYS_VID_BLANK1); + DUMPCORE(HDMI_CORE_SYS_VID_BLANK3); + DUMPCORE(HDMI_CORE_SYS_VID_BLANK1); + DUMPCORE(HDMI_CORE_SYS_DC_HEADER); + DUMPCORE(HDMI_CORE_SYS_VID_DITHER); + DUMPCORE(HDMI_CORE_SYS_RGB2XVYCC_CT); + DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_R2Y_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_G2Y_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_B2Y_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_R2CB_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_G2CB_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_B2CB_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_R2CR_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_G2CR_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_LOW); + DUMPCORE(HDMI_CORE_SYS_B2CR_COEFF_UP); + DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_LOW); + DUMPCORE(HDMI_CORE_SYS_RGB_OFFSET_UP); + DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_LOW); + DUMPCORE(HDMI_CORE_SYS_Y_OFFSET_UP); + DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_LOW); + DUMPCORE(HDMI_CORE_SYS_CBCR_OFFSET_UP); + DUMPCORE(HDMI_CORE_SYS_INTR_STATE); + DUMPCORE(HDMI_CORE_SYS_INTR1); + DUMPCORE(HDMI_CORE_SYS_INTR2); + DUMPCORE(HDMI_CORE_SYS_INTR3); + DUMPCORE(HDMI_CORE_SYS_INTR4); + DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK1); + DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK2); + DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK3); + DUMPCORE(HDMI_CORE_SYS_INTR_UNMASK4); + DUMPCORE(HDMI_CORE_SYS_INTR_CTRL); + DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL); + + DUMPCORE(HDMI_CORE_DDC_ADDR); + DUMPCORE(HDMI_CORE_DDC_SEGM); + DUMPCORE(HDMI_CORE_DDC_OFFSET); + DUMPCORE(HDMI_CORE_DDC_COUNT1); + DUMPCORE(HDMI_CORE_DDC_COUNT2); + DUMPCORE(HDMI_CORE_DDC_STATUS); + DUMPCORE(HDMI_CORE_DDC_CMD); + DUMPCORE(HDMI_CORE_DDC_DATA); + + DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL); + DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL); + DUMPCOREAV(HDMI_CORE_AV_N_SVAL1); + DUMPCOREAV(HDMI_CORE_AV_N_SVAL2); + DUMPCOREAV(HDMI_CORE_AV_N_SVAL3); + DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1); + DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2); + DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3); + DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1); + DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2); + DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3); + DUMPCOREAV(HDMI_CORE_AV_AUD_MODE); + DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL); + DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS); + DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S); + DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH); + 
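+	/* I2S input and IEC-60958 channel status registers */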
DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP); + DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5); + DUMPCOREAV(HDMI_CORE_AV_ASRC); + DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN); + DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL); + DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT); + DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1); + DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2); + DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3); + DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL); + DUMPCOREAV(HDMI_CORE_AV_DPD); + DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1); + DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2); + DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE); + DUMPCOREAV(HDMI_CORE_AV_AVI_VERS); + DUMPCOREAV(HDMI_CORE_AV_AVI_LEN); + DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM); + + for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++) + DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE); + DUMPCOREAV(HDMI_CORE_AV_SPD_VERS); + DUMPCOREAV(HDMI_CORE_AV_SPD_LEN); + DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM); + + for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++) + DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE); + DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS); + DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN); + DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM); + + for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++) + DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE); + DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS); + DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN); + DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM); + + for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++) + DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE); + + for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++) + DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1); + + for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++) + DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID); +} + +static void hdmi_core_audio_config(struct hdmi_core_data *core, + struct hdmi_core_audio_config *cfg) +{ + u32 r; + void __iomem *av_base = hdmi_av_base(core); + + /* + * Parameters for generation of Audio Clock Recovery packets + */ + REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0); + REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0); + REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0); + + if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) { + REG_FLD_MOD(av_base, HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0); + REG_FLD_MOD(av_base, + HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0); + REG_FLD_MOD(av_base, + HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0); + } else { + REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_1, + cfg->aud_par_busclk, 7, 0); + REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_2, + (cfg->aud_par_busclk >> 8), 7, 0); + REG_FLD_MOD(av_base, HDMI_CORE_AV_AUD_PAR_BUSCLK_3, + (cfg->aud_par_busclk >> 16), 7, 0); + } + + /* Set ACR clock divisor */ + REG_FLD_MOD(av_base, + HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0); + + r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL); + /* + * Use TMDS clock for ACR packets. For devices that use + * the MCLK, this is the first part of the MCLK initialization. + */ + r = FLD_MOD(r, 0, 2, 2); + + r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1); + r = FLD_MOD(r, cfg->cts_mode, 0, 0); + hdmi_write_reg(av_base, HDMI_CORE_AV_ACR_CTRL, r); + + /* For devices using MCLK, this completes its initialization. 
*/ + if (cfg->use_mclk) + REG_FLD_MOD(av_base, HDMI_CORE_AV_ACR_CTRL, 1, 2, 2); + + /* Override of SPDIF sample frequency with value in I2S_CHST4 */ + REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL, + cfg->fs_override, 1, 1); + + /* + * Set IEC-60958-3 channel status word. It is passed to the IP + * just as it is received. The user of the driver is responsible + * for its contents. + */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0, + cfg->iec60958_cfg->status[0]); + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1, + cfg->iec60958_cfg->status[1]); + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2, + cfg->iec60958_cfg->status[2]); + /* yes, this is correct: status[3] goes to CHST4 register */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4, + cfg->iec60958_cfg->status[3]); + /* yes, this is correct: status[4] goes to CHST5 register */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, + cfg->iec60958_cfg->status[4]); + + /* set I2S parameters */ + r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL); + r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6); + r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4); + r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2); + r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1); + r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0); + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r); + + REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN, + cfg->i2s_cfg.in_length_bits, 3, 0); + + /* Audio channels and mode parameters */ + REG_FLD_MOD(av_base, HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1); + r = hdmi_read_reg(av_base, HDMI_CORE_AV_AUD_MODE); + r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4); + r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3); + r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2); + r = FLD_MOD(r, cfg->en_spdif, 1, 1); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r); + + /* Audio channel mappings */ + /* TODO: Make channel mapping dynamic. For now, map channels + * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as + * HDMI speaker order is different. See CEA-861 Section 6.6.2. + */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78); + REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5); +} + +static void hdmi_core_audio_infoframe_cfg(struct hdmi_core_data *core, + struct snd_cea_861_aud_if *info_aud) +{ + u8 sum = 0, checksum = 0; + void __iomem *av_base = hdmi_av_base(core); + + /* + * Set audio info frame type, version and length as + * described in HDMI 1.4a Section 8.2.2 specification. + * Checksum calculation is defined in Section 5.3.5. + */ + hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_TYPE, 0x84); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_VERS, 0x01); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a); + sum += 0x84 + 0x001 + 0x00a; + + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), + info_aud->db1_ct_cc); + sum += info_aud->db1_ct_cc; + + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), + info_aud->db2_sf_ss); + sum += info_aud->db2_sf_ss; + + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3); + sum += info_aud->db3; + + /* + * The OMAP HDMI IP requires to use the 8-channel channel code when + * transmitting more than two channels. 
+ */ + if (info_aud->db4_ca != 0x00) + info_aud->db4_ca = 0x13; + + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca); + sum += info_aud->db4_ca; + + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), + info_aud->db5_dminh_lsv); + sum += info_aud->db5_dminh_lsv; + + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(7), 0x00); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(8), 0x00); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(9), 0x00); + + checksum = 0x100 - sum; + hdmi_write_reg(av_base, + HDMI_CORE_AV_AUDIO_CHSUM, checksum); + + /* + * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing + * is available. + */ +} + +int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, + struct omap_dss_audio *audio, u32 pclk) +{ + struct hdmi_audio_format audio_format; + struct hdmi_audio_dma audio_dma; + struct hdmi_core_audio_config acore; + int err, n, cts, channel_count; + unsigned int fs_nr; + bool word_length_16b = false; + + if (!audio || !audio->iec || !audio->cea || !core) + return -EINVAL; + + acore.iec60958_cfg = audio->iec; + /* + * In the IEC-60958 status word, check if the audio sample word length + * is 16-bit as several optimizations can be performed in such case. + */ + if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)) + if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16) + word_length_16b = true; + + /* I2S configuration. See Phillips' specification */ + if (word_length_16b) + acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT; + else + acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; + /* + * The I2S input word length is twice the lenght given in the IEC-60958 + * status word. If the word size is greater than + * 20 bits, increment by one. 
+ */ + acore.i2s_cfg.in_length_bits = audio->iec->status[4] + & IEC958_AES4_CON_WORDLEN; + if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24) + acore.i2s_cfg.in_length_bits++; + acore.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING; + acore.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM; + acore.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST; + acore.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT; + + /* convert sample frequency to a number */ + switch (audio->iec->status[3] & IEC958_AES3_CON_FS) { + case IEC958_AES3_CON_FS_32000: + fs_nr = 32000; + break; + case IEC958_AES3_CON_FS_44100: + fs_nr = 44100; + break; + case IEC958_AES3_CON_FS_48000: + fs_nr = 48000; + break; + case IEC958_AES3_CON_FS_88200: + fs_nr = 88200; + break; + case IEC958_AES3_CON_FS_96000: + fs_nr = 96000; + break; + case IEC958_AES3_CON_FS_176400: + fs_nr = 176400; + break; + case IEC958_AES3_CON_FS_192000: + fs_nr = 192000; + break; + default: + return -EINVAL; + } + + err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); + + /* Audio clock regeneration settings */ + acore.n = n; + acore.cts = cts; + if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) { + acore.aud_par_busclk = 0; + acore.cts_mode = HDMI_AUDIO_CTS_MODE_SW; + acore.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK); + } else { + acore.aud_par_busclk = (((128 * 31) - 1) << 8); + acore.cts_mode = HDMI_AUDIO_CTS_MODE_HW; + acore.use_mclk = true; + } + + if (acore.use_mclk) + acore.mclk_mode = HDMI_AUDIO_MCLK_128FS; + + /* Audio channels settings */ + channel_count = (audio->cea->db1_ct_cc & + CEA861_AUDIO_INFOFRAME_DB1CC) + 1; + + switch (channel_count) { + case 2: + audio_format.active_chnnls_msk = 0x03; + break; + case 3: + audio_format.active_chnnls_msk = 0x07; + break; + case 4: + audio_format.active_chnnls_msk = 0x0f; + break; + case 5: + audio_format.active_chnnls_msk = 0x1f; + break; + case 6: + audio_format.active_chnnls_msk = 0x3f; + break; + case 7: + audio_format.active_chnnls_msk = 0x7f; + break; + case 8: + audio_format.active_chnnls_msk = 0xff; + break; + default: + return -EINVAL; + } + + /* + * the HDMI IP needs to enable four stereo channels when transmitting + * more than 2 audio channels. 
Similarly, the channel count in the + * Audio InfoFrame has to match the sample_present bits (some channels + * are padded with zeroes) + */ + if (channel_count == 2) { + audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL; + acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN; + acore.layout = HDMI_AUDIO_LAYOUT_2CH; + } else { + audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS; + acore.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN | + HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN | + HDMI_AUDIO_I2S_SD3_EN; + acore.layout = HDMI_AUDIO_LAYOUT_8CH; + audio->cea->db1_ct_cc = 7; + } + + acore.en_spdif = false; + /* use sample frequency from channel status word */ + acore.fs_override = true; + /* enable ACR packets */ + acore.en_acr_pkt = true; + /* disable direct streaming digital audio */ + acore.en_dsd_audio = false; + /* use parallel audio interface */ + acore.en_parallel_aud_input = true; + + /* DMA settings */ + if (word_length_16b) + audio_dma.transfer_size = 0x10; + else + audio_dma.transfer_size = 0x20; + audio_dma.block_size = 0xC0; + audio_dma.mode = HDMI_AUDIO_TRANSF_DMA; + audio_dma.fifo_threshold = 0x20; /* in number of samples */ + + /* audio FIFO format settings */ + if (word_length_16b) { + audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES; + audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS; + audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT; + } else { + audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE; + audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS; + audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT; + } + audio_format.type = HDMI_AUDIO_TYPE_LPCM; + audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; + /* disable start/stop signals of IEC 60958 blocks */ + audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON; + + /* configure DMA and audio FIFO format*/ + hdmi_wp_audio_config_dma(wp, &audio_dma); + hdmi_wp_audio_config_format(wp, &audio_format); + + /* configure the core*/ + hdmi_core_audio_config(core, &acore); + + /* configure CEA 861 audio infoframe*/ + hdmi_core_audio_infoframe_cfg(core, audio->cea); + + return 0; +} + +int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp) +{ + REG_FLD_MOD(hdmi_av_base(core), + HDMI_CORE_AV_AUD_MODE, true, 0, 0); + + hdmi_wp_audio_core_req_enable(wp, true); + + return 0; +} + +void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp) +{ + REG_FLD_MOD(hdmi_av_base(core), + HDMI_CORE_AV_AUD_MODE, false, 0, 0); + + hdmi_wp_audio_core_req_enable(wp, false); +} + +int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) +{ + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); + if (!res) { + DSSERR("can't get CORE mem resource\n"); + return -EINVAL; + } + + core->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(core->base)) { + DSSERR("can't ioremap CORE\n"); + return PTR_ERR(core->base); + } + + return 0; +} diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h new file mode 100644 index 000000000000..a069f96ec6f6 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h @@ -0,0 +1,273 @@ +/* + * HDMI header definition for OMAP4 HDMI core IP + * + * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software 
Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _HDMI4_CORE_H_ +#define _HDMI4_CORE_H_ + +#include "hdmi.h" + +/* OMAP4 HDMI IP Core System */ + +#define HDMI_CORE_SYS_VND_IDL 0x0 +#define HDMI_CORE_SYS_DEV_IDL 0x8 +#define HDMI_CORE_SYS_DEV_IDH 0xC +#define HDMI_CORE_SYS_DEV_REV 0x10 +#define HDMI_CORE_SYS_SRST 0x14 +#define HDMI_CORE_SYS_SYS_CTRL1 0x20 +#define HDMI_CORE_SYS_SYS_STAT 0x24 +#define HDMI_CORE_SYS_SYS_CTRL3 0x28 +#define HDMI_CORE_SYS_DCTL 0x34 +#define HDMI_CORE_SYS_DE_DLY 0xC8 +#define HDMI_CORE_SYS_DE_CTRL 0xCC +#define HDMI_CORE_SYS_DE_TOP 0xD0 +#define HDMI_CORE_SYS_DE_CNTL 0xD8 +#define HDMI_CORE_SYS_DE_CNTH 0xDC +#define HDMI_CORE_SYS_DE_LINL 0xE0 +#define HDMI_CORE_SYS_DE_LINH_1 0xE4 +#define HDMI_CORE_SYS_HRES_L 0xE8 +#define HDMI_CORE_SYS_HRES_H 0xEC +#define HDMI_CORE_SYS_VRES_L 0xF0 +#define HDMI_CORE_SYS_VRES_H 0xF4 +#define HDMI_CORE_SYS_IADJUST 0xF8 +#define HDMI_CORE_SYS_POLDETECT 0xFC +#define HDMI_CORE_SYS_HWIDTH1 0x110 +#define HDMI_CORE_SYS_HWIDTH2 0x114 +#define HDMI_CORE_SYS_VWIDTH 0x11C +#define HDMI_CORE_SYS_VID_CTRL 0x120 +#define HDMI_CORE_SYS_VID_ACEN 0x124 +#define HDMI_CORE_SYS_VID_MODE 0x128 +#define HDMI_CORE_SYS_VID_BLANK1 0x12C +#define HDMI_CORE_SYS_VID_BLANK2 0x130 +#define HDMI_CORE_SYS_VID_BLANK3 0x134 +#define HDMI_CORE_SYS_DC_HEADER 0x138 +#define HDMI_CORE_SYS_VID_DITHER 0x13C +#define HDMI_CORE_SYS_RGB2XVYCC_CT 0x140 +#define HDMI_CORE_SYS_R2Y_COEFF_LOW 0x144 +#define HDMI_CORE_SYS_R2Y_COEFF_UP 0x148 +#define HDMI_CORE_SYS_G2Y_COEFF_LOW 0x14C +#define HDMI_CORE_SYS_G2Y_COEFF_UP 0x150 +#define HDMI_CORE_SYS_B2Y_COEFF_LOW 0x154 +#define HDMI_CORE_SYS_B2Y_COEFF_UP 0x158 +#define HDMI_CORE_SYS_R2CB_COEFF_LOW 0x15C +#define HDMI_CORE_SYS_R2CB_COEFF_UP 0x160 +#define HDMI_CORE_SYS_G2CB_COEFF_LOW 0x164 +#define HDMI_CORE_SYS_G2CB_COEFF_UP 0x168 +#define HDMI_CORE_SYS_B2CB_COEFF_LOW 0x16C +#define HDMI_CORE_SYS_B2CB_COEFF_UP 0x170 +#define HDMI_CORE_SYS_R2CR_COEFF_LOW 0x174 +#define HDMI_CORE_SYS_R2CR_COEFF_UP 0x178 +#define HDMI_CORE_SYS_G2CR_COEFF_LOW 0x17C +#define HDMI_CORE_SYS_G2CR_COEFF_UP 0x180 +#define HDMI_CORE_SYS_B2CR_COEFF_LOW 0x184 +#define HDMI_CORE_SYS_B2CR_COEFF_UP 0x188 +#define HDMI_CORE_SYS_RGB_OFFSET_LOW 0x18C +#define HDMI_CORE_SYS_RGB_OFFSET_UP 0x190 +#define HDMI_CORE_SYS_Y_OFFSET_LOW 0x194 +#define HDMI_CORE_SYS_Y_OFFSET_UP 0x198 +#define HDMI_CORE_SYS_CBCR_OFFSET_LOW 0x19C +#define HDMI_CORE_SYS_CBCR_OFFSET_UP 0x1A0 +#define HDMI_CORE_SYS_INTR_STATE 0x1C0 +#define HDMI_CORE_SYS_INTR1 0x1C4 +#define HDMI_CORE_SYS_INTR2 0x1C8 +#define HDMI_CORE_SYS_INTR3 0x1CC +#define HDMI_CORE_SYS_INTR4 0x1D0 +#define HDMI_CORE_SYS_INTR_UNMASK1 0x1D4 +#define HDMI_CORE_SYS_INTR_UNMASK2 0x1D8 +#define HDMI_CORE_SYS_INTR_UNMASK3 0x1DC +#define HDMI_CORE_SYS_INTR_UNMASK4 0x1E0 +#define HDMI_CORE_SYS_INTR_CTRL 0x1E4 +#define HDMI_CORE_SYS_TMDS_CTRL 0x208 + +/* value definitions for HDMI_CORE_SYS_SYS_CTRL1 fields */ +#define HDMI_CORE_SYS_SYS_CTRL1_VEN_FOLLOWVSYNC 0x1 +#define HDMI_CORE_SYS_SYS_CTRL1_HEN_FOLLOWHSYNC 0x1 +#define HDMI_CORE_SYS_SYS_CTRL1_BSEL_24BITBUS 0x1 +#define HDMI_CORE_SYS_SYS_CTRL1_EDGE_RISINGEDGE 0x1 + +/* HDMI DDC E-DID */ +#define 
HDMI_CORE_DDC_ADDR 0x3B4 +#define HDMI_CORE_DDC_SEGM 0x3B8 +#define HDMI_CORE_DDC_OFFSET 0x3BC +#define HDMI_CORE_DDC_COUNT1 0x3C0 +#define HDMI_CORE_DDC_COUNT2 0x3C4 +#define HDMI_CORE_DDC_STATUS 0x3C8 +#define HDMI_CORE_DDC_CMD 0x3CC +#define HDMI_CORE_DDC_DATA 0x3D0 + +/* HDMI IP Core Audio Video */ + +#define HDMI_CORE_AV_ACR_CTRL 0x4 +#define HDMI_CORE_AV_FREQ_SVAL 0x8 +#define HDMI_CORE_AV_N_SVAL1 0xC +#define HDMI_CORE_AV_N_SVAL2 0x10 +#define HDMI_CORE_AV_N_SVAL3 0x14 +#define HDMI_CORE_AV_CTS_SVAL1 0x18 +#define HDMI_CORE_AV_CTS_SVAL2 0x1C +#define HDMI_CORE_AV_CTS_SVAL3 0x20 +#define HDMI_CORE_AV_CTS_HVAL1 0x24 +#define HDMI_CORE_AV_CTS_HVAL2 0x28 +#define HDMI_CORE_AV_CTS_HVAL3 0x2C +#define HDMI_CORE_AV_AUD_MODE 0x50 +#define HDMI_CORE_AV_SPDIF_CTRL 0x54 +#define HDMI_CORE_AV_HW_SPDIF_FS 0x60 +#define HDMI_CORE_AV_SWAP_I2S 0x64 +#define HDMI_CORE_AV_SPDIF_ERTH 0x6C +#define HDMI_CORE_AV_I2S_IN_MAP 0x70 +#define HDMI_CORE_AV_I2S_IN_CTRL 0x74 +#define HDMI_CORE_AV_I2S_CHST0 0x78 +#define HDMI_CORE_AV_I2S_CHST1 0x7C +#define HDMI_CORE_AV_I2S_CHST2 0x80 +#define HDMI_CORE_AV_I2S_CHST4 0x84 +#define HDMI_CORE_AV_I2S_CHST5 0x88 +#define HDMI_CORE_AV_ASRC 0x8C +#define HDMI_CORE_AV_I2S_IN_LEN 0x90 +#define HDMI_CORE_AV_HDMI_CTRL 0xBC +#define HDMI_CORE_AV_AUDO_TXSTAT 0xC0 +#define HDMI_CORE_AV_AUD_PAR_BUSCLK_1 0xCC +#define HDMI_CORE_AV_AUD_PAR_BUSCLK_2 0xD0 +#define HDMI_CORE_AV_AUD_PAR_BUSCLK_3 0xD4 +#define HDMI_CORE_AV_TEST_TXCTRL 0xF0 +#define HDMI_CORE_AV_DPD 0xF4 +#define HDMI_CORE_AV_PB_CTRL1 0xF8 +#define HDMI_CORE_AV_PB_CTRL2 0xFC +#define HDMI_CORE_AV_AVI_BASE 0x100 +#define HDMI_CORE_AV_AVI_TYPE 0x100 +#define HDMI_CORE_AV_AVI_VERS 0x104 +#define HDMI_CORE_AV_AVI_LEN 0x108 +#define HDMI_CORE_AV_AVI_CHSUM 0x10C +#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110) +#define HDMI_CORE_AV_SPD_TYPE 0x180 +#define HDMI_CORE_AV_SPD_VERS 0x184 +#define HDMI_CORE_AV_SPD_LEN 0x188 +#define HDMI_CORE_AV_SPD_CHSUM 0x18C +#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190) +#define HDMI_CORE_AV_AUDIO_TYPE 0x200 +#define HDMI_CORE_AV_AUDIO_VERS 0x204 +#define HDMI_CORE_AV_AUDIO_LEN 0x208 +#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C +#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210) +#define HDMI_CORE_AV_MPEG_TYPE 0x280 +#define HDMI_CORE_AV_MPEG_VERS 0x284 +#define HDMI_CORE_AV_MPEG_LEN 0x288 +#define HDMI_CORE_AV_MPEG_CHSUM 0x28C +#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290) +#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300) +#define HDMI_CORE_AV_CP_BYTE1 0x37C +#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380) +#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC + +#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4 +#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4 +#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4 +#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4 + +#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15 +#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27 +#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10 +#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27 +#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31 +#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31 + +enum hdmi_core_inputbus_width { + HDMI_INPUT_8BIT = 0, + HDMI_INPUT_10BIT = 1, + HDMI_INPUT_12BIT = 2 +}; + +enum hdmi_core_dither_trunc { + HDMI_OUTPUTTRUNCATION_8BIT = 0, + HDMI_OUTPUTTRUNCATION_10BIT = 1, + HDMI_OUTPUTTRUNCATION_12BIT = 2, + HDMI_OUTPUTDITHER_8BIT = 3, + HDMI_OUTPUTDITHER_10BIT = 4, + HDMI_OUTPUTDITHER_12BIT = 5 +}; + +enum hdmi_core_deepcolor_ed { + HDMI_DEEPCOLORPACKECTDISABLE = 0, + HDMI_DEEPCOLORPACKECTENABLE = 1 +}; + +enum hdmi_core_packet_mode { + HDMI_PACKETMODERESERVEDVALUE = 0, + 
HDMI_PACKETMODE24BITPERPIXEL = 4, + HDMI_PACKETMODE30BITPERPIXEL = 5, + HDMI_PACKETMODE36BITPERPIXEL = 6, + HDMI_PACKETMODE48BITPERPIXEL = 7 +}; + +enum hdmi_core_tclkselclkmult { + HDMI_FPLL05IDCK = 0, + HDMI_FPLL10IDCK = 1, + HDMI_FPLL20IDCK = 2, + HDMI_FPLL40IDCK = 3 +}; + +enum hdmi_core_packet_ctrl { + HDMI_PACKETENABLE = 1, + HDMI_PACKETDISABLE = 0, + HDMI_PACKETREPEATON = 1, + HDMI_PACKETREPEATOFF = 0 +}; + +enum hdmi_audio_i2s_config { + HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0, + HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1, + HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0, + HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1, + HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0, + HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1, + HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0, + HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1, + HDMI_AUDIO_I2S_SD0_EN = 1, + HDMI_AUDIO_I2S_SD1_EN = 1 << 1, + HDMI_AUDIO_I2S_SD2_EN = 1 << 2, + HDMI_AUDIO_I2S_SD3_EN = 1 << 3, +}; + +struct hdmi_core_video_config { + enum hdmi_core_inputbus_width ip_bus_width; + enum hdmi_core_dither_trunc op_dither_truc; + enum hdmi_core_deepcolor_ed deep_color_pkt; + enum hdmi_core_packet_mode pkt_mode; + enum hdmi_core_hdmi_dvi hdmi_dvi; + enum hdmi_core_tclkselclkmult tclk_sel_clkmult; +}; + +struct hdmi_core_packet_enable_repeat { + u32 audio_pkt; + u32 audio_pkt_repeat; + u32 avi_infoframe; + u32 avi_infoframe_repeat; + u32 gen_cntrl_pkt; + u32 gen_cntrl_pkt_repeat; + u32 generic_pkt; + u32 generic_pkt_repeat; +}; + +int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len); +void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, + struct hdmi_config *cfg); +void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s); +int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core); + +int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp); +void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp); +int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, + struct omap_dss_audio *audio, u32 pclk); +#endif diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c new file mode 100644 index 000000000000..a955a2c4c061 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -0,0 +1,876 @@ +/* + * HDMI driver for OMAP5 + * + * Copyright (C) 2014 Texas Instruments Incorporated + * + * Authors: + * Yong Zhi + * Mythri pk + * Archit Taneja <archit@ti.com> + * Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "HDMI" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/clk.h> +#include <linux/gpio.h> +#include <linux/regulator/consumer.h> +#include <linux/component.h> +#include <video/omapdss.h> +#include <sound/omap-hdmi-audio.h> + +#include "hdmi5_core.h" +#include "dss.h" +#include "dss_features.h" + +static struct omap_hdmi hdmi; + +static int hdmi_runtime_get(void) +{ + int r; + + DSSDBG("hdmi_runtime_get\n"); + + r = pm_runtime_get_sync(&hdmi.pdev->dev); + WARN_ON(r < 0); + if (r < 0) + return r; + + return 0; +} + +static void hdmi_runtime_put(void) +{ + int r; + + DSSDBG("hdmi_runtime_put\n"); + + r = pm_runtime_put_sync(&hdmi.pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} + +static irqreturn_t hdmi_irq_handler(int irq, void *data) +{ + struct hdmi_wp_data *wp = data; + u32 irqstatus; + + irqstatus = hdmi_wp_get_irqstatus(wp); + hdmi_wp_set_irqstatus(wp, irqstatus); + + if ((irqstatus & HDMI_IRQ_LINK_CONNECT) && + irqstatus & HDMI_IRQ_LINK_DISCONNECT) { + u32 v; + /* + * If we get both connect and disconnect interrupts at the same + * time, turn off the PHY, clear interrupts, and restart, which + * raises connect interrupt if a cable is connected, or nothing + * if cable is not connected. + */ + + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_OFF); + + /* + * We always get bogus CONNECT & DISCONNECT interrupts when + * setting the PHY to LDOON. To ignore those, we force the RXDET + * line to 0 until the PHY power state has been changed. + */ + v = hdmi_read_reg(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL); + v = FLD_MOD(v, 1, 15, 15); /* FORCE_RXDET_HIGH */ + v = FLD_MOD(v, 0, 14, 7); /* RXDET_LINE */ + hdmi_write_reg(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL, v); + + hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT | + HDMI_IRQ_LINK_DISCONNECT); + + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); + + REG_FLD_MOD(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL, 0, 15, 15); + + } else if (irqstatus & HDMI_IRQ_LINK_CONNECT) { + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON); + } else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) { + hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON); + } + + return IRQ_HANDLED; +} + +static int hdmi_init_regulator(void) +{ + int r; + struct regulator *reg; + + if (hdmi.vdda_reg != NULL) + return 0; + + reg = devm_regulator_get(&hdmi.pdev->dev, "vdda"); + if (IS_ERR(reg)) { + DSSERR("can't get VDDA regulator\n"); + return PTR_ERR(reg); + } + + if (regulator_can_change_voltage(reg)) { + r = regulator_set_voltage(reg, 1800000, 1800000); + if (r) { + devm_regulator_put(reg); + DSSWARN("can't set the regulator voltage\n"); + return r; + } + } + + hdmi.vdda_reg = reg; + + return 0; +} + +static int hdmi_power_on_core(struct omap_dss_device *dssdev) +{ + int r; + + r = regulator_enable(hdmi.vdda_reg); + if (r) + return r; + + r = hdmi_runtime_get(); + if (r) + goto err_runtime_get; + + /* Make selection of HDMI in DSS */ + dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK); + + hdmi.core_enabled = true; + + return 0; + +err_runtime_get: + regulator_disable(hdmi.vdda_reg); + + return r; +} + +static void hdmi_power_off_core(struct omap_dss_device *dssdev) +{ + hdmi.core_enabled = false; + + hdmi_runtime_put(); + regulator_disable(hdmi.vdda_reg); +} + +static int hdmi_power_on_full(struct omap_dss_device *dssdev) +{ + int r; + 
struct omap_video_timings *p; + struct omap_overlay_manager *mgr = hdmi.output.manager; + struct dss_pll_clock_info hdmi_cinfo = { 0 }; + + r = hdmi_power_on_core(dssdev); + if (r) + return r; + + p = &hdmi.cfg.timings; + + DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res); + + hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo); + + /* disable and clear irqs */ + hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); + hdmi_wp_set_irqstatus(&hdmi.wp, + hdmi_wp_get_irqstatus(&hdmi.wp)); + + r = dss_pll_enable(&hdmi.pll.pll); + if (r) { + DSSERR("Failed to enable PLL\n"); + goto err_pll_enable; + } + + r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo); + if (r) { + DSSERR("Failed to configure PLL\n"); + goto err_pll_cfg; + } + + r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco, + hdmi_cinfo.clkout[0]); + if (r) { + DSSDBG("Failed to start PHY\n"); + goto err_phy_cfg; + } + + r = hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_LDOON); + if (r) + goto err_phy_pwr; + + hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); + + /* bypass TV gamma table */ + dispc_enable_gamma_table(0); + + /* tv size */ + dss_mgr_set_timings(mgr, p); + + r = hdmi_wp_video_start(&hdmi.wp); + if (r) + goto err_vid_enable; + + r = dss_mgr_enable(mgr); + if (r) + goto err_mgr_enable; + + hdmi_wp_set_irqenable(&hdmi.wp, + HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT); + + return 0; + +err_mgr_enable: + hdmi_wp_video_stop(&hdmi.wp); +err_vid_enable: + hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); +err_phy_pwr: +err_phy_cfg: +err_pll_cfg: + dss_pll_disable(&hdmi.pll.pll); +err_pll_enable: + hdmi_power_off_core(dssdev); + return -EIO; +} + +static void hdmi_power_off_full(struct omap_dss_device *dssdev) +{ + struct omap_overlay_manager *mgr = hdmi.output.manager; + + hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); + + dss_mgr_disable(mgr); + + hdmi_wp_video_stop(&hdmi.wp); + + hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF); + + dss_pll_disable(&hdmi.pll.pll); + + hdmi_power_off_core(dssdev); +} + +static int hdmi_display_check_timing(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct omap_dss_device *out = &hdmi.output; + + /* TODO: proper interlace support */ + if (timings->interlace) + return -EINVAL; + + if (!dispc_mgr_timings_ok(out->dispc_channel, timings)) + return -EINVAL; + + return 0; +} + +static void hdmi_display_set_timing(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + mutex_lock(&hdmi.lock); + + hdmi.cfg.timings = *timings; + + dispc_set_tv_pclk(timings->pixelclock); + + mutex_unlock(&hdmi.lock); +} + +static void hdmi_display_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + *timings = hdmi.cfg.timings; +} + +static void hdmi_dump_regs(struct seq_file *s) +{ + mutex_lock(&hdmi.lock); + + if (hdmi_runtime_get()) { + mutex_unlock(&hdmi.lock); + return; + } + + hdmi_wp_dump(&hdmi.wp, s); + hdmi_pll_dump(&hdmi.pll, s); + hdmi_phy_dump(&hdmi.phy, s); + hdmi5_core_dump(&hdmi.core, s); + + hdmi_runtime_put(); + mutex_unlock(&hdmi.lock); +} + +static int read_edid(u8 *buf, int len) +{ + int r; + int idlemode; + + mutex_lock(&hdmi.lock); + + r = hdmi_runtime_get(); + BUG_ON(r); + + idlemode = REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2); + /* No-idle mode */ + REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2); + + r = hdmi5_read_edid(&hdmi.core, buf, len); + + REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2); + + hdmi_runtime_put(); + mutex_unlock(&hdmi.lock); + + return r; +} + 
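The SYSCONFIG idle-mode save/restore in read_edid() above, like most register accesses in this driver, goes through REG_GET()/REG_FLD_MOD(), which wrap the DSS bit-field helpers. Those helper definitions are not part of this diff; the following is a minimal sketch assuming the usual omapdss-style definitions, for illustration only:

/* Illustrative sketch of the assumed field helpers (not patch content). */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end)	(((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end)	(((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

/*
 * With these, REG_FLD_MOD(base, HDMI_WP_SYSCONFIG, 1, 3, 2) as used in
 * read_edid() expands to a read-modify-write that sets only bits 3:2
 * (the idle-mode field) to 1, leaving the rest of the register untouched.
 */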
+static void hdmi_start_audio_stream(struct omap_hdmi *hd) +{ + REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2); + hdmi_wp_audio_enable(&hd->wp, true); + hdmi_wp_audio_core_req_enable(&hd->wp, true); +} + +static void hdmi_stop_audio_stream(struct omap_hdmi *hd) +{ + hdmi_wp_audio_core_req_enable(&hd->wp, false); + hdmi_wp_audio_enable(&hd->wp, false); + REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2); +} + +static int hdmi_display_enable(struct omap_dss_device *dssdev) +{ + struct omap_dss_device *out = &hdmi.output; + unsigned long flags; + int r = 0; + + DSSDBG("ENTER hdmi_display_enable\n"); + + mutex_lock(&hdmi.lock); + + if (out->manager == NULL) { + DSSERR("failed to enable display: no output/manager\n"); + r = -ENODEV; + goto err0; + } + + r = hdmi_power_on_full(dssdev); + if (r) { + DSSERR("failed to power on device\n"); + goto err0; + } + + if (hdmi.audio_configured) { + r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config, + hdmi.cfg.timings.pixelclock); + if (r) { + DSSERR("Error restoring audio configuration: %d", r); + hdmi.audio_abort_cb(&hdmi.pdev->dev); + hdmi.audio_configured = false; + } + } + + spin_lock_irqsave(&hdmi.audio_playing_lock, flags); + if (hdmi.audio_configured && hdmi.audio_playing) + hdmi_start_audio_stream(&hdmi); + hdmi.display_enabled = true; + spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags); + + mutex_unlock(&hdmi.lock); + return 0; + +err0: + mutex_unlock(&hdmi.lock); + return r; +} + +static void hdmi_display_disable(struct omap_dss_device *dssdev) +{ + unsigned long flags; + + DSSDBG("Enter hdmi_display_disable\n"); + + mutex_lock(&hdmi.lock); + + spin_lock_irqsave(&hdmi.audio_playing_lock, flags); + hdmi_stop_audio_stream(&hdmi); + hdmi.display_enabled = false; + spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags); + + hdmi_power_off_full(dssdev); + + mutex_unlock(&hdmi.lock); +} + +static int hdmi_core_enable(struct omap_dss_device *dssdev) +{ + int r = 0; + + DSSDBG("ENTER omapdss_hdmi_core_enable\n"); + + mutex_lock(&hdmi.lock); + + r = hdmi_power_on_core(dssdev); + if (r) { + DSSERR("failed to power on device\n"); + goto err0; + } + + mutex_unlock(&hdmi.lock); + return 0; + +err0: + mutex_unlock(&hdmi.lock); + return r; +} + +static void hdmi_core_disable(struct omap_dss_device *dssdev) +{ + DSSDBG("Enter omapdss_hdmi_core_disable\n"); + + mutex_lock(&hdmi.lock); + + hdmi_power_off_core(dssdev); + + mutex_unlock(&hdmi.lock); +} + +static int hdmi_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct omap_overlay_manager *mgr; + int r; + + r = hdmi_init_regulator(); + if (r) + return r; + + mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); + if (!mgr) + return -ENODEV; + + r = dss_mgr_connect(mgr, dssdev); + if (r) + return r; + + r = omapdss_output_set_device(dssdev, dst); + if (r) { + DSSERR("failed to connect output to new device: %s\n", + dst->name); + dss_mgr_disconnect(mgr, dssdev); + return r; + } + + return 0; +} + +static void hdmi_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + WARN_ON(dst != dssdev->dst); + + if (dst != dssdev->dst) + return; + + omapdss_output_unset_device(dssdev); + + if (dssdev->manager) + dss_mgr_disconnect(dssdev->manager, dssdev); +} + +static int hdmi_read_edid(struct omap_dss_device *dssdev, + u8 *edid, int len) +{ + bool need_enable; + int r; + + need_enable = hdmi.core_enabled == false; + + if (need_enable) { + r = hdmi_core_enable(dssdev); + if (r) + return r; + } + + r = read_edid(edid, 
len); + + if (need_enable) + hdmi_core_disable(dssdev); + + return r; +} + +static int hdmi_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + hdmi.cfg.infoframe = *avi; + return 0; +} + +static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, + bool hdmi_mode) +{ + hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI; + return 0; +} + +static const struct omapdss_hdmi_ops hdmi_ops = { + .connect = hdmi_connect, + .disconnect = hdmi_disconnect, + + .enable = hdmi_display_enable, + .disable = hdmi_display_disable, + + .check_timings = hdmi_display_check_timing, + .set_timings = hdmi_display_set_timing, + .get_timings = hdmi_display_get_timings, + + .read_edid = hdmi_read_edid, + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, +}; + +static void hdmi_init_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &hdmi.output; + + out->dev = &pdev->dev; + out->id = OMAP_DSS_OUTPUT_HDMI; + out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->name = "hdmi.0"; + out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; + out->ops.hdmi = &hdmi_ops; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void hdmi_uninit_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &hdmi.output; + + omapdss_unregister_output(out); +} + +static int hdmi_probe_of(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *ep; + int r; + + ep = omapdss_of_get_first_endpoint(node); + if (!ep) + return 0; + + r = hdmi_parse_lanes_of(pdev, ep, &hdmi.phy); + if (r) + goto err; + + of_node_put(ep); + return 0; + +err: + of_node_put(ep); + return r; +} + +/* Audio callbacks */ +static int hdmi_audio_startup(struct device *dev, + void (*abort_cb)(struct device *dev)) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + int ret = 0; + + mutex_lock(&hd->lock); + + if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { + ret = -EPERM; + goto out; + } + + hd->audio_abort_cb = abort_cb; + +out: + mutex_unlock(&hd->lock); + + return ret; +} + +static int hdmi_audio_shutdown(struct device *dev) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + + mutex_lock(&hd->lock); + hd->audio_abort_cb = NULL; + hd->audio_configured = false; + hd->audio_playing = false; + mutex_unlock(&hd->lock); + + return 0; +} + +static int hdmi_audio_start(struct device *dev) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + unsigned long flags; + + WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); + + spin_lock_irqsave(&hd->audio_playing_lock, flags); + + if (hd->display_enabled) + hdmi_start_audio_stream(hd); + hd->audio_playing = true; + + spin_unlock_irqrestore(&hd->audio_playing_lock, flags); + return 0; +} + +static void hdmi_audio_stop(struct device *dev) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + unsigned long flags; + + WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); + + spin_lock_irqsave(&hd->audio_playing_lock, flags); + + if (hd->display_enabled) + hdmi_stop_audio_stream(hd); + hd->audio_playing = false; + + spin_unlock_irqrestore(&hd->audio_playing_lock, flags); +} + +static int hdmi_audio_config(struct device *dev, + struct omap_dss_audio *dss_audio) +{ + struct omap_hdmi *hd = dev_get_drvdata(dev); + int ret; + + mutex_lock(&hd->lock); + + if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { + ret = -EPERM; + goto out; + } + + ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio, + hd->cfg.timings.pixelclock); + + if (!ret) { + hd->audio_configured = true; + 
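	/*
	 * The flag set here, together with the copy of *dss_audio stored just
	 * below, is what hdmi_display_enable() uses to replay the audio
	 * configuration the next time the display is powered up; that path
	 * clears audio_configured again if the replay fails.
	 */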
hd->audio_config = *dss_audio; + } +out: + mutex_unlock(&hd->lock); + + return ret; +} + +static const struct omap_hdmi_audio_ops hdmi_audio_ops = { + .audio_startup = hdmi_audio_startup, + .audio_shutdown = hdmi_audio_shutdown, + .audio_start = hdmi_audio_start, + .audio_stop = hdmi_audio_stop, + .audio_config = hdmi_audio_config, +}; + +static int hdmi_audio_register(struct device *dev) +{ + struct omap_hdmi_audio_pdata pdata = { + .dev = dev, + .dss_version = omapdss_get_version(), + .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp), + .ops = &hdmi_audio_ops, + }; + + hdmi.audio_pdev = platform_device_register_data( + dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO, + &pdata, sizeof(pdata)); + + if (IS_ERR(hdmi.audio_pdev)) + return PTR_ERR(hdmi.audio_pdev); + + hdmi_runtime_get(); + hdmi.wp_idlemode = + REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2); + hdmi_runtime_put(); + + return 0; +} + +/* HDMI HW IP initialisation */ +static int hdmi5_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + int r; + int irq; + + hdmi.pdev = pdev; + dev_set_drvdata(&pdev->dev, &hdmi); + + mutex_init(&hdmi.lock); + spin_lock_init(&hdmi.audio_playing_lock); + + if (pdev->dev.of_node) { + r = hdmi_probe_of(pdev); + if (r) + return r; + } + + r = hdmi_wp_init(pdev, &hdmi.wp); + if (r) + return r; + + r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp); + if (r) + return r; + + r = hdmi_phy_init(pdev, &hdmi.phy); + if (r) + goto err; + + r = hdmi5_core_init(pdev, &hdmi.core); + if (r) + goto err; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + DSSERR("platform_get_irq failed\n"); + r = -ENODEV; + goto err; + } + + r = devm_request_threaded_irq(&pdev->dev, irq, + NULL, hdmi_irq_handler, + IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp); + if (r) { + DSSERR("HDMI IRQ request failed\n"); + goto err; + } + + pm_runtime_enable(&pdev->dev); + + hdmi_init_output(pdev); + + r = hdmi_audio_register(&pdev->dev); + if (r) { + DSSERR("Registering HDMI audio failed %d\n", r); + hdmi_uninit_output(pdev); + pm_runtime_disable(&pdev->dev); + return r; + } + + dss_debugfs_create_file("hdmi", hdmi_dump_regs); + + return 0; +err: + hdmi_pll_uninit(&hdmi.pll); + return r; +} + +static void hdmi5_unbind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + if (hdmi.audio_pdev) + platform_device_unregister(hdmi.audio_pdev); + + hdmi_uninit_output(pdev); + + hdmi_pll_uninit(&hdmi.pll); + + pm_runtime_disable(&pdev->dev); +} + +static const struct component_ops hdmi5_component_ops = { + .bind = hdmi5_bind, + .unbind = hdmi5_unbind, +}; + +static int hdmi5_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &hdmi5_component_ops); +} + +static int hdmi5_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &hdmi5_component_ops); + return 0; +} + +static int hdmi_runtime_suspend(struct device *dev) +{ + dispc_runtime_put(); + + return 0; +} + +static int hdmi_runtime_resume(struct device *dev) +{ + int r; + + r = dispc_runtime_get(); + if (r < 0) + return r; + + return 0; +} + +static const struct dev_pm_ops hdmi_pm_ops = { + .runtime_suspend = hdmi_runtime_suspend, + .runtime_resume = hdmi_runtime_resume, +}; + +static const struct of_device_id hdmi_of_match[] = { + { .compatible = "ti,omap5-hdmi", }, + { .compatible = "ti,dra7-hdmi", }, + {}, +}; + +static struct platform_driver omapdss_hdmihw_driver = { + .probe = hdmi5_probe, + .remove = hdmi5_remove, + .driver = { + 
.name = "omapdss_hdmi5", + .pm = &hdmi_pm_ops, + .of_match_table = hdmi_of_match, + .suppress_bind_attrs = true, + }, +}; + +int __init hdmi5_init_platform_driver(void) +{ + return platform_driver_register(&omapdss_hdmihw_driver); +} + +void hdmi5_uninit_platform_driver(void) +{ + platform_driver_unregister(&omapdss_hdmihw_driver); +} diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c new file mode 100644 index 000000000000..8ea531d2652c --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -0,0 +1,916 @@ +/* + * OMAP5 HDMI CORE IP driver library + * + * Copyright (C) 2014 Texas Instruments Incorporated + * + * Authors: + * Yong Zhi + * Mythri pk + * Archit Taneja <archit@ti.com> + * Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/seq_file.h> +#include <drm/drm_edid.h> +#include <sound/asound.h> +#include <sound/asoundef.h> + +#include "hdmi5_core.h" + +/* only 24 bit color depth used for now */ +static const struct csc_table csc_table_deepcolor[] = { + /* HDMI_DEEP_COLOR_24BIT */ + [0] = { 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32, }, + /* HDMI_DEEP_COLOR_30BIT */ + [1] = { 7015, 0, 0, 128, 0, 7015, 0, 128, 0, 0, 7015, 128, }, + /* HDMI_DEEP_COLOR_36BIT */ + [2] = { 7010, 0, 0, 512, 0, 7010, 0, 512, 0, 0, 7010, 512, }, + /* FULL RANGE */ + [3] = { 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0, }, +}; + +static void hdmi_core_ddc_init(struct hdmi_core_data *core) +{ + void __iomem *base = core->base; + const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ + const unsigned ss_scl_high = 4000; /* ns */ + const unsigned ss_scl_low = 4700; /* ns */ + const unsigned fs_scl_high = 600; /* ns */ + const unsigned fs_scl_low = 1300; /* ns */ + const unsigned sda_hold = 1000; /* ns */ + const unsigned sfr_div = 10; + unsigned long long sfr; + unsigned v; + + sfr = iclk / sfr_div; /* SFR_DIV */ + sfr /= 1000; /* SFR clock in kHz */ + + /* Reset */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_SOFTRSTZ, 0, 0, 0); + if (hdmi_wait_for_bit_change(base, HDMI_CORE_I2CM_SOFTRSTZ, + 0, 0, 1) != 1) + DSSERR("HDMI I2CM reset failed\n"); + + /* Standard (0) or Fast (1) Mode */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_DIV, 0, 3, 3); + + /* Standard Mode SCL High counter */ + v = DIV_ROUND_UP_ULL(ss_scl_high * sfr, 1000000); + REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_HCNT_1_ADDR, + (v >> 8) & 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_HCNT_0_ADDR, + v & 0xff, 7, 0); + + /* Standard Mode SCL Low counter */ + v = DIV_ROUND_UP_ULL(ss_scl_low * sfr, 1000000); + REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_LCNT_1_ADDR, + (v >> 8) & 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_I2CM_SS_SCL_LCNT_0_ADDR, + v & 0xff, 7, 0); + + /* Fast Mode SCL High Counter */ + v = DIV_ROUND_UP_ULL(fs_scl_high * sfr, 1000000); + 
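	/*
	 * Worked example with the constants above: sfr = 266000000 / 10 / 1000
	 * = 26600 (kHz), so the standard-mode SCL high count programmed earlier
	 * is DIV_ROUND_UP(4000 * 26600, 1000000) = 107 SFR cycles, and the
	 * fast-mode high count computed here is
	 * DIV_ROUND_UP(600 * 26600, 1000000) = 16 SFR cycles.
	 */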
REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_HCNT_1_ADDR, + (v >> 8) & 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_HCNT_0_ADDR, + v & 0xff, 7, 0); + + /* Fast Mode SCL Low Counter */ + v = DIV_ROUND_UP_ULL(fs_scl_low * sfr, 1000000); + REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_LCNT_1_ADDR, + (v >> 8) & 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_I2CM_FS_SCL_LCNT_0_ADDR, + v & 0xff, 7, 0); + + /* SDA Hold Time */ + v = DIV_ROUND_UP_ULL(sda_hold * sfr, 1000000); + REG_FLD_MOD(base, HDMI_CORE_I2CM_SDA_HOLD_ADDR, v & 0xff, 7, 0); + + REG_FLD_MOD(base, HDMI_CORE_I2CM_SLAVE, 0x50, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGADDR, 0x30, 6, 0); + + /* NACK_POL to high */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 7, 7); + + /* NACK_MASK to unmasked */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x0, 6, 6); + + /* ARBITRATION_POL to high */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 3, 3); + + /* ARBITRATION_MASK to unmasked */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x0, 2, 2); + + /* DONE_POL to high */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 3, 3); + + /* DONE_MASK to unmasked */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x0, 2, 2); +} + +static void hdmi_core_ddc_uninit(struct hdmi_core_data *core) +{ + void __iomem *base = core->base; + + /* Mask I2C interrupts */ + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 6, 6); + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 2, 2); + REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2); +} + +static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext) +{ + void __iomem *base = core->base; + u8 cur_addr; + char checksum = 0; + const int retries = 1000; + u8 seg_ptr = ext / 2; + u8 edidbase = ((ext % 2) * 0x80); + + REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGPTR, seg_ptr, 7, 0); + + /* + * TODO: We use polling here, although we probably should use proper + * interrupts. + */ + for (cur_addr = 0; cur_addr < 128; ++cur_addr) { + int i; + + /* clear ERROR and DONE */ + REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0x3, 1, 0); + + REG_FLD_MOD(base, HDMI_CORE_I2CM_ADDRESS, + edidbase + cur_addr, 7, 0); + + if (seg_ptr) + REG_FLD_MOD(base, HDMI_CORE_I2CM_OPERATION, 1, 1, 1); + else + REG_FLD_MOD(base, HDMI_CORE_I2CM_OPERATION, 1, 0, 0); + + for (i = 0; i < retries; ++i) { + u32 stat; + + stat = REG_GET(base, HDMI_CORE_IH_I2CM_STAT0, 1, 0); + + /* I2CM_ERROR */ + if (stat & 1) { + DSSERR("HDMI I2C Master Error\n"); + return -EIO; + } + + /* I2CM_DONE */ + if (stat & (1 << 1)) + break; + + usleep_range(250, 1000); + } + + if (i == retries) { + DSSERR("HDMI I2C timeout reading EDID\n"); + return -EIO; + } + + pedid[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0); + checksum += pedid[cur_addr]; + } + + return 0; + +} + +int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len) +{ + int r, n, i; + int max_ext_blocks = (len / 128) - 1; + + if (len < 128) + return -EINVAL; + + hdmi_core_ddc_init(core); + + r = hdmi_core_ddc_edid(core, edid, 0); + if (r) + goto out; + + n = edid[0x7e]; + + if (n > max_ext_blocks) + n = max_ext_blocks; + + for (i = 1; i <= n; i++) { + r = hdmi_core_ddc_edid(core, edid + i * EDID_LENGTH, i); + if (r) + goto out; + } + +out: + hdmi_core_ddc_uninit(core); + + return r ? 
r : len; +} + +void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s) +{ + +#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\ + hdmi_read_reg(core->base, r)) + + DUMPCORE(HDMI_CORE_FC_INVIDCONF); + DUMPCORE(HDMI_CORE_FC_INHACTIV0); + DUMPCORE(HDMI_CORE_FC_INHACTIV1); + DUMPCORE(HDMI_CORE_FC_INHBLANK0); + DUMPCORE(HDMI_CORE_FC_INHBLANK1); + DUMPCORE(HDMI_CORE_FC_INVACTIV0); + DUMPCORE(HDMI_CORE_FC_INVACTIV1); + DUMPCORE(HDMI_CORE_FC_INVBLANK); + DUMPCORE(HDMI_CORE_FC_HSYNCINDELAY0); + DUMPCORE(HDMI_CORE_FC_HSYNCINDELAY1); + DUMPCORE(HDMI_CORE_FC_HSYNCINWIDTH0); + DUMPCORE(HDMI_CORE_FC_HSYNCINWIDTH1); + DUMPCORE(HDMI_CORE_FC_VSYNCINDELAY); + DUMPCORE(HDMI_CORE_FC_VSYNCINWIDTH); + DUMPCORE(HDMI_CORE_FC_CTRLDUR); + DUMPCORE(HDMI_CORE_FC_EXCTRLDUR); + DUMPCORE(HDMI_CORE_FC_EXCTRLSPAC); + DUMPCORE(HDMI_CORE_FC_CH0PREAM); + DUMPCORE(HDMI_CORE_FC_CH1PREAM); + DUMPCORE(HDMI_CORE_FC_CH2PREAM); + DUMPCORE(HDMI_CORE_FC_AVICONF0); + DUMPCORE(HDMI_CORE_FC_AVICONF1); + DUMPCORE(HDMI_CORE_FC_AVICONF2); + DUMPCORE(HDMI_CORE_FC_AVIVID); + DUMPCORE(HDMI_CORE_FC_PRCONF); + + DUMPCORE(HDMI_CORE_MC_CLKDIS); + DUMPCORE(HDMI_CORE_MC_SWRSTZREQ); + DUMPCORE(HDMI_CORE_MC_FLOWCTRL); + DUMPCORE(HDMI_CORE_MC_PHYRSTZ); + DUMPCORE(HDMI_CORE_MC_LOCKONCLOCK); + + DUMPCORE(HDMI_CORE_I2CM_SLAVE); + DUMPCORE(HDMI_CORE_I2CM_ADDRESS); + DUMPCORE(HDMI_CORE_I2CM_DATAO); + DUMPCORE(HDMI_CORE_I2CM_DATAI); + DUMPCORE(HDMI_CORE_I2CM_OPERATION); + DUMPCORE(HDMI_CORE_I2CM_INT); + DUMPCORE(HDMI_CORE_I2CM_CTLINT); + DUMPCORE(HDMI_CORE_I2CM_DIV); + DUMPCORE(HDMI_CORE_I2CM_SEGADDR); + DUMPCORE(HDMI_CORE_I2CM_SOFTRSTZ); + DUMPCORE(HDMI_CORE_I2CM_SEGPTR); + DUMPCORE(HDMI_CORE_I2CM_SS_SCL_HCNT_1_ADDR); + DUMPCORE(HDMI_CORE_I2CM_SS_SCL_HCNT_0_ADDR); + DUMPCORE(HDMI_CORE_I2CM_SS_SCL_LCNT_1_ADDR); + DUMPCORE(HDMI_CORE_I2CM_SS_SCL_LCNT_0_ADDR); + DUMPCORE(HDMI_CORE_I2CM_FS_SCL_HCNT_1_ADDR); + DUMPCORE(HDMI_CORE_I2CM_FS_SCL_HCNT_0_ADDR); + DUMPCORE(HDMI_CORE_I2CM_FS_SCL_LCNT_1_ADDR); + DUMPCORE(HDMI_CORE_I2CM_FS_SCL_LCNT_0_ADDR); + DUMPCORE(HDMI_CORE_I2CM_SDA_HOLD_ADDR); +} + +static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg, + struct hdmi_config *cfg) +{ + DSSDBG("hdmi_core_init\n"); + + /* video core */ + video_cfg->data_enable_pol = 1; /* It is always 1*/ + video_cfg->v_fc_config.timings.hsync_level = cfg->timings.hsync_level; + video_cfg->v_fc_config.timings.x_res = cfg->timings.x_res; + video_cfg->v_fc_config.timings.hsw = cfg->timings.hsw - 1; + video_cfg->v_fc_config.timings.hbp = cfg->timings.hbp; + video_cfg->v_fc_config.timings.hfp = cfg->timings.hfp; + video_cfg->hblank = cfg->timings.hfp + + cfg->timings.hbp + cfg->timings.hsw - 1; + video_cfg->v_fc_config.timings.vsync_level = cfg->timings.vsync_level; + video_cfg->v_fc_config.timings.y_res = cfg->timings.y_res; + video_cfg->v_fc_config.timings.vsw = cfg->timings.vsw; + video_cfg->v_fc_config.timings.vfp = cfg->timings.vfp; + video_cfg->v_fc_config.timings.vbp = cfg->timings.vbp; + video_cfg->vblank_osc = 0; /* Always 0 - need to confirm */ + video_cfg->vblank = cfg->timings.vsw + + cfg->timings.vfp + cfg->timings.vbp; + video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode; + video_cfg->v_fc_config.timings.interlace = cfg->timings.interlace; +} + +/* DSS_HDMI_CORE_VIDEO_CONFIG */ +static void hdmi_core_video_config(struct hdmi_core_data *core, + struct hdmi_core_vid_config *cfg) +{ + void __iomem *base = core->base; + unsigned char r = 0; + bool vsync_pol, hsync_pol; + + vsync_pol = + cfg->v_fc_config.timings.vsync_level == 
OMAPDSS_SIG_ACTIVE_HIGH; + hsync_pol = + cfg->v_fc_config.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH; + + /* Set hsync, vsync and data-enable polarity */ + r = hdmi_read_reg(base, HDMI_CORE_FC_INVIDCONF); + r = FLD_MOD(r, vsync_pol, 6, 6); + r = FLD_MOD(r, hsync_pol, 5, 5); + r = FLD_MOD(r, cfg->data_enable_pol, 4, 4); + r = FLD_MOD(r, cfg->vblank_osc, 1, 1); + r = FLD_MOD(r, cfg->v_fc_config.timings.interlace, 0, 0); + hdmi_write_reg(base, HDMI_CORE_FC_INVIDCONF, r); + + /* set x resolution */ + REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV1, + cfg->v_fc_config.timings.x_res >> 8, 4, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_INHACTIV0, + cfg->v_fc_config.timings.x_res & 0xFF, 7, 0); + + /* set y resolution */ + REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV1, + cfg->v_fc_config.timings.y_res >> 8, 4, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_INVACTIV0, + cfg->v_fc_config.timings.y_res & 0xFF, 7, 0); + + /* set horizontal blanking pixels */ + REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK1, cfg->hblank >> 8, 4, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_INHBLANK0, cfg->hblank & 0xFF, 7, 0); + + /* set vertial blanking pixels */ + REG_FLD_MOD(base, HDMI_CORE_FC_INVBLANK, cfg->vblank, 7, 0); + + /* set horizontal sync offset */ + REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY1, + cfg->v_fc_config.timings.hfp >> 8, 4, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINDELAY0, + cfg->v_fc_config.timings.hfp & 0xFF, 7, 0); + + /* set vertical sync offset */ + REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINDELAY, + cfg->v_fc_config.timings.vfp, 7, 0); + + /* set horizontal sync pulse width */ + REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH1, + (cfg->v_fc_config.timings.hsw >> 8), 1, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_HSYNCINWIDTH0, + cfg->v_fc_config.timings.hsw & 0xFF, 7, 0); + + /* set vertical sync pulse width */ + REG_FLD_MOD(base, HDMI_CORE_FC_VSYNCINWIDTH, + cfg->v_fc_config.timings.vsw, 5, 0); + + /* select DVI mode */ + REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF, + cfg->v_fc_config.hdmi_dvi_mode, 3, 3); +} + +static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core) +{ + void __iomem *base = core->base; + int clr_depth = 0; /* 24 bit color depth */ + + /* COLOR_DEPTH */ + REG_FLD_MOD(base, HDMI_CORE_VP_PR_CD, clr_depth, 7, 4); + /* BYPASS_EN */ + REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 1, 6, 6); + /* PP_EN */ + REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 1 : 0, 5, 5); + /* YCC422_EN */ + REG_FLD_MOD(base, HDMI_CORE_VP_CONF, 0, 3, 3); + /* PP_STUFFING */ + REG_FLD_MOD(base, HDMI_CORE_VP_STUFF, clr_depth ? 1 : 0, 1, 1); + /* YCC422_STUFFING */ + REG_FLD_MOD(base, HDMI_CORE_VP_STUFF, 1, 2, 2); + /* OUTPUT_SELECTOR */ + REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 
0 : 2, 1, 0); +} + +static void hdmi_core_config_csc(struct hdmi_core_data *core) +{ + int clr_depth = 0; /* 24 bit color depth */ + + /* CSC_COLORDEPTH */ + REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, clr_depth, 7, 4); +} + +static void hdmi_core_config_video_sampler(struct hdmi_core_data *core) +{ + int video_mapping = 1; /* for 24 bit color depth */ + + /* VIDEO_MAPPING */ + REG_FLD_MOD(core->base, HDMI_CORE_TX_INVID0, video_mapping, 4, 0); +} + +static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, + struct hdmi_avi_infoframe *frame) +{ + void __iomem *base = core->base; + u8 data[HDMI_INFOFRAME_SIZE(AVI)]; + u8 *ptr; + unsigned y, a, b, s; + unsigned c, m, r; + unsigned itc, ec, q, sc; + unsigned vic; + unsigned yq, cn, pr; + + hdmi_avi_infoframe_pack(frame, data, sizeof(data)); + + print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data, + HDMI_INFOFRAME_SIZE(AVI), false); + + ptr = data + HDMI_INFOFRAME_HEADER_SIZE; + + y = (ptr[0] >> 5) & 0x3; + a = (ptr[0] >> 4) & 0x1; + b = (ptr[0] >> 2) & 0x3; + s = (ptr[0] >> 0) & 0x3; + + c = (ptr[1] >> 6) & 0x3; + m = (ptr[1] >> 4) & 0x3; + r = (ptr[1] >> 0) & 0x3; + + itc = (ptr[2] >> 7) & 0x1; + ec = (ptr[2] >> 4) & 0x7; + q = (ptr[2] >> 2) & 0x3; + sc = (ptr[2] >> 0) & 0x3; + + vic = ptr[3]; + + yq = (ptr[4] >> 6) & 0x3; + cn = (ptr[4] >> 4) & 0x3; + pr = (ptr[4] >> 0) & 0xf; + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF0, + (a << 6) | (s << 4) | (b << 2) | (y << 0)); + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF1, + (c << 6) | (m << 4) | (r << 0)); + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF2, + (itc << 7) | (ec << 4) | (q << 2) | (sc << 0)); + + hdmi_write_reg(base, HDMI_CORE_FC_AVIVID, vic); + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF3, + (yq << 2) | (cn << 0)); + + REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0); +} + +static void hdmi_core_csc_config(struct hdmi_core_data *core, + struct csc_table csc_coeff) +{ + void __iomem *base = core->base; + + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff.a1 >> 8 , 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff.a1, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff.a2 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff.a2, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff.a3 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff.a3, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff.a4 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff.a4, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff.b1 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff.b1, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff.b2 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff.b2, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff.b3 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff.b3, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff.b4 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff.b4, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff.c1 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff.c1, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff.c2 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff.c2, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff.c3 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff.c3, 7, 0); + REG_FLD_MOD(base, 
HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff.c4 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff.c4, 7, 0); + + REG_FLD_MOD(base, HDMI_CORE_MC_FLOWCTRL, 0x1, 0, 0); +} + +static void hdmi_core_configure_range(struct hdmi_core_data *core) +{ + struct csc_table csc_coeff = { 0 }; + + /* support limited range with 24 bit color depth for now */ + csc_coeff = csc_table_deepcolor[0]; + + hdmi_core_csc_config(core, csc_coeff); +} + +static void hdmi_core_enable_video_path(struct hdmi_core_data *core) +{ + void __iomem *base = core->base; + + DSSDBG("hdmi_core_enable_video_path\n"); + + REG_FLD_MOD(base, HDMI_CORE_FC_CTRLDUR, 0x0C, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_EXCTRLDUR, 0x20, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_EXCTRLSPAC, 0x01, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_CH0PREAM, 0x0B, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_CH1PREAM, 0x16, 5, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_CH2PREAM, 0x21, 5, 0); + REG_FLD_MOD(base, HDMI_CORE_MC_CLKDIS, 0x00, 0, 0); + REG_FLD_MOD(base, HDMI_CORE_MC_CLKDIS, 0x00, 1, 1); +} + +static void hdmi_core_mask_interrupts(struct hdmi_core_data *core) +{ + void __iomem *base = core->base; + + /* Master IRQ mask */ + REG_FLD_MOD(base, HDMI_CORE_IH_MUTE, 0x3, 1, 0); + + /* Mask all the interrupts in HDMI core */ + + REG_FLD_MOD(base, HDMI_CORE_VP_MASK, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_MASK0, 0xe7, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_MASK1, 0xfb, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_MASK2, 0x3, 1, 0); + + REG_FLD_MOD(base, HDMI_CORE_AUD_INT, 0x3, 3, 2); + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_MASK, 0x3, 1, 0); + + REG_FLD_MOD(base, HDMI_CORE_CEC_MASK, 0x7f, 6, 0); + + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 6, 6); + REG_FLD_MOD(base, HDMI_CORE_I2CM_CTLINT, 0x1, 2, 2); + REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2); + + REG_FLD_MOD(base, HDMI_CORE_PHY_MASK0, 0xf3, 7, 0); + + REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0); + + /* Clear all the current interrupt bits */ + + REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xe7, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xfb, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0x3, 1, 0); + + REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0x7, 2, 0); + + REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0x7f, 6, 0); + + REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0x3, 1, 0); + + REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0); +} + +static void hdmi_core_enable_interrupts(struct hdmi_core_data *core) +{ + /* Unmute interrupts */ + REG_FLD_MOD(core->base, HDMI_CORE_IH_MUTE, 0x0, 1, 0); +} + +int hdmi5_core_handle_irqs(struct hdmi_core_data *core) +{ + void __iomem *base = core->base; + + REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT0, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT1, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_FC_STAT2, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_AS_STAT0, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_PHY_STAT0, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_I2CM_STAT0, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_CEC_STAT0, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_VP_STAT0, 0xff, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_IH_I2CMPHY_STAT0, 0xff, 7, 0); + + return 0; +} + +void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, + struct hdmi_config *cfg) +{ + struct omap_video_timings video_timing; + struct hdmi_video_format video_format; + struct hdmi_core_vid_config v_core_cfg; + + hdmi_core_mask_interrupts(core); + + hdmi_core_init(&v_core_cfg, cfg); + + 
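/* Configure the HDMI wrapper (video format and timings) before the core video path is set up below */ +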
hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg); + + hdmi_wp_video_config_timing(wp, &video_timing); + + /* video config */ + video_format.packing_mode = HDMI_PACK_24b_RGB_YUV444_YUV422; + + hdmi_wp_video_config_format(wp, &video_format); + + hdmi_wp_video_config_interface(wp, &video_timing); + + /* support limited range with 24 bit color depth for now */ + hdmi_core_configure_range(core); + cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; + + /* + * configure core video part, set software reset in the core + */ + v_core_cfg.packet_mode = HDMI_PACKETMODE24BITPERPIXEL; + + hdmi_core_video_config(core, &v_core_cfg); + + hdmi_core_config_video_packetizer(core); + hdmi_core_config_csc(core); + hdmi_core_config_video_sampler(core); + + if (cfg->hdmi_dvi_mode == HDMI_HDMI) + hdmi_core_write_avi_infoframe(core, &cfg->infoframe); + + hdmi_core_enable_video_path(core); + + hdmi_core_enable_interrupts(core); +} + +static void hdmi5_core_audio_config(struct hdmi_core_data *core, + struct hdmi_core_audio_config *cfg) +{ + void __iomem *base = core->base; + u8 val; + + /* Mute audio before configuring */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 0xf, 7, 4); + + /* Set the N parameter */ + REG_FLD_MOD(base, HDMI_CORE_AUD_N1, cfg->n, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_AUD_N2, cfg->n >> 8, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_AUD_N3, cfg->n >> 16, 3, 0); + + /* + * CTS manual mode. Automatic mode is not supported when using audio + * parallel interface. + */ + REG_FLD_MOD(base, HDMI_CORE_AUD_CTS3, 1, 4, 4); + REG_FLD_MOD(base, HDMI_CORE_AUD_CTS1, cfg->cts, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_AUD_CTS2, cfg->cts >> 8, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_AUD_CTS3, cfg->cts >> 16, 3, 0); + + /* Layout of Audio Sample Packets: 2-channel or multichannel */ + if (cfg->layout == HDMI_AUDIO_LAYOUT_2CH) + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 0, 0, 0); + else + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 1, 0, 0); + + /* Configure IEC-60958 Validity bits */ + /* Channel 0 is valid */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, 0, 0, 0); + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, 0, 4, 4); + + if (cfg->layout == HDMI_AUDIO_LAYOUT_2CH) + val = 1; + else + val = 0; + + /* Channels 1, 2 setting */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 1, 1); + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 5, 5); + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 2, 2); + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 6, 6); + /* Channel 3 setting */ + if (cfg->layout == HDMI_AUDIO_LAYOUT_6CH) + val = 1; + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 3, 3); + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSV, val, 7, 7); + + /* Configure IEC-60958 User bits */ + /* TODO: should be set by user. 
*/ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSU, 0, 7, 0); + + /* Configure IEC-60958 Channel Status word */ + /* CGMSA */ + val = cfg->iec60958_cfg->status[5] & IEC958_AES5_CON_CGMSA; + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(0), val, 5, 4); + + /* Copyright */ + val = (cfg->iec60958_cfg->status[0] & + IEC958_AES0_CON_NOT_COPYRIGHT) >> 2; + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(0), val, 0, 0); + + /* Category */ + hdmi_write_reg(base, HDMI_CORE_FC_AUDSCHNLS(1), + cfg->iec60958_cfg->status[1]); + + /* PCM audio mode */ + val = (cfg->iec60958_cfg->status[0] & IEC958_AES0_CON_MODE) >> 6; + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 6, 4); + + /* Source number */ + val = cfg->iec60958_cfg->status[2] & IEC958_AES2_CON_SOURCE; + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(2), val, 3, 0); + + /* Channel number right 0 */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(3), 2, 3, 0); + /* Channel number right 1*/ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(3), 4, 7, 4); + /* Channel number right 2 */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(4), 6, 3, 0); + /* Channel number right 3*/ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(4), 8, 7, 4); + /* Channel number left 0 */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(5), 1, 3, 0); + /* Channel number left 1*/ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(5), 3, 7, 4); + /* Channel number left 2 */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(6), 5, 3, 0); + /* Channel number left 3*/ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCHNLS(6), 7, 7, 4); + + /* Clock accuracy and sample rate */ + hdmi_write_reg(base, HDMI_CORE_FC_AUDSCHNLS(7), + cfg->iec60958_cfg->status[3]); + + /* Original sample rate and word length */ + hdmi_write_reg(base, HDMI_CORE_FC_AUDSCHNLS(8), + cfg->iec60958_cfg->status[4]); + + /* Enable FIFO empty and full interrupts */ + REG_FLD_MOD(base, HDMI_CORE_AUD_INT, 3, 3, 2); + + /* Configure GPA */ + /* select HBR/SPDIF interfaces */ + if (cfg->layout == HDMI_AUDIO_LAYOUT_2CH) { + /* select HBR/SPDIF interfaces */ + REG_FLD_MOD(base, HDMI_CORE_AUD_CONF0, 0, 5, 5); + /* enable two channels in GPA */ + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF1, 3, 7, 0); + } else if (cfg->layout == HDMI_AUDIO_LAYOUT_6CH) { + /* select HBR/SPDIF interfaces */ + REG_FLD_MOD(base, HDMI_CORE_AUD_CONF0, 0, 5, 5); + /* enable six channels in GPA */ + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF1, 0x3F, 7, 0); + } else { + /* select HBR/SPDIF interfaces */ + REG_FLD_MOD(base, HDMI_CORE_AUD_CONF0, 0, 5, 5); + /* enable eight channels in GPA */ + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF1, 0xFF, 7, 0); + } + + /* disable HBR */ + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF2, 0, 0, 0); + /* enable PCUV */ + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_CONF2, 1, 1, 1); + /* enable GPA FIFO full and empty mask */ + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_MASK, 3, 1, 0); + /* set polarity of GPA FIFO empty interrupts */ + REG_FLD_MOD(base, HDMI_CORE_AUD_GP_POL, 1, 0, 0); + + /* unmute audio */ + REG_FLD_MOD(base, HDMI_CORE_FC_AUDSCONF, 0, 7, 4); +} + +static void hdmi5_core_audio_infoframe_cfg(struct hdmi_core_data *core, + struct snd_cea_861_aud_if *info_aud) +{ + void __iomem *base = core->base; + + /* channel count and coding type fields in AUDICONF0 are swapped */ + hdmi_write_reg(base, HDMI_CORE_FC_AUDICONF0, + (info_aud->db1_ct_cc & CEA861_AUDIO_INFOFRAME_DB1CC) << 4 | + (info_aud->db1_ct_cc & CEA861_AUDIO_INFOFRAME_DB1CT) >> 4); + + hdmi_write_reg(base, HDMI_CORE_FC_AUDICONF1, info_aud->db2_sf_ss); + hdmi_write_reg(base, HDMI_CORE_FC_AUDICONF2, info_aud->db4_ca); + hdmi_write_reg(base, 
HDMI_CORE_FC_AUDICONF3, + (info_aud->db5_dminh_lsv & CEA861_AUDIO_INFOFRAME_DB5_DM_INH) >> 3 | + (info_aud->db5_dminh_lsv & CEA861_AUDIO_INFOFRAME_DB5_LSV)); +} + +int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, + struct omap_dss_audio *audio, u32 pclk) +{ + struct hdmi_audio_format audio_format; + struct hdmi_audio_dma audio_dma; + struct hdmi_core_audio_config core_cfg; + int err, n, cts, channel_count; + unsigned int fs_nr; + bool word_length_16b = false; + + if (!audio || !audio->iec || !audio->cea || !core) + return -EINVAL; + + core_cfg.iec60958_cfg = audio->iec; + + if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24) && + (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)) + word_length_16b = true; + + /* only 16-bit word length supported atm */ + if (!word_length_16b) + return -EINVAL; + + switch (audio->iec->status[3] & IEC958_AES3_CON_FS) { + case IEC958_AES3_CON_FS_32000: + fs_nr = 32000; + break; + case IEC958_AES3_CON_FS_44100: + fs_nr = 44100; + break; + case IEC958_AES3_CON_FS_48000: + fs_nr = 48000; + break; + case IEC958_AES3_CON_FS_88200: + fs_nr = 88200; + break; + case IEC958_AES3_CON_FS_96000: + fs_nr = 96000; + break; + case IEC958_AES3_CON_FS_176400: + fs_nr = 176400; + break; + case IEC958_AES3_CON_FS_192000: + fs_nr = 192000; + break; + default: + return -EINVAL; + } + + err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); + core_cfg.n = n; + core_cfg.cts = cts; + + /* Audio channels settings */ + channel_count = (audio->cea->db1_ct_cc & CEA861_AUDIO_INFOFRAME_DB1CC) + + 1; + + if (channel_count == 2) + core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH; + else if (channel_count == 6) + core_cfg.layout = HDMI_AUDIO_LAYOUT_6CH; + else + core_cfg.layout = HDMI_AUDIO_LAYOUT_8CH; + + /* DMA settings */ + if (word_length_16b) + audio_dma.transfer_size = 0x10; + else + audio_dma.transfer_size = 0x20; + audio_dma.block_size = 0xC0; + audio_dma.mode = HDMI_AUDIO_TRANSF_DMA; + audio_dma.fifo_threshold = 0x20; /* in number of samples */ + + /* audio FIFO format settings for 16-bit samples*/ + audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES; + audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS; + audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT; + audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; + + /* only LPCM atm */ + audio_format.type = HDMI_AUDIO_TYPE_LPCM; + + /* only allowed option */ + audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; + + /* disable start/stop signals of IEC 60958 blocks */ + audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON; + + /* configure DMA and audio FIFO format*/ + hdmi_wp_audio_config_dma(wp, &audio_dma); + hdmi_wp_audio_config_format(wp, &audio_format); + + /* configure the core */ + hdmi5_core_audio_config(core, &core_cfg); + + /* configure CEA 861 audio infoframe */ + hdmi5_core_audio_infoframe_cfg(core, audio->cea); + + return 0; +} + +int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core) +{ + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); + if (!res) { + DSSERR("can't get CORE IORESOURCE_MEM HDMI\n"); + return -EINVAL; + } + + core->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(core->base)) { + DSSERR("can't ioremap HDMI core\n"); + return PTR_ERR(core->base); + } + + return 0; +} diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h new file mode 100644 index 000000000000..f2f1022c5516 --- /dev/null +++ 
b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h @@ -0,0 +1,304 @@ +/* + * HDMI driver definition for TI OMAP5 processors. + * + * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _HDMI5_CORE_H_ +#define _HDMI5_CORE_H_ + +#include "hdmi.h" + +/* HDMI IP Core System */ + +/* HDMI Identification */ +#define HDMI_CORE_DESIGN_ID 0x00000 +#define HDMI_CORE_REVISION_ID 0x00004 +#define HDMI_CORE_PRODUCT_ID0 0x00008 +#define HDMI_CORE_PRODUCT_ID1 0x0000C +#define HDMI_CORE_CONFIG0_ID 0x00010 +#define HDMI_CORE_CONFIG1_ID 0x00014 +#define HDMI_CORE_CONFIG2_ID 0x00018 +#define HDMI_CORE_CONFIG3_ID 0x0001C + +/* HDMI Interrupt */ +#define HDMI_CORE_IH_FC_STAT0 0x00400 +#define HDMI_CORE_IH_FC_STAT1 0x00404 +#define HDMI_CORE_IH_FC_STAT2 0x00408 +#define HDMI_CORE_IH_AS_STAT0 0x0040C +#define HDMI_CORE_IH_PHY_STAT0 0x00410 +#define HDMI_CORE_IH_I2CM_STAT0 0x00414 +#define HDMI_CORE_IH_CEC_STAT0 0x00418 +#define HDMI_CORE_IH_VP_STAT0 0x0041C +#define HDMI_CORE_IH_I2CMPHY_STAT0 0x00420 +#define HDMI_CORE_IH_MUTE 0x007FC + +/* HDMI Video Sampler */ +#define HDMI_CORE_TX_INVID0 0x00800 +#define HDMI_CORE_TX_INSTUFFING 0x00804 +#define HDMI_CORE_TX_RGYDATA0 0x00808 +#define HDMI_CORE_TX_RGYDATA1 0x0080C +#define HDMI_CORE_TX_RCRDATA0 0x00810 +#define HDMI_CORE_TX_RCRDATA1 0x00814 +#define HDMI_CORE_TX_BCBDATA0 0x00818 +#define HDMI_CORE_TX_BCBDATA1 0x0081C + +/* HDMI Video Packetizer */ +#define HDMI_CORE_VP_STATUS 0x02000 +#define HDMI_CORE_VP_PR_CD 0x02004 +#define HDMI_CORE_VP_STUFF 0x02008 +#define HDMI_CORE_VP_REMAP 0x0200C +#define HDMI_CORE_VP_CONF 0x02010 +#define HDMI_CORE_VP_STAT 0x02014 +#define HDMI_CORE_VP_INT 0x02018 +#define HDMI_CORE_VP_MASK 0x0201C +#define HDMI_CORE_VP_POL 0x02020 + +/* Frame Composer */ +#define HDMI_CORE_FC_INVIDCONF 0x04000 +#define HDMI_CORE_FC_INHACTIV0 0x04004 +#define HDMI_CORE_FC_INHACTIV1 0x04008 +#define HDMI_CORE_FC_INHBLANK0 0x0400C +#define HDMI_CORE_FC_INHBLANK1 0x04010 +#define HDMI_CORE_FC_INVACTIV0 0x04014 +#define HDMI_CORE_FC_INVACTIV1 0x04018 +#define HDMI_CORE_FC_INVBLANK 0x0401C +#define HDMI_CORE_FC_HSYNCINDELAY0 0x04020 +#define HDMI_CORE_FC_HSYNCINDELAY1 0x04024 +#define HDMI_CORE_FC_HSYNCINWIDTH0 0x04028 +#define HDMI_CORE_FC_HSYNCINWIDTH1 0x0402C +#define HDMI_CORE_FC_VSYNCINDELAY 0x04030 +#define HDMI_CORE_FC_VSYNCINWIDTH 0x04034 +#define HDMI_CORE_FC_INFREQ0 0x04038 +#define HDMI_CORE_FC_INFREQ1 0x0403C +#define HDMI_CORE_FC_INFREQ2 0x04040 +#define HDMI_CORE_FC_CTRLDUR 0x04044 +#define HDMI_CORE_FC_EXCTRLDUR 0x04048 +#define HDMI_CORE_FC_EXCTRLSPAC 0x0404C +#define HDMI_CORE_FC_CH0PREAM 0x04050 +#define HDMI_CORE_FC_CH1PREAM 0x04054 +#define HDMI_CORE_FC_CH2PREAM 0x04058 +#define HDMI_CORE_FC_AVICONF3 0x0405C +#define HDMI_CORE_FC_GCP 0x04060 +#define HDMI_CORE_FC_AVICONF0 0x04064 +#define HDMI_CORE_FC_AVICONF1 0x04068 +#define HDMI_CORE_FC_AVICONF2 0x0406C +#define HDMI_CORE_FC_AVIVID 0x04070 +#define HDMI_CORE_FC_AVIETB0 
0x04074 +#define HDMI_CORE_FC_AVIETB1 0x04078 +#define HDMI_CORE_FC_AVISBB0 0x0407C +#define HDMI_CORE_FC_AVISBB1 0x04080 +#define HDMI_CORE_FC_AVIELB0 0x04084 +#define HDMI_CORE_FC_AVIELB1 0x04088 +#define HDMI_CORE_FC_AVISRB0 0x0408C +#define HDMI_CORE_FC_AVISRB1 0x04090 +#define HDMI_CORE_FC_AUDICONF0 0x04094 +#define HDMI_CORE_FC_AUDICONF1 0x04098 +#define HDMI_CORE_FC_AUDICONF2 0x0409C +#define HDMI_CORE_FC_AUDICONF3 0x040A0 +#define HDMI_CORE_FC_VSDIEEEID0 0x040A4 +#define HDMI_CORE_FC_VSDSIZE 0x040A8 +#define HDMI_CORE_FC_VSDIEEEID1 0x040C0 +#define HDMI_CORE_FC_VSDIEEEID2 0x040C4 +#define HDMI_CORE_FC_VSDPAYLOAD(n) (n * 4 + 0x040C8) +#define HDMI_CORE_FC_SPDVENDORNAME(n) (n * 4 + 0x04128) +#define HDMI_CORE_FC_SPDPRODUCTNAME(n) (n * 4 + 0x04148) +#define HDMI_CORE_FC_SPDDEVICEINF 0x04188 +#define HDMI_CORE_FC_AUDSCONF 0x0418C +#define HDMI_CORE_FC_AUDSSTAT 0x04190 +#define HDMI_CORE_FC_AUDSV 0x04194 +#define HDMI_CORE_FC_AUDSU 0x04198 +#define HDMI_CORE_FC_AUDSCHNLS(n) (n * 4 + 0x0419C) +#define HDMI_CORE_FC_CTRLQHIGH 0x041CC +#define HDMI_CORE_FC_CTRLQLOW 0x041D0 +#define HDMI_CORE_FC_ACP0 0x041D4 +#define HDMI_CORE_FC_ACP(n) ((16-n) * 4 + 0x04208) +#define HDMI_CORE_FC_ISCR1_0 0x04248 +#define HDMI_CORE_FC_ISCR1(n) ((16-n) * 4 + 0x0424C) +#define HDMI_CORE_FC_ISCR2(n) ((15-n) * 4 + 0x0428C) +#define HDMI_CORE_FC_DATAUTO0 0x042CC +#define HDMI_CORE_FC_DATAUTO1 0x042D0 +#define HDMI_CORE_FC_DATAUTO2 0x042D4 +#define HDMI_CORE_FC_DATMAN 0x042D8 +#define HDMI_CORE_FC_DATAUTO3 0x042DC +#define HDMI_CORE_FC_RDRB(n) (n * 4 + 0x042E0) +#define HDMI_CORE_FC_STAT0 0x04340 +#define HDMI_CORE_FC_INT0 0x04344 +#define HDMI_CORE_FC_MASK0 0x04348 +#define HDMI_CORE_FC_POL0 0x0434C +#define HDMI_CORE_FC_STAT1 0x04350 +#define HDMI_CORE_FC_INT1 0x04354 +#define HDMI_CORE_FC_MASK1 0x04358 +#define HDMI_CORE_FC_POL1 0x0435C +#define HDMI_CORE_FC_STAT2 0x04360 +#define HDMI_CORE_FC_INT2 0x04364 +#define HDMI_CORE_FC_MASK2 0x04368 +#define HDMI_CORE_FC_POL2 0x0436C +#define HDMI_CORE_FC_PRCONF 0x04380 +#define HDMI_CORE_FC_GMD_STAT 0x04400 +#define HDMI_CORE_FC_GMD_EN 0x04404 +#define HDMI_CORE_FC_GMD_UP 0x04408 +#define HDMI_CORE_FC_GMD_CONF 0x0440C +#define HDMI_CORE_FC_GMD_HB 0x04410 +#define HDMI_CORE_FC_GMD_PB(n) (n * 4 + 0x04414) +#define HDMI_CORE_FC_DBGFORCE 0x04800 +#define HDMI_CORE_FC_DBGAUD0CH0 0x04804 +#define HDMI_CORE_FC_DBGAUD1CH0 0x04808 +#define HDMI_CORE_FC_DBGAUD2CH0 0x0480C +#define HDMI_CORE_FC_DBGAUD0CH1 0x04810 +#define HDMI_CORE_FC_DBGAUD1CH1 0x04814 +#define HDMI_CORE_FC_DBGAUD2CH1 0x04818 +#define HDMI_CORE_FC_DBGAUD0CH2 0x0481C +#define HDMI_CORE_FC_DBGAUD1CH2 0x04820 +#define HDMI_CORE_FC_DBGAUD2CH2 0x04824 +#define HDMI_CORE_FC_DBGAUD0CH3 0x04828 +#define HDMI_CORE_FC_DBGAUD1CH3 0x0482C +#define HDMI_CORE_FC_DBGAUD2CH3 0x04830 +#define HDMI_CORE_FC_DBGAUD0CH4 0x04834 +#define HDMI_CORE_FC_DBGAUD1CH4 0x04838 +#define HDMI_CORE_FC_DBGAUD2CH4 0x0483C +#define HDMI_CORE_FC_DBGAUD0CH5 0x04840 +#define HDMI_CORE_FC_DBGAUD1CH5 0x04844 +#define HDMI_CORE_FC_DBGAUD2CH5 0x04848 +#define HDMI_CORE_FC_DBGAUD0CH6 0x0484C +#define HDMI_CORE_FC_DBGAUD1CH6 0x04850 +#define HDMI_CORE_FC_DBGAUD2CH6 0x04854 +#define HDMI_CORE_FC_DBGAUD0CH7 0x04858 +#define HDMI_CORE_FC_DBGAUD1CH7 0x0485C +#define HDMI_CORE_FC_DBGAUD2CH7 0x04860 +#define HDMI_CORE_FC_DBGTMDS0 0x04864 +#define HDMI_CORE_FC_DBGTMDS1 0x04868 +#define HDMI_CORE_FC_DBGTMDS2 0x0486C +#define HDMI_CORE_PHY_MASK0 0x0C018 +#define HDMI_CORE_PHY_I2CM_INT_ADDR 0x0C09C +#define HDMI_CORE_PHY_I2CM_CTLINT_ADDR 0x0C0A0 + +/* HDMI Audio */ 
+#define HDMI_CORE_AUD_CONF0 0x0C400 +#define HDMI_CORE_AUD_CONF1 0x0C404 +#define HDMI_CORE_AUD_INT 0x0C408 +#define HDMI_CORE_AUD_N1 0x0C800 +#define HDMI_CORE_AUD_N2 0x0C804 +#define HDMI_CORE_AUD_N3 0x0C808 +#define HDMI_CORE_AUD_CTS1 0x0C80C +#define HDMI_CORE_AUD_CTS2 0x0C810 +#define HDMI_CORE_AUD_CTS3 0x0C814 +#define HDMI_CORE_AUD_INCLKFS 0x0C818 +#define HDMI_CORE_AUD_CC08 0x0CC08 +#define HDMI_CORE_AUD_GP_CONF0 0x0D400 +#define HDMI_CORE_AUD_GP_CONF1 0x0D404 +#define HDMI_CORE_AUD_GP_CONF2 0x0D408 +#define HDMI_CORE_AUD_D010 0x0D010 +#define HDMI_CORE_AUD_GP_STAT 0x0D40C +#define HDMI_CORE_AUD_GP_INT 0x0D410 +#define HDMI_CORE_AUD_GP_POL 0x0D414 +#define HDMI_CORE_AUD_GP_MASK 0x0D418 + +/* HDMI Main Controller */ +#define HDMI_CORE_MC_CLKDIS 0x10004 +#define HDMI_CORE_MC_SWRSTZREQ 0x10008 +#define HDMI_CORE_MC_FLOWCTRL 0x10010 +#define HDMI_CORE_MC_PHYRSTZ 0x10014 +#define HDMI_CORE_MC_LOCKONCLOCK 0x10018 + +/* HDMI COLOR SPACE CONVERTER */ +#define HDMI_CORE_CSC_CFG 0x10400 +#define HDMI_CORE_CSC_SCALE 0x10404 +#define HDMI_CORE_CSC_COEF_A1_MSB 0x10408 +#define HDMI_CORE_CSC_COEF_A1_LSB 0x1040C +#define HDMI_CORE_CSC_COEF_A2_MSB 0x10410 +#define HDMI_CORE_CSC_COEF_A2_LSB 0x10414 +#define HDMI_CORE_CSC_COEF_A3_MSB 0x10418 +#define HDMI_CORE_CSC_COEF_A3_LSB 0x1041C +#define HDMI_CORE_CSC_COEF_A4_MSB 0x10420 +#define HDMI_CORE_CSC_COEF_A4_LSB 0x10424 +#define HDMI_CORE_CSC_COEF_B1_MSB 0x10428 +#define HDMI_CORE_CSC_COEF_B1_LSB 0x1042C +#define HDMI_CORE_CSC_COEF_B2_MSB 0x10430 +#define HDMI_CORE_CSC_COEF_B2_LSB 0x10434 +#define HDMI_CORE_CSC_COEF_B3_MSB 0x10438 +#define HDMI_CORE_CSC_COEF_B3_LSB 0x1043C +#define HDMI_CORE_CSC_COEF_B4_MSB 0x10440 +#define HDMI_CORE_CSC_COEF_B4_LSB 0x10444 +#define HDMI_CORE_CSC_COEF_C1_MSB 0x10448 +#define HDMI_CORE_CSC_COEF_C1_LSB 0x1044C +#define HDMI_CORE_CSC_COEF_C2_MSB 0x10450 +#define HDMI_CORE_CSC_COEF_C2_LSB 0x10454 +#define HDMI_CORE_CSC_COEF_C3_MSB 0x10458 +#define HDMI_CORE_CSC_COEF_C3_LSB 0x1045C +#define HDMI_CORE_CSC_COEF_C4_MSB 0x10460 +#define HDMI_CORE_CSC_COEF_C4_LSB 0x10464 + +/* HDMI HDCP */ +#define HDMI_CORE_HDCP_MASK 0x14020 + +/* HDMI CEC */ +#define HDMI_CORE_CEC_MASK 0x17408 + +/* HDMI I2C Master */ +#define HDMI_CORE_I2CM_SLAVE 0x157C8 +#define HDMI_CORE_I2CM_ADDRESS 0x157CC +#define HDMI_CORE_I2CM_DATAO 0x157D0 +#define HDMI_CORE_I2CM_DATAI 0X157D4 +#define HDMI_CORE_I2CM_OPERATION 0x157D8 +#define HDMI_CORE_I2CM_INT 0x157DC +#define HDMI_CORE_I2CM_CTLINT 0x157E0 +#define HDMI_CORE_I2CM_DIV 0x157E4 +#define HDMI_CORE_I2CM_SEGADDR 0x157E8 +#define HDMI_CORE_I2CM_SOFTRSTZ 0x157EC +#define HDMI_CORE_I2CM_SEGPTR 0x157F0 +#define HDMI_CORE_I2CM_SS_SCL_HCNT_1_ADDR 0x157F4 +#define HDMI_CORE_I2CM_SS_SCL_HCNT_0_ADDR 0x157F8 +#define HDMI_CORE_I2CM_SS_SCL_LCNT_1_ADDR 0x157FC +#define HDMI_CORE_I2CM_SS_SCL_LCNT_0_ADDR 0x15800 +#define HDMI_CORE_I2CM_FS_SCL_HCNT_1_ADDR 0x15804 +#define HDMI_CORE_I2CM_FS_SCL_HCNT_0_ADDR 0x15808 +#define HDMI_CORE_I2CM_FS_SCL_LCNT_1_ADDR 0x1580C +#define HDMI_CORE_I2CM_FS_SCL_LCNT_0_ADDR 0x15810 +#define HDMI_CORE_I2CM_SDA_HOLD_ADDR 0x15814 + +enum hdmi_core_packet_mode { + HDMI_PACKETMODERESERVEDVALUE = 0, + HDMI_PACKETMODE24BITPERPIXEL = 4, + HDMI_PACKETMODE30BITPERPIXEL = 5, + HDMI_PACKETMODE36BITPERPIXEL = 6, + HDMI_PACKETMODE48BITPERPIXEL = 7, +}; + +struct hdmi_core_vid_config { + struct hdmi_config v_fc_config; + enum hdmi_core_packet_mode packet_mode; + int data_enable_pol; + int vblank_osc; + int hblank; + int vblank; +}; + +struct csc_table { + u16 a1, a2, a3, a4; + u16 b1, b2, b3, b4; + 
u16 c1, c2, c3, c4; +}; + +int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len); +void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s); +int hdmi5_core_handle_irqs(struct hdmi_core_data *core); +void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, + struct hdmi_config *cfg); +int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core); + +int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, + struct omap_dss_audio *audio, u32 pclk); +#endif diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c new file mode 100644 index 000000000000..1b8fcc6c4ba1 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c @@ -0,0 +1,148 @@ + +#define DSS_SUBSYS_NAME "HDMI" + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/of.h> +#include <video/omapdss.h> + +#include "hdmi.h" + +int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, + struct hdmi_phy_data *phy) +{ + struct property *prop; + int r, len; + + prop = of_find_property(ep, "lanes", &len); + if (prop) { + u32 lanes[8]; + + if (len / sizeof(u32) != ARRAY_SIZE(lanes)) { + dev_err(&pdev->dev, "bad number of lanes\n"); + return -EINVAL; + } + + r = of_property_read_u32_array(ep, "lanes", lanes, + ARRAY_SIZE(lanes)); + if (r) { + dev_err(&pdev->dev, "failed to read lane data\n"); + return r; + } + + r = hdmi_phy_parse_lanes(phy, lanes); + if (r) { + dev_err(&pdev->dev, "failed to parse lane data\n"); + return r; + } + } else { + static const u32 default_lanes[] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + + r = hdmi_phy_parse_lanes(phy, default_lanes); + if (WARN_ON(r)) { + dev_err(&pdev->dev, "failed to parse lane data\n"); + return r; + } + } + + return 0; +} + +int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts) +{ + u32 deep_color; + bool deep_color_correct = false; + + if (n == NULL || cts == NULL) + return -EINVAL; + + /* TODO: When implemented, query deep color mode here. */ + deep_color = 100; + + /* + * When using deep color, the default N value (as in the HDMI + * specification) yields a non-integer CTS. Hence, we + * modify it while keeping the restrictions described in + * section 7.2.1 of the HDMI 1.4a specification. + */ + switch (sample_freq) { + case 32000: + case 48000: + case 96000: + case 192000: + if (deep_color == 125) + if (pclk == 27027000 || pclk == 74250000) + deep_color_correct = true; + if (deep_color == 150) + if (pclk == 27027000) + deep_color_correct = true; + break; + case 44100: + case 88200: + case 176400: + if (deep_color == 125) + if (pclk == 27027000) + deep_color_correct = true; + break; + default: + return -EINVAL; + } + + if (deep_color_correct) { + switch (sample_freq) { + case 32000: + *n = 8192; + break; + case 44100: + *n = 12544; + break; + case 48000: + *n = 8192; + break; + case 88200: + *n = 25088; + break; + case 96000: + *n = 16384; + break; + case 176400: + *n = 50176; + break; + case 192000: + *n = 32768; + break; + default: + return -EINVAL; + } + } else { + switch (sample_freq) { + case 32000: + *n = 4096; + break; + case 44100: + *n = 6272; + break; + case 48000: + *n = 6144; + break; + case 88200: + *n = 12544; + break; + case 96000: + *n = 12288; + break; + case 176400: + *n = 25088; + break; + case 192000: + *n = 24576; + break; + default: + return -EINVAL; + } + } + /* Calculate CTS. 
See HDMI 1.3a or 1.4a specifications */ + *cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10); + + return 0; +} diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c new file mode 100644 index 000000000000..1f5d19c119ce --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -0,0 +1,247 @@ +/* + * HDMI PHY + * + * Copyright (C) 2013 Texas Instruments Incorporated + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <video/omapdss.h> + +#include "dss.h" +#include "hdmi.h" + +struct hdmi_phy_features { + bool bist_ctrl; + bool ldo_voltage; + unsigned long max_phy; +}; + +static const struct hdmi_phy_features *phy_feat; + +void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s) +{ +#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\ + hdmi_read_reg(phy->base, r)) + + DUMPPHY(HDMI_TXPHY_TX_CTRL); + DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL); + DUMPPHY(HDMI_TXPHY_POWER_CTRL); + DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL); + if (phy_feat->bist_ctrl) + DUMPPHY(HDMI_TXPHY_BIST_CONTROL); +} + +int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes) +{ + int i; + + for (i = 0; i < 8; i += 2) { + u8 lane, pol; + int dx, dy; + + dx = lanes[i]; + dy = lanes[i + 1]; + + if (dx < 0 || dx >= 8) + return -EINVAL; + + if (dy < 0 || dy >= 8) + return -EINVAL; + + if (dx & 1) { + if (dy != dx - 1) + return -EINVAL; + pol = 1; + } else { + if (dy != dx + 1) + return -EINVAL; + pol = 0; + } + + lane = dx / 2; + + phy->lane_function[lane] = i / 2; + phy->lane_polarity[lane] = pol; + } + + return 0; +} + +static void hdmi_phy_configure_lanes(struct hdmi_phy_data *phy) +{ + static const u16 pad_cfg_list[] = { + 0x0123, + 0x0132, + 0x0312, + 0x0321, + 0x0231, + 0x0213, + 0x1023, + 0x1032, + 0x3012, + 0x3021, + 0x2031, + 0x2013, + 0x1203, + 0x1302, + 0x3102, + 0x3201, + 0x2301, + 0x2103, + 0x1230, + 0x1320, + 0x3120, + 0x3210, + 0x2310, + 0x2130, + }; + + u16 lane_cfg = 0; + int i; + unsigned lane_cfg_val; + u16 pol_val = 0; + + for (i = 0; i < 4; ++i) + lane_cfg |= phy->lane_function[i] << ((3 - i) * 4); + + pol_val |= phy->lane_polarity[0] << 0; + pol_val |= phy->lane_polarity[1] << 3; + pol_val |= phy->lane_polarity[2] << 2; + pol_val |= phy->lane_polarity[3] << 1; + + for (i = 0; i < ARRAY_SIZE(pad_cfg_list); ++i) + if (pad_cfg_list[i] == lane_cfg) + break; + + if (WARN_ON(i == ARRAY_SIZE(pad_cfg_list))) + i = 0; + + lane_cfg_val = i; + + REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, lane_cfg_val, 26, 22); + REG_FLD_MOD(phy->base, HDMI_TXPHY_PAD_CFG_CTRL, pol_val, 30, 27); +} + +int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk, + unsigned long lfbitclk) +{ + u8 freqout; + + /* + * Read address 0 in order to get the SCP reset done completed + * Dummy access performed to make sure reset is done + */ + hdmi_read_reg(phy->base, HDMI_TXPHY_TX_CTRL); + + /* + * In OMAP5+, the HFBITCLK must be divided by 2 before issuing the + * HDMI_PHYPWRCMD_LDOON command. + */ + if (phy_feat->bist_ctrl) + REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11); + + /* + * If the hfbitclk != lfbitclk, it means the lfbitclk was configured + * to be used for TMDS. 
+ */ + if (hfbitclk != lfbitclk) + freqout = 0; + else if (hfbitclk / 10 < phy_feat->max_phy) + freqout = 1; + else + freqout = 2; + + /* + * Write to phy address 0 to configure the clock + * use HFBITCLK write HDMI_TXPHY_TX_CONTROL_FREQOUT field + */ + REG_FLD_MOD(phy->base, HDMI_TXPHY_TX_CTRL, freqout, 31, 30); + + /* Write to phy address 1 to start HDMI line (TXVALID and TMDSCLKEN) */ + hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000); + + /* Setup max LDO voltage */ + if (phy_feat->ldo_voltage) + REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0); + + hdmi_phy_configure_lanes(phy); + + return 0; +} + +static const struct hdmi_phy_features omap44xx_phy_feats = { + .bist_ctrl = false, + .ldo_voltage = true, + .max_phy = 185675000, +}; + +static const struct hdmi_phy_features omap54xx_phy_feats = { + .bist_ctrl = true, + .ldo_voltage = false, + .max_phy = 186000000, +}; + +static int hdmi_phy_init_features(struct platform_device *pdev) +{ + struct hdmi_phy_features *dst; + const struct hdmi_phy_features *src; + + dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); + if (!dst) { + dev_err(&pdev->dev, "Failed to allocate HDMI PHY Features\n"); + return -ENOMEM; + } + + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + src = &omap44xx_phy_feats; + break; + + case OMAPDSS_VER_OMAP5: + case OMAPDSS_VER_DRA7xx: + src = &omap54xx_phy_feats; + break; + + default: + return -ENODEV; + } + + memcpy(dst, src, sizeof(*dst)); + phy_feat = dst; + + return 0; +} + +int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy) +{ + int r; + struct resource *res; + + r = hdmi_phy_init_features(pdev); + if (r) + return r; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); + if (!res) { + DSSERR("can't get PHY mem resource\n"); + return -EINVAL; + } + + phy->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(phy->base)) { + DSSERR("can't ioremap TX PHY\n"); + return PTR_ERR(phy->base); + } + + return 0; +} diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c new file mode 100644 index 000000000000..06e23a7c432c --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -0,0 +1,255 @@ +/* + * HDMI PLL + * + * Copyright (C) 2013 Texas Instruments Incorporated + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#define DSS_SUBSYS_NAME "HDMIPLL" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/clk.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "hdmi.h" + +void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s) +{ +#define DUMPPLL(r) seq_printf(s, "%-35s %08x\n", #r,\ + hdmi_read_reg(pll->base, r)) + + DUMPPLL(PLLCTRL_PLL_CONTROL); + DUMPPLL(PLLCTRL_PLL_STATUS); + DUMPPLL(PLLCTRL_PLL_GO); + DUMPPLL(PLLCTRL_CFG1); + DUMPPLL(PLLCTRL_CFG2); + DUMPPLL(PLLCTRL_CFG3); + DUMPPLL(PLLCTRL_SSC_CFG1); + DUMPPLL(PLLCTRL_SSC_CFG2); + DUMPPLL(PLLCTRL_CFG4); +} + +void hdmi_pll_compute(struct hdmi_pll_data *pll, + unsigned long target_tmds, struct dss_pll_clock_info *pi) +{ + unsigned long fint, clkdco, clkout; + unsigned long target_bitclk, target_clkdco; + unsigned long min_dco; + unsigned n, m, mf, m2, sd; + unsigned long clkin; + const struct dss_pll_hw *hw = pll->pll.hw; + + clkin = clk_get_rate(pll->pll.clkin); + + DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds); + + target_bitclk = target_tmds * 10; + + /* Fint */ + n = DIV_ROUND_UP(clkin, hw->fint_max); + fint = clkin / n; + + /* adjust m2 so that the clkdco will be high enough */ + min_dco = roundup(hw->clkdco_min, fint); + m2 = DIV_ROUND_UP(min_dco, target_bitclk); + if (m2 == 0) + m2 = 1; + + target_clkdco = target_bitclk * m2; + m = target_clkdco / fint; + + clkdco = fint * m; + + /* adjust clkdco with fractional mf */ + if (WARN_ON(target_clkdco - clkdco > fint)) + mf = 0; + else + mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint); + + if (mf > 0) + clkdco += (u32)div_u64((u64)mf * fint, 262144); + + clkout = clkdco / m2; + + /* sigma-delta */ + sd = DIV_ROUND_UP(fint * m, 250000000); + + DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n", + n, m, mf, m2, sd); + DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout); + + pi->n = n; + pi->m = m; + pi->mf = mf; + pi->mX[0] = m2; + pi->sd = sd; + + pi->fint = fint; + pi->clkdco = clkdco; + pi->clkout[0] = clkout; +} + +static int hdmi_pll_enable(struct dss_pll *dsspll) +{ + struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); + struct hdmi_wp_data *wp = pll->wp; + u16 r = 0; + + dss_ctrl_pll_enable(DSS_PLL_HDMI, true); + + r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS); + if (r) + return r; + + return 0; +} + +static void hdmi_pll_disable(struct dss_pll *dsspll) +{ + struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); + struct hdmi_wp_data *wp = pll->wp; + + hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); + + dss_ctrl_pll_enable(DSS_PLL_HDMI, false); +} + +static const struct dss_pll_ops dsi_pll_ops = { + .enable = hdmi_pll_enable, + .disable = hdmi_pll_disable, + .set_config = dss_pll_write_config_type_b, +}; + +static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { + .n_max = 255, + .m_min = 20, + .m_max = 4095, + .mX_max = 127, + .fint_min = 500000, + .fint_max = 2500000, + + .clkdco_min = 500000000, + .clkdco_low = 1000000000, + .clkdco_max = 2000000000, + + .n_msb = 8, + .n_lsb = 1, + .m_msb = 20, + .m_lsb = 9, + + .mX_msb[0] = 24, + .mX_lsb[0] = 18, + + .has_selfreqdco = true, +}; + +static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = { + .n_max = 255, + .m_min = 20, + .m_max = 2045, + .mX_max = 127, + .fint_min = 620000, + .fint_max = 2500000, + + .clkdco_min = 750000000, + .clkdco_low = 1500000000, + .clkdco_max = 2500000000UL, + + .n_msb = 8, + .n_lsb = 
1, + .m_msb = 20, + .m_lsb = 9, + + .mX_msb[0] = 24, + .mX_lsb[0] = 18, + + .has_selfreqdco = true, + .has_refsel = true, +}; + +static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data *hpll) +{ + struct dss_pll *pll = &hpll->pll; + struct clk *clk; + int r; + + clk = devm_clk_get(&pdev->dev, "sys_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get sys_clk\n"); + return PTR_ERR(clk); + } + + pll->name = "hdmi"; + pll->id = DSS_PLL_HDMI; + pll->base = hpll->base; + pll->clkin = clk; + + switch (omapdss_get_version()) { + case OMAPDSS_VER_OMAP4430_ES1: + case OMAPDSS_VER_OMAP4430_ES2: + case OMAPDSS_VER_OMAP4: + pll->hw = &dss_omap4_hdmi_pll_hw; + break; + + case OMAPDSS_VER_OMAP5: + case OMAPDSS_VER_DRA7xx: + pll->hw = &dss_omap5_hdmi_pll_hw; + break; + + default: + return -ENODEV; + } + + pll->ops = &dsi_pll_ops; + + r = dss_pll_register(pll); + if (r) + return r; + + return 0; +} + +int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, + struct hdmi_wp_data *wp) +{ + int r; + struct resource *res; + + pll->wp = wp; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); + if (!res) { + DSSERR("can't get PLL mem resource\n"); + return -EINVAL; + } + + pll->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pll->base)) { + DSSERR("can't ioremap PLLCTRL\n"); + return PTR_ERR(pll->base); + } + + r = dsi_init_pll_data(pdev, pll); + if (r) { + DSSERR("failed to init HDMI PLL\n"); + return r; + } + + return 0; +} + +void hdmi_pll_uninit(struct hdmi_pll_data *hpll) +{ + struct dss_pll *pll = &hpll->pll; + + dss_pll_unregister(pll); +} diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c new file mode 100644 index 000000000000..7c544bc56fb5 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -0,0 +1,282 @@ +/* + * HDMI wrapper + * + * Copyright (C) 2013 Texas Instruments Incorporated + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#define DSS_SUBSYS_NAME "HDMIWP" + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <video/omapdss.h> + +#include "dss.h" +#include "hdmi.h" + +void hdmi_wp_dump(struct hdmi_wp_data *wp, struct seq_file *s) +{ +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, hdmi_read_reg(wp->base, r)) + + DUMPREG(HDMI_WP_REVISION); + DUMPREG(HDMI_WP_SYSCONFIG); + DUMPREG(HDMI_WP_IRQSTATUS_RAW); + DUMPREG(HDMI_WP_IRQSTATUS); + DUMPREG(HDMI_WP_IRQENABLE_SET); + DUMPREG(HDMI_WP_IRQENABLE_CLR); + DUMPREG(HDMI_WP_IRQWAKEEN); + DUMPREG(HDMI_WP_PWR_CTRL); + DUMPREG(HDMI_WP_DEBOUNCE); + DUMPREG(HDMI_WP_VIDEO_CFG); + DUMPREG(HDMI_WP_VIDEO_SIZE); + DUMPREG(HDMI_WP_VIDEO_TIMING_H); + DUMPREG(HDMI_WP_VIDEO_TIMING_V); + DUMPREG(HDMI_WP_CLK); + DUMPREG(HDMI_WP_AUDIO_CFG); + DUMPREG(HDMI_WP_AUDIO_CFG2); + DUMPREG(HDMI_WP_AUDIO_CTRL); + DUMPREG(HDMI_WP_AUDIO_DATA); +} + +u32 hdmi_wp_get_irqstatus(struct hdmi_wp_data *wp) +{ + return hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS); +} + +void hdmi_wp_set_irqstatus(struct hdmi_wp_data *wp, u32 irqstatus) +{ + hdmi_write_reg(wp->base, HDMI_WP_IRQSTATUS, irqstatus); + /* flush posted write */ + hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS); +} + +void hdmi_wp_set_irqenable(struct hdmi_wp_data *wp, u32 mask) +{ + hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_SET, mask); +} + +void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask) +{ + hdmi_write_reg(wp->base, HDMI_WP_IRQENABLE_CLR, mask); +} + +/* PHY_PWR_CMD */ +int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val) +{ + /* Return if already in the requested state */ + if (REG_GET(wp->base, HDMI_WP_PWR_CTRL, 5, 4) == val) + return 0; + + /* Command for power control of HDMI PHY */ + REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 7, 6); + + /* Status of the power control of HDMI PHY */ + if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 5, 4, val) + != val) { + DSSERR("Failed to set PHY power mode to %d\n", val); + return -ETIMEDOUT; + } + + return 0; +} + +/* PLL_PWR_CMD */ +int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val) +{ + /* Command for power control of HDMI PLL */ + REG_FLD_MOD(wp->base, HDMI_WP_PWR_CTRL, val, 3, 2); + + /* wait till PLL_PWR_STATUS is set */ + if (hdmi_wait_for_bit_change(wp->base, HDMI_WP_PWR_CTRL, 1, 0, val) + != val) { + DSSERR("Failed to set PLL_PWR_STATUS\n"); + return -ETIMEDOUT; + } + + return 0; +} + +int hdmi_wp_video_start(struct hdmi_wp_data *wp) +{ + REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, true, 31, 31); + + return 0; +} + +void hdmi_wp_video_stop(struct hdmi_wp_data *wp) +{ + int i; + + hdmi_write_reg(wp->base, HDMI_WP_IRQSTATUS, HDMI_IRQ_VIDEO_FRAME_DONE); + + REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, false, 31, 31); + + for (i = 0; i < 50; ++i) { + u32 v; + + msleep(20); + + v = hdmi_read_reg(wp->base, HDMI_WP_IRQSTATUS_RAW); + if (v & HDMI_IRQ_VIDEO_FRAME_DONE) + return; + } + + DSSERR("no HDMI FRAMEDONE when disabling output\n"); +} + +void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, + struct hdmi_video_format *video_fmt) +{ + u32 l = 0; + + REG_FLD_MOD(wp->base, HDMI_WP_VIDEO_CFG, video_fmt->packing_mode, + 10, 8); + + l |= FLD_VAL(video_fmt->y_res, 31, 16); + l |= FLD_VAL(video_fmt->x_res, 15, 0); + hdmi_write_reg(wp->base, HDMI_WP_VIDEO_SIZE, l); +} + +void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, + struct omap_video_timings *timings) +{ + u32 r; + bool vsync_pol, hsync_pol; + DSSDBG("Enter hdmi_wp_video_config_interface\n"); + + vsync_pol = 
timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH; + hsync_pol = timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH; + + r = hdmi_read_reg(wp->base, HDMI_WP_VIDEO_CFG); + r = FLD_MOD(r, vsync_pol, 7, 7); + r = FLD_MOD(r, hsync_pol, 6, 6); + r = FLD_MOD(r, timings->interlace, 3, 3); + r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */ + hdmi_write_reg(wp->base, HDMI_WP_VIDEO_CFG, r); +} + +void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, + struct omap_video_timings *timings) +{ + u32 timing_h = 0; + u32 timing_v = 0; + + DSSDBG("Enter hdmi_wp_video_config_timing\n"); + + timing_h |= FLD_VAL(timings->hbp, 31, 20); + timing_h |= FLD_VAL(timings->hfp, 19, 8); + timing_h |= FLD_VAL(timings->hsw, 7, 0); + hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h); + + timing_v |= FLD_VAL(timings->vbp, 31, 20); + timing_v |= FLD_VAL(timings->vfp, 19, 8); + timing_v |= FLD_VAL(timings->vsw, 7, 0); + hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_V, timing_v); +} + +void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, + struct omap_video_timings *timings, struct hdmi_config *param) +{ + DSSDBG("Enter hdmi_wp_video_init_format\n"); + + video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444; + video_fmt->y_res = param->timings.y_res; + video_fmt->x_res = param->timings.x_res; + if (param->timings.interlace) + video_fmt->y_res /= 2; + + timings->hbp = param->timings.hbp; + timings->hfp = param->timings.hfp; + timings->hsw = param->timings.hsw; + timings->vbp = param->timings.vbp; + timings->vfp = param->timings.vfp; + timings->vsw = param->timings.vsw; + timings->vsync_level = param->timings.vsync_level; + timings->hsync_level = param->timings.hsync_level; + timings->interlace = param->timings.interlace; +} + +void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp, + struct hdmi_audio_format *aud_fmt) +{ + u32 r; + + DSSDBG("Enter hdmi_wp_audio_config_format\n"); + + r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG); + if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 || + omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 || + omapdss_get_version() == OMAPDSS_VER_OMAP4) { + r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24); + r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16); + } + r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5); + r = FLD_MOD(r, aud_fmt->type, 4, 4); + r = FLD_MOD(r, aud_fmt->justification, 3, 3); + r = FLD_MOD(r, aud_fmt->sample_order, 2, 2); + r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1); + r = FLD_MOD(r, aud_fmt->sample_size, 0, 0); + hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG, r); +} + +void hdmi_wp_audio_config_dma(struct hdmi_wp_data *wp, + struct hdmi_audio_dma *aud_dma) +{ + u32 r; + + DSSDBG("Enter hdmi_wp_audio_config_dma\n"); + + r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG2); + r = FLD_MOD(r, aud_dma->transfer_size, 15, 8); + r = FLD_MOD(r, aud_dma->block_size, 7, 0); + hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CFG2, r); + + r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CTRL); + r = FLD_MOD(r, aud_dma->mode, 9, 9); + r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0); + hdmi_write_reg(wp->base, HDMI_WP_AUDIO_CTRL, r); +} + +int hdmi_wp_audio_enable(struct hdmi_wp_data *wp, bool enable) +{ + REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 31, 31); + + return 0; +} + +int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable) +{ + REG_FLD_MOD(wp->base, HDMI_WP_AUDIO_CTRL, enable, 30, 30); + + return 0; +} + +int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp) +{ + struct resource *res; + + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "wp"); + if (!res) { + DSSERR("can't get WP mem resource\n"); + return -EINVAL; + } + wp->phys_base = res->start; + + wp->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(wp->base)) { + DSSERR("can't ioremap HDMI WP\n"); + return PTR_ERR(wp->base); + } + + return 0; +} + +phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp) +{ + return wp->phys_base + HDMI_WP_AUDIO_DATA; +} diff --git a/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c b/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c new file mode 100644 index 000000000000..a7414fb12830 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c @@ -0,0 +1,531 @@ +/* + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "MANAGER" + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/jiffies.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name); +} + +static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf) +{ + struct omap_dss_device *dssdev = mgr->get_device(mgr); + + return snprintf(buf, PAGE_SIZE, "%s\n", dssdev ? 
+ dssdev->name : "<none>"); +} + +static int manager_display_match(struct omap_dss_device *dssdev, void *data) +{ + const char *str = data; + + return sysfs_streq(dssdev->name, str); +} + +static ssize_t manager_display_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + int r = 0; + size_t len = size; + struct omap_dss_device *dssdev = NULL; + struct omap_dss_device *old_dssdev; + + if (buf[size-1] == '\n') + --len; + + if (len > 0) + dssdev = omap_dss_find_device((void *)buf, + manager_display_match); + + if (len > 0 && dssdev == NULL) + return -EINVAL; + + if (dssdev) { + DSSDBG("display %s found\n", dssdev->name); + + if (omapdss_device_is_connected(dssdev)) { + DSSERR("new display is already connected\n"); + r = -EINVAL; + goto put_device; + } + + if (omapdss_device_is_enabled(dssdev)) { + DSSERR("new display is not disabled\n"); + r = -EINVAL; + goto put_device; + } + } + + old_dssdev = mgr->get_device(mgr); + if (old_dssdev) { + if (omapdss_device_is_enabled(old_dssdev)) { + DSSERR("old display is not disabled\n"); + r = -EINVAL; + goto put_device; + } + + old_dssdev->driver->disconnect(old_dssdev); + } + + if (dssdev) { + r = dssdev->driver->connect(dssdev); + if (r) { + DSSERR("failed to connect new device\n"); + goto put_device; + } + + old_dssdev = mgr->get_device(mgr); + if (old_dssdev != dssdev) { + DSSERR("failed to connect device to this manager\n"); + dssdev->driver->disconnect(dssdev); + goto put_device; + } + + r = mgr->apply(mgr); + if (r) { + DSSERR("failed to apply dispc config\n"); + goto put_device; + } + } + +put_device: + if (dssdev) + omap_dss_put_device(dssdev); + + return r ? r : size; +} + +static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr, + char *buf) +{ + struct omap_overlay_manager_info info; + + mgr->get_manager_info(mgr, &info); + + return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color); +} + +static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + u32 color; + int r; + + r = kstrtouint(buf, 0, &color); + if (r) + return r; + + mgr->get_manager_info(mgr, &info); + + info.default_color = color; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +static const char *trans_key_type_str[] = { + "gfx-destination", + "video-source", +}; + +static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr, + char *buf) +{ + enum omap_dss_trans_key_type key_type; + struct omap_overlay_manager_info info; + + mgr->get_manager_info(mgr, &info); + + key_type = info.trans_key_type; + BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str)); + + return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]); +} + +static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + enum omap_dss_trans_key_type key_type; + struct omap_overlay_manager_info info; + int r; + + for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST; + key_type < ARRAY_SIZE(trans_key_type_str); key_type++) { + if (sysfs_streq(buf, trans_key_type_str[key_type])) + break; + } + + if (key_type == ARRAY_SIZE(trans_key_type_str)) + return -EINVAL; + + mgr->get_manager_info(mgr, &info); + + info.trans_key_type = key_type; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +static ssize_t manager_trans_key_value_show(struct omap_overlay_manager 
*mgr, + char *buf) +{ + struct omap_overlay_manager_info info; + + mgr->get_manager_info(mgr, &info); + + return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key); +} + +static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + u32 key_value; + int r; + + r = kstrtouint(buf, 0, &key_value); + if (r) + return r; + + mgr->get_manager_info(mgr, &info); + + info.trans_key = key_value; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr, + char *buf) +{ + struct omap_overlay_manager_info info; + + mgr->get_manager_info(mgr, &info); + + return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled); +} + +static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + bool enable; + int r; + + r = strtobool(buf, &enable); + if (r) + return r; + + mgr->get_manager_info(mgr, &info); + + info.trans_enabled = enable; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +static ssize_t manager_alpha_blending_enabled_show( + struct omap_overlay_manager *mgr, char *buf) +{ + struct omap_overlay_manager_info info; + + if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) + return -ENODEV; + + mgr->get_manager_info(mgr, &info); + + return snprintf(buf, PAGE_SIZE, "%d\n", + info.partial_alpha_enabled); +} + +static ssize_t manager_alpha_blending_enabled_store( + struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + bool enable; + int r; + + if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) + return -ENODEV; + + r = strtobool(buf, &enable); + if (r) + return r; + + mgr->get_manager_info(mgr, &info); + + info.partial_alpha_enabled = enable; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr, + char *buf) +{ + struct omap_overlay_manager_info info; + + mgr->get_manager_info(mgr, &info); + + return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable); +} + +static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + int r; + bool enable; + + if (!dss_has_feature(FEAT_CPR)) + return -ENODEV; + + r = strtobool(buf, &enable); + if (r) + return r; + + mgr->get_manager_info(mgr, &info); + + if (info.cpr_enable == enable) + return size; + + info.cpr_enable = enable; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr, + char *buf) +{ + struct omap_overlay_manager_info info; + + mgr->get_manager_info(mgr, &info); + + return snprintf(buf, PAGE_SIZE, + "%d %d %d %d %d %d %d %d %d\n", + info.cpr_coefs.rr, + info.cpr_coefs.rg, + info.cpr_coefs.rb, + info.cpr_coefs.gr, + info.cpr_coefs.gg, + info.cpr_coefs.gb, + info.cpr_coefs.br, + info.cpr_coefs.bg, + info.cpr_coefs.bb); +} + +static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + struct omap_dss_cpr_coefs coefs; + int r, 
i; + s16 *arr; + + if (!dss_has_feature(FEAT_CPR)) + return -ENODEV; + + if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd", + &coefs.rr, &coefs.rg, &coefs.rb, + &coefs.gr, &coefs.gg, &coefs.gb, + &coefs.br, &coefs.bg, &coefs.bb) != 9) + return -EINVAL; + + arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb, + coefs.gr, coefs.gg, coefs.gb, + coefs.br, coefs.bg, coefs.bb }; + + for (i = 0; i < 9; ++i) { + if (arr[i] < -512 || arr[i] > 511) + return -EINVAL; + } + + mgr->get_manager_info(mgr, &info); + + info.cpr_coefs = coefs; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +struct manager_attribute { + struct attribute attr; + ssize_t (*show)(struct omap_overlay_manager *, char *); + ssize_t (*store)(struct omap_overlay_manager *, const char *, size_t); +}; + +#define MANAGER_ATTR(_name, _mode, _show, _store) \ + struct manager_attribute manager_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL); +static MANAGER_ATTR(display, S_IRUGO|S_IWUSR, + manager_display_show, manager_display_store); +static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR, + manager_default_color_show, manager_default_color_store); +static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR, + manager_trans_key_type_show, manager_trans_key_type_store); +static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR, + manager_trans_key_value_show, manager_trans_key_value_store); +static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR, + manager_trans_key_enabled_show, + manager_trans_key_enabled_store); +static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR, + manager_alpha_blending_enabled_show, + manager_alpha_blending_enabled_store); +static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR, + manager_cpr_enable_show, + manager_cpr_enable_store); +static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR, + manager_cpr_coef_show, + manager_cpr_coef_store); + + +static struct attribute *manager_sysfs_attrs[] = { + &manager_attr_name.attr, + &manager_attr_display.attr, + &manager_attr_default_color.attr, + &manager_attr_trans_key_type.attr, + &manager_attr_trans_key_value.attr, + &manager_attr_trans_key_enabled.attr, + &manager_attr_alpha_blending_enabled.attr, + &manager_attr_cpr_enable.attr, + &manager_attr_cpr_coef.attr, + NULL +}; + +static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct omap_overlay_manager *manager; + struct manager_attribute *manager_attr; + + manager = container_of(kobj, struct omap_overlay_manager, kobj); + manager_attr = container_of(attr, struct manager_attribute, attr); + + if (!manager_attr->show) + return -ENOENT; + + return manager_attr->show(manager, buf); +} + +static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t size) +{ + struct omap_overlay_manager *manager; + struct manager_attribute *manager_attr; + + manager = container_of(kobj, struct omap_overlay_manager, kobj); + manager_attr = container_of(attr, struct manager_attribute, attr); + + if (!manager_attr->store) + return -ENOENT; + + return manager_attr->store(manager, buf, size); +} + +static const struct sysfs_ops manager_sysfs_ops = { + .show = manager_attr_show, + .store = manager_attr_store, +}; + +static struct kobj_type manager_ktype = { + .sysfs_ops = &manager_sysfs_ops, + .default_attrs = manager_sysfs_attrs, +}; + +int dss_manager_kobj_init(struct omap_overlay_manager *mgr, + struct platform_device *pdev) +{ 
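+ /* Register this manager as a kobject named "manager%d" under the given platform device, so the sysfs attributes defined above become visible */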
+ return kobject_init_and_add(&mgr->kobj, &manager_ktype, + &pdev->dev.kobj, "manager%d", mgr->id); +} + +void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr) +{ + kobject_del(&mgr->kobj); + kobject_put(&mgr->kobj); + + memset(&mgr->kobj, 0, sizeof(mgr->kobj)); +} diff --git a/drivers/gpu/drm/omapdrm/dss/manager.c b/drivers/gpu/drm/omapdrm/dss/manager.c new file mode 100644 index 000000000000..08a67f4f6a20 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/manager.c @@ -0,0 +1,263 @@ +/* + * linux/drivers/video/omap2/dss/manager.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "MANAGER" + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/jiffies.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +static int num_managers; +static struct omap_overlay_manager *managers; + +int dss_init_overlay_managers(void) +{ + int i; + + num_managers = dss_feat_get_num_mgrs(); + + managers = kzalloc(sizeof(struct omap_overlay_manager) * num_managers, + GFP_KERNEL); + + BUG_ON(managers == NULL); + + for (i = 0; i < num_managers; ++i) { + struct omap_overlay_manager *mgr = &managers[i]; + + switch (i) { + case 0: + mgr->name = "lcd"; + mgr->id = OMAP_DSS_CHANNEL_LCD; + break; + case 1: + mgr->name = "tv"; + mgr->id = OMAP_DSS_CHANNEL_DIGIT; + break; + case 2: + mgr->name = "lcd2"; + mgr->id = OMAP_DSS_CHANNEL_LCD2; + break; + case 3: + mgr->name = "lcd3"; + mgr->id = OMAP_DSS_CHANNEL_LCD3; + break; + } + + mgr->caps = 0; + mgr->supported_displays = + dss_feat_get_supported_displays(mgr->id); + mgr->supported_outputs = + dss_feat_get_supported_outputs(mgr->id); + + INIT_LIST_HEAD(&mgr->overlays); + } + + return 0; +} + +int dss_init_overlay_managers_sysfs(struct platform_device *pdev) +{ + int i, r; + + for (i = 0; i < num_managers; ++i) { + struct omap_overlay_manager *mgr = &managers[i]; + + r = dss_manager_kobj_init(mgr, pdev); + if (r) + DSSERR("failed to create sysfs file\n"); + } + + return 0; +} + +void dss_uninit_overlay_managers(void) +{ + kfree(managers); + managers = NULL; + num_managers = 0; +} + +void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < num_managers; ++i) { + struct omap_overlay_manager *mgr = &managers[i]; + + dss_manager_kobj_uninit(mgr); + } +} + +int omap_dss_get_num_overlay_managers(void) +{ + return num_managers; +} +EXPORT_SYMBOL(omap_dss_get_num_overlay_managers); + +struct omap_overlay_manager *omap_dss_get_overlay_manager(int num) +{ + if (num >= num_managers) + return NULL; + + return &managers[num]; +} +EXPORT_SYMBOL(omap_dss_get_overlay_manager); + +int dss_mgr_simple_check(struct omap_overlay_manager *mgr, + const struct omap_overlay_manager_info *info) +{ + 
if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) { + /* + * OMAP3 supports only graphics source transparency color key + * and alpha blending simultaneously. See TRM 15.4.2.4.2.2 + * Alpha Mode. + */ + if (info->partial_alpha_enabled && info->trans_enabled + && info->trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST) { + DSSERR("check_manager: illegal transparency key\n"); + return -EINVAL; + } + } + + return 0; +} + +static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr, + struct omap_overlay_info **overlay_infos) +{ + struct omap_overlay *ovl1, *ovl2; + struct omap_overlay_info *info1, *info2; + + list_for_each_entry(ovl1, &mgr->overlays, list) { + info1 = overlay_infos[ovl1->id]; + + if (info1 == NULL) + continue; + + list_for_each_entry(ovl2, &mgr->overlays, list) { + if (ovl1 == ovl2) + continue; + + info2 = overlay_infos[ovl2->id]; + + if (info2 == NULL) + continue; + + if (info1->zorder == info2->zorder) { + DSSERR("overlays %d and %d have the same " + "zorder %d\n", + ovl1->id, ovl2->id, info1->zorder); + return -EINVAL; + } + } + } + + return 0; +} + +int dss_mgr_check_timings(struct omap_overlay_manager *mgr, + const struct omap_video_timings *timings) +{ + if (!dispc_mgr_timings_ok(mgr->id, timings)) { + DSSERR("check_manager: invalid timings\n"); + return -EINVAL; + } + + return 0; +} + +static int dss_mgr_check_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct dispc_clock_info cinfo = config->clock_info; + int dl = config->video_port_width; + bool stallmode = config->stallmode; + bool fifohandcheck = config->fifohandcheck; + + if (cinfo.lck_div < 1 || cinfo.lck_div > 255) + return -EINVAL; + + if (cinfo.pck_div < 1 || cinfo.pck_div > 255) + return -EINVAL; + + if (dl != 12 && dl != 16 && dl != 18 && dl != 24) + return -EINVAL; + + /* fifohandcheck should be used only with stallmode */ + if (!stallmode && fifohandcheck) + return -EINVAL; + + /* + * io pad mode can be only checked by using dssdev connected to the + * manager. Ignore checking these for now, add checks when manager + * is capable of holding information related to the connected interface + */ + + return 0; +} + +int dss_mgr_check(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info, + const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *lcd_config, + struct omap_overlay_info **overlay_infos) +{ + struct omap_overlay *ovl; + int r; + + if (dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) { + r = dss_mgr_check_zorder(mgr, overlay_infos); + if (r) + return r; + } + + r = dss_mgr_check_timings(mgr, mgr_timings); + if (r) + return r; + + r = dss_mgr_check_lcd_config(mgr, lcd_config); + if (r) + return r; + + list_for_each_entry(ovl, &mgr->overlays, list) { + struct omap_overlay_info *oi; + int r; + + oi = overlay_infos[ovl->id]; + + if (oi == NULL) + continue; + + r = dss_ovl_check(ovl, oi, mgr_timings); + if (r) + return r; + } + + return 0; +} diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c new file mode 100644 index 000000000000..136d30484d02 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -0,0 +1,225 @@ +/* + * Copyright (C) 2014 Texas Instruments + * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +/* + * As omapdss panel drivers are omapdss specific, but we want to define the + * DT-data in generic manner, we convert the compatible strings of the panel and + * encoder nodes from "panel-foo" to "omapdss,panel-foo". This way we can have + * both correct DT data and omapdss specific drivers. + * + * When we get generic panel drivers to the kernel, this file will be removed. + */ + +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/slab.h> +#include <linux/list.h> + +static struct list_head dss_conv_list __initdata; + +static const char prefix[] __initconst = "omapdss,"; + +struct dss_conv_node { + struct list_head list; + struct device_node *node; + bool root; +}; + +static int __init omapdss_count_strings(const struct property *prop) +{ + const char *p = prop->value; + int l = 0, total = 0; + int i; + + for (i = 0; total < prop->length; total += l, p += l, i++) + l = strlen(p) + 1; + + return i; +} + +static void __init omapdss_update_prop(struct device_node *node, char *compat, + int len) +{ + struct property *prop; + + prop = kzalloc(sizeof(*prop), GFP_KERNEL); + if (!prop) + return; + + prop->name = "compatible"; + prop->value = compat; + prop->length = len; + + of_update_property(node, prop); +} + +static void __init omapdss_prefix_strcpy(char *dst, int dst_len, + const char *src, int src_len) +{ + size_t total = 0; + + while (total < src_len) { + size_t l = strlen(src) + 1; + + strcpy(dst, prefix); + dst += strlen(prefix); + + strcpy(dst, src); + dst += l; + + src += l; + total += l; + } +} + +/* prepend compatible property strings with "omapdss," */ +static void __init omapdss_omapify_node(struct device_node *node) +{ + struct property *prop; + char *new_compat; + int num_strs; + int new_len; + + prop = of_find_property(node, "compatible", NULL); + + if (!prop || !prop->value) + return; + + if (strnlen(prop->value, prop->length) >= prop->length) + return; + + /* is it already prefixed? */ + if (strncmp(prefix, prop->value, strlen(prefix)) == 0) + return; + + num_strs = omapdss_count_strings(prop); + + new_len = prop->length + strlen(prefix) * num_strs; + new_compat = kmalloc(new_len, GFP_KERNEL); + + omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length); + + omapdss_update_prop(node, new_compat, new_len); +} + +static void __init omapdss_add_to_list(struct device_node *node, bool root) +{ + struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node), + GFP_KERNEL); + if (n) { + n->node = node; + n->root = root; + list_add(&n->list, &dss_conv_list); + } +} + +static bool __init omapdss_list_contains(const struct device_node *node) +{ + struct dss_conv_node *n; + + list_for_each_entry(n, &dss_conv_list, list) { + if (n->node == node) + return true; + } + + return false; +} + +static void __init omapdss_walk_device(struct device_node *node, bool root) +{ + struct device_node *n; + + omapdss_add_to_list(node, root); + + /* + * of_graph_get_remote_port_parent() prints an error if there is no + * port/ports node. To avoid that, check first that there's the node. 
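+	 * When the node does exist, every endpoint's available remote port
+	 * parent that is not yet on the list is walked recursively (as a
+	 * non-root node), so the connected panels and encoders get the
+	 * "omapdss," prefix as well.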
+ */ + n = of_get_child_by_name(node, "ports"); + if (!n) + n = of_get_child_by_name(node, "port"); + if (!n) + return; + + of_node_put(n); + + n = NULL; + while ((n = of_graph_get_next_endpoint(node, n)) != NULL) { + struct device_node *pn; + + pn = of_graph_get_remote_port_parent(n); + + if (!pn) + continue; + + if (!of_device_is_available(pn) || omapdss_list_contains(pn)) { + of_node_put(pn); + continue; + } + + omapdss_walk_device(pn, false); + } +} + +static const struct of_device_id omapdss_of_match[] __initconst = { + { .compatible = "ti,omap2-dss", }, + { .compatible = "ti,omap3-dss", }, + { .compatible = "ti,omap4-dss", }, + { .compatible = "ti,omap5-dss", }, + { .compatible = "ti,dra7-dss", }, + {}, +}; + +static int __init omapdss_boot_init(void) +{ + struct device_node *dss, *child; + + INIT_LIST_HEAD(&dss_conv_list); + + dss = of_find_matching_node(NULL, omapdss_of_match); + + if (dss == NULL || !of_device_is_available(dss)) + return 0; + + omapdss_walk_device(dss, true); + + for_each_available_child_of_node(dss, child) { + if (!of_find_property(child, "compatible", NULL)) + continue; + + omapdss_walk_device(child, true); + } + + while (!list_empty(&dss_conv_list)) { + struct dss_conv_node *n; + + n = list_first_entry(&dss_conv_list, struct dss_conv_node, + list); + + if (!n->root) + omapdss_omapify_node(n->node); + + list_del(&n->list); + of_node_put(n->node); + kfree(n); + } + + return 0; +} + +subsys_initcall(omapdss_boot_init); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c new file mode 100644 index 000000000000..16072159bd24 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -0,0 +1,267 @@ +/* + * Copyright (C) 2012 Texas Instruments Ltd + * Author: Archit Taneja <archit@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of.h> + +#include <video/omapdss.h> + +#include "dss.h" + +static LIST_HEAD(output_list); +static DEFINE_MUTEX(output_lock); + +int omapdss_output_set_device(struct omap_dss_device *out, + struct omap_dss_device *dssdev) +{ + int r; + + mutex_lock(&output_lock); + + if (out->dst) { + DSSERR("output already has device %s connected to it\n", + out->dst->name); + r = -EINVAL; + goto err; + } + + if (out->output_type != dssdev->type) { + DSSERR("output type and display type don't match\n"); + r = -EINVAL; + goto err; + } + + out->dst = dssdev; + dssdev->src = out; + + mutex_unlock(&output_lock); + + return 0; +err: + mutex_unlock(&output_lock); + + return r; +} +EXPORT_SYMBOL(omapdss_output_set_device); + +int omapdss_output_unset_device(struct omap_dss_device *out) +{ + int r; + + mutex_lock(&output_lock); + + if (!out->dst) { + DSSERR("output doesn't have a device connected to it\n"); + r = -EINVAL; + goto err; + } + + if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) { + DSSERR("device %s is not disabled, cannot unset device\n", + out->dst->name); + r = -EINVAL; + goto err; + } + + out->dst->src = NULL; + out->dst = NULL; + + mutex_unlock(&output_lock); + + return 0; +err: + mutex_unlock(&output_lock); + + return r; +} +EXPORT_SYMBOL(omapdss_output_unset_device); + +int omapdss_register_output(struct omap_dss_device *out) +{ + list_add_tail(&out->list, &output_list); + return 0; +} +EXPORT_SYMBOL(omapdss_register_output); + +void omapdss_unregister_output(struct omap_dss_device *out) +{ + list_del(&out->list); +} +EXPORT_SYMBOL(omapdss_unregister_output); + +struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) +{ + struct omap_dss_device *out; + + list_for_each_entry(out, &output_list, list) { + if (out->id == id) + return out; + } + + return NULL; +} +EXPORT_SYMBOL(omap_dss_get_output); + +struct omap_dss_device *omap_dss_find_output(const char *name) +{ + struct omap_dss_device *out; + + list_for_each_entry(out, &output_list, list) { + if (strcmp(out->name, name) == 0) + return omap_dss_get_device(out); + } + + return NULL; +} +EXPORT_SYMBOL(omap_dss_find_output); + +struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port) +{ + struct device_node *src_node; + struct omap_dss_device *out; + u32 reg; + + src_node = dss_of_port_get_parent_device(port); + if (!src_node) + return NULL; + + reg = dss_of_port_get_port_number(port); + + list_for_each_entry(out, &output_list, list) { + if (out->dev->of_node == src_node && out->port_num == reg) { + of_node_put(src_node); + return omap_dss_get_device(out); + } + } + + of_node_put(src_node); + + return NULL; +} +EXPORT_SYMBOL(omap_dss_find_output_by_port_node); + +struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev) +{ + while (dssdev->src) + dssdev = dssdev->src; + + if (dssdev->id != 0) + return omap_dss_get_device(dssdev); + + return NULL; +} +EXPORT_SYMBOL(omapdss_find_output_from_display); + +struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev) +{ + struct omap_dss_device *out; + struct omap_overlay_manager *mgr; + + out = omapdss_find_output_from_display(dssdev); + + if (out == NULL) + return NULL; + + mgr = out->manager; + + omap_dss_put_device(out); + + return mgr; +} +EXPORT_SYMBOL(omapdss_find_mgr_from_display); + +static const struct dss_mgr_ops *dss_mgr_ops; + +int 
dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops) +{ + if (dss_mgr_ops) + return -EBUSY; + + dss_mgr_ops = mgr_ops; + + return 0; +} +EXPORT_SYMBOL(dss_install_mgr_ops); + +void dss_uninstall_mgr_ops(void) +{ + dss_mgr_ops = NULL; +} +EXPORT_SYMBOL(dss_uninstall_mgr_ops); + +int dss_mgr_connect(struct omap_overlay_manager *mgr, + struct omap_dss_device *dst) +{ + return dss_mgr_ops->connect(mgr, dst); +} +EXPORT_SYMBOL(dss_mgr_connect); + +void dss_mgr_disconnect(struct omap_overlay_manager *mgr, + struct omap_dss_device *dst) +{ + dss_mgr_ops->disconnect(mgr, dst); +} +EXPORT_SYMBOL(dss_mgr_disconnect); + +void dss_mgr_set_timings(struct omap_overlay_manager *mgr, + const struct omap_video_timings *timings) +{ + dss_mgr_ops->set_timings(mgr, timings); +} +EXPORT_SYMBOL(dss_mgr_set_timings); + +void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + dss_mgr_ops->set_lcd_config(mgr, config); +} +EXPORT_SYMBOL(dss_mgr_set_lcd_config); + +int dss_mgr_enable(struct omap_overlay_manager *mgr) +{ + return dss_mgr_ops->enable(mgr); +} +EXPORT_SYMBOL(dss_mgr_enable); + +void dss_mgr_disable(struct omap_overlay_manager *mgr) +{ + dss_mgr_ops->disable(mgr); +} +EXPORT_SYMBOL(dss_mgr_disable); + +void dss_mgr_start_update(struct omap_overlay_manager *mgr) +{ + dss_mgr_ops->start_update(mgr); +} +EXPORT_SYMBOL(dss_mgr_start_update); + +int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr, + void (*handler)(void *), void *data) +{ + return dss_mgr_ops->register_framedone_handler(mgr, handler, data); +} +EXPORT_SYMBOL(dss_mgr_register_framedone_handler); + +void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr, + void (*handler)(void *), void *data) +{ + dss_mgr_ops->unregister_framedone_handler(mgr, handler, data); +} +EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler); diff --git a/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c b/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c new file mode 100644 index 000000000000..4cc5ddebfb34 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c @@ -0,0 +1,456 @@ +/* + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "OVERLAY" + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/sysfs.h> +#include <linux/kobject.h> +#include <linux/platform_device.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name); +} + +static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", + ovl->manager ? 
ovl->manager->name : "<none>"); +} + +static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf, + size_t size) +{ + int i, r; + struct omap_overlay_manager *mgr = NULL; + struct omap_overlay_manager *old_mgr; + int len = size; + + if (buf[size-1] == '\n') + --len; + + if (len > 0) { + for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) { + mgr = omap_dss_get_overlay_manager(i); + + if (sysfs_streq(buf, mgr->name)) + break; + + mgr = NULL; + } + } + + if (len > 0 && mgr == NULL) + return -EINVAL; + + if (mgr) + DSSDBG("manager %s found\n", mgr->name); + + if (mgr == ovl->manager) + return size; + + old_mgr = ovl->manager; + + r = dispc_runtime_get(); + if (r) + return r; + + /* detach old manager */ + if (old_mgr) { + r = ovl->unset_manager(ovl); + if (r) { + DSSERR("detach failed\n"); + goto err; + } + + r = old_mgr->apply(old_mgr); + if (r) + goto err; + } + + if (mgr) { + r = ovl->set_manager(ovl, mgr); + if (r) { + DSSERR("Failed to attach overlay\n"); + goto err; + } + + r = mgr->apply(mgr); + if (r) + goto err; + } + + dispc_runtime_put(); + + return size; + +err: + dispc_runtime_put(); + return r; +} + +static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + return snprintf(buf, PAGE_SIZE, "%d,%d\n", + info.width, info.height); +} + +static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width); +} + +static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + return snprintf(buf, PAGE_SIZE, "%d,%d\n", + info.pos_x, info.pos_y); +} + +static ssize_t overlay_position_store(struct omap_overlay *ovl, + const char *buf, size_t size) +{ + int r; + char *last; + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + info.pos_x = simple_strtoul(buf, &last, 10); + ++last; + if (last - buf >= size) + return -EINVAL; + + info.pos_y = simple_strtoul(last, &last, 10); + + r = ovl->set_overlay_info(ovl, &info); + if (r) + return r; + + if (ovl->manager) { + r = ovl->manager->apply(ovl->manager); + if (r) + return r; + } + + return size; +} + +static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + return snprintf(buf, PAGE_SIZE, "%d,%d\n", + info.out_width, info.out_height); +} + +static ssize_t overlay_output_size_store(struct omap_overlay *ovl, + const char *buf, size_t size) +{ + int r; + char *last; + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + info.out_width = simple_strtoul(buf, &last, 10); + ++last; + if (last - buf >= size) + return -EINVAL; + + info.out_height = simple_strtoul(last, &last, 10); + + r = ovl->set_overlay_info(ovl, &info); + if (r) + return r; + + if (ovl->manager) { + r = ovl->manager->apply(ovl->manager); + if (r) + return r; + } + + return size; +} + +static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl)); +} + +static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf, + size_t size) +{ + int r; + bool enable; + + r = strtobool(buf, &enable); + if (r) + return r; + + if (enable) + r = ovl->enable(ovl); + else + r = ovl->disable(ovl); + + if (r) + return 
r; + + return size; +} + +static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + return snprintf(buf, PAGE_SIZE, "%d\n", + info.global_alpha); +} + +static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, + const char *buf, size_t size) +{ + int r; + u8 alpha; + struct omap_overlay_info info; + + if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0) + return -ENODEV; + + r = kstrtou8(buf, 0, &alpha); + if (r) + return r; + + ovl->get_overlay_info(ovl, &info); + + info.global_alpha = alpha; + + r = ovl->set_overlay_info(ovl, &info); + if (r) + return r; + + if (ovl->manager) { + r = ovl->manager->apply(ovl->manager); + if (r) + return r; + } + + return size; +} + +static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl, + char *buf) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + return snprintf(buf, PAGE_SIZE, "%d\n", + info.pre_mult_alpha); +} + +static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl, + const char *buf, size_t size) +{ + int r; + u8 alpha; + struct omap_overlay_info info; + + if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0) + return -ENODEV; + + r = kstrtou8(buf, 0, &alpha); + if (r) + return r; + + ovl->get_overlay_info(ovl, &info); + + info.pre_mult_alpha = alpha; + + r = ovl->set_overlay_info(ovl, &info); + if (r) + return r; + + if (ovl->manager) { + r = ovl->manager->apply(ovl->manager); + if (r) + return r; + } + + return size; +} + +static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf) +{ + struct omap_overlay_info info; + + ovl->get_overlay_info(ovl, &info); + + return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder); +} + +static ssize_t overlay_zorder_store(struct omap_overlay *ovl, + const char *buf, size_t size) +{ + int r; + u8 zorder; + struct omap_overlay_info info; + + if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0) + return -ENODEV; + + r = kstrtou8(buf, 0, &zorder); + if (r) + return r; + + ovl->get_overlay_info(ovl, &info); + + info.zorder = zorder; + + r = ovl->set_overlay_info(ovl, &info); + if (r) + return r; + + if (ovl->manager) { + r = ovl->manager->apply(ovl->manager); + if (r) + return r; + } + + return size; +} + +struct overlay_attribute { + struct attribute attr; + ssize_t (*show)(struct omap_overlay *, char *); + ssize_t (*store)(struct omap_overlay *, const char *, size_t); +}; + +#define OVERLAY_ATTR(_name, _mode, _show, _store) \ + struct overlay_attribute overlay_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL); +static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR, + overlay_manager_show, overlay_manager_store); +static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL); +static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL); +static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR, + overlay_position_show, overlay_position_store); +static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR, + overlay_output_size_show, overlay_output_size_store); +static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR, + overlay_enabled_show, overlay_enabled_store); +static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR, + overlay_global_alpha_show, overlay_global_alpha_store); +static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR, + overlay_pre_mult_alpha_show, + overlay_pre_mult_alpha_store); +static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR, + overlay_zorder_show, overlay_zorder_store); + +static struct 
attribute *overlay_sysfs_attrs[] = { + &overlay_attr_name.attr, + &overlay_attr_manager.attr, + &overlay_attr_input_size.attr, + &overlay_attr_screen_width.attr, + &overlay_attr_position.attr, + &overlay_attr_output_size.attr, + &overlay_attr_enabled.attr, + &overlay_attr_global_alpha.attr, + &overlay_attr_pre_mult_alpha.attr, + &overlay_attr_zorder.attr, + NULL +}; + +static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct omap_overlay *overlay; + struct overlay_attribute *overlay_attr; + + overlay = container_of(kobj, struct omap_overlay, kobj); + overlay_attr = container_of(attr, struct overlay_attribute, attr); + + if (!overlay_attr->show) + return -ENOENT; + + return overlay_attr->show(overlay, buf); +} + +static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t size) +{ + struct omap_overlay *overlay; + struct overlay_attribute *overlay_attr; + + overlay = container_of(kobj, struct omap_overlay, kobj); + overlay_attr = container_of(attr, struct overlay_attribute, attr); + + if (!overlay_attr->store) + return -ENOENT; + + return overlay_attr->store(overlay, buf, size); +} + +static const struct sysfs_ops overlay_sysfs_ops = { + .show = overlay_attr_show, + .store = overlay_attr_store, +}; + +static struct kobj_type overlay_ktype = { + .sysfs_ops = &overlay_sysfs_ops, + .default_attrs = overlay_sysfs_attrs, +}; + +int dss_overlay_kobj_init(struct omap_overlay *ovl, + struct platform_device *pdev) +{ + return kobject_init_and_add(&ovl->kobj, &overlay_ktype, + &pdev->dev.kobj, "overlay%d", ovl->id); +} + +void dss_overlay_kobj_uninit(struct omap_overlay *ovl) +{ + kobject_del(&ovl->kobj); + kobject_put(&ovl->kobj); +} diff --git a/drivers/gpu/drm/omapdrm/dss/overlay.c b/drivers/gpu/drm/omapdrm/dss/overlay.c new file mode 100644 index 000000000000..2f7cee985cdd --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/overlay.c @@ -0,0 +1,202 @@ +/* + * linux/drivers/video/omap2/dss/overlay.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "OVERLAY" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/sysfs.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/slab.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +static int num_overlays; +static struct omap_overlay *overlays; + +int omap_dss_get_num_overlays(void) +{ + return num_overlays; +} +EXPORT_SYMBOL(omap_dss_get_num_overlays); + +struct omap_overlay *omap_dss_get_overlay(int num) +{ + if (num >= num_overlays) + return NULL; + + return &overlays[num]; +} +EXPORT_SYMBOL(omap_dss_get_overlay); + +void dss_init_overlays(struct platform_device *pdev) +{ + int i, r; + + num_overlays = dss_feat_get_num_ovls(); + + overlays = kzalloc(sizeof(struct omap_overlay) * num_overlays, + GFP_KERNEL); + + BUG_ON(overlays == NULL); + + for (i = 0; i < num_overlays; ++i) { + struct omap_overlay *ovl = &overlays[i]; + + switch (i) { + case 0: + ovl->name = "gfx"; + ovl->id = OMAP_DSS_GFX; + break; + case 1: + ovl->name = "vid1"; + ovl->id = OMAP_DSS_VIDEO1; + break; + case 2: + ovl->name = "vid2"; + ovl->id = OMAP_DSS_VIDEO2; + break; + case 3: + ovl->name = "vid3"; + ovl->id = OMAP_DSS_VIDEO3; + break; + } + + ovl->caps = dss_feat_get_overlay_caps(ovl->id); + ovl->supported_modes = + dss_feat_get_supported_color_modes(ovl->id); + + r = dss_overlay_kobj_init(ovl, pdev); + if (r) + DSSERR("failed to create sysfs file\n"); + } +} + +void dss_uninit_overlays(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < num_overlays; ++i) { + struct omap_overlay *ovl = &overlays[i]; + dss_overlay_kobj_uninit(ovl); + } + + kfree(overlays); + overlays = NULL; + num_overlays = 0; +} + +int dss_ovl_simple_check(struct omap_overlay *ovl, + const struct omap_overlay_info *info) +{ + if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { + if (info->out_width != 0 && info->width != info->out_width) { + DSSERR("check_overlay: overlay %d doesn't support " + "scaling\n", ovl->id); + return -EINVAL; + } + + if (info->out_height != 0 && info->height != info->out_height) { + DSSERR("check_overlay: overlay %d doesn't support " + "scaling\n", ovl->id); + return -EINVAL; + } + } + + if ((ovl->supported_modes & info->color_mode) == 0) { + DSSERR("check_overlay: overlay %d doesn't support mode %d\n", + ovl->id, info->color_mode); + return -EINVAL; + } + + if (info->zorder >= omap_dss_get_num_overlays()) { + DSSERR("check_overlay: zorder %d too high\n", info->zorder); + return -EINVAL; + } + + if (dss_feat_rotation_type_supported(info->rotation_type) == 0) { + DSSERR("check_overlay: rotation type %d not supported\n", + info->rotation_type); + return -EINVAL; + } + + return 0; +} + +int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, + const struct omap_video_timings *mgr_timings) +{ + u16 outw, outh; + u16 dw, dh; + + dw = mgr_timings->x_res; + dh = mgr_timings->y_res; + + if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { + outw = info->width; + outh = info->height; + } else { + if (info->out_width == 0) + outw = info->width; + else + outw = info->out_width; + + if (info->out_height == 0) + outh = info->height; + else + outh = info->out_height; + } + + if (dw < info->pos_x + outw) { + DSSERR("overlay %d horizontally not inside the display area " + "(%d + %d >= %d)\n", + ovl->id, info->pos_x, outw, dw); + return -EINVAL; + } + + if (dh < info->pos_y + outh) { + DSSERR("overlay %d vertically not inside the display area " + "(%d + %d >= %d)\n", + ovl->id, 
info->pos_y, outh, dh); + return -EINVAL; + } + + return 0; +} + +/* + * Checks if replication logic should be used. Only use when overlay is in + * RGB12U or RGB16 mode, and video port width interface is 18bpp or 24bpp + */ +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode) +{ + if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) + return false; + + return config.video_port_width > 16; +} diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c new file mode 100644 index 000000000000..f974ddcd3b6e --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -0,0 +1,389 @@ +/* + * Copyright (C) 2014 Texas Instruments Incorporated + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "PLL" + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/regulator/consumer.h> +#include <linux/sched.h> + +#include <video/omapdss.h> + +#include "dss.h" + +#define PLL_CONTROL 0x0000 +#define PLL_STATUS 0x0004 +#define PLL_GO 0x0008 +#define PLL_CONFIGURATION1 0x000C +#define PLL_CONFIGURATION2 0x0010 +#define PLL_CONFIGURATION3 0x0014 +#define PLL_SSC_CONFIGURATION1 0x0018 +#define PLL_SSC_CONFIGURATION2 0x001C +#define PLL_CONFIGURATION4 0x0020 + +static struct dss_pll *dss_plls[4]; + +int dss_pll_register(struct dss_pll *pll) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) { + if (!dss_plls[i]) { + dss_plls[i] = pll; + return 0; + } + } + + return -EBUSY; +} + +void dss_pll_unregister(struct dss_pll *pll) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) { + if (dss_plls[i] == pll) { + dss_plls[i] = NULL; + return; + } + } +} + +struct dss_pll *dss_pll_find(const char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) { + if (dss_plls[i] && strcmp(dss_plls[i]->name, name) == 0) + return dss_plls[i]; + } + + return NULL; +} + +int dss_pll_enable(struct dss_pll *pll) +{ + int r; + + r = clk_prepare_enable(pll->clkin); + if (r) + return r; + + if (pll->regulator) { + r = regulator_enable(pll->regulator); + if (r) + goto err_reg; + } + + r = pll->ops->enable(pll); + if (r) + goto err_enable; + + return 0; + +err_enable: + if (pll->regulator) + regulator_disable(pll->regulator); +err_reg: + clk_disable_unprepare(pll->clkin); + return r; +} + +void dss_pll_disable(struct dss_pll *pll) +{ + pll->ops->disable(pll); + + if (pll->regulator) + regulator_disable(pll->regulator); + + clk_disable_unprepare(pll->clkin); + + memset(&pll->cinfo, 0, sizeof(pll->cinfo)); +} + +int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo) +{ + int r; + + r = pll->ops->set_config(pll, cinfo); + if (r) + return r; + + pll->cinfo = *cinfo; + + return 0; +} + +bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, + unsigned long out_min, unsigned long out_max, + dss_hsdiv_calc_func func, void *data) +{ + const struct dss_pll_hw *hw = pll->hw; + int m, m_start, m_stop; + 
unsigned long out; + + out_min = out_min ? out_min : 1; + out_max = out_max ? out_max : ULONG_MAX; + + m_start = max(DIV_ROUND_UP(clkdco, out_max), 1ul); + + m_stop = min((unsigned)(clkdco / out_min), hw->mX_max); + + for (m = m_start; m <= m_stop; ++m) { + out = clkdco / m; + + if (func(m, out, data)) + return true; + } + + return false; +} + +bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, + unsigned long pll_min, unsigned long pll_max, + dss_pll_calc_func func, void *data) +{ + const struct dss_pll_hw *hw = pll->hw; + int n, n_start, n_stop; + int m, m_start, m_stop; + unsigned long fint, clkdco; + unsigned long pll_hw_max; + unsigned long fint_hw_min, fint_hw_max; + + pll_hw_max = hw->clkdco_max; + + fint_hw_min = hw->fint_min; + fint_hw_max = hw->fint_max; + + n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul); + n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max); + + pll_max = pll_max ? pll_max : ULONG_MAX; + + for (n = n_start; n <= n_stop; ++n) { + fint = clkin / n; + + m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2), + 1ul); + m_stop = min3((unsigned)(pll_max / fint / 2), + (unsigned)(pll_hw_max / fint / 2), + hw->m_max); + + for (m = m_start; m <= m_stop; ++m) { + clkdco = 2 * m * fint; + + if (func(n, m, fint, clkdco, data)) + return true; + } + } + + return false; +} + +static int wait_for_bit_change(void __iomem *reg, int bitnum, int value) +{ + unsigned long timeout; + ktime_t wait; + int t; + + /* first busyloop to see if the bit changes right away */ + t = 100; + while (t-- > 0) { + if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value) + return value; + } + + /* then loop for 500ms, sleeping for 1ms in between */ + timeout = jiffies + msecs_to_jiffies(500); + while (time_before(jiffies, timeout)) { + if (FLD_GET(readl_relaxed(reg), bitnum, bitnum) == value) + return value; + + wait = ns_to_ktime(1000 * 1000); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&wait, HRTIMER_MODE_REL); + } + + return !value; +} + +int dss_pll_wait_reset_done(struct dss_pll *pll) +{ + void __iomem *base = pll->base; + + if (wait_for_bit_change(base + PLL_STATUS, 0, 1) != 1) + return -ETIMEDOUT; + else + return 0; +} + +static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask) +{ + int t = 100; + + while (t-- > 0) { + u32 v = readl_relaxed(pll->base + PLL_STATUS); + v &= hsdiv_ack_mask; + if (v == hsdiv_ack_mask) + return 0; + } + + return -ETIMEDOUT; +} + +int dss_pll_write_config_type_a(struct dss_pll *pll, + const struct dss_pll_clock_info *cinfo) +{ + const struct dss_pll_hw *hw = pll->hw; + void __iomem *base = pll->base; + int r = 0; + u32 l; + + l = 0; + if (hw->has_stopmode) + l = FLD_MOD(l, 1, 0, 0); /* PLL_STOPMODE */ + l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */ + l = FLD_MOD(l, cinfo->m, hw->m_msb, hw->m_lsb); /* PLL_REGM */ + /* M4 */ + l = FLD_MOD(l, cinfo->mX[0] ? cinfo->mX[0] - 1 : 0, + hw->mX_msb[0], hw->mX_lsb[0]); + /* M5 */ + l = FLD_MOD(l, cinfo->mX[1] ? cinfo->mX[1] - 1 : 0, + hw->mX_msb[1], hw->mX_lsb[1]); + writel_relaxed(l, base + PLL_CONFIGURATION1); + + l = 0; + /* M6 */ + l = FLD_MOD(l, cinfo->mX[2] ? cinfo->mX[2] - 1 : 0, + hw->mX_msb[2], hw->mX_lsb[2]); + /* M7 */ + l = FLD_MOD(l, cinfo->mX[3] ? cinfo->mX[3] - 1 : 0, + hw->mX_msb[3], hw->mX_lsb[3]); + writel_relaxed(l, base + PLL_CONFIGURATION3); + + l = readl_relaxed(base + PLL_CONFIGURATION2); + if (hw->has_freqsel) { + u32 f = cinfo->fint < 1000000 ? 0x3 : + cinfo->fint < 1250000 ? 0x4 : + cinfo->fint < 1500000 ? 
0x5 : + cinfo->fint < 1750000 ? 0x6 : + 0x7; + + l = FLD_MOD(l, f, 4, 1); /* PLL_FREQSEL */ + } else if (hw->has_selfreqdco) { + u32 f = cinfo->clkdco < hw->clkdco_low ? 0x2 : 0x4; + + l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */ + } + l = FLD_MOD(l, 1, 13, 13); /* PLL_REFEN */ + l = FLD_MOD(l, 0, 14, 14); /* PHY_CLKINEN */ + l = FLD_MOD(l, 0, 16, 16); /* M4_CLOCK_EN */ + l = FLD_MOD(l, 0, 18, 18); /* M5_CLOCK_EN */ + l = FLD_MOD(l, 1, 20, 20); /* HSDIVBYPASS */ + if (hw->has_refsel) + l = FLD_MOD(l, 3, 22, 21); /* REFSEL = sysclk */ + l = FLD_MOD(l, 0, 23, 23); /* M6_CLOCK_EN */ + l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */ + writel_relaxed(l, base + PLL_CONFIGURATION2); + + writel_relaxed(1, base + PLL_GO); /* PLL_GO */ + + if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) { + DSSERR("DSS DPLL GO bit not going down.\n"); + r = -EIO; + goto err; + } + + if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) { + DSSERR("cannot lock DSS DPLL\n"); + r = -EIO; + goto err; + } + + l = readl_relaxed(base + PLL_CONFIGURATION2); + l = FLD_MOD(l, 1, 14, 14); /* PHY_CLKINEN */ + l = FLD_MOD(l, cinfo->mX[0] ? 1 : 0, 16, 16); /* M4_CLOCK_EN */ + l = FLD_MOD(l, cinfo->mX[1] ? 1 : 0, 18, 18); /* M5_CLOCK_EN */ + l = FLD_MOD(l, 0, 20, 20); /* HSDIVBYPASS */ + l = FLD_MOD(l, cinfo->mX[2] ? 1 : 0, 23, 23); /* M6_CLOCK_EN */ + l = FLD_MOD(l, cinfo->mX[3] ? 1 : 0, 25, 25); /* M7_CLOCK_EN */ + writel_relaxed(l, base + PLL_CONFIGURATION2); + + r = dss_wait_hsdiv_ack(pll, + (cinfo->mX[0] ? BIT(7) : 0) | + (cinfo->mX[1] ? BIT(8) : 0) | + (cinfo->mX[2] ? BIT(10) : 0) | + (cinfo->mX[3] ? BIT(11) : 0)); + if (r) { + DSSERR("failed to enable HSDIV clocks\n"); + goto err; + } + +err: + return r; +} + +int dss_pll_write_config_type_b(struct dss_pll *pll, + const struct dss_pll_clock_info *cinfo) +{ + const struct dss_pll_hw *hw = pll->hw; + void __iomem *base = pll->base; + u32 l; + + l = 0; + l = FLD_MOD(l, cinfo->m, 20, 9); /* PLL_REGM */ + l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */ + writel_relaxed(l, base + PLL_CONFIGURATION1); + + l = readl_relaxed(base + PLL_CONFIGURATION2); + l = FLD_MOD(l, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */ + l = FLD_MOD(l, 0x1, 13, 13); /* PLL_REFEN */ + l = FLD_MOD(l, 0x0, 14, 14); /* PHY_CLKINEN */ + if (hw->has_refsel) + l = FLD_MOD(l, 0x3, 22, 21); /* REFSEL = SYSCLK */ + + /* PLL_SELFREQDCO */ + if (cinfo->clkdco > hw->clkdco_low) + l = FLD_MOD(l, 0x4, 3, 1); + else + l = FLD_MOD(l, 0x2, 3, 1); + writel_relaxed(l, base + PLL_CONFIGURATION2); + + l = readl_relaxed(base + PLL_CONFIGURATION3); + l = FLD_MOD(l, cinfo->sd, 17, 10); /* PLL_REGSD */ + writel_relaxed(l, base + PLL_CONFIGURATION3); + + l = readl_relaxed(base + PLL_CONFIGURATION4); + l = FLD_MOD(l, cinfo->mX[0], 24, 18); /* PLL_REGM2 */ + l = FLD_MOD(l, cinfo->mf, 17, 0); /* PLL_REGM_F */ + writel_relaxed(l, base + PLL_CONFIGURATION4); + + writel_relaxed(1, base + PLL_GO); /* PLL_GO */ + + if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) { + DSSERR("DSS DPLL GO bit not going down.\n"); + return -EIO; + } + + if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) { + DSSERR("cannot lock DSS DPLL\n"); + return -ETIMEDOUT; + } + + return 0; +} diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c new file mode 100644 index 000000000000..aea6a1d0fb20 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c @@ -0,0 +1,1078 @@ +/* + * linux/drivers/video/omap2/dss/rfbi.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * Some 
code and ideas taken from drivers/video/omap/ driver + * by Imre Deak. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define DSS_SUBSYS_NAME "RFBI" + +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/export.h> +#include <linux/vmalloc.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/kfifo.h> +#include <linux/ktime.h> +#include <linux/hrtimer.h> +#include <linux/seq_file.h> +#include <linux/semaphore.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/component.h> + +#include <video/omapdss.h> +#include "dss.h" + +struct rfbi_reg { u16 idx; }; + +#define RFBI_REG(idx) ((const struct rfbi_reg) { idx }) + +#define RFBI_REVISION RFBI_REG(0x0000) +#define RFBI_SYSCONFIG RFBI_REG(0x0010) +#define RFBI_SYSSTATUS RFBI_REG(0x0014) +#define RFBI_CONTROL RFBI_REG(0x0040) +#define RFBI_PIXEL_CNT RFBI_REG(0x0044) +#define RFBI_LINE_NUMBER RFBI_REG(0x0048) +#define RFBI_CMD RFBI_REG(0x004c) +#define RFBI_PARAM RFBI_REG(0x0050) +#define RFBI_DATA RFBI_REG(0x0054) +#define RFBI_READ RFBI_REG(0x0058) +#define RFBI_STATUS RFBI_REG(0x005c) + +#define RFBI_CONFIG(n) RFBI_REG(0x0060 + (n)*0x18) +#define RFBI_ONOFF_TIME(n) RFBI_REG(0x0064 + (n)*0x18) +#define RFBI_CYCLE_TIME(n) RFBI_REG(0x0068 + (n)*0x18) +#define RFBI_DATA_CYCLE1(n) RFBI_REG(0x006c + (n)*0x18) +#define RFBI_DATA_CYCLE2(n) RFBI_REG(0x0070 + (n)*0x18) +#define RFBI_DATA_CYCLE3(n) RFBI_REG(0x0074 + (n)*0x18) + +#define RFBI_VSYNC_WIDTH RFBI_REG(0x0090) +#define RFBI_HSYNC_WIDTH RFBI_REG(0x0094) + +#define REG_FLD_MOD(idx, val, start, end) \ + rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end)) + +enum omap_rfbi_cycleformat { + OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0, + OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1, + OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2, + OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3, +}; + +enum omap_rfbi_datatype { + OMAP_DSS_RFBI_DATATYPE_12 = 0, + OMAP_DSS_RFBI_DATATYPE_16 = 1, + OMAP_DSS_RFBI_DATATYPE_18 = 2, + OMAP_DSS_RFBI_DATATYPE_24 = 3, +}; + +enum omap_rfbi_parallelmode { + OMAP_DSS_RFBI_PARALLELMODE_8 = 0, + OMAP_DSS_RFBI_PARALLELMODE_9 = 1, + OMAP_DSS_RFBI_PARALLELMODE_12 = 2, + OMAP_DSS_RFBI_PARALLELMODE_16 = 3, +}; + +static int rfbi_convert_timings(struct rfbi_timings *t); +static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div); + +static struct { + struct platform_device *pdev; + void __iomem *base; + + unsigned long l4_khz; + + enum omap_rfbi_datatype datatype; + enum omap_rfbi_parallelmode parallelmode; + + enum omap_rfbi_te_mode te_mode; + int te_enabled; + + void (*framedone_callback)(void *data); + void *framedone_callback_data; + + struct omap_dss_device *dssdev[2]; + + struct semaphore bus_lock; + + struct omap_video_timings timings; + int pixel_size; + int data_lines; + struct rfbi_timings intf_timings; + + struct omap_dss_device output; +} rfbi; + +static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val) +{ + __raw_writel(val, rfbi.base + idx.idx); +} 
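+
+/*
+ * rfbi_write_reg()/rfbi_read_reg() are plain MMIO accessors into the RFBI
+ * register block; REG_FLD_MOD() above combines them for read-modify-write
+ * field updates.
+ */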
+ +static inline u32 rfbi_read_reg(const struct rfbi_reg idx) +{ + return __raw_readl(rfbi.base + idx.idx); +} + +static int rfbi_runtime_get(void) +{ + int r; + + DSSDBG("rfbi_runtime_get\n"); + + r = pm_runtime_get_sync(&rfbi.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +static void rfbi_runtime_put(void) +{ + int r; + + DSSDBG("rfbi_runtime_put\n"); + + r = pm_runtime_put_sync(&rfbi.pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} + +static void rfbi_bus_lock(void) +{ + down(&rfbi.bus_lock); +} + +static void rfbi_bus_unlock(void) +{ + up(&rfbi.bus_lock); +} + +static void rfbi_write_command(const void *buf, u32 len) +{ + switch (rfbi.parallelmode) { + case OMAP_DSS_RFBI_PARALLELMODE_8: + { + const u8 *b = buf; + for (; len; len--) + rfbi_write_reg(RFBI_CMD, *b++); + break; + } + + case OMAP_DSS_RFBI_PARALLELMODE_16: + { + const u16 *w = buf; + BUG_ON(len & 1); + for (; len; len -= 2) + rfbi_write_reg(RFBI_CMD, *w++); + break; + } + + case OMAP_DSS_RFBI_PARALLELMODE_9: + case OMAP_DSS_RFBI_PARALLELMODE_12: + default: + BUG(); + } +} + +static void rfbi_read_data(void *buf, u32 len) +{ + switch (rfbi.parallelmode) { + case OMAP_DSS_RFBI_PARALLELMODE_8: + { + u8 *b = buf; + for (; len; len--) { + rfbi_write_reg(RFBI_READ, 0); + *b++ = rfbi_read_reg(RFBI_READ); + } + break; + } + + case OMAP_DSS_RFBI_PARALLELMODE_16: + { + u16 *w = buf; + BUG_ON(len & ~1); + for (; len; len -= 2) { + rfbi_write_reg(RFBI_READ, 0); + *w++ = rfbi_read_reg(RFBI_READ); + } + break; + } + + case OMAP_DSS_RFBI_PARALLELMODE_9: + case OMAP_DSS_RFBI_PARALLELMODE_12: + default: + BUG(); + } +} + +static void rfbi_write_data(const void *buf, u32 len) +{ + switch (rfbi.parallelmode) { + case OMAP_DSS_RFBI_PARALLELMODE_8: + { + const u8 *b = buf; + for (; len; len--) + rfbi_write_reg(RFBI_PARAM, *b++); + break; + } + + case OMAP_DSS_RFBI_PARALLELMODE_16: + { + const u16 *w = buf; + BUG_ON(len & 1); + for (; len; len -= 2) + rfbi_write_reg(RFBI_PARAM, *w++); + break; + } + + case OMAP_DSS_RFBI_PARALLELMODE_9: + case OMAP_DSS_RFBI_PARALLELMODE_12: + default: + BUG(); + + } +} + +static void rfbi_write_pixels(const void __iomem *buf, int scr_width, + u16 x, u16 y, + u16 w, u16 h) +{ + int start_offset = scr_width * y + x; + int horiz_offset = scr_width - w; + int i; + + if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && + rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { + const u16 __iomem *pd = buf; + pd += start_offset; + + for (; h; --h) { + for (i = 0; i < w; ++i) { + const u8 __iomem *b = (const u8 __iomem *)pd; + rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1)); + rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0)); + ++pd; + } + pd += horiz_offset; + } + } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 && + rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { + const u32 __iomem *pd = buf; + pd += start_offset; + + for (; h; --h) { + for (i = 0; i < w; ++i) { + const u8 __iomem *b = (const u8 __iomem *)pd; + rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2)); + rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1)); + rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0)); + ++pd; + } + pd += horiz_offset; + } + } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && + rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) { + const u16 __iomem *pd = buf; + pd += start_offset; + + for (; h; --h) { + for (i = 0; i < w; ++i) { + rfbi_write_reg(RFBI_PARAM, __raw_readw(pd)); + ++pd; + } + pd += horiz_offset; + } + } else { + BUG(); + } +} + +static int rfbi_transfer_area(struct omap_dss_device *dssdev, + void (*callback)(void *data), 
void *data) +{ + u32 l; + int r; + struct omap_overlay_manager *mgr = rfbi.output.manager; + u16 width = rfbi.timings.x_res; + u16 height = rfbi.timings.y_res; + + /*BUG_ON(callback == 0);*/ + BUG_ON(rfbi.framedone_callback != NULL); + + DSSDBG("rfbi_transfer_area %dx%d\n", width, height); + + dss_mgr_set_timings(mgr, &rfbi.timings); + + r = dss_mgr_enable(mgr); + if (r) + return r; + + rfbi.framedone_callback = callback; + rfbi.framedone_callback_data = data; + + rfbi_write_reg(RFBI_PIXEL_CNT, width * height); + + l = rfbi_read_reg(RFBI_CONTROL); + l = FLD_MOD(l, 1, 0, 0); /* enable */ + if (!rfbi.te_enabled) + l = FLD_MOD(l, 1, 4, 4); /* ITE */ + + rfbi_write_reg(RFBI_CONTROL, l); + + return 0; +} + +static void framedone_callback(void *data) +{ + void (*callback)(void *data); + + DSSDBG("FRAMEDONE\n"); + + REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0); + + callback = rfbi.framedone_callback; + rfbi.framedone_callback = NULL; + + if (callback != NULL) + callback(rfbi.framedone_callback_data); +} + +#if 1 /* VERBOSE */ +static void rfbi_print_timings(void) +{ + u32 l; + u32 time; + + l = rfbi_read_reg(RFBI_CONFIG(0)); + time = 1000000000 / rfbi.l4_khz; + if (l & (1 << 4)) + time *= 2; + + DSSDBG("Tick time %u ps\n", time); + l = rfbi_read_reg(RFBI_ONOFF_TIME(0)); + DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, " + "REONTIME %d, REOFFTIME %d\n", + l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f, + (l >> 20) & 0x0f, (l >> 24) & 0x3f); + + l = rfbi_read_reg(RFBI_CYCLE_TIME(0)); + DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, " + "ACCESSTIME %d\n", + (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f, + (l >> 22) & 0x3f); +} +#else +static void rfbi_print_timings(void) {} +#endif + + + + +static u32 extif_clk_period; + +static inline unsigned long round_to_extif_ticks(unsigned long ps, int div) +{ + int bus_tick = extif_clk_period * div; + return (ps + bus_tick - 1) / bus_tick * bus_tick; +} + +static int calc_reg_timing(struct rfbi_timings *t, int div) +{ + t->clk_div = div; + + t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div); + + t->we_on_time = round_to_extif_ticks(t->we_on_time, div); + t->we_off_time = round_to_extif_ticks(t->we_off_time, div); + t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div); + + t->re_on_time = round_to_extif_ticks(t->re_on_time, div); + t->re_off_time = round_to_extif_ticks(t->re_off_time, div); + t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div); + + t->access_time = round_to_extif_ticks(t->access_time, div); + t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div); + t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div); + + DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n", + t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time); + DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n", + t->we_on_time, t->we_off_time, t->re_cycle_time, + t->we_cycle_time); + DSSDBG("[reg]rdaccess %d cspulse %d\n", + t->access_time, t->cs_pulse_width); + + return rfbi_convert_timings(t); +} + +static int calc_extif_timings(struct rfbi_timings *t) +{ + u32 max_clk_div; + int div; + + rfbi_get_clk_info(&extif_clk_period, &max_clk_div); + for (div = 1; div <= max_clk_div; div++) { + if (calc_reg_timing(t, div) == 0) + break; + } + + if (div <= max_clk_div) + return 0; + + DSSERR("can't setup timings\n"); + return -1; +} + + +static void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) +{ + int r; + + if (!t->converted) { + r = calc_extif_timings(t); + if (r < 0) + DSSERR("Failed to calc 
timings\n"); + } + + BUG_ON(!t->converted); + + rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]); + rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]); + + /* TIMEGRANULARITY */ + REG_FLD_MOD(RFBI_CONFIG(rfbi_module), + (t->tim[2] ? 1 : 0), 4, 4); + + rfbi_print_timings(); +} + +static int ps_to_rfbi_ticks(int time, int div) +{ + unsigned long tick_ps; + int ret; + + /* Calculate in picosecs to yield more exact results */ + tick_ps = 1000000000 / (rfbi.l4_khz) * div; + + ret = (time + tick_ps - 1) / tick_ps; + + return ret; +} + +static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div) +{ + *clk_period = 1000000000 / rfbi.l4_khz; + *max_clk_div = 2; +} + +static int rfbi_convert_timings(struct rfbi_timings *t) +{ + u32 l; + int reon, reoff, weon, weoff, cson, csoff, cs_pulse; + int actim, recyc, wecyc; + int div = t->clk_div; + + if (div <= 0 || div > 2) + return -1; + + /* Make sure that after conversion it still holds that: + * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff, + * csoff > cson, csoff >= max(weoff, reoff), actim > reon + */ + weon = ps_to_rfbi_ticks(t->we_on_time, div); + weoff = ps_to_rfbi_ticks(t->we_off_time, div); + if (weoff <= weon) + weoff = weon + 1; + if (weon > 0x0f) + return -1; + if (weoff > 0x3f) + return -1; + + reon = ps_to_rfbi_ticks(t->re_on_time, div); + reoff = ps_to_rfbi_ticks(t->re_off_time, div); + if (reoff <= reon) + reoff = reon + 1; + if (reon > 0x0f) + return -1; + if (reoff > 0x3f) + return -1; + + cson = ps_to_rfbi_ticks(t->cs_on_time, div); + csoff = ps_to_rfbi_ticks(t->cs_off_time, div); + if (csoff <= cson) + csoff = cson + 1; + if (csoff < max(weoff, reoff)) + csoff = max(weoff, reoff); + if (cson > 0x0f) + return -1; + if (csoff > 0x3f) + return -1; + + l = cson; + l |= csoff << 4; + l |= weon << 10; + l |= weoff << 14; + l |= reon << 20; + l |= reoff << 24; + + t->tim[0] = l; + + actim = ps_to_rfbi_ticks(t->access_time, div); + if (actim <= reon) + actim = reon + 1; + if (actim > 0x3f) + return -1; + + wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div); + if (wecyc < weoff) + wecyc = weoff; + if (wecyc > 0x3f) + return -1; + + recyc = ps_to_rfbi_ticks(t->re_cycle_time, div); + if (recyc < reoff) + recyc = reoff; + if (recyc > 0x3f) + return -1; + + cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div); + if (cs_pulse > 0x3f) + return -1; + + l = wecyc; + l |= recyc << 6; + l |= cs_pulse << 12; + l |= actim << 22; + + t->tim[1] = l; + + t->tim[2] = div - 1; + + t->converted = 1; + + return 0; +} + +/* xxx FIX module selection missing */ +static int rfbi_setup_te(enum omap_rfbi_te_mode mode, + unsigned hs_pulse_time, unsigned vs_pulse_time, + int hs_pol_inv, int vs_pol_inv, int extif_div) +{ + int hs, vs; + int min; + u32 l; + + hs = ps_to_rfbi_ticks(hs_pulse_time, 1); + vs = ps_to_rfbi_ticks(vs_pulse_time, 1); + if (hs < 2) + return -EDOM; + if (mode == OMAP_DSS_RFBI_TE_MODE_2) + min = 2; + else /* OMAP_DSS_RFBI_TE_MODE_1 */ + min = 4; + if (vs < min) + return -EDOM; + if (vs == hs) + return -EINVAL; + rfbi.te_mode = mode; + DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n", + mode, hs, vs, hs_pol_inv, vs_pol_inv); + + rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); + rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); + + l = rfbi_read_reg(RFBI_CONFIG(0)); + if (hs_pol_inv) + l &= ~(1 << 21); + else + l |= 1 << 21; + if (vs_pol_inv) + l &= ~(1 << 20); + else + l |= 1 << 20; + + return 0; +} + +/* xxx FIX module selection missing */ +static int rfbi_enable_te(bool enable, unsigned line) +{ + u32 l; + + DSSDBG("te %d line %d 
mode %d\n", enable, line, rfbi.te_mode); + if (line > (1 << 11) - 1) + return -EINVAL; + + l = rfbi_read_reg(RFBI_CONFIG(0)); + l &= ~(0x3 << 2); + if (enable) { + rfbi.te_enabled = 1; + l |= rfbi.te_mode << 2; + } else + rfbi.te_enabled = 0; + rfbi_write_reg(RFBI_CONFIG(0), l); + rfbi_write_reg(RFBI_LINE_NUMBER, line); + + return 0; +} + +static int rfbi_configure_bus(int rfbi_module, int bpp, int lines) +{ + u32 l; + int cycle1 = 0, cycle2 = 0, cycle3 = 0; + enum omap_rfbi_cycleformat cycleformat; + enum omap_rfbi_datatype datatype; + enum omap_rfbi_parallelmode parallelmode; + + switch (bpp) { + case 12: + datatype = OMAP_DSS_RFBI_DATATYPE_12; + break; + case 16: + datatype = OMAP_DSS_RFBI_DATATYPE_16; + break; + case 18: + datatype = OMAP_DSS_RFBI_DATATYPE_18; + break; + case 24: + datatype = OMAP_DSS_RFBI_DATATYPE_24; + break; + default: + BUG(); + return 1; + } + rfbi.datatype = datatype; + + switch (lines) { + case 8: + parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8; + break; + case 9: + parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9; + break; + case 12: + parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12; + break; + case 16: + parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16; + break; + default: + BUG(); + return 1; + } + rfbi.parallelmode = parallelmode; + + if ((bpp % lines) == 0) { + switch (bpp / lines) { + case 1: + cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1; + break; + case 2: + cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1; + break; + case 3: + cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1; + break; + default: + BUG(); + return 1; + } + } else if ((2 * bpp % lines) == 0) { + if ((2 * bpp / lines) == 3) + cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2; + else { + BUG(); + return 1; + } + } else { + BUG(); + return 1; + } + + switch (cycleformat) { + case OMAP_DSS_RFBI_CYCLEFORMAT_1_1: + cycle1 = lines; + break; + + case OMAP_DSS_RFBI_CYCLEFORMAT_2_1: + cycle1 = lines; + cycle2 = lines; + break; + + case OMAP_DSS_RFBI_CYCLEFORMAT_3_1: + cycle1 = lines; + cycle2 = lines; + cycle3 = lines; + break; + + case OMAP_DSS_RFBI_CYCLEFORMAT_3_2: + cycle1 = lines; + cycle2 = (lines / 2) | ((lines / 2) << 16); + cycle3 = (lines << 16); + break; + } + + REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */ + + l = 0; + l |= FLD_VAL(parallelmode, 1, 0); + l |= FLD_VAL(0, 3, 2); /* TRIGGERMODE: ITE */ + l |= FLD_VAL(0, 4, 4); /* TIMEGRANULARITY */ + l |= FLD_VAL(datatype, 6, 5); + /* l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */ + l |= FLD_VAL(0, 8, 7); /* L4FORMAT, 1pix/L4 */ + l |= FLD_VAL(cycleformat, 10, 9); + l |= FLD_VAL(0, 12, 11); /* UNUSEDBITS */ + l |= FLD_VAL(0, 16, 16); /* A0POLARITY */ + l |= FLD_VAL(0, 17, 17); /* REPOLARITY */ + l |= FLD_VAL(0, 18, 18); /* WEPOLARITY */ + l |= FLD_VAL(0, 19, 19); /* CSPOLARITY */ + l |= FLD_VAL(1, 20, 20); /* TE_VSYNC_POLARITY */ + l |= FLD_VAL(1, 21, 21); /* HSYNCPOLARITY */ + rfbi_write_reg(RFBI_CONFIG(rfbi_module), l); + + rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1); + rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2); + rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3); + + + l = rfbi_read_reg(RFBI_CONTROL); + l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */ + l = FLD_MOD(l, 0, 1, 1); /* clear bypass */ + rfbi_write_reg(RFBI_CONTROL, l); + + + DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n", + bpp, lines, cycle1, cycle2, cycle3); + + return 0; +} + +static int rfbi_configure(struct omap_dss_device *dssdev) +{ + return rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size, + rfbi.data_lines); +} + +static int 
rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *), + void *data) +{ + return rfbi_transfer_area(dssdev, callback, data); +} + +static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h) +{ + rfbi.timings.x_res = w; + rfbi.timings.y_res = h; +} + +static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size) +{ + rfbi.pixel_size = pixel_size; +} + +static void rfbi_set_data_lines(struct omap_dss_device *dssdev, int data_lines) +{ + rfbi.data_lines = data_lines; +} + +static void rfbi_set_interface_timings(struct omap_dss_device *dssdev, + struct rfbi_timings *timings) +{ + rfbi.intf_timings = *timings; +} + +static void rfbi_dump_regs(struct seq_file *s) +{ +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) + + if (rfbi_runtime_get()) + return; + + DUMPREG(RFBI_REVISION); + DUMPREG(RFBI_SYSCONFIG); + DUMPREG(RFBI_SYSSTATUS); + DUMPREG(RFBI_CONTROL); + DUMPREG(RFBI_PIXEL_CNT); + DUMPREG(RFBI_LINE_NUMBER); + DUMPREG(RFBI_CMD); + DUMPREG(RFBI_PARAM); + DUMPREG(RFBI_DATA); + DUMPREG(RFBI_READ); + DUMPREG(RFBI_STATUS); + + DUMPREG(RFBI_CONFIG(0)); + DUMPREG(RFBI_ONOFF_TIME(0)); + DUMPREG(RFBI_CYCLE_TIME(0)); + DUMPREG(RFBI_DATA_CYCLE1(0)); + DUMPREG(RFBI_DATA_CYCLE2(0)); + DUMPREG(RFBI_DATA_CYCLE3(0)); + + DUMPREG(RFBI_CONFIG(1)); + DUMPREG(RFBI_ONOFF_TIME(1)); + DUMPREG(RFBI_CYCLE_TIME(1)); + DUMPREG(RFBI_DATA_CYCLE1(1)); + DUMPREG(RFBI_DATA_CYCLE2(1)); + DUMPREG(RFBI_DATA_CYCLE3(1)); + + DUMPREG(RFBI_VSYNC_WIDTH); + DUMPREG(RFBI_HSYNC_WIDTH); + + rfbi_runtime_put(); +#undef DUMPREG +} + +static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev) +{ + struct omap_overlay_manager *mgr = rfbi.output.manager; + struct dss_lcd_mgr_config mgr_config; + + mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI; + + mgr_config.stallmode = true; + /* Do we need fifohandcheck for RFBI? 
*/ + mgr_config.fifohandcheck = false; + + mgr_config.video_port_width = rfbi.pixel_size; + mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(mgr, &mgr_config); + + /* + * Set rfbi.timings with default values, the x_res and y_res fields + * are expected to be already configured by the panel driver via + * omapdss_rfbi_set_size() + */ + rfbi.timings.hsw = 1; + rfbi.timings.hfp = 1; + rfbi.timings.hbp = 1; + rfbi.timings.vsw = 1; + rfbi.timings.vfp = 0; + rfbi.timings.vbp = 0; + + rfbi.timings.interlace = false; + rfbi.timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + rfbi.timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + rfbi.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + rfbi.timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH; + rfbi.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE; + + dss_mgr_set_timings(mgr, &rfbi.timings); +} + +static int rfbi_display_enable(struct omap_dss_device *dssdev) +{ + struct omap_dss_device *out = &rfbi.output; + int r; + + if (out->manager == NULL) { + DSSERR("failed to enable display: no output/manager\n"); + return -ENODEV; + } + + r = rfbi_runtime_get(); + if (r) + return r; + + r = dss_mgr_register_framedone_handler(out->manager, + framedone_callback, NULL); + if (r) { + DSSERR("can't get FRAMEDONE irq\n"); + goto err1; + } + + rfbi_config_lcd_manager(dssdev); + + rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size, + rfbi.data_lines); + + rfbi_set_timings(dssdev->phy.rfbi.channel, &rfbi.intf_timings); + + return 0; +err1: + rfbi_runtime_put(); + return r; +} + +static void rfbi_display_disable(struct omap_dss_device *dssdev) +{ + struct omap_dss_device *out = &rfbi.output; + + dss_mgr_unregister_framedone_handler(out->manager, + framedone_callback, NULL); + + rfbi_runtime_put(); +} + +static int rfbi_init_display(struct omap_dss_device *dssdev) +{ + rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev; + return 0; +} + +static void rfbi_init_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &rfbi.output; + + out->dev = &pdev->dev; + out->id = OMAP_DSS_OUTPUT_DBI; + out->output_type = OMAP_DISPLAY_TYPE_DBI; + out->name = "rfbi.0"; + out->dispc_channel = OMAP_DSS_CHANNEL_LCD; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void rfbi_uninit_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &rfbi.output; + + omapdss_unregister_output(out); +} + +/* RFBI HW IP initialisation */ +static int rfbi_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + u32 rev; + struct resource *rfbi_mem; + struct clk *clk; + int r; + + rfbi.pdev = pdev; + + sema_init(&rfbi.bus_lock, 1); + + rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); + if (!rfbi_mem) { + DSSERR("can't get IORESOURCE_MEM RFBI\n"); + return -EINVAL; + } + + rfbi.base = devm_ioremap(&pdev->dev, rfbi_mem->start, + resource_size(rfbi_mem)); + if (!rfbi.base) { + DSSERR("can't ioremap RFBI\n"); + return -ENOMEM; + } + + clk = clk_get(&pdev->dev, "ick"); + if (IS_ERR(clk)) { + DSSERR("can't get ick\n"); + return PTR_ERR(clk); + } + + rfbi.l4_khz = clk_get_rate(clk) / 1000; + + clk_put(clk); + + pm_runtime_enable(&pdev->dev); + + r = rfbi_runtime_get(); + if (r) + goto err_runtime_get; + + msleep(10); + + rev = rfbi_read_reg(RFBI_REVISION); + dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n", + FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + rfbi_runtime_put(); + + dss_debugfs_create_file("rfbi", rfbi_dump_regs); + + rfbi_init_output(pdev); + + 
return 0; + +err_runtime_get: + pm_runtime_disable(&pdev->dev); + return r; +} + +static void rfbi_unbind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + rfbi_uninit_output(pdev); + + pm_runtime_disable(&pdev->dev); +} + +static const struct component_ops rfbi_component_ops = { + .bind = rfbi_bind, + .unbind = rfbi_unbind, +}; + +static int rfbi_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &rfbi_component_ops); +} + +static int rfbi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &rfbi_component_ops); + return 0; +} + +static int rfbi_runtime_suspend(struct device *dev) +{ + dispc_runtime_put(); + + return 0; +} + +static int rfbi_runtime_resume(struct device *dev) +{ + int r; + + r = dispc_runtime_get(); + if (r < 0) + return r; + + return 0; +} + +static const struct dev_pm_ops rfbi_pm_ops = { + .runtime_suspend = rfbi_runtime_suspend, + .runtime_resume = rfbi_runtime_resume, +}; + +static struct platform_driver omap_rfbihw_driver = { + .probe = rfbi_probe, + .remove = rfbi_remove, + .driver = { + .name = "omapdss_rfbi", + .pm = &rfbi_pm_ops, + .suppress_bind_attrs = true, + }, +}; + +int __init rfbi_init_platform_driver(void) +{ + return platform_driver_register(&omap_rfbihw_driver); +} + +void rfbi_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_rfbihw_driver); +} diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c new file mode 100644 index 000000000000..d747cc6b59e1 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -0,0 +1,454 @@ +/* + * linux/drivers/video/omap2/dss/sdi.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "SDI" + +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/regulator/consumer.h> +#include <linux/export.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/of.h> +#include <linux/component.h> + +#include <video/omapdss.h> +#include "dss.h" + +static struct { + struct platform_device *pdev; + + bool update_enabled; + struct regulator *vdds_sdi_reg; + + struct dss_lcd_mgr_config mgr_config; + struct omap_video_timings timings; + int datapairs; + + struct omap_dss_device output; + + bool port_initialized; +} sdi; + +struct sdi_clk_calc_ctx { + unsigned long pck_min, pck_max; + + unsigned long fck; + struct dispc_clock_info dispc_cinfo; +}; + +static bool dpi_calc_dispc_cb(int lckd, int pckd, unsigned long lck, + unsigned long pck, void *data) +{ + struct sdi_clk_calc_ctx *ctx = data; + + ctx->dispc_cinfo.lck_div = lckd; + ctx->dispc_cinfo.pck_div = pckd; + ctx->dispc_cinfo.lck = lck; + ctx->dispc_cinfo.pck = pck; + + return true; +} + +static bool dpi_calc_dss_cb(unsigned long fck, void *data) +{ + struct sdi_clk_calc_ctx *ctx = data; + + ctx->fck = fck; + + return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max, + dpi_calc_dispc_cb, ctx); +} + +static int sdi_calc_clock_div(unsigned long pclk, + unsigned long *fck, + struct dispc_clock_info *dispc_cinfo) +{ + int i; + struct sdi_clk_calc_ctx ctx; + + /* + * DSS fclk gives us very few possibilities, so finding a good pixel + * clock may not be possible. We try multiple times to find the clock, + * each time widening the pixel clock range we look for, up to + * +/- 1MHz. + */ + + for (i = 0; i < 10; ++i) { + bool ok; + + memset(&ctx, 0, sizeof(ctx)); + if (pclk > 1000 * i * i * i) + ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu); + else + ctx.pck_min = 0; + ctx.pck_max = pclk + 1000 * i * i * i; + + ok = dss_div_calc(pclk, ctx.pck_min, dpi_calc_dss_cb, &ctx); + if (ok) { + *fck = ctx.fck; + *dispc_cinfo = ctx.dispc_cinfo; + return 0; + } + } + + return -EINVAL; +} + +static void sdi_config_lcd_manager(struct omap_dss_device *dssdev) +{ + struct omap_overlay_manager *mgr = sdi.output.manager; + + sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + sdi.mgr_config.stallmode = false; + sdi.mgr_config.fifohandcheck = false; + + sdi.mgr_config.video_port_width = 24; + sdi.mgr_config.lcden_sig_polarity = 1; + + dss_mgr_set_lcd_config(mgr, &sdi.mgr_config); +} + +static int sdi_display_enable(struct omap_dss_device *dssdev) +{ + struct omap_dss_device *out = &sdi.output; + struct omap_video_timings *t = &sdi.timings; + unsigned long fck; + struct dispc_clock_info dispc_cinfo; + unsigned long pck; + int r; + + if (out->manager == NULL) { + DSSERR("failed to enable display: no output/manager\n"); + return -ENODEV; + } + + r = regulator_enable(sdi.vdds_sdi_reg); + if (r) + goto err_reg_enable; + + r = dispc_runtime_get(); + if (r) + goto err_get_dispc; + + /* 15.5.9.1.2 */ + t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + + r = sdi_calc_clock_div(t->pixelclock, &fck, &dispc_cinfo); + if (r) + goto err_calc_clock_div; + + sdi.mgr_config.clock_info = dispc_cinfo; + + pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div; + + if (pck != t->pixelclock) { + DSSWARN("Could not find exact pixel clock. 
Requested %d Hz, got %lu Hz\n", + t->pixelclock, pck); + + t->pixelclock = pck; + } + + + dss_mgr_set_timings(out->manager, t); + + r = dss_set_fck_rate(fck); + if (r) + goto err_set_dss_clock_div; + + sdi_config_lcd_manager(dssdev); + + /* + * LCLK and PCLK divisors are located in shadow registers, and we + * normally write them to DISPC registers when enabling the output. + * However, SDI uses pck-free as source clock for its PLL, and pck-free + * is affected by the divisors. And as we need the PLL before enabling + * the output, we need to write the divisors early. + * + * It seems just writing to the DISPC register is enough, and we don't + * need to care about the shadow register mechanism for pck-free. The + * exact reason for this is unknown. + */ + dispc_mgr_set_clock_div(out->manager->id, &sdi.mgr_config.clock_info); + + dss_sdi_init(sdi.datapairs); + r = dss_sdi_enable(); + if (r) + goto err_sdi_enable; + mdelay(2); + + r = dss_mgr_enable(out->manager); + if (r) + goto err_mgr_enable; + + return 0; + +err_mgr_enable: + dss_sdi_disable(); +err_sdi_enable: +err_set_dss_clock_div: +err_calc_clock_div: + dispc_runtime_put(); +err_get_dispc: + regulator_disable(sdi.vdds_sdi_reg); +err_reg_enable: + return r; +} + +static void sdi_display_disable(struct omap_dss_device *dssdev) +{ + struct omap_overlay_manager *mgr = sdi.output.manager; + + dss_mgr_disable(mgr); + + dss_sdi_disable(); + + dispc_runtime_put(); + + regulator_disable(sdi.vdds_sdi_reg); +} + +static void sdi_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + sdi.timings = *timings; +} + +static void sdi_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + *timings = sdi.timings; +} + +static int sdi_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + struct omap_overlay_manager *mgr = sdi.output.manager; + + if (mgr && !dispc_mgr_timings_ok(mgr->id, timings)) + return -EINVAL; + + if (timings->pixelclock == 0) + return -EINVAL; + + return 0; +} + +static void sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs) +{ + sdi.datapairs = datapairs; +} + +static int sdi_init_regulator(void) +{ + struct regulator *vdds_sdi; + + if (sdi.vdds_sdi_reg) + return 0; + + vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi"); + if (IS_ERR(vdds_sdi)) { + if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER) + DSSERR("can't get VDDS_SDI regulator\n"); + return PTR_ERR(vdds_sdi); + } + + sdi.vdds_sdi_reg = vdds_sdi; + + return 0; +} + +static int sdi_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct omap_overlay_manager *mgr; + int r; + + r = sdi_init_regulator(); + if (r) + return r; + + mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); + if (!mgr) + return -ENODEV; + + r = dss_mgr_connect(mgr, dssdev); + if (r) + return r; + + r = omapdss_output_set_device(dssdev, dst); + if (r) { + DSSERR("failed to connect output to new device: %s\n", + dst->name); + dss_mgr_disconnect(mgr, dssdev); + return r; + } + + return 0; +} + +static void sdi_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + WARN_ON(dst != dssdev->dst); + + if (dst != dssdev->dst) + return; + + omapdss_output_unset_device(dssdev); + + if (dssdev->manager) + dss_mgr_disconnect(dssdev->manager, dssdev); +} + +static const struct omapdss_sdi_ops sdi_ops = { + .connect = sdi_connect, + .disconnect = sdi_disconnect, + + .enable = sdi_display_enable, + .disable = sdi_display_disable, + + .check_timings = 
sdi_check_timings, + .set_timings = sdi_set_timings, + .get_timings = sdi_get_timings, + + .set_datapairs = sdi_set_datapairs, +}; + +static void sdi_init_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &sdi.output; + + out->dev = &pdev->dev; + out->id = OMAP_DSS_OUTPUT_SDI; + out->output_type = OMAP_DISPLAY_TYPE_SDI; + out->name = "sdi.0"; + out->dispc_channel = OMAP_DSS_CHANNEL_LCD; + /* We have SDI only on OMAP3, where it's on port 1 */ + out->port_num = 1; + out->ops.sdi = &sdi_ops; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void sdi_uninit_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &sdi.output; + + omapdss_unregister_output(out); +} + +static int sdi_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + sdi.pdev = pdev; + + sdi_init_output(pdev); + + return 0; +} + +static void sdi_unbind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + sdi_uninit_output(pdev); +} + +static const struct component_ops sdi_component_ops = { + .bind = sdi_bind, + .unbind = sdi_unbind, +}; + +static int sdi_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &sdi_component_ops); +} + +static int sdi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &sdi_component_ops); + return 0; +} + +static struct platform_driver omap_sdi_driver = { + .probe = sdi_probe, + .remove = sdi_remove, + .driver = { + .name = "omapdss_sdi", + .suppress_bind_attrs = true, + }, +}; + +int __init sdi_init_platform_driver(void) +{ + return platform_driver_register(&omap_sdi_driver); +} + +void sdi_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_sdi_driver); +} + +int sdi_init_port(struct platform_device *pdev, struct device_node *port) +{ + struct device_node *ep; + u32 datapairs; + int r; + + ep = omapdss_of_get_next_endpoint(port, NULL); + if (!ep) + return 0; + + r = of_property_read_u32(ep, "datapairs", &datapairs); + if (r) { + DSSERR("failed to parse datapairs\n"); + goto err_datapairs; + } + + sdi.datapairs = datapairs; + + of_node_put(ep); + + sdi.pdev = pdev; + + sdi_init_output(pdev); + + sdi.port_initialized = true; + + return 0; + +err_datapairs: + of_node_put(ep); + + return r; +} + +void sdi_uninit_port(struct device_node *port) +{ + if (!sdi.port_initialized) + return; + + sdi_uninit_output(sdi.pdev); +} diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c new file mode 100644 index 000000000000..08f9def76e27 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -0,0 +1,1009 @@ +/* + * linux/drivers/video/omap2/dss/venc.c + * + * Copyright (C) 2009 Nokia Corporation + * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * + * VENC settings from TI's DSS driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define DSS_SUBSYS_NAME "VENC" + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/mutex.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/seq_file.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/component.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +/* Venc registers */ +#define VENC_REV_ID 0x00 +#define VENC_STATUS 0x04 +#define VENC_F_CONTROL 0x08 +#define VENC_VIDOUT_CTRL 0x10 +#define VENC_SYNC_CTRL 0x14 +#define VENC_LLEN 0x1C +#define VENC_FLENS 0x20 +#define VENC_HFLTR_CTRL 0x24 +#define VENC_CC_CARR_WSS_CARR 0x28 +#define VENC_C_PHASE 0x2C +#define VENC_GAIN_U 0x30 +#define VENC_GAIN_V 0x34 +#define VENC_GAIN_Y 0x38 +#define VENC_BLACK_LEVEL 0x3C +#define VENC_BLANK_LEVEL 0x40 +#define VENC_X_COLOR 0x44 +#define VENC_M_CONTROL 0x48 +#define VENC_BSTAMP_WSS_DATA 0x4C +#define VENC_S_CARR 0x50 +#define VENC_LINE21 0x54 +#define VENC_LN_SEL 0x58 +#define VENC_L21__WC_CTL 0x5C +#define VENC_HTRIGGER_VTRIGGER 0x60 +#define VENC_SAVID__EAVID 0x64 +#define VENC_FLEN__FAL 0x68 +#define VENC_LAL__PHASE_RESET 0x6C +#define VENC_HS_INT_START_STOP_X 0x70 +#define VENC_HS_EXT_START_STOP_X 0x74 +#define VENC_VS_INT_START_X 0x78 +#define VENC_VS_INT_STOP_X__VS_INT_START_Y 0x7C +#define VENC_VS_INT_STOP_Y__VS_EXT_START_X 0x80 +#define VENC_VS_EXT_STOP_X__VS_EXT_START_Y 0x84 +#define VENC_VS_EXT_STOP_Y 0x88 +#define VENC_AVID_START_STOP_X 0x90 +#define VENC_AVID_START_STOP_Y 0x94 +#define VENC_FID_INT_START_X__FID_INT_START_Y 0xA0 +#define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X 0xA4 +#define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y 0xA8 +#define VENC_TVDETGP_INT_START_STOP_X 0xB0 +#define VENC_TVDETGP_INT_START_STOP_Y 0xB4 +#define VENC_GEN_CTRL 0xB8 +#define VENC_OUTPUT_CONTROL 0xC4 +#define VENC_OUTPUT_TEST 0xC8 +#define VENC_DAC_B__DAC_C 0xC8 + +struct venc_config { + u32 f_control; + u32 vidout_ctrl; + u32 sync_ctrl; + u32 llen; + u32 flens; + u32 hfltr_ctrl; + u32 cc_carr_wss_carr; + u32 c_phase; + u32 gain_u; + u32 gain_v; + u32 gain_y; + u32 black_level; + u32 blank_level; + u32 x_color; + u32 m_control; + u32 bstamp_wss_data; + u32 s_carr; + u32 line21; + u32 ln_sel; + u32 l21__wc_ctl; + u32 htrigger_vtrigger; + u32 savid__eavid; + u32 flen__fal; + u32 lal__phase_reset; + u32 hs_int_start_stop_x; + u32 hs_ext_start_stop_x; + u32 vs_int_start_x; + u32 vs_int_stop_x__vs_int_start_y; + u32 vs_int_stop_y__vs_ext_start_x; + u32 vs_ext_stop_x__vs_ext_start_y; + u32 vs_ext_stop_y; + u32 avid_start_stop_x; + u32 avid_start_stop_y; + u32 fid_int_start_x__fid_int_start_y; + u32 fid_int_offset_y__fid_ext_start_x; + u32 fid_ext_start_y__fid_ext_offset_y; + u32 tvdetgp_int_start_stop_x; + u32 tvdetgp_int_start_stop_y; + u32 gen_ctrl; +}; + +/* from TRM */ +static const struct venc_config venc_config_pal_trm = { + .f_control = 0, + .vidout_ctrl = 1, + .sync_ctrl = 0x40, + .llen = 0x35F, /* 863 */ + .flens = 0x270, /* 624 */ + .hfltr_ctrl = 0, + .cc_carr_wss_carr = 0x2F7225ED, + .c_phase = 0, + .gain_u = 0x111, + .gain_v = 0x181, + .gain_y = 0x140, + .black_level = 0x3B, + .blank_level = 0x3B, + .x_color = 0x7, + .m_control = 0x2, + .bstamp_wss_data = 0x3F, + .s_carr = 0x2A098ACB, + .line21 = 0, + .ln_sel = 0x01290015, + .l21__wc_ctl = 0x0000F603, + .htrigger_vtrigger = 0, + + .savid__eavid = 0x06A70108, + .flen__fal = 
0x00180270, + .lal__phase_reset = 0x00040135, + .hs_int_start_stop_x = 0x00880358, + .hs_ext_start_stop_x = 0x000F035F, + .vs_int_start_x = 0x01A70000, + .vs_int_stop_x__vs_int_start_y = 0x000001A7, + .vs_int_stop_y__vs_ext_start_x = 0x01AF0000, + .vs_ext_stop_x__vs_ext_start_y = 0x000101AF, + .vs_ext_stop_y = 0x00000025, + .avid_start_stop_x = 0x03530083, + .avid_start_stop_y = 0x026C002E, + .fid_int_start_x__fid_int_start_y = 0x0001008A, + .fid_int_offset_y__fid_ext_start_x = 0x002E0138, + .fid_ext_start_y__fid_ext_offset_y = 0x01380001, + + .tvdetgp_int_start_stop_x = 0x00140001, + .tvdetgp_int_start_stop_y = 0x00010001, + .gen_ctrl = 0x00FF0000, +}; + +/* from TRM */ +static const struct venc_config venc_config_ntsc_trm = { + .f_control = 0, + .vidout_ctrl = 1, + .sync_ctrl = 0x8040, + .llen = 0x359, + .flens = 0x20C, + .hfltr_ctrl = 0, + .cc_carr_wss_carr = 0x043F2631, + .c_phase = 0, + .gain_u = 0x102, + .gain_v = 0x16C, + .gain_y = 0x12F, + .black_level = 0x43, + .blank_level = 0x38, + .x_color = 0x7, + .m_control = 0x1, + .bstamp_wss_data = 0x38, + .s_carr = 0x21F07C1F, + .line21 = 0, + .ln_sel = 0x01310011, + .l21__wc_ctl = 0x0000F003, + .htrigger_vtrigger = 0, + + .savid__eavid = 0x069300F4, + .flen__fal = 0x0016020C, + .lal__phase_reset = 0x00060107, + .hs_int_start_stop_x = 0x008E0350, + .hs_ext_start_stop_x = 0x000F0359, + .vs_int_start_x = 0x01A00000, + .vs_int_stop_x__vs_int_start_y = 0x020701A0, + .vs_int_stop_y__vs_ext_start_x = 0x01AC0024, + .vs_ext_stop_x__vs_ext_start_y = 0x020D01AC, + .vs_ext_stop_y = 0x00000006, + .avid_start_stop_x = 0x03480078, + .avid_start_stop_y = 0x02060024, + .fid_int_start_x__fid_int_start_y = 0x0001008A, + .fid_int_offset_y__fid_ext_start_x = 0x01AC0106, + .fid_ext_start_y__fid_ext_offset_y = 0x01060006, + + .tvdetgp_int_start_stop_x = 0x00140001, + .tvdetgp_int_start_stop_y = 0x00010001, + .gen_ctrl = 0x00F90000, +}; + +static const struct venc_config venc_config_pal_bdghi = { + .f_control = 0, + .vidout_ctrl = 0, + .sync_ctrl = 0, + .hfltr_ctrl = 0, + .x_color = 0, + .line21 = 0, + .ln_sel = 21, + .htrigger_vtrigger = 0, + .tvdetgp_int_start_stop_x = 0x00140001, + .tvdetgp_int_start_stop_y = 0x00010001, + .gen_ctrl = 0x00FB0000, + + .llen = 864-1, + .flens = 625-1, + .cc_carr_wss_carr = 0x2F7625ED, + .c_phase = 0xDF, + .gain_u = 0x111, + .gain_v = 0x181, + .gain_y = 0x140, + .black_level = 0x3e, + .blank_level = 0x3e, + .m_control = 0<<2 | 1<<1, + .bstamp_wss_data = 0x42, + .s_carr = 0x2a098acb, + .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0, + .savid__eavid = 0x06A70108, + .flen__fal = 23<<16 | 624<<0, + .lal__phase_reset = 2<<17 | 310<<0, + .hs_int_start_stop_x = 0x00920358, + .hs_ext_start_stop_x = 0x000F035F, + .vs_int_start_x = 0x1a7<<16, + .vs_int_stop_x__vs_int_start_y = 0x000601A7, + .vs_int_stop_y__vs_ext_start_x = 0x01AF0036, + .vs_ext_stop_x__vs_ext_start_y = 0x27101af, + .vs_ext_stop_y = 0x05, + .avid_start_stop_x = 0x03530082, + .avid_start_stop_y = 0x0270002E, + .fid_int_start_x__fid_int_start_y = 0x0005008A, + .fid_int_offset_y__fid_ext_start_x = 0x002E0138, + .fid_ext_start_y__fid_ext_offset_y = 0x01380005, +}; + +const struct omap_video_timings omap_dss_pal_timings = { + .x_res = 720, + .y_res = 574, + .pixelclock = 13500000, + .hsw = 64, + .hfp = 12, + .hbp = 68, + .vsw = 5, + .vfp = 5, + .vbp = 41, + + .interlace = true, + + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = 
OMAPDSS_DRIVE_SIG_FALLING_EDGE, +}; +EXPORT_SYMBOL(omap_dss_pal_timings); + +const struct omap_video_timings omap_dss_ntsc_timings = { + .x_res = 720, + .y_res = 482, + .pixelclock = 13500000, + .hsw = 64, + .hfp = 16, + .hbp = 58, + .vsw = 6, + .vfp = 6, + .vbp = 31, + + .interlace = true, + + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, +}; +EXPORT_SYMBOL(omap_dss_ntsc_timings); + +static struct { + struct platform_device *pdev; + void __iomem *base; + struct mutex venc_lock; + u32 wss_data; + struct regulator *vdda_dac_reg; + + struct clk *tv_dac_clk; + + struct omap_video_timings timings; + enum omap_dss_venc_type type; + bool invert_polarity; + + struct omap_dss_device output; +} venc; + +static inline void venc_write_reg(int idx, u32 val) +{ + __raw_writel(val, venc.base + idx); +} + +static inline u32 venc_read_reg(int idx) +{ + u32 l = __raw_readl(venc.base + idx); + return l; +} + +static void venc_write_config(const struct venc_config *config) +{ + DSSDBG("write venc conf\n"); + + venc_write_reg(VENC_LLEN, config->llen); + venc_write_reg(VENC_FLENS, config->flens); + venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr); + venc_write_reg(VENC_C_PHASE, config->c_phase); + venc_write_reg(VENC_GAIN_U, config->gain_u); + venc_write_reg(VENC_GAIN_V, config->gain_v); + venc_write_reg(VENC_GAIN_Y, config->gain_y); + venc_write_reg(VENC_BLACK_LEVEL, config->black_level); + venc_write_reg(VENC_BLANK_LEVEL, config->blank_level); + venc_write_reg(VENC_M_CONTROL, config->m_control); + venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | + venc.wss_data); + venc_write_reg(VENC_S_CARR, config->s_carr); + venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl); + venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid); + venc_write_reg(VENC_FLEN__FAL, config->flen__fal); + venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset); + venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x); + venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x); + venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x); + venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y, + config->vs_int_stop_x__vs_int_start_y); + venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X, + config->vs_int_stop_y__vs_ext_start_x); + venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y, + config->vs_ext_stop_x__vs_ext_start_y); + venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y); + venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x); + venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y); + venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y, + config->fid_int_start_x__fid_int_start_y); + venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X, + config->fid_int_offset_y__fid_ext_start_x); + venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y, + config->fid_ext_start_y__fid_ext_offset_y); + + venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C)); + venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl); + venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl); + venc_write_reg(VENC_X_COLOR, config->x_color); + venc_write_reg(VENC_LINE21, config->line21); + venc_write_reg(VENC_LN_SEL, config->ln_sel); + venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger); + venc_write_reg(VENC_TVDETGP_INT_START_STOP_X, + config->tvdetgp_int_start_stop_x); + 
venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y, + config->tvdetgp_int_start_stop_y); + venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl); + venc_write_reg(VENC_F_CONTROL, config->f_control); + venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl); +} + +static void venc_reset(void) +{ + int t = 1000; + + venc_write_reg(VENC_F_CONTROL, 1<<8); + while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) { + if (--t == 0) { + DSSERR("Failed to reset venc\n"); + return; + } + } + +#ifdef CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET + /* the magical sleep that makes things work */ + /* XXX more info? What bug this circumvents? */ + msleep(20); +#endif +} + +static int venc_runtime_get(void) +{ + int r; + + DSSDBG("venc_runtime_get\n"); + + r = pm_runtime_get_sync(&venc.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +static void venc_runtime_put(void) +{ + int r; + + DSSDBG("venc_runtime_put\n"); + + r = pm_runtime_put_sync(&venc.pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); +} + +static const struct venc_config *venc_timings_to_config( + struct omap_video_timings *timings) +{ + if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0) + return &venc_config_pal_trm; + + if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0) + return &venc_config_ntsc_trm; + + BUG(); + return NULL; +} + +static int venc_power_on(struct omap_dss_device *dssdev) +{ + struct omap_overlay_manager *mgr = venc.output.manager; + u32 l; + int r; + + r = venc_runtime_get(); + if (r) + goto err0; + + venc_reset(); + venc_write_config(venc_timings_to_config(&venc.timings)); + + dss_set_venc_output(venc.type); + dss_set_dac_pwrdn_bgz(1); + + l = 0; + + if (venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE) + l |= 1 << 1; + else /* S-Video */ + l |= (1 << 0) | (1 << 2); + + if (venc.invert_polarity == false) + l |= 1 << 3; + + venc_write_reg(VENC_OUTPUT_CONTROL, l); + + dss_mgr_set_timings(mgr, &venc.timings); + + r = regulator_enable(venc.vdda_dac_reg); + if (r) + goto err1; + + r = dss_mgr_enable(mgr); + if (r) + goto err2; + + return 0; + +err2: + regulator_disable(venc.vdda_dac_reg); +err1: + venc_write_reg(VENC_OUTPUT_CONTROL, 0); + dss_set_dac_pwrdn_bgz(0); + + venc_runtime_put(); +err0: + return r; +} + +static void venc_power_off(struct omap_dss_device *dssdev) +{ + struct omap_overlay_manager *mgr = venc.output.manager; + + venc_write_reg(VENC_OUTPUT_CONTROL, 0); + dss_set_dac_pwrdn_bgz(0); + + dss_mgr_disable(mgr); + + regulator_disable(venc.vdda_dac_reg); + + venc_runtime_put(); +} + +static int venc_display_enable(struct omap_dss_device *dssdev) +{ + struct omap_dss_device *out = &venc.output; + int r; + + DSSDBG("venc_display_enable\n"); + + mutex_lock(&venc.venc_lock); + + if (out->manager == NULL) { + DSSERR("Failed to enable display: no output/manager\n"); + r = -ENODEV; + goto err0; + } + + r = venc_power_on(dssdev); + if (r) + goto err0; + + venc.wss_data = 0; + + mutex_unlock(&venc.venc_lock); + + return 0; +err0: + mutex_unlock(&venc.venc_lock); + return r; +} + +static void venc_display_disable(struct omap_dss_device *dssdev) +{ + DSSDBG("venc_display_disable\n"); + + mutex_lock(&venc.venc_lock); + + venc_power_off(dssdev); + + mutex_unlock(&venc.venc_lock); +} + +static void venc_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + DSSDBG("venc_set_timings\n"); + + mutex_lock(&venc.venc_lock); + + /* Reset WSS data when the TV standard changes. 
*/ + if (memcmp(&venc.timings, timings, sizeof(*timings))) + venc.wss_data = 0; + + venc.timings = *timings; + + dispc_set_tv_pclk(13500000); + + mutex_unlock(&venc.venc_lock); +} + +static int venc_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + DSSDBG("venc_check_timings\n"); + + if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0) + return 0; + + if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0) + return 0; + + return -EINVAL; +} + +static void venc_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + mutex_lock(&venc.venc_lock); + + *timings = venc.timings; + + mutex_unlock(&venc.venc_lock); +} + +static u32 venc_get_wss(struct omap_dss_device *dssdev) +{ + /* Invert due to VENC_L21_WC_CTL:INV=1 */ + return (venc.wss_data >> 8) ^ 0xfffff; +} + +static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) +{ + const struct venc_config *config; + int r; + + DSSDBG("venc_set_wss\n"); + + mutex_lock(&venc.venc_lock); + + config = venc_timings_to_config(&venc.timings); + + /* Invert due to VENC_L21_WC_CTL:INV=1 */ + venc.wss_data = (wss ^ 0xfffff) << 8; + + r = venc_runtime_get(); + if (r) + goto err; + + venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | + venc.wss_data); + + venc_runtime_put(); + +err: + mutex_unlock(&venc.venc_lock); + + return r; +} + +static void venc_set_type(struct omap_dss_device *dssdev, + enum omap_dss_venc_type type) +{ + mutex_lock(&venc.venc_lock); + + venc.type = type; + + mutex_unlock(&venc.venc_lock); +} + +static void venc_invert_vid_out_polarity(struct omap_dss_device *dssdev, + bool invert_polarity) +{ + mutex_lock(&venc.venc_lock); + + venc.invert_polarity = invert_polarity; + + mutex_unlock(&venc.venc_lock); +} + +static int venc_init_regulator(void) +{ + struct regulator *vdda_dac; + + if (venc.vdda_dac_reg != NULL) + return 0; + + if (venc.pdev->dev.of_node) + vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda"); + else + vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda_dac"); + + if (IS_ERR(vdda_dac)) { + if (PTR_ERR(vdda_dac) != -EPROBE_DEFER) + DSSERR("can't get VDDA_DAC regulator\n"); + return PTR_ERR(vdda_dac); + } + + venc.vdda_dac_reg = vdda_dac; + + return 0; +} + +static void venc_dump_regs(struct seq_file *s) +{ +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) + + if (venc_runtime_get()) + return; + + DUMPREG(VENC_F_CONTROL); + DUMPREG(VENC_VIDOUT_CTRL); + DUMPREG(VENC_SYNC_CTRL); + DUMPREG(VENC_LLEN); + DUMPREG(VENC_FLENS); + DUMPREG(VENC_HFLTR_CTRL); + DUMPREG(VENC_CC_CARR_WSS_CARR); + DUMPREG(VENC_C_PHASE); + DUMPREG(VENC_GAIN_U); + DUMPREG(VENC_GAIN_V); + DUMPREG(VENC_GAIN_Y); + DUMPREG(VENC_BLACK_LEVEL); + DUMPREG(VENC_BLANK_LEVEL); + DUMPREG(VENC_X_COLOR); + DUMPREG(VENC_M_CONTROL); + DUMPREG(VENC_BSTAMP_WSS_DATA); + DUMPREG(VENC_S_CARR); + DUMPREG(VENC_LINE21); + DUMPREG(VENC_LN_SEL); + DUMPREG(VENC_L21__WC_CTL); + DUMPREG(VENC_HTRIGGER_VTRIGGER); + DUMPREG(VENC_SAVID__EAVID); + DUMPREG(VENC_FLEN__FAL); + DUMPREG(VENC_LAL__PHASE_RESET); + DUMPREG(VENC_HS_INT_START_STOP_X); + DUMPREG(VENC_HS_EXT_START_STOP_X); + DUMPREG(VENC_VS_INT_START_X); + DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y); + DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X); + DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y); + DUMPREG(VENC_VS_EXT_STOP_Y); + DUMPREG(VENC_AVID_START_STOP_X); + DUMPREG(VENC_AVID_START_STOP_Y); + DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y); + DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X); + 
DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y); + DUMPREG(VENC_TVDETGP_INT_START_STOP_X); + DUMPREG(VENC_TVDETGP_INT_START_STOP_Y); + DUMPREG(VENC_GEN_CTRL); + DUMPREG(VENC_OUTPUT_CONTROL); + DUMPREG(VENC_OUTPUT_TEST); + + venc_runtime_put(); + +#undef DUMPREG +} + +static int venc_get_clocks(struct platform_device *pdev) +{ + struct clk *clk; + + if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) { + clk = devm_clk_get(&pdev->dev, "tv_dac_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get tv_dac_clk\n"); + return PTR_ERR(clk); + } + } else { + clk = NULL; + } + + venc.tv_dac_clk = clk; + + return 0; +} + +static int venc_connect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + struct omap_overlay_manager *mgr; + int r; + + r = venc_init_regulator(); + if (r) + return r; + + mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel); + if (!mgr) + return -ENODEV; + + r = dss_mgr_connect(mgr, dssdev); + if (r) + return r; + + r = omapdss_output_set_device(dssdev, dst); + if (r) { + DSSERR("failed to connect output to new device: %s\n", + dst->name); + dss_mgr_disconnect(mgr, dssdev); + return r; + } + + return 0; +} + +static void venc_disconnect(struct omap_dss_device *dssdev, + struct omap_dss_device *dst) +{ + WARN_ON(dst != dssdev->dst); + + if (dst != dssdev->dst) + return; + + omapdss_output_unset_device(dssdev); + + if (dssdev->manager) + dss_mgr_disconnect(dssdev->manager, dssdev); +} + +static const struct omapdss_atv_ops venc_ops = { + .connect = venc_connect, + .disconnect = venc_disconnect, + + .enable = venc_display_enable, + .disable = venc_display_disable, + + .check_timings = venc_check_timings, + .set_timings = venc_set_timings, + .get_timings = venc_get_timings, + + .set_type = venc_set_type, + .invert_vid_out_polarity = venc_invert_vid_out_polarity, + + .set_wss = venc_set_wss, + .get_wss = venc_get_wss, +}; + +static void venc_init_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &venc.output; + + out->dev = &pdev->dev; + out->id = OMAP_DSS_OUTPUT_VENC; + out->output_type = OMAP_DISPLAY_TYPE_VENC; + out->name = "venc.0"; + out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; + out->ops.atv = &venc_ops; + out->owner = THIS_MODULE; + + omapdss_register_output(out); +} + +static void venc_uninit_output(struct platform_device *pdev) +{ + struct omap_dss_device *out = &venc.output; + + omapdss_unregister_output(out); +} + +static int venc_probe_of(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *ep; + u32 channels; + int r; + + ep = omapdss_of_get_first_endpoint(node); + if (!ep) + return 0; + + venc.invert_polarity = of_property_read_bool(ep, "ti,invert-polarity"); + + r = of_property_read_u32(ep, "ti,channels", &channels); + if (r) { + dev_err(&pdev->dev, + "failed to read property 'ti,channels': %d\n", r); + goto err; + } + + switch (channels) { + case 1: + venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE; + break; + case 2: + venc.type = OMAP_DSS_VENC_TYPE_SVIDEO; + break; + default: + dev_err(&pdev->dev, "bad channel property '%d'\n", channels); + r = -EINVAL; + goto err; + } + + of_node_put(ep); + + return 0; +err: + of_node_put(ep); + + return r; +} + +/* VENC HW IP initialisation */ +static int venc_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + u8 rev_id; + struct resource *venc_mem; + int r; + + venc.pdev = pdev; + + mutex_init(&venc.venc_lock); + + venc.wss_data = 0; + + venc_mem = platform_get_resource(venc.pdev, 
IORESOURCE_MEM, 0); + if (!venc_mem) { + DSSERR("can't get IORESOURCE_MEM VENC\n"); + return -EINVAL; + } + + venc.base = devm_ioremap(&pdev->dev, venc_mem->start, + resource_size(venc_mem)); + if (!venc.base) { + DSSERR("can't ioremap VENC\n"); + return -ENOMEM; + } + + r = venc_get_clocks(pdev); + if (r) + return r; + + pm_runtime_enable(&pdev->dev); + + r = venc_runtime_get(); + if (r) + goto err_runtime_get; + + rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff); + dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); + + venc_runtime_put(); + + if (pdev->dev.of_node) { + r = venc_probe_of(pdev); + if (r) { + DSSERR("Invalid DT data\n"); + goto err_probe_of; + } + } + + dss_debugfs_create_file("venc", venc_dump_regs); + + venc_init_output(pdev); + + return 0; + +err_probe_of: +err_runtime_get: + pm_runtime_disable(&pdev->dev); + return r; +} + +static void venc_unbind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + + venc_uninit_output(pdev); + + pm_runtime_disable(&pdev->dev); +} + +static const struct component_ops venc_component_ops = { + .bind = venc_bind, + .unbind = venc_unbind, +}; + +static int venc_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &venc_component_ops); +} + +static int venc_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &venc_component_ops); + return 0; +} + +static int venc_runtime_suspend(struct device *dev) +{ + if (venc.tv_dac_clk) + clk_disable_unprepare(venc.tv_dac_clk); + + dispc_runtime_put(); + + return 0; +} + +static int venc_runtime_resume(struct device *dev) +{ + int r; + + r = dispc_runtime_get(); + if (r < 0) + return r; + + if (venc.tv_dac_clk) + clk_prepare_enable(venc.tv_dac_clk); + + return 0; +} + +static const struct dev_pm_ops venc_pm_ops = { + .runtime_suspend = venc_runtime_suspend, + .runtime_resume = venc_runtime_resume, +}; + +static const struct of_device_id venc_of_match[] = { + { .compatible = "ti,omap2-venc", }, + { .compatible = "ti,omap3-venc", }, + { .compatible = "ti,omap4-venc", }, + {}, +}; + +static struct platform_driver omap_venchw_driver = { + .probe = venc_probe, + .remove = venc_remove, + .driver = { + .name = "omapdss_venc", + .pm = &venc_pm_ops, + .of_match_table = venc_of_match, + .suppress_bind_attrs = true, + }, +}; + +int __init venc_init_platform_driver(void) +{ + return platform_driver_register(&omap_venchw_driver); +} + +void venc_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_venchw_driver); +} diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c new file mode 100644 index 000000000000..b1ec59e42940 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -0,0 +1,211 @@ +/* +* Copyright (C) 2014 Texas Instruments Ltd +* +* This program is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 as published by +* the Free Software Foundation. +* +* You should have received a copy of the GNU General Public License along with +* this program. If not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/sched.h> + +#include <video/omapdss.h> + +#include "dss.h" +#include "dss_features.h" + +struct dss_video_pll { + struct dss_pll pll; + + struct device *dev; + + void __iomem *clkctrl_base; +}; + +#define REG_MOD(reg, val, start, end) \ + writel_relaxed(FLD_MOD(readl_relaxed(reg), val, start, end), reg) + +static void dss_dpll_enable_scp_clk(struct dss_video_pll *vpll) +{ + REG_MOD(vpll->clkctrl_base, 1, 14, 14); /* CIO_CLK_ICG */ +} + +static void dss_dpll_disable_scp_clk(struct dss_video_pll *vpll) +{ + REG_MOD(vpll->clkctrl_base, 0, 14, 14); /* CIO_CLK_ICG */ +} + +static void dss_dpll_power_enable(struct dss_video_pll *vpll) +{ + REG_MOD(vpll->clkctrl_base, 2, 31, 30); /* PLL_POWER_ON_ALL */ + + /* + * DRA7x PLL CTRL's PLL_PWR_STATUS seems to always return 0, + * so we have to use fixed delay here. + */ + msleep(1); +} + +static void dss_dpll_power_disable(struct dss_video_pll *vpll) +{ + REG_MOD(vpll->clkctrl_base, 0, 31, 30); /* PLL_POWER_OFF */ +} + +static int dss_video_pll_enable(struct dss_pll *pll) +{ + struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll); + int r; + + r = dss_runtime_get(); + if (r) + return r; + + dss_ctrl_pll_enable(pll->id, true); + + dss_dpll_enable_scp_clk(vpll); + + r = dss_pll_wait_reset_done(pll); + if (r) + goto err_reset; + + dss_dpll_power_enable(vpll); + + return 0; + +err_reset: + dss_dpll_disable_scp_clk(vpll); + dss_ctrl_pll_enable(pll->id, false); + dss_runtime_put(); + + return r; +} + +static void dss_video_pll_disable(struct dss_pll *pll) +{ + struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll); + + dss_dpll_power_disable(vpll); + + dss_dpll_disable_scp_clk(vpll); + + dss_ctrl_pll_enable(pll->id, false); + + dss_runtime_put(); +} + +static const struct dss_pll_ops dss_pll_ops = { + .enable = dss_video_pll_enable, + .disable = dss_video_pll_disable, + .set_config = dss_pll_write_config_type_a, +}; + +static const struct dss_pll_hw dss_dra7_video_pll_hw = { + .n_max = (1 << 8) - 1, + .m_max = (1 << 12) - 1, + .mX_max = (1 << 5) - 1, + .fint_min = 500000, + .fint_max = 2500000, + .clkdco_max = 1800000000, + + .n_msb = 8, + .n_lsb = 1, + .m_msb = 20, + .m_lsb = 9, + + .mX_msb[0] = 25, + .mX_lsb[0] = 21, + .mX_msb[1] = 30, + .mX_lsb[1] = 26, + + .has_refsel = true, +}; + +struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id, + struct regulator *regulator) +{ + const char * const reg_name[] = { "pll1", "pll2" }; + const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" }; + const char * const clkin_name[] = { "video1_clk", "video2_clk" }; + + struct resource *res; + struct dss_video_pll *vpll; + void __iomem *pll_base, *clkctrl_base; + struct clk *clk; + struct dss_pll *pll; + int r; + + /* PLL CONTROL */ + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]); + if (!res) { + dev_err(&pdev->dev, + "missing platform resource data for pll%d\n", id); + return ERR_PTR(-ENODEV); + } + + pll_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pll_base)) { + dev_err(&pdev->dev, "failed to ioremap pll%d reg_name\n", id); + return ERR_CAST(pll_base); + } + + /* CLOCK CONTROL */ + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + clkctrl_name[id]); + if (!res) { + dev_err(&pdev->dev, + "missing platform resource data for pll%d\n", id); + return ERR_PTR(-ENODEV); + } 
+ + clkctrl_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(clkctrl_base)) { + dev_err(&pdev->dev, "failed to ioremap pll%d clkctrl\n", id); + return ERR_CAST(clkctrl_base); + } + + /* CLKIN */ + + clk = devm_clk_get(&pdev->dev, clkin_name[id]); + if (IS_ERR(clk)) { + DSSERR("can't get video pll clkin\n"); + return ERR_CAST(clk); + } + + vpll = devm_kzalloc(&pdev->dev, sizeof(*vpll), GFP_KERNEL); + if (!vpll) + return ERR_PTR(-ENOMEM); + + vpll->dev = &pdev->dev; + vpll->clkctrl_base = clkctrl_base; + + pll = &vpll->pll; + + pll->name = id == 0 ? "video0" : "video1"; + pll->id = id == 0 ? DSS_PLL_VIDEO1 : DSS_PLL_VIDEO2; + pll->clkin = clk; + pll->regulator = regulator; + pll->base = pll_base; + pll->hw = &dss_dra7_video_pll_hw; + pll->ops = &dss_pll_ops; + + r = dss_pll_register(pll); + if (r) + return ERR_PTR(r); + + return pll; +} + +void dss_video_pll_uninit(struct dss_pll *pll) +{ + dss_pll_unregister(pll); +} diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 984462622291..8495a1a4b617 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -19,6 +19,7 @@ #include <linux/shmem_fs.h> #include <linux/spinlock.h> +#include <linux/pfn_t.h> #include <drm/drm_vma_manager.h> @@ -402,7 +403,8 @@ static int fault_1d(struct drm_gem_object *obj, VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, pfn, pfn << PAGE_SHIFT); - return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); + return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, + __pfn_to_pfn_t(pfn, PFN_DEV)); } /* Special handling for the case of faulting in 2d tiled buffers */ @@ -496,7 +498,8 @@ static int fault_2d(struct drm_gem_object *obj, pfn, pfn << PAGE_SHIFT); for (i = n; i > 0; i--) { - vm_insert_mixed(vma, (unsigned long)vaddr, pfn); + vm_insert_mixed(vma, (unsigned long)vaddr, + __pfn_to_pfn_t(pfn, PFN_DEV)); pfn += priv->usergart[fmt].stride_pfn; vaddr += PAGE_SIZE * m; } diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 6bfc46369db1..367a916f364e 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -304,18 +304,10 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev, unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) & DENTIST_DPREFCLK_WDIVIDER_MASK) >> DENTIST_DPREFCLK_WDIVIDER_SHIFT; - - if (div < 128 && div >= 96) - div -= 64; - else if (div >= 64) - div = div / 2 - 16; - else if (div >= 8) - div /= 4; - else - div = 0; + div = radeon_audio_decode_dfs_div(div); if (div) - clock = rdev->clock.gpupll_outputfreq * 10 / div; + clock = clock * 100 / div; WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 9953356fe263..3cf04a2f44bb 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev, * number (coefficient of two integer numbers. 
DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ + if (ASIC_IS_DCE41(rdev)) { + unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) & + DENTIST_DPREFCLK_WDIVIDER_MASK) >> + DENTIST_DPREFCLK_WDIVIDER_SHIFT; + div = radeon_audio_decode_dfs_div(div); + + if (div) + clock = 100 * clock / div; + } + WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); WREG32(DCCG_AUDIO_DTO1_MODULE, clock); } diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 4aa5f755572b..13b6029d65cc 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -511,6 +511,11 @@ #define DCCG_AUDIO_DTO1_CNTL 0x05cc # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3) +#define DCE41_DENTIST_DISPCLK_CNTL 0x049c +# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24) +# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24) +# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24 + /* DCE 4.0 AFMT */ #define HDMI_CONTROL 0x7030 # define HDMI_KEEPOUT_MODE (1 << 0) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5ae6db98aa4d..78a51b3eda10 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -268,7 +268,7 @@ struct radeon_clock { uint32_t current_dispclk; uint32_t dp_extclk; uint32_t max_pixel_clock; - uint32_t gpupll_outputfreq; + uint32_t vco_freq; }; /* diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 08fc1b5effa8..de9a2ffcf5f7 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1106,6 +1106,31 @@ union firmware_info { ATOM_FIRMWARE_INFO_V2_2 info_22; }; +union igp_info { + struct _ATOM_INTEGRATED_SYSTEM_INFO info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; +}; + +static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + union igp_info *igp_info; + u8 frev, crev; + u16 data_offset; + + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *)(mode_info->atom_context->bios + + data_offset); + rdev->clock.vco_freq = + le32_to_cpu(igp_info->info_6.ulDentistVCOFreq); + } +} + bool radeon_atom_get_clock_info(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; @@ -1257,12 +1282,18 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) rdev->mode_info.firmware_flags = le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); - if (ASIC_IS_DCE8(rdev)) { - rdev->clock.gpupll_outputfreq = + if (ASIC_IS_DCE8(rdev)) + rdev->clock.vco_freq = le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq); - if (rdev->clock.gpupll_outputfreq == 0) - rdev->clock.gpupll_outputfreq = 360000; /* 3.6 GHz */ - } + else if (ASIC_IS_DCE5(rdev)) + rdev->clock.vco_freq = rdev->clock.current_dispclk; + else if (ASIC_IS_DCE41(rdev)) + radeon_atombios_get_dentist_vco_freq(rdev); + else + rdev->clock.vco_freq = rdev->clock.current_dispclk; + + if (rdev->clock.vco_freq == 0) + rdev->clock.vco_freq = 360000; /* 3.6 GHz */ return true; } @@ -1270,14 +1301,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) return false; } -union igp_info { - struct _ATOM_INTEGRATED_SYSTEM_INFO info; - struct 
_ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 {
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 2c02e99b5f95..b214663b370d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	struct radeon_connector_atom_dig *dig_connector =
-		radeon_connector->con_priv;
 
 	if (!dig || !dig->afmt)
 		return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
 	radeon_audio_write_speaker_allocation(encoder);
 	radeon_audio_write_sad_regs(encoder);
 	radeon_audio_write_latency_fields(encoder, mode);
-	if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-		radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-	else
-		radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+	radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
 	radeon_audio_set_audio_packet(encoder);
 	radeon_audio_select_pin(encoder);
 
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
 	if (radeon_encoder->audio && radeon_encoder->audio->dpms)
 		radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
 }
+
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+	if (div >= 8 && div < 64)
+		return (div - 8) * 25 + 200;
+	else if (div >= 64 && div < 96)
+		return (div - 64) * 50 + 1600;
+	else if (div >= 96 && div < 128)
+		return (div - 96) * 100 + 3200;
+	else
+		return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 059cc3012062..5c70cceaa4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
 void radeon_audio_mode_set(struct drm_encoder *encoder,
 	struct drm_display_mode *mode);
 void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b3bb92368ae0..298ea1c453c3 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1670,8 +1670,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
 	/* setup afmt */
 	radeon_afmt_init(rdev);
 
-	radeon_fbdev_init(rdev);
-	drm_kms_helper_poll_init(rdev->ddev);
+	if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
+		radeon_fbdev_init(rdev);
+		drm_kms_helper_poll_init(rdev->ddev);
+	}
 
 	/* do pm late init */
 	ret = radeon_pm_late_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3dcc5733ff69..e26c963f2e93 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -663,6 +663,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
 	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
 		args->operation = RADEON_VA_RESULT_ERROR;
+		radeon_bo_unreserve(rbo);
 		drm_gem_object_unreference_unlocked(gobj);
 		return -ENOENT;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 84d45633d28c..fb6ad143873f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
 #include "radeon.h"
 #include "radeon_trace.h"
 
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
 	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
 		      "better performance thanks to write-combining\n");
 	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+	/* For architectures that don't support WC memory,
+	 * mask out the WC flag from the BO
+	 */
+	if (!drm_arch_can_wc_memory())
+		bo->flags &= ~RADEON_GEM_GTT_WC;
 #endif
 
 	radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
index 07a0d378e122..a01efe39a820 100644
--- a/drivers/gpu/drm/radeon/vce_v1_0.c
+++ b/drivers/gpu/drm/radeon/vce_v1_0.c
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
 		return -EINVAL;
 	}
 
-	for (i = 0; i < sign->num; ++i) {
-		if (sign->val[i].chip_id == chip_id)
+	for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+		if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
 			break;
 	}
 
-	if (i == sign->num)
+	if (i == le32_to_cpu(sign->num))
 		return -EINVAL;
 
 	data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
 	data[1] = sign->val[i].nonce[1];
 	data[2] = sign->val[i].nonce[2];
 	data[3] = sign->val[i].nonce[3];
-	data[4] = sign->len + 64;
+	data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
 
 	memset(&data[5], 0, 44);
 	memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
 
-	data += data[4] / 4;
+	data += le32_to_cpu(data[4]) / 4;
 	data[0] = sign->val[i].sigval[0];
 	data[1] = sign->val[i].sigval[1];
 	data[2] = sign->val[i].sigval[2];
 	data[3] = sign->val[i].sigval[3];
 
-	rdev->vce.keyselect = sign->val[i].keyselect;
+	rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index d4e0a39568f6..96dcd4a78951 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,6 @@
 config DRM_RCAR_DU
 	tristate "DRM Support for R-Car Display Unit"
-	depends on DRM && ARM && HAVE_DMA_ATTRS && OF
+	depends on DRM && ARM && OF
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index d1dc0f7b01db..f6a809afceec 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -2,11 +2,11 @@
 # Makefile for the drm device driver. This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
-		rockchip_drm_gem.o
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
+		rockchip_drm_gem.o rockchip_drm_vop.o
+rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
 
-obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o \
-			      rockchip_vop_reg.o
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 7bfe243c6173..f8f8f29fb7c3 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -461,10 +461,11 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
 
 static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi)
 {
-	unsigned int bpp, i, pre;
+	unsigned int i, pre;
 	unsigned long mpclk, pllref, tmp;
 	unsigned int m = 1, n = 1, target_mbps = 1000;
 	unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
+	int bpp;
 
 	bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
 	if (bpp < 0) {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8397d1b62ef9..a0d51ccb6ea4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -55,14 +55,12 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
 
 	return arm_iommu_attach_device(dev, mapping);
 }
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
 
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
 				    struct device *dev)
 {
 	arm_iommu_detach_device(dev);
 }
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
 
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
 				 const struct rockchip_crtc_funcs *crtc_funcs)
@@ -77,7 +75,6 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
 
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
 {
@@ -89,7 +86,6 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
 
 	priv->crtc_funcs[pipe] = NULL;
 }
-EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
 
 static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
 						int pipe)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index f7844883cb76..3b8f652698f8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -39,7 +39,6 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
 
 	return rk_fb->obj[plane];
 }
-EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
 
 static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
 {
@@ -177,8 +176,23 @@ static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
 		crtc_funcs->wait_for_update(crtc);
 }
 
+/*
+ * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
+ * have hardware counters for neither vblanks nor scanlines, which results in
+ * a race where:
+ *             | <-- HW vsync irq and reg take effect
+ * plane_commit --> |
+ * get_vblank and wait --> |
+ *             | <-- handle_vblank, vblank->count + 1
+ * cleanup_fb --> |
+ * iommu crash --> |
+ *             | <-- HW vsync irq and reg take effect
+ *
+ * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
+ * of waiting for vblank_count to change.
+ */
 static void
-rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
+rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
 	struct drm_crtc_state *old_crtc_state;
 	struct drm_crtc *crtc;
@@ -194,6 +208,10 @@ rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
 		if (!crtc->state->active)
 			continue;
 
+		if (!drm_atomic_helper_framebuffer_changed(dev,
+							   old_state, crtc))
+			continue;
+
 		ret = drm_crtc_vblank_get(crtc);
 		if (ret != 0)
 			continue;
@@ -241,7 +259,7 @@ rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
 
 	drm_atomic_helper_commit_planes(dev, state, true);
 
-	rockchip_atomic_wait_for_complete(state);
+	rockchip_atomic_wait_for_complete(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
index 50432e9b5b37..73718c5f5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -15,7 +15,18 @@
 #ifndef _ROCKCHIP_DRM_FBDEV_H
 #define _ROCKCHIP_DRM_FBDEV_H
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 int rockchip_drm_fbdev_init(struct drm_device *dev);
 void rockchip_drm_fbdev_fini(struct drm_device *dev);
+#else
+static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+#endif
 
 #endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index d908321b94ce..18e07338c6e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -234,13 +234,8 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
 	/*
 	 * align to 64 bytes since Mali requires it.
 	 */
-	min_pitch = ALIGN(min_pitch, 64);
-
-	if (args->pitch < min_pitch)
-		args->pitch = min_pitch;
-
-	if (args->size < args->pitch * args->height)
-		args->size = args->pitch * args->height;
+	args->pitch = ALIGN(min_pitch, 64);
+	args->size = args->pitch * args->height;
 
 	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
 						 &args->handle);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 46c2a8dfd8aa..fd370548d7d7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -43,8 +43,8 @@
 #define REG_SET(x, base, reg, v, mode) \
 		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
 
-#define REG_SET_MASK(x, base, reg, v, mode) \
-		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+#define REG_SET_MASK(x, base, reg, mask, v, mode) \
+		__REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
 
 #define VOP_WIN_SET(x, win, name, v) \
 	REG_SET(x, win->base, win->phy->name, v, RELAXED)
@@ -58,16 +58,18 @@
 #define VOP_INTR_GET(vop, name) \
 		vop_read_reg(vop, 0, &vop->data->ctrl->name)
 
-#define VOP_INTR_SET(vop, name, v) \
-		REG_SET(vop, 0, vop->data->intr->name, v, NORMAL)
+#define VOP_INTR_SET(vop, name, mask, v) \
+		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
 #define VOP_INTR_SET_TYPE(vop, name, type, v) \
 	do { \
-		int i, reg = 0; \
+		int i, reg = 0, mask = 0; \
 		for (i = 0; i < vop->data->intr->nintrs; i++) { \
-			if (vop->data->intr->intrs[i] & type) \
+			if (vop->data->intr->intrs[i] & type) { \
 				reg |= (v) << i; \
+				mask |= 1 << i; \
+			} \
 		} \
-		VOP_INTR_SET(vop, name, reg); \
+		VOP_INTR_SET(vop, name, mask, reg); \
 	} while (0)
 #define VOP_INTR_GET_TYPE(vop, name, type) \
 	vop_get_intr_type(vop, &vop->data->intr->name, type)
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index b9202aa6f8ab..8d17d00ddb4b 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,6 @@
 config DRM_SHMOBILE
 	tristate "DRM Support for SH Mobile"
-	depends on DRM && ARM && HAVE_DMA_ATTRS
+	depends on DRM && ARM
 	depends on ARCH_SHMOBILE || COMPILE_TEST
 	depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
 	select BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 10c1b1926e6f..5ad43a1bb260 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,6 @@
 config DRM_STI
 	tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
-	depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
+	depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
 	select RESET_CONTROLLER
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 78beafb0742c..f60a1ec84fa4 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -1,6 +1,6 @@
 config DRM_TILCDC
 	tristate "DRM Support for TI LCDC Display Controller"
-	depends on DRM && OF && ARM && HAVE_DMA_ATTRS
+	depends on DRM && OF && ARM
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8fb7213277cc..06d26dc438b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -35,6 +35,7 @@
 #include <ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
+#include <linux/pfn_t.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -229,7 +230,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 
 		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vm_insert_mixed(&cvma, address, pfn);
+			ret = vm_insert_mixed(&cvma, address,
+					__pfn_to_pfn_t(pfn, PFN_DEV));
 		else
 			ret = vm_insert_pfn(&cvma, address, pfn);
 
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 2d7d115ddf3f..584810474e5b 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -1,7 +1,7 @@
 config DRM_VC4
 	tristate "Broadcom VC4 Graphics"
 	depends on ARCH_BCM2835 || COMPILE_TEST
-	depends on DRM && HAVE_DMA_ATTRS
+	depends on DRM
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 424d515ffcda..314ff71db978 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -144,19 +144,16 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-/*
- * Asks the firmware to turn on power to the V3D engine.
- *
- * This may be doable with just the clocks interface, though this
- * packet does some other register setup from the firmware, too.
- */
 int
 vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
 {
-	if (on)
-		return pm_generic_poweroff(&vc4->v3d->pdev->dev);
-	else
-		return pm_generic_resume(&vc4->v3d->pdev->dev);
+	/* XXX: This interface is needed for GPU reset, and the way to
+	 * do it is to turn our power domain off and back on.  We
+	 * can't just reset from within the driver, because the reset
+	 * bits are in the power domain's register area, and get set
+	 * during the poweron process.
+	 */
+	return 0;
 }
 
 static void vc4_v3d_init_hw(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 0fb5b994b9dd..e26d9f6face3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -861,7 +861,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 
 		if (vbo->base.size < offset ||
 		    vbo->base.size - offset < attr_size) {
-			DRM_ERROR("BO offset overflow (%d + %d > %d)\n",
+			DRM_ERROR("BO offset overflow (%d + %d > %zu)\n",
 				  offset, attr_size, vbo->base.size);
 			return -EINVAL;
 		}
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 06496a128162..4150873d432e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -130,7 +130,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	static vq_callback_t *callbacks[] = {
 		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
 	};
-	static const char *names[] = { "control", "cursor" };
+	static const char * const names[] = { "control", "cursor" };
 
 	struct virtio_gpu_device *vgdev;
 	/* this will expand later */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c96a2d2d5107..0ee76e523a90 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -25,6 +25,7 @@
  *
  **************************************************************************/
 #include <linux/module.h>
+#include <linux/console.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -1527,6 +1528,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 static int __init vmwgfx_init(void)
 {
 	int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force())
+		return -EINVAL;
+#endif
+
 	ret = drm_pci_init(&driver, &vmw_pci_driver);
 	if (ret)
 		DRM_ERROR("Failed initializing DRM.\n");
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 63eb16bf2cf0..883a314cd83a 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -161,7 +161,7 @@ static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs)
 * The DRM pixel formats and IPU internal representation are ordered the other
 * way around, with the first named component ordered at the most significant
 * bits. Further, V4L2 formats are not well defined:
- * http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
+ * https://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
 * We choose the interpretation which matches GStreamer behavior.
 */
static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index d64d9058bce5..665ab9fd0e01 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -36,6 +36,7 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
@@ -918,17 +919,17 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev,
 		domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend;
 		domain->ops.runtime_resume = vga_switcheroo_runtime_resume;
 
-		dev->pm_domain = domain;
+		dev_pm_domain_set(dev, domain);
 		return 0;
 	}
-	dev->pm_domain = NULL;
+	dev_pm_domain_set(dev, NULL);
 	return -EINVAL;
 }
 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
 
 void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
 {
-	dev->pm_domain = NULL;
+	dev_pm_domain_set(dev, NULL);
 }
 EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
 
@@ -989,10 +990,10 @@ vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
 		domain->ops.runtime_resume =
 			vga_switcheroo_runtime_resume_hdmi_audio;
 
-		dev->pm_domain = domain;
+		dev_pm_domain_set(dev, domain);
 		return 0;
 	}
-	dev->pm_domain = NULL;
+	dev_pm_domain_set(dev, NULL);
 	return -EINVAL;
 }
 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 9abcaa53bd25..f17cb0431833 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1163,12 +1163,8 @@ done:
 
 static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
 {
-	struct vga_arb_private *priv = file->private_data;
-
 	pr_debug("%s\n", __func__);
 
-	if (priv == NULL)
-		return -ENODEV;
 	poll_wait(file, &vga_wait_queue, wait);
 	return POLLIN;
 }
@@ -1209,9 +1205,6 @@ static int vga_arb_release(struct inode *inode, struct file *file)
 
 	pr_debug("%s\n", __func__);
 
-	if (priv == NULL)
-		return -ENODEV;
-
 	spin_lock_irqsave(&vga_user_lock, flags);
 	list_del(&priv->list);
 	for (i = 0; i < MAX_USER_CARDS; i++) {