349 files changed, 10487 insertions, 8839 deletions
diff --git a/Documentation/driver-api/device-io.rst b/Documentation/driver-api/device-io.rst index e9f04b1815d1..4d2baac0311c 100644 --- a/Documentation/driver-api/device-io.rst +++ b/Documentation/driver-api/device-io.rst @@ -502,6 +502,15 @@ pcim_iomap() Not using these wrappers may make drivers unusable on certain platforms with stricter rules for mapping I/O memory. +Generalizing Access to System and I/O Memory +============================================ + +.. kernel-doc:: include/linux/iosys-map.h + :doc: overview + +.. kernel-doc:: include/linux/iosys-map.h + :internal: + Public Functions Provided ========================= diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 2cd7db82d9fe..55006678394a 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -128,15 +128,6 @@ Kernel Functions and Structures Reference .. kernel-doc:: include/linux/dma-buf.h :internal: -Buffer Mapping Helpers -~~~~~~~~~~~~~~~~~~~~~~ - -.. kernel-doc:: include/linux/dma-buf-map.h - :doc: overview - -.. kernel-doc:: include/linux/dma-buf-map.h - :internal: - Reservation Objects ------------------- diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 1b2372ef4131..ee842606e883 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -222,7 +222,7 @@ Convert drivers to use drm_fbdev_generic_setup() Most drivers can use drm_fbdev_generic_setup(). Driver have to implement atomic modesetting and GEM vmap support. Historically, generic fbdev emulation expected the framebuffer in system memory or system-like memory. By employing -struct dma_buf_map, drivers with frambuffers in I/O memory can be supported +struct iosys_map, drivers with frambuffers in I/O memory can be supported as well. Contact: Maintainer of the driver you plan to convert @@ -234,7 +234,7 @@ Reimplement functions in drm_fbdev_fb_ops without fbdev A number of callback functions in drm_fbdev_fb_ops could benefit from being rewritten without dependencies on the fbdev module. Some of the -helpers could further benefit from using struct dma_buf_map instead of +helpers could further benefit from using struct iosys_map instead of raw pointers. Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter @@ -434,19 +434,19 @@ Contact: Emil Velikov, respective driver maintainers Level: Intermediate -Use struct dma_buf_map throughout codebase ------------------------------------------- +Use struct iosys_map throughout codebase +---------------------------------------- -Pointers to shared device memory are stored in struct dma_buf_map. Each +Pointers to shared device memory are stored in struct iosys_map. Each instance knows whether it refers to system or I/O memory. Most of the DRM-wide -interface have been converted to use struct dma_buf_map, but implementations +interface have been converted to use struct iosys_map, but implementations often still use raw pointers. -The task is to use struct dma_buf_map where it makes sense. +The task is to use struct iosys_map where it makes sense. -* Memory managers should use struct dma_buf_map for dma-buf-imported buffers. -* TTM might benefit from using struct dma_buf_map internally. -* Framebuffer copying and blitting helpers should operate on struct dma_buf_map. +* Memory managers should use struct iosys_map for dma-buf-imported buffers. +* TTM might benefit from using struct iosys_map internally. +* Framebuffer copying and blitting helpers should operate on struct iosys_map. 
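As a quick orientation for the iosys_map conversion described above, here is a minimal sketch of how the helpers are used (vaddr, src, len and pitch are assumed to come from the caller; a buffer in I/O memory would be wrapped with iosys_map_set_vaddr_iomem() instead):

#include <linux/iosys-map.h>

struct iosys_map map = IOSYS_MAP_INIT_VADDR(vaddr); /* system-memory mapping */

if (iosys_map_is_null(&map))
	return -ENOMEM;

/* Works regardless of whether the map refers to system or I/O memory. */
iosys_map_memcpy_to(&map, 0, src, len);

/* Advance to the next scanline, e.g. while blitting a damage clip. */
iosys_map_incr(&map, pitch);
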
Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vetter diff --git a/MAINTAINERS b/MAINTAINERS index f41088418aae..1a18eafee497 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5734,7 +5734,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/driver-api/dma-buf.rst F: drivers/dma-buf/ F: include/linux/*fence.h -F: include/linux/dma-buf* +F: include/linux/dma-buf.h F: include/linux/dma-resv.h K: \bdma_(?:buf|fence|resv)\b @@ -10050,6 +10050,13 @@ F: include/linux/iova.h F: include/linux/of_iommu.h F: include/uapi/linux/iommu.h +IOSYS-MAP HELPERS +M: Thomas Zimmermann <tzimmermann@suse.de> +L: dri-devel@lists.freedesktop.org +S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc +F: include/linux/iosys-map.h + IO_URING M: Jens Axboe <axboe@kernel.dk> R: Pavel Begunkov <asml.silence@gmail.com> diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index dc7da08bc700..bd6dad83c65b 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -555,6 +555,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_RKL_IDS(&gen11_early_ops), INTEL_ADLS_IDS(&gen11_early_ops), INTEL_ADLP_IDS(&gen11_early_ops), + INTEL_ADLN_IDS(&gen11_early_ops), INTEL_RPLS_IDS(&gen11_early_ops), }; diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 602b12d7470d..df23239b04fc 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1047,8 +1047,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); * * Interfaces:: * - * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map) - * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map) + * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map) + * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map) * * The vmap call can fail if there is no vmap support in the exporter, or if * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference @@ -1260,12 +1260,12 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); * * Returns 0 on success, or a negative errno code otherwise. 
*/ -int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { - struct dma_buf_map ptr; + struct iosys_map ptr; int ret = 0; - dma_buf_map_clear(map); + iosys_map_clear(map); if (WARN_ON(!dmabuf)) return -EINVAL; @@ -1276,12 +1276,12 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) mutex_lock(&dmabuf->lock); if (dmabuf->vmapping_counter) { dmabuf->vmapping_counter++; - BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr)); + BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); *map = dmabuf->vmap_ptr; goto out_unlock; } - BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr)); + BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr)); ret = dmabuf->ops->vmap(dmabuf, &ptr); if (WARN_ON_ONCE(ret)) @@ -1303,20 +1303,20 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); * @dmabuf: [in] buffer to vunmap * @map: [in] vmap pointer to vunmap */ -void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { if (WARN_ON(!dmabuf)) return; - BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr)); + BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr)); BUG_ON(dmabuf->vmapping_counter == 0); - BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map)); + BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map)); mutex_lock(&dmabuf->lock); if (--dmabuf->vmapping_counter == 0) { if (dmabuf->ops->vunmap) dmabuf->ops->vunmap(dmabuf, map); - dma_buf_map_clear(&dmabuf->vmap_ptr); + iosys_map_clear(&dmabuf->vmap_ptr); } mutex_unlock(&dmabuf->lock); } diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 83f02bd51dda..28fb04eccdd0 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -202,7 +202,7 @@ static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) return vaddr; } -static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct cma_heap_buffer *buffer = dmabuf->priv; void *vaddr; @@ -211,7 +211,7 @@ static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) mutex_lock(&buffer->lock); if (buffer->vmap_cnt) { buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); goto out; } @@ -222,14 +222,14 @@ static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) } buffer->vaddr = vaddr; buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); out: mutex_unlock(&buffer->lock); return ret; } -static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct cma_heap_buffer *buffer = dmabuf->priv; @@ -239,7 +239,7 @@ static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) buffer->vaddr = NULL; } mutex_unlock(&buffer->lock); - dma_buf_map_clear(map); + iosys_map_clear(map); } static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index ab7fd896d2c4..fcf836ba9c1f 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -241,7 +241,7 @@ static void *system_heap_do_vmap(struct system_heap_buffer *buffer) return vaddr; } -static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct 
system_heap_buffer *buffer = dmabuf->priv; void *vaddr; @@ -250,7 +250,7 @@ static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) mutex_lock(&buffer->lock); if (buffer->vmap_cnt) { buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); goto out; } @@ -262,14 +262,14 @@ static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) buffer->vaddr = vaddr; buffer->vmap_cnt++; - dma_buf_map_set_vaddr(map, buffer->vaddr); + iosys_map_set_vaddr(map, buffer->vaddr); out: mutex_unlock(&buffer->lock); return ret; } -static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct system_heap_buffer *buffer = dmabuf->priv; @@ -279,7 +279,7 @@ static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) buffer->vaddr = NULL; } mutex_unlock(&buffer->lock); - dma_buf_map_clear(map); + iosys_map_clear(map); } static void system_heap_dma_buf_release(struct dma_buf *dmabuf) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 00bfa41ff7cb..9c8d56b0a41b 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -107,7 +107,7 @@ struct ast_cursor_plane { struct { struct drm_gem_vram_object *gbo; - struct dma_buf_map map; + struct iosys_map map; u64 off; } hwc[AST_DEFAULT_HWC_NUM]; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index ab52efb15670..2c7115a4d81f 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -804,11 +804,11 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state); struct drm_framebuffer *fb = new_state->fb; struct ast_private *ast = to_ast_private(plane->dev); - struct dma_buf_map dst_map = + struct iosys_map dst_map = ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map; u64 dst_off = ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off; - struct dma_buf_map src_map = shadow_plane_state->data[0]; + struct iosys_map src_map = shadow_plane_state->data[0]; unsigned int offset_x, offset_y; u16 x, y; u8 x_offset, y_offset; @@ -886,7 +886,7 @@ static void ast_cursor_plane_destroy(struct drm_plane *plane) struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane); size_t i; struct drm_gem_vram_object *gbo; - struct dma_buf_map map; + struct iosys_map map; for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) { gbo = ast_cursor_plane->hwc[i].gbo; @@ -913,7 +913,7 @@ static int ast_cursor_plane_init(struct ast_private *ast) struct drm_plane *cursor_plane = &ast_cursor_plane->base; size_t size, i; struct drm_gem_vram_object *gbo; - struct dma_buf_map map; + struct iosys_map map; int ret; s64 off; diff --git a/drivers/gpu/drm/dp/drm_dp.c b/drivers/gpu/drm/dp/drm_dp.c index 6d43325acca5..a20b0f8f24b8 100644 --- a/drivers/gpu/drm/dp/drm_dp.c +++ b/drivers/gpu/drm/dp/drm_dp.c @@ -144,6 +144,69 @@ u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], } EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset); +/* DP 2.0 errata for 128b/132b */ +bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + u8 lane_align, lane_status; + int lane; + + lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + if (!(lane_align & DP_INTERLANE_ALIGN_DONE)) + return false; + + for (lane = 0; 
lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if (!(lane_status & DP_LANE_CHANNEL_EQ_DONE)) + return false; + } + return true; +} +EXPORT_SYMBOL(drm_dp_128b132b_lane_channel_eq_done); + +/* DP 2.0 errata for 128b/132b */ +bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + u8 lane_status; + int lane; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = dp_get_lane_status(link_status, lane); + if (!(lane_status & DP_LANE_SYMBOL_LOCKED)) + return false; + } + return true; +} +EXPORT_SYMBOL(drm_dp_128b132b_lane_symbol_locked); + +/* DP 2.0 errata for 128b/132b */ +bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + + return status & DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE; +} +EXPORT_SYMBOL(drm_dp_128b132b_eq_interlane_align_done); + +/* DP 2.0 errata for 128b/132b */ +bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + + return status & DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE; +} +EXPORT_SYMBOL(drm_dp_128b132b_cds_interlane_align_done); + +/* DP 2.0 errata for 128b/132b */ +bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 status = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + + return status & DP_128B132B_LT_FAILED; +} +EXPORT_SYMBOL(drm_dp_128b132b_link_training_failed); + u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE], unsigned int lane) { @@ -281,6 +344,26 @@ int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIV } EXPORT_SYMBOL(drm_dp_read_channel_eq_delay); +/* Per DP 2.0 Errata */ +int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux) +{ + int unit; + u8 val; + + if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) { + drm_err(aux->drm_dev, "%s: failed rd interval read\n", + aux->name); + /* default to max */ + val = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK; + } + + unit = (val & DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT) ? 
1 : 2; + val &= DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK; + + return (val + 1) * unit * 1000; +} +EXPORT_SYMBOL(drm_dp_128b132b_read_aux_rd_interval); + void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index f19d9acbe959..4b0da6baff78 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -28,10 +28,10 @@ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> */ -#include <linux/dma-buf-map.h> +#include <linux/cc_platform.h> #include <linux/export.h> #include <linux/highmem.h> -#include <linux/cc_platform.h> +#include <linux/iosys-map.h> #include <xen/xen.h> #include <drm/drm_cache.h> @@ -214,14 +214,14 @@ bool drm_need_swiotlb(int dma_bits) } EXPORT_SYMBOL(drm_need_swiotlb); -static void memcpy_fallback(struct dma_buf_map *dst, - const struct dma_buf_map *src, +static void memcpy_fallback(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len) { if (!dst->is_iomem && !src->is_iomem) { memcpy(dst->vaddr, src->vaddr, len); } else if (!src->is_iomem) { - dma_buf_map_memcpy_to(dst, src->vaddr, len); + iosys_map_memcpy_to(dst, 0, src->vaddr, len); } else if (!dst->is_iomem) { memcpy_fromio(dst->vaddr, src->vaddr_iomem, len); } else { @@ -305,8 +305,8 @@ static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len) * Tries an arch optimized memcpy for prefetching reading out of a WC region, * and if no such beast is available, falls back to a normal memcpy. */ -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len) { if (WARN_ON(in_interrupt())) { @@ -343,8 +343,8 @@ void drm_memcpy_init_early(void) static_branch_enable(&has_movntdqa); } #else -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len) { WARN_ON(in_interrupt()); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index ce45e380f4a2..af3b7395bf69 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -3,7 +3,7 @@ * Copyright 2018 Noralf Trønnes */ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> @@ -309,9 +309,10 @@ err_delete: * 0 on success, or a negative errno code otherwise. 
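The 128b/132b helpers added to drm_dp.c above are intended to be polled from a driver's link-training loop. A hedged sketch of one channel-equalization poll step (aux and lane_count are assumed to come from the caller; drm_dp_dpcd_read_link_status() is the existing DPCD link-status read helper):

u8 link_status[DP_LINK_STATUS_SIZE];
int delay_us = drm_dp_128b132b_read_aux_rd_interval(aux); /* returns microseconds */

usleep_range(delay_us, delay_us * 2);

if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
	return false;

if (drm_dp_128b132b_link_training_failed(link_status))
	return false;

/* Channel EQ is done once every lane reports EQ done and the lanes are aligned. */
return drm_dp_128b132b_lane_channel_eq_done(link_status, lane_count) &&
       drm_dp_128b132b_eq_interlane_align_done(link_status);
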
*/ int -drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map_copy) +drm_client_buffer_vmap(struct drm_client_buffer *buffer, + struct iosys_map *map_copy) { - struct dma_buf_map *map = &buffer->map; + struct iosys_map *map = &buffer->map; int ret; /* @@ -342,7 +343,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { - struct dma_buf_map *map = &buffer->map; + struct iosys_map *map = &buffer->map; drm_gem_vunmap(buffer->gem, map); } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index ed43b987d306..6f72627369f8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -373,7 +373,7 @@ static void drm_fb_helper_resume_worker(struct work_struct *work) static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, struct drm_clip_rect *clip, - struct dma_buf_map *dst) + struct iosys_map *dst) { struct drm_framebuffer *fb = fb_helper->fb; unsigned int cpp = fb->format->cpp[0]; @@ -382,11 +382,11 @@ static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y; - dma_buf_map_incr(dst, offset); /* go to first pixel within clip rect */ + iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ for (y = clip->y1; y < clip->y2; y++) { - dma_buf_map_memcpy_to(dst, src, len); - dma_buf_map_incr(dst, fb->pitches[0]); + iosys_map_memcpy_to(dst, 0, src, len); + iosys_map_incr(dst, fb->pitches[0]); src += fb->pitches[0]; } } @@ -395,7 +395,7 @@ static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, struct drm_clip_rect *clip) { struct drm_client_buffer *buffer = fb_helper->buffer; - struct dma_buf_map map, dst; + struct iosys_map map, dst; int ret; /* @@ -2322,7 +2322,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, struct drm_framebuffer *fb; struct fb_info *fbi; u32 format; - struct dma_buf_map map; + struct iosys_map map; int ret; drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4dcdec6487bb..8c7b24f4b0e4 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -36,7 +36,7 @@ #include <linux/pagemap.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/mem_encrypt.h> #include <linux/pagevec.h> @@ -1165,7 +1165,7 @@ void drm_gem_unpin(struct drm_gem_object *obj) obj->funcs->unpin(obj); } -int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { int ret; @@ -1175,23 +1175,23 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) ret = obj->funcs->vmap(obj, map); if (ret) return ret; - else if (dma_buf_map_is_null(map)) + else if (iosys_map_is_null(map)) return -ENOMEM; return 0; } EXPORT_SYMBOL(drm_gem_vmap); -void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { - if (dma_buf_map_is_null(map)) + if (iosys_map_is_null(map)) return; if (obj->funcs->vunmap) obj->funcs->vunmap(obj, map); /* Always set the mapping to NULL. Callers may rely on this. 
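For reference, the drm_gem_vmap()/drm_gem_vunmap() pair converted above keeps its usual calling pattern; only the mapping type changes. A minimal caller sketch (obj, data and size are assumed):

struct iosys_map map;
int ret;

ret = drm_gem_vmap(obj, &map);
if (ret)
	return ret;

/* The map records whether it points at system or I/O memory. */
iosys_map_memcpy_to(&map, 0, data, size);

drm_gem_vunmap(obj, &map); /* also clears the map, as the comment above notes */
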
*/ - dma_buf_map_clear(map); + iosys_map_clear(map); } EXPORT_SYMBOL(drm_gem_vunmap); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index cefd0cbf9deb..88c432a7cb3c 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -209,7 +209,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj) { struct drm_gem_object *gem_obj = &cma_obj->base; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr); if (gem_obj->import_attach) { if (cma_obj->vaddr) @@ -480,9 +480,10 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); * Returns: * 0 on success, or a negative error code otherwise. */ -int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map) +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, + struct iosys_map *map) { - dma_buf_map_set_vaddr(map, cma_obj->vaddr); + iosys_map_set_vaddr(map, cma_obj->vaddr); return 0; } @@ -557,7 +558,7 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev, { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *obj; - struct dma_buf_map map; + struct iosys_map map; int ret; ret = dma_buf_vmap(attach->dmabuf, &map); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 746fd8c73845..f4619803acd0 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); * @data: returns the data address for each BO, can be NULL * * This function maps all buffer objects of the given framebuffer into - * kernel address space and stores them in struct dma_buf_map. If the + * kernel address space and stores them in struct iosys_map. If the * mapping operation fails for one of the BOs, the function unmaps the * already established mappings automatically. * @@ -335,8 +335,8 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); * 0 on success, or a negative errno code otherwise. */ int drm_gem_fb_vmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]) + struct iosys_map map[static DRM_FORMAT_MAX_PLANES], + struct iosys_map data[DRM_FORMAT_MAX_PLANES]) { struct drm_gem_object *obj; unsigned int i; @@ -345,7 +345,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) { obj = drm_gem_fb_get_obj(fb, i); if (!obj) { - dma_buf_map_clear(&map[i]); + iosys_map_clear(&map[i]); continue; } ret = drm_gem_vmap(obj, &map[i]); @@ -356,9 +356,9 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, if (data) { for (i = 0; i < DRM_FORMAT_MAX_PLANES; ++i) { memcpy(&data[i], &map[i], sizeof(data[i])); - if (dma_buf_map_is_null(&data[i])) + if (iosys_map_is_null(&data[i])) continue; - dma_buf_map_incr(&data[i], fb->offsets[i]); + iosys_map_incr(&data[i], fb->offsets[i]); } } @@ -386,7 +386,7 @@ EXPORT_SYMBOL(drm_gem_fb_vmap); * See drm_gem_fb_vmap() for more information. 
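drm_gem_fb_vmap() above fills one mapping per colour plane and, if a data[] array is passed, applies fb->offsets[] so each entry points at the plane's first pixel. A rough caller sketch (fb, dst and len are assumed):

struct iosys_map map[DRM_FORMAT_MAX_PLANES];
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
int ret;

ret = drm_gem_fb_vmap(fb, map, data);
if (ret)
	return ret;

if (!data[0].is_iomem)
	memcpy(dst, data[0].vaddr, len); /* plane 0, offset already applied */

drm_gem_fb_vunmap(fb, map);
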
*/ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]) + struct iosys_map map[static DRM_FORMAT_MAX_PLANES]) { unsigned int i = DRM_FORMAT_MAX_PLANES; struct drm_gem_object *obj; @@ -396,7 +396,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, obj = drm_gem_fb_get_obj(fb, i); if (!obj) continue; - if (dma_buf_map_is_null(&map[i])) + if (iosys_map_is_null(&map[i])) continue; drm_gem_vunmap(obj, &map[i]); } diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 621924116eb4..3e738aea2664 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -286,13 +286,14 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) } EXPORT_SYMBOL(drm_gem_shmem_unpin); -static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) +static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; int ret = 0; if (shmem->vmap_use_count++ > 0) { - dma_buf_map_set_vaddr(map, shmem->vaddr); + iosys_map_set_vaddr(map, shmem->vaddr); return 0; } @@ -319,7 +320,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct if (!shmem->vaddr) ret = -ENOMEM; else - dma_buf_map_set_vaddr(map, shmem->vaddr); + iosys_map_set_vaddr(map, shmem->vaddr); } if (ret) { @@ -353,7 +354,8 @@ err_zero_use: * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { int ret; @@ -368,7 +370,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *m EXPORT_SYMBOL(drm_gem_shmem_vmap); static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, - struct dma_buf_map *map) + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; @@ -400,7 +402,8 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, * This function hides the differences between dma-buf imported and natively * allocated objects. */ -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { mutex_lock(&shmem->vmap_lock); drm_gem_shmem_vunmap_locked(shmem, map); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index ecf3d2a54a98..d5962a34c01d 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -61,7 +61,7 @@ EXPORT_SYMBOL(drm_gem_ttm_print_info); * 0 on success, or a negative errno code otherwise. */ int drm_gem_ttm_vmap(struct drm_gem_object *gem, - struct dma_buf_map *map) + struct iosys_map *map) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); @@ -78,7 +78,7 @@ EXPORT_SYMBOL(drm_gem_ttm_vmap); * &drm_gem_object_funcs.vmap callback. 
*/ void drm_gem_ttm_vunmap(struct drm_gem_object *gem, - struct dma_buf_map *map) + struct iosys_map *map) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 3f00192215d1..dc7f938bfff2 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/module.h> #include <drm/drm_debugfs.h> @@ -116,7 +116,7 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) */ WARN_ON(gbo->vmap_use_count); - WARN_ON(dma_buf_map_is_set(&gbo->map)); + WARN_ON(iosys_map_is_set(&gbo->map)); drm_gem_object_release(&gbo->bo.base); } @@ -365,7 +365,7 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) EXPORT_SYMBOL(drm_gem_vram_unpin); static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, - struct dma_buf_map *map) + struct iosys_map *map) { int ret; @@ -377,7 +377,7 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, * page mapping might still be around. Only vmap if the there's * no mapping present. */ - if (dma_buf_map_is_null(&gbo->map)) { + if (iosys_map_is_null(&gbo->map)) { ret = ttm_bo_vmap(&gbo->bo, &gbo->map); if (ret) return ret; @@ -391,14 +391,14 @@ out: } static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, - struct dma_buf_map *map) + struct iosys_map *map) { struct drm_device *dev = gbo->bo.base.dev; if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count)) return; - if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map))) + if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map))) return; /* BUG: map not mapped from this BO */ if (--gbo->vmap_use_count > 0) @@ -428,7 +428,7 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, * Returns: * 0 on success, or a negative error code otherwise. */ -int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map) { int ret; @@ -463,7 +463,8 @@ EXPORT_SYMBOL(drm_gem_vram_vmap); * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See * the documentation for drm_gem_vram_vmap() for more information. */ -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, + struct iosys_map *map) { int ret; @@ -567,7 +568,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo) return; ttm_bo_vunmap(bo, &gbo->map); - dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */ + iosys_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */ } static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, @@ -802,7 +803,8 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) * Returns: * 0 on success, or a negative error code otherwise. 
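Helpers like drm_gem_ttm_vmap() above implement &drm_gem_object_funcs.vmap directly on top of struct iosys_map; for a driver that already keeps a kernel virtual address, the callback reduces to wrapping that pointer. A hypothetical sketch (my_gem_object and its vaddr field are made up for illustration):

static int my_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct my_gem_object *bo = to_my_gem_object(obj); /* hypothetical */

	if (!bo->vaddr)
		return -ENOMEM;

	iosys_map_set_vaddr(map, bo->vaddr);
	return 0;
}
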
*/ -static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map) +static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, + struct iosys_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); @@ -815,7 +817,8 @@ static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_m * @gem: The GEM object to unmap * @map: Kernel virtual address where the VRAM GEM object was mapped */ -static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map) +static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, + struct iosys_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 17f3548c8ed2..1fbbc19f1ac0 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -33,7 +33,7 @@ struct dentry; struct dma_buf; -struct dma_buf_map; +struct iosys_map; struct drm_connector; struct drm_crtc; struct drm_framebuffer; @@ -174,8 +174,8 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, int drm_gem_pin(struct drm_gem_object *obj); void drm_gem_unpin(struct drm_gem_object *obj); -int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, u32 handle); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 0327d595e028..9314f2ead79f 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -201,8 +201,8 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, struct drm_rect *clip, bool swap) { struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; void *src; int ret; @@ -258,8 +258,8 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev, static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index b910978d3e48..4e853acfd1e8 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -180,6 +180,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"), }, .driver_data = (void *)&lcd720x1280_rightside_up, + }, { /* GPD Win Max */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* * GPD Pocket, note that the the DMI data is less generic then * it seems, devices with a board-vendor of "AMI Corporation" diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index c773d3dfb1ab..e3f09f18110c 100644 --- 
a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -674,7 +674,7 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf); * * Returns 0 on success or a negative errno code otherwise. */ -int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; @@ -690,7 +690,7 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap); * Releases a kernel virtual mapping. This can be used as the * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. */ -void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 049ae87de9be..f32f4771dada 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -49,7 +49,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); -int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int etnaviv_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 6788ea8490d1..3fa2da149639 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -25,14 +25,14 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); } -int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { void *vaddr; vaddr = etnaviv_gem_vmap(obj); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } @@ -62,7 +62,7 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) { - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr); if (etnaviv_obj->vaddr) dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map); @@ -77,7 +77,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) { - struct dma_buf_map map; + struct iosys_map map; int ret; lockdep_assert_held(&etnaviv_obj->lock); diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index a150a5a4b5d4..4873f9799f41 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -152,8 +152,8 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb, { struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach; u8 compression = gdrm->compression; - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map map_data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct 
iosys_map map_data[DRM_FORMAT_MAX_PLANES]; void *vaddr, *buf; size_t pitch, len; int ret = 0; diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index 93f51e70a951..e82b815f83a6 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -19,7 +19,7 @@ #include "hyperv_drm.h" static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb, - const struct dma_buf_map *map, + const struct iosys_map *map, struct drm_rect *rect) { struct hyperv_drm_device *hv = to_hv(fb->dev); @@ -38,7 +38,8 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb, return 0; } -static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map) +static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb, + const struct iosys_map *map) { struct drm_rect fullscreen = { .x1 = 0, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 451df10e3a36..9d588d936e3d 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -13,6 +13,7 @@ # will most likely get a sudden build breakage... Hopefully we will fix # new warnings before CI updates! subdir-ccflags-y := -Wall -Wextra +subdir-ccflags-y += -Wno-format-security subdir-ccflags-y += -Wno-unused-parameter subdir-ccflags-y += -Wno-type-limits subdir-ccflags-y += -Wno-missing-field-initializers @@ -32,8 +33,9 @@ subdir-ccflags-y += -I$(srctree)/$(src) # core driver code i915-y += i915_driver.o \ i915_config.o \ - i915_irq.o \ i915_getparam.o \ + i915_ioctl.o \ + i915_irq.o \ i915_mitigations.o \ i915_module.o \ i915_params.o \ @@ -196,6 +198,7 @@ i915-y += gt/uc/intel_uc.o \ # modesetting core code i915-y += \ + display/hsw_ips.o \ display/intel_atomic.o \ display/intel_atomic_plane.o \ display/intel_audio.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index f37677df6ebf..f67bbaaad8e0 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -18,6 +18,7 @@ #include "intel_fifo_underrun.h" #include "intel_hdmi.h" #include "intel_hotplug.h" +#include "intel_pch_display.h" #include "intel_pps.h" #include "vlv_sideband.h" @@ -333,6 +334,21 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, return ret; } +static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc_state->has_pch_encoder) { + intel_pch_transcoder_get_m1_n1(crtc, &crtc_state->dp_m_n); + intel_pch_transcoder_get_m2_n2(crtc, &crtc_state->dp_m2_n2); + } else { + intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_get_m2_n2(crtc, crtc_state->cpu_transcoder, + &crtc_state->dp_m2_n2); + } +} + static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -384,7 +400,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->lane_count = ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; - intel_dp_get_m_n(crtc, pipe_config); + g4x_dp_get_m_n(pipe_config); if (port == PORT_A) { if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h index 7aca14b602c6..db9a93bc9321 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.h +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h @@ -8,7 +8,7 @@ #include 
<linux/types.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" enum port; struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c new file mode 100644 index 000000000000..38014e0cc9ad --- /dev/null +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "hsw_ips.h" +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_pcode.h" + +static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (!crtc_state->ips_enabled) + return; + + /* + * We can only enable IPS after we enable a plane and wait for a vblank + * This function is called from post_plane_update, which is run after + * a vblank wait. + */ + drm_WARN_ON(&i915->drm, + !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); + + if (IS_BROADWELL(i915)) { + drm_WARN_ON(&i915->drm, + snb_pcode_write(i915, DISPLAY_IPS_CONTROL, + IPS_ENABLE | IPS_PCODE_CONTROL)); + /* + * Quoting Art Runyan: "its not safe to expect any particular + * value in IPS_CTL bit 31 after enabling IPS through the + * mailbox." Moreover, the mailbox may return a bogus state, + * so we need to just enable it and continue on. + */ + } else { + intel_de_write(i915, IPS_CTL, IPS_ENABLE); + /* + * The bit only becomes 1 in the next vblank, so this wait here + * is essentially intel_wait_for_vblank. If we don't have this + * and don't wait for vblanks until the end of crtc_enable, then + * the HW state readout code will complain that the expected + * IPS_CTL value is not the one we read. + */ + if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50)) + drm_err(&i915->drm, + "Timed out waiting for IPS enable\n"); + } +} + +bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + bool need_vblank_wait = false; + + if (!crtc_state->ips_enabled) + return need_vblank_wait; + + if (IS_BROADWELL(i915)) { + drm_WARN_ON(&i915->drm, + snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0)); + /* + * Wait for PCODE to finish disabling IPS. The BSpec specified + * 42ms timeout value leads to occasional timeouts so use 100ms + * instead. + */ + if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100)) + drm_err(&i915->drm, + "Timed out waiting for IPS disable\n"); + } else { + intel_de_write(i915, IPS_CTL, 0); + intel_de_posting_read(i915, IPS_CTL); + } + + /* We need to wait for a vblank before we can disable the plane. */ + need_vblank_wait = true; + + return need_vblank_wait; +} + +static bool hsw_ips_need_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!old_crtc_state->ips_enabled) + return false; + + if (intel_crtc_needs_modeset(new_crtc_state)) + return true; + + /* + * Workaround : Do not read or write the pipe palette/gamma data while + * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. + * + * Disable IPS before we program the LUT. 
+ */ + if (IS_HASWELL(i915) && + (new_crtc_state->uapi.color_mgmt_changed || + new_crtc_state->update_pipe) && + new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) + return true; + + return !new_crtc_state->ips_enabled; +} + +bool hsw_ips_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + if (!hsw_ips_need_disable(state, crtc)) + return false; + + return hsw_ips_disable(old_crtc_state); +} + +static bool hsw_ips_need_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!new_crtc_state->ips_enabled) + return false; + + if (intel_crtc_needs_modeset(new_crtc_state)) + return true; + + /* + * Workaround : Do not read or write the pipe palette/gamma data while + * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. + * + * Re-enable IPS after the LUT has been programmed. + */ + if (IS_HASWELL(i915) && + (new_crtc_state->uapi.color_mgmt_changed || + new_crtc_state->update_pipe) && + new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) + return true; + + /* + * We can't read out IPS on broadwell, assume the worst and + * forcibly enable IPS on the first fastset. + */ + if (new_crtc_state->update_pipe && old_crtc_state->inherited) + return true; + + return !old_crtc_state->ips_enabled; +} + +void hsw_ips_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!hsw_ips_need_enable(state, crtc)) + return; + + hsw_ips_enable(new_crtc_state); +} + +/* IPS only exists on ULT machines and is tied to pipe A. */ +bool hsw_crtc_supports_ips(struct intel_crtc *crtc) +{ + return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; +} + +bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + /* IPS only exists on ULT machines and is tied to pipe A. */ + if (!hsw_crtc_supports_ips(crtc)) + return false; + + if (!i915->params.enable_ips) + return false; + + if (crtc_state->pipe_bpp > 24) + return false; + + /* + * We compare against max which means we must take + * the increased cdclk requirement into account when + * calculating the new cdclk. + * + * Should measure whether using a lower cdclk w/o IPS + */ + if (IS_BROADWELL(i915) && + crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100) + return false; + + return true; +} + +int hsw_ips_compute_config(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + crtc_state->ips_enabled = false; + + if (!hsw_crtc_state_ips_capable(crtc_state)) + return 0; + + /* + * When IPS gets enabled, the pipe CRC changes. Since IPS gets + * enabled and disabled dynamically based on package C states, + * user space can't make reliable use of the CRCs, so let's just + * completely disable it. + */ + if (crtc_state->crc_enabled) + return 0; + + /* IPS should be fine as long as at least one plane is enabled. 
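Taken together, the hsw_ips_* helpers are meant to bracket a plane update: IPS is disabled before the planes are touched and re-enabled afterwards. A rough sketch of the intended call sequence (the surrounding plane-update hooks and the vblank-wait helper are assumptions, not part of this excerpt):

/* before updating planes */
if (hsw_ips_pre_update(state, crtc))
	intel_crtc_wait_for_next_vblank(crtc); /* assumed vblank-wait helper */

/* ... plane update happens here ... */

/* after updating planes */
hsw_ips_post_update(state, crtc);
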
*/ + if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) + return 0; + + if (IS_BROADWELL(i915)) { + const struct intel_cdclk_state *cdclk_state; + + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100) + return 0; + } + + crtc_state->ips_enabled = true; + + return 0; +} + +void hsw_ips_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (!hsw_crtc_supports_ips(crtc)) + return; + + if (IS_HASWELL(i915)) { + crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE; + } else { + /* + * We cannot readout IPS state on broadwell, set to + * true so we can set it to a defined state on first + * commit. + */ + crtc_state->ips_enabled = true; + } +} diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h new file mode 100644 index 000000000000..4564dee497d7 --- /dev/null +++ b/drivers/gpu/drm/i915/display/hsw_ips.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __HSW_IPS_H__ +#define __HSW_IPS_H__ + +#include <linux/types.h> + +struct intel_atomic_state; +struct intel_crtc; +struct intel_crtc_state; + +bool hsw_ips_disable(const struct intel_crtc_state *crtc_state); +bool hsw_ips_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void hsw_ips_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); +bool hsw_crtc_supports_ips(struct intel_crtc *crtc); +bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); +int hsw_ips_compute_config(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void hsw_ips_get_config(struct intel_crtc_state *crtc_state); + +#endif /* __HSW_IPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 85950ff67609..a87b65cd41fd 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -125,7 +125,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) - return dev_priv->fbc; + return dev_priv->fbc[INTEL_FBC_A]; else return NULL; } @@ -155,51 +155,51 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, unsigned int rotation = plane_state->hw.rotation; u32 dspcntr; - dspcntr = DISPLAY_PLANE_ENABLE; + dspcntr = DISP_ENABLE; if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + dspcntr |= DISP_TRICKLE_FEED_DISABLE; switch (fb->format->format) { case DRM_FORMAT_C8: - dspcntr |= DISPPLANE_8BPP; + dspcntr |= DISP_FORMAT_8BPP; break; case DRM_FORMAT_XRGB1555: - dspcntr |= DISPPLANE_BGRX555; + dspcntr |= DISP_FORMAT_BGRX555; break; case DRM_FORMAT_ARGB1555: - dspcntr |= DISPPLANE_BGRA555; + dspcntr |= DISP_FORMAT_BGRA555; break; case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; + dspcntr |= DISP_FORMAT_BGRX565; break; case DRM_FORMAT_XRGB8888: - dspcntr |= DISPPLANE_BGRX888; + dspcntr |= DISP_FORMAT_BGRX888; break; case DRM_FORMAT_XBGR8888: - dspcntr |= DISPPLANE_RGBX888; + dspcntr |= DISP_FORMAT_RGBX888; break; case DRM_FORMAT_ARGB8888: - dspcntr |= 
DISPPLANE_BGRA888; + dspcntr |= DISP_FORMAT_BGRA888; break; case DRM_FORMAT_ABGR8888: - dspcntr |= DISPPLANE_RGBA888; + dspcntr |= DISP_FORMAT_RGBA888; break; case DRM_FORMAT_XRGB2101010: - dspcntr |= DISPPLANE_BGRX101010; + dspcntr |= DISP_FORMAT_BGRX101010; break; case DRM_FORMAT_XBGR2101010: - dspcntr |= DISPPLANE_RGBX101010; + dspcntr |= DISP_FORMAT_RGBX101010; break; case DRM_FORMAT_ARGB2101010: - dspcntr |= DISPPLANE_BGRA101010; + dspcntr |= DISP_FORMAT_BGRA101010; break; case DRM_FORMAT_ABGR2101010: - dspcntr |= DISPPLANE_RGBA101010; + dspcntr |= DISP_FORMAT_RGBA101010; break; case DRM_FORMAT_XBGR16161616F: - dspcntr |= DISPPLANE_RGBX161616; + dspcntr |= DISP_FORMAT_RGBX161616; break; default: MISSING_CASE(fb->format->format); @@ -208,13 +208,13 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) - dspcntr |= DISPPLANE_TILED; + dspcntr |= DISP_TILED; if (rotation & DRM_MODE_ROTATE_180) - dspcntr |= DISPPLANE_ROTATE_180; + dspcntr |= DISP_ROTATE_180; if (rotation & DRM_MODE_REFLECT_X) - dspcntr |= DISPPLANE_MIRROR; + dspcntr |= DISP_MIRROR; return dspcntr; } @@ -354,13 +354,13 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 dspcntr = 0; if (crtc_state->gamma_enable) - dspcntr |= DISPPLANE_GAMMA_ENABLE; + dspcntr |= DISP_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) - dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; + dspcntr |= DISP_PIPE_CSC_ENABLE; if (DISPLAY_VER(dev_priv) < 5) - dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); + dspcntr |= DISP_PIPE_SEL(crtc->pipe); return dspcntr; } @@ -437,9 +437,9 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane, * program whatever is there. */ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), - (crtc_y << 16) | crtc_x); + DISP_POS_Y(crtc_y) | DISP_POS_X(crtc_x)); intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); + DISP_HEIGHT(crtc_h - 1) | DISP_WIDTH(crtc_w - 1)); } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -474,20 +474,20 @@ static void i9xx_plane_update_arm(struct intel_plane *plane, int crtc_h = drm_rect_height(&plane_state->uapi.dst); intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), - (crtc_y << 16) | crtc_x); + PRIM_POS_Y(crtc_y) | PRIM_POS_X(crtc_x)); intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); + PRIM_HEIGHT(crtc_h - 1) | PRIM_WIDTH(crtc_w - 1)); intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), - (y << 16) | x); + DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } else if (DISPLAY_VER(dev_priv) >= 4) { intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), linear_offset); intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), - (y << 16) | x); + DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } /* @@ -564,7 +564,7 @@ g4x_primary_async_flip(struct intel_plane *plane, unsigned long irqflags; if (async_flip) - dspcntr |= DISPPLANE_ASYNC_FLIP; + dspcntr |= DISP_ASYNC_FLIP; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); @@ -696,13 +696,12 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); - ret = val & DISPLAY_PLANE_ENABLE; + ret = val & DISP_ENABLE; if (DISPLAY_VER(dev_priv) >= 5) *pipe = plane->pipe; else - *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> - DISPPLANE_SEL_PIPE_SHIFT; + *pipe = 
REG_FIELD_GET(DISP_PIPE_SEL_MASK, val); intel_display_power_put(dev_priv, power_domain, wakeref); @@ -958,32 +957,32 @@ fail: static int i9xx_format_to_fourcc(int format) { switch (format) { - case DISPPLANE_8BPP: + case DISP_FORMAT_8BPP: return DRM_FORMAT_C8; - case DISPPLANE_BGRA555: + case DISP_FORMAT_BGRA555: return DRM_FORMAT_ARGB1555; - case DISPPLANE_BGRX555: + case DISP_FORMAT_BGRX555: return DRM_FORMAT_XRGB1555; - case DISPPLANE_BGRX565: + case DISP_FORMAT_BGRX565: return DRM_FORMAT_RGB565; default: - case DISPPLANE_BGRX888: + case DISP_FORMAT_BGRX888: return DRM_FORMAT_XRGB8888; - case DISPPLANE_RGBX888: + case DISP_FORMAT_RGBX888: return DRM_FORMAT_XBGR8888; - case DISPPLANE_BGRA888: + case DISP_FORMAT_BGRA888: return DRM_FORMAT_ARGB8888; - case DISPPLANE_RGBA888: + case DISP_FORMAT_RGBA888: return DRM_FORMAT_ABGR8888; - case DISPPLANE_BGRX101010: + case DISP_FORMAT_BGRX101010: return DRM_FORMAT_XRGB2101010; - case DISPPLANE_RGBX101010: + case DISP_FORMAT_RGBX101010: return DRM_FORMAT_XBGR2101010; - case DISPPLANE_BGRA101010: + case DISP_FORMAT_BGRA101010: return DRM_FORMAT_ARGB2101010; - case DISPPLANE_RGBA101010: + case DISP_FORMAT_RGBA101010: return DRM_FORMAT_ABGR2101010; - case DISPPLANE_RGBX161616: + case DISP_FORMAT_RGBX161616: return DRM_FORMAT_XBGR16161616F; } } @@ -1021,26 +1020,26 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); if (DISPLAY_VER(dev_priv) >= 4) { - if (val & DISPPLANE_TILED) { + if (val & DISP_TILED) { plane_config->tiling = I915_TILING_X; fb->modifier = I915_FORMAT_MOD_X_TILED; } - if (val & DISPPLANE_ROTATE_180) + if (val & DISP_ROTATE_180) plane_config->rotation = DRM_MODE_ROTATE_180; } if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && - val & DISPPLANE_MIRROR) + val & DISP_MIRROR) plane_config->rotation |= DRM_MODE_REFLECT_X; - pixel_format = val & DISPPLANE_PIXFORMAT_MASK; + pixel_format = val & DISP_FORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); fb->format = drm_format_info(fourcc); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); - base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; + base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK; } else if (DISPLAY_VER(dev_priv) >= 4) { if (plane_config->tiling) offset = intel_de_read(dev_priv, @@ -1048,15 +1047,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, else offset = intel_de_read(dev_priv, DSPLINOFF(i9xx_plane)); - base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; + base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK; } else { base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); } plane_config->base = base; val = intel_de_read(dev_priv, PIPESRC(pipe)); - fb->width = ((val >> 16) & 0xfff) + 1; - fb->height = ((val >> 0) & 0xfff) + 1; + fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1; + fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1; val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); fb->pitches[0] = val & 0xffffffc0; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 5781e9fac8b4..13b07c6fd6be 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -29,9 +29,11 @@ #include <drm/drm_mipi_dsi.h> #include "icl_dsi.h" +#include "icl_dsi_regs.h" #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include 
"intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" @@ -569,7 +571,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) { tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port)); - tmp &= ~MASTER_INIT_TIMER_MASK; + tmp &= ~DSI_T_INIT_MASTER_MASK; tmp |= intel_dsi->init_count; intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp); } @@ -787,14 +789,14 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* program DSI operation mode */ if (is_vid_mode(intel_dsi)) { tmp &= ~OP_MODE_MASK; - switch (intel_dsi->video_mode_format) { + switch (intel_dsi->video_mode) { default: - MISSING_CASE(intel_dsi->video_mode_format); + MISSING_CASE(intel_dsi->video_mode); fallthrough; - case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS: + case NON_BURST_SYNC_EVENTS: tmp |= VIDEO_MODE_SYNC_EVENT; break; - case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE: + case NON_BURST_SYNC_PULSE: tmp |= VIDEO_MODE_SYNC_PULSE; break; } @@ -959,8 +961,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, /* TRANS_HSYNC register to be programmed only for video mode */ if (is_vid_mode(intel_dsi)) { - if (intel_dsi->video_mode_format == - VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) { + if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) { /* BSPEC: hsync size should be atleast 16 pixels */ if (hsync_size < 16) drm_err(&dev_priv->drm, @@ -1050,7 +1051,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) /* wait for transcoder to be enabled */ if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans), - I965_PIPECONF_ACTIVE, 10)) + PIPECONF_STATE_ENABLE, 10)) drm_err(&dev_priv->drm, "DSI transcoder not enabled\n"); } @@ -1232,8 +1233,6 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, intel_dsc_dsi_pps_write(encoder, pipe_config); - intel_dsc_enable(pipe_config); - /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); } @@ -1320,7 +1319,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) /* wait for transcoder to be disabled */ if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans), - I965_PIPECONF_ACTIVE, 50)) + PIPECONF_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "DSI trancoder not disabled\n"); } diff --git a/drivers/gpu/drm/i915/display/icl_dsi_regs.h b/drivers/gpu/drm/i915/display/icl_dsi_regs.h new file mode 100644 index 000000000000..f78f28b8dd94 --- /dev/null +++ b/drivers/gpu/drm/i915/display/icl_dsi_regs.h @@ -0,0 +1,342 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __ICL_DSI_REGS_H__ +#define __ICL_DSI_REGS_H__ + +#include "i915_reg_defs.h" + +/* Gen11 DSI */ +#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \ + dsi0, dsi1) +#define _ICL_DSI_ESC_CLK_DIV0 0x6b090 +#define _ICL_DSI_ESC_CLK_DIV1 0x6b890 +#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \ + _ICL_DSI_ESC_CLK_DIV0, \ + _ICL_DSI_ESC_CLK_DIV1) +#define _ICL_DPHY_ESC_CLK_DIV0 0x162190 +#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190 +#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \ + _ICL_DPHY_ESC_CLK_DIV0, \ + _ICL_DPHY_ESC_CLK_DIV1) +#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16) +#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16 +#define ICL_ESC_CLK_DIV_MASK 0x1ff +#define ICL_ESC_CLK_DIV_SHIFT 0 +#define DSI_MAX_ESC_CLK 20000 /* in KHz */ + +#define _ADL_MIPIO_REG 0x180 +#define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw)) 
+#define TX_ESC_CLK_DIV_PHY_SEL REGBIT(16) +#define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16) +#define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f) + +#define _DSI_CMD_FRMCTL_0 0x6b034 +#define _DSI_CMD_FRMCTL_1 0x6b834 +#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \ + _DSI_CMD_FRMCTL_0,\ + _DSI_CMD_FRMCTL_1) +#define DSI_FRAME_UPDATE_REQUEST (1 << 31) +#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29) +#define DSI_NULL_PACKET_ENABLE (1 << 28) +#define DSI_FRAME_IN_PROGRESS (1 << 0) + +#define _DSI_INTR_MASK_REG_0 0x6b070 +#define _DSI_INTR_MASK_REG_1 0x6b870 +#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \ + _DSI_INTR_MASK_REG_0,\ + _DSI_INTR_MASK_REG_1) + +#define _DSI_INTR_IDENT_REG_0 0x6b074 +#define _DSI_INTR_IDENT_REG_1 0x6b874 +#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \ + _DSI_INTR_IDENT_REG_0,\ + _DSI_INTR_IDENT_REG_1) +#define DSI_TE_EVENT (1 << 31) +#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30) +#define DSI_TX_DATA (1 << 29) +#define DSI_ULPS_ENTRY_DONE (1 << 28) +#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27) +#define DSI_HOST_CHKSUM_ERROR (1 << 26) +#define DSI_HOST_MULTI_ECC_ERROR (1 << 25) +#define DSI_HOST_SINGL_ECC_ERROR (1 << 24) +#define DSI_HOST_CONTENTION_DETECTED (1 << 23) +#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22) +#define DSI_HOST_TIMEOUT_ERROR (1 << 21) +#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20) +#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19) +#define DSI_FRAME_UPDATE_DONE (1 << 16) +#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15) +#define DSI_INVALID_TX_LENGTH (1 << 13) +#define DSI_INVALID_VC (1 << 12) +#define DSI_INVALID_DATA_TYPE (1 << 11) +#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10) +#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9) +#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8) +#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7) +#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6) +#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5) +#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4) +#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3) +#define DSI_EOT_SYNC_ERROR (1 << 2) +#define DSI_SOT_SYNC_ERROR (1 << 1) +#define DSI_SOT_ERROR (1 << 0) + +/* ICL DSI MODE control */ +#define _ICL_DSI_IO_MODECTL_0 0x6B094 +#define _ICL_DSI_IO_MODECTL_1 0x6B894 +#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \ + _ICL_DSI_IO_MODECTL_0, \ + _ICL_DSI_IO_MODECTL_1) +#define COMBO_PHY_MODE_DSI (1 << 0) + +/* TGL DSI Chicken register */ +#define _TGL_DSI_CHKN_REG_0 0x6B0C0 +#define _TGL_DSI_CHKN_REG_1 0x6B8C0 +#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ + _TGL_DSI_CHKN_REG_0, \ + _TGL_DSI_CHKN_REG_1) +#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \ + (byte_clocks)) +#define _ICL_DSI_T_INIT_MASTER_0 0x6b088 +#define _ICL_DSI_T_INIT_MASTER_1 0x6b888 +#define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \ + _ICL_DSI_T_INIT_MASTER_0,\ + _ICL_DSI_T_INIT_MASTER_1) +#define DSI_T_INIT_MASTER_MASK REG_GENMASK(15, 0) + +#define _DPHY_CLK_TIMING_PARAM_0 0x162180 +#define _DPHY_CLK_TIMING_PARAM_1 0x6c180 +#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DPHY_CLK_TIMING_PARAM_0,\ + _DPHY_CLK_TIMING_PARAM_1) +#define _DSI_CLK_TIMING_PARAM_0 0x6b080 +#define _DSI_CLK_TIMING_PARAM_1 0x6b880 +#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DSI_CLK_TIMING_PARAM_0,\ + _DSI_CLK_TIMING_PARAM_1) +#define CLK_PREPARE_OVERRIDE (1 << 31) +#define CLK_PREPARE(x) ((x) << 28) +#define CLK_PREPARE_MASK (0x7 << 28) +#define 
CLK_PREPARE_SHIFT 28 +#define CLK_ZERO_OVERRIDE (1 << 27) +#define CLK_ZERO(x) ((x) << 20) +#define CLK_ZERO_MASK (0xf << 20) +#define CLK_ZERO_SHIFT 20 +#define CLK_PRE_OVERRIDE (1 << 19) +#define CLK_PRE(x) ((x) << 16) +#define CLK_PRE_MASK (0x3 << 16) +#define CLK_PRE_SHIFT 16 +#define CLK_POST_OVERRIDE (1 << 15) +#define CLK_POST(x) ((x) << 8) +#define CLK_POST_MASK (0x7 << 8) +#define CLK_POST_SHIFT 8 +#define CLK_TRAIL_OVERRIDE (1 << 7) +#define CLK_TRAIL(x) ((x) << 0) +#define CLK_TRAIL_MASK (0xf << 0) +#define CLK_TRAIL_SHIFT 0 + +#define _DPHY_DATA_TIMING_PARAM_0 0x162184 +#define _DPHY_DATA_TIMING_PARAM_1 0x6c184 +#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DPHY_DATA_TIMING_PARAM_0,\ + _DPHY_DATA_TIMING_PARAM_1) +#define _DSI_DATA_TIMING_PARAM_0 0x6B084 +#define _DSI_DATA_TIMING_PARAM_1 0x6B884 +#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DSI_DATA_TIMING_PARAM_0,\ + _DSI_DATA_TIMING_PARAM_1) +#define HS_PREPARE_OVERRIDE (1 << 31) +#define HS_PREPARE(x) ((x) << 24) +#define HS_PREPARE_MASK (0x7 << 24) +#define HS_PREPARE_SHIFT 24 +#define HS_ZERO_OVERRIDE (1 << 23) +#define HS_ZERO(x) ((x) << 16) +#define HS_ZERO_MASK (0xf << 16) +#define HS_ZERO_SHIFT 16 +#define HS_TRAIL_OVERRIDE (1 << 15) +#define HS_TRAIL(x) ((x) << 8) +#define HS_TRAIL_MASK (0x7 << 8) +#define HS_TRAIL_SHIFT 8 +#define HS_EXIT_OVERRIDE (1 << 7) +#define HS_EXIT(x) ((x) << 0) +#define HS_EXIT_MASK (0x7 << 0) +#define HS_EXIT_SHIFT 0 + +#define _DPHY_TA_TIMING_PARAM_0 0x162188 +#define _DPHY_TA_TIMING_PARAM_1 0x6c188 +#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DPHY_TA_TIMING_PARAM_0,\ + _DPHY_TA_TIMING_PARAM_1) +#define _DSI_TA_TIMING_PARAM_0 0x6b098 +#define _DSI_TA_TIMING_PARAM_1 0x6b898 +#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DSI_TA_TIMING_PARAM_0,\ + _DSI_TA_TIMING_PARAM_1) +#define TA_SURE_OVERRIDE (1 << 31) +#define TA_SURE(x) ((x) << 16) +#define TA_SURE_MASK (0x1f << 16) +#define TA_SURE_SHIFT 16 +#define TA_GO_OVERRIDE (1 << 15) +#define TA_GO(x) ((x) << 8) +#define TA_GO_MASK (0xf << 8) +#define TA_GO_SHIFT 8 +#define TA_GET_OVERRIDE (1 << 7) +#define TA_GET(x) ((x) << 0) +#define TA_GET_MASK (0xf << 0) +#define TA_GET_SHIFT 0 + +/* DSI transcoder configuration */ +#define _DSI_TRANS_FUNC_CONF_0 0x6b030 +#define _DSI_TRANS_FUNC_CONF_1 0x6b830 +#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \ + _DSI_TRANS_FUNC_CONF_0,\ + _DSI_TRANS_FUNC_CONF_1) +#define OP_MODE_MASK (0x3 << 28) +#define OP_MODE_SHIFT 28 +#define CMD_MODE_NO_GATE (0x0 << 28) +#define CMD_MODE_TE_GATE (0x1 << 28) +#define VIDEO_MODE_SYNC_EVENT (0x2 << 28) +#define VIDEO_MODE_SYNC_PULSE (0x3 << 28) +#define TE_SOURCE_GPIO (1 << 27) +#define LINK_READY (1 << 20) +#define PIX_FMT_MASK (0x3 << 16) +#define PIX_FMT_SHIFT 16 +#define PIX_FMT_RGB565 (0x0 << 16) +#define PIX_FMT_RGB666_PACKED (0x1 << 16) +#define PIX_FMT_RGB666_LOOSE (0x2 << 16) +#define PIX_FMT_RGB888 (0x3 << 16) +#define PIX_FMT_RGB101010 (0x4 << 16) +#define PIX_FMT_RGB121212 (0x5 << 16) +#define PIX_FMT_COMPRESSED (0x6 << 16) +#define BGR_TRANSMISSION (1 << 15) +#define PIX_VIRT_CHAN(x) ((x) << 12) +#define PIX_VIRT_CHAN_MASK (0x3 << 12) +#define PIX_VIRT_CHAN_SHIFT 12 +#define PIX_BUF_THRESHOLD_MASK (0x3 << 10) +#define PIX_BUF_THRESHOLD_SHIFT 10 +#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10) +#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10) +#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10) +#define PIX_BUF_THRESHOLD_FULL (0x3 << 10) +#define CONTINUOUS_CLK_MASK (0x3 << 8) +#define CONTINUOUS_CLK_SHIFT 8 +#define 
CLK_ENTER_LP_AFTER_DATA (0x0 << 8) +#define CLK_HS_OR_LP (0x2 << 8) +#define CLK_HS_CONTINUOUS (0x3 << 8) +#define LINK_CALIBRATION_MASK (0x3 << 4) +#define LINK_CALIBRATION_SHIFT 4 +#define CALIBRATION_DISABLED (0x0 << 4) +#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4) +#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4) +#define BLANKING_PACKET_ENABLE (1 << 2) +#define S3D_ORIENTATION_LANDSCAPE (1 << 1) +#define EOTP_DISABLED (1 << 0) + +#define _DSI_CMD_RXCTL_0 0x6b0d4 +#define _DSI_CMD_RXCTL_1 0x6b8d4 +#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \ + _DSI_CMD_RXCTL_0,\ + _DSI_CMD_RXCTL_1) +#define READ_UNLOADS_DW (1 << 16) +#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15) +#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14) +#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13) +#define RECEIVED_RESET_TRIGGER (1 << 12) +#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11) +#define RECEIVED_CRC_WAS_LOST (1 << 10) +#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0) +#define NUMBER_RX_PLOAD_DW_SHIFT 0 + +#define _DSI_CMD_TXCTL_0 0x6b0d0 +#define _DSI_CMD_TXCTL_1 0x6b8d0 +#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \ + _DSI_CMD_TXCTL_0,\ + _DSI_CMD_TXCTL_1) +#define KEEP_LINK_IN_HS (1 << 24) +#define FREE_HEADER_CREDIT_MASK (0x1f << 8) +#define FREE_HEADER_CREDIT_SHIFT 0x8 +#define FREE_PLOAD_CREDIT_MASK (0xff << 0) +#define FREE_PLOAD_CREDIT_SHIFT 0 +#define MAX_HEADER_CREDIT 0x10 +#define MAX_PLOAD_CREDIT 0x40 + +#define _DSI_CMD_TXHDR_0 0x6b100 +#define _DSI_CMD_TXHDR_1 0x6b900 +#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \ + _DSI_CMD_TXHDR_0,\ + _DSI_CMD_TXHDR_1) +#define PAYLOAD_PRESENT (1 << 31) +#define LP_DATA_TRANSFER (1 << 30) +#define VBLANK_FENCE (1 << 29) +#define PARAM_WC_MASK (0xffff << 8) +#define PARAM_WC_LOWER_SHIFT 8 +#define PARAM_WC_UPPER_SHIFT 16 +#define VC_MASK (0x3 << 6) +#define VC_SHIFT 6 +#define DT_MASK (0x3f << 0) +#define DT_SHIFT 0 + +#define _DSI_CMD_TXPYLD_0 0x6b104 +#define _DSI_CMD_TXPYLD_1 0x6b904 +#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \ + _DSI_CMD_TXPYLD_0,\ + _DSI_CMD_TXPYLD_1) + +#define _DSI_LP_MSG_0 0x6b0d8 +#define _DSI_LP_MSG_1 0x6b8d8 +#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \ + _DSI_LP_MSG_0,\ + _DSI_LP_MSG_1) +#define LPTX_IN_PROGRESS (1 << 17) +#define LINK_IN_ULPS (1 << 16) +#define LINK_ULPS_TYPE_LP11 (1 << 8) +#define LINK_ENTER_ULPS (1 << 0) + +/* DSI timeout registers */ +#define _DSI_HSTX_TO_0 0x6b044 +#define _DSI_HSTX_TO_1 0x6b844 +#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \ + _DSI_HSTX_TO_0,\ + _DSI_HSTX_TO_1) +#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16) +#define HSTX_TIMEOUT_VALUE_SHIFT 16 +#define HSTX_TIMEOUT_VALUE(x) ((x) << 16) +#define HSTX_TIMED_OUT (1 << 0) + +#define _DSI_LPRX_HOST_TO_0 0x6b048 +#define _DSI_LPRX_HOST_TO_1 0x6b848 +#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \ + _DSI_LPRX_HOST_TO_0,\ + _DSI_LPRX_HOST_TO_1) +#define LPRX_TIMED_OUT (1 << 16) +#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0) +#define LPRX_TIMEOUT_VALUE_SHIFT 0 +#define LPRX_TIMEOUT_VALUE(x) ((x) << 0) + +#define _DSI_PWAIT_TO_0 0x6b040 +#define _DSI_PWAIT_TO_1 0x6b840 +#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \ + _DSI_PWAIT_TO_0,\ + _DSI_PWAIT_TO_1) +#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16) +#define PRESET_TIMEOUT_VALUE_SHIFT 16 +#define PRESET_TIMEOUT_VALUE(x) ((x) << 16) +#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0) +#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0 +#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0) + +#define _DSI_TA_TO_0 0x6b04c +#define _DSI_TA_TO_1 0x6b84c +#define DSI_TA_TO(tc) _MMIO_DSI(tc, \ + _DSI_TA_TO_0,\ + _DSI_TA_TO_1) +#define TA_TIMED_OUT (1 << 16) 
+#define TA_TIMEOUT_VALUE_MASK (0xffff << 0) +#define TA_TIMEOUT_VALUE_SHIFT 0 +#define TA_TIMEOUT_VALUE(x) ((x) << 0) + +#endif /* __ICL_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index a62550711e98..e0667d163266 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -34,6 +34,8 @@ #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> +#include "i915_drv.h" +#include "i915_reg.h" #include "intel_atomic.h" #include "intel_cdclk.h" #include "intel_display_types.h" @@ -279,17 +281,6 @@ void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state) intel_crtc_put_color_blobs(crtc_state); } -void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state, - const struct intel_crtc_state *from_crtc_state) -{ - drm_property_replace_blob(&crtc_state->hw.degamma_lut, - from_crtc_state->uapi.degamma_lut); - drm_property_replace_blob(&crtc_state->hw.gamma_lut, - from_crtc_state->uapi.gamma_lut); - drm_property_replace_blob(&crtc_state->hw.ctm, - from_crtc_state->uapi.ctm); -} - /** * intel_crtc_destroy_state - destroy crtc state * @crtc: drm crtc diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index d2700c74c9da..1dc439983dd9 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -44,8 +44,6 @@ struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); void intel_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state); void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state); -void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state, - const struct intel_crtc_state *from_crtc_state); struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev); void intel_atomic_state_free(struct drm_atomic_state *state); void intel_atomic_state_clear(struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index c2c512cd8ec0..c53aa6a4c7a0 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -45,6 +45,7 @@ #include "intel_fb_pin.h" #include "intel_pm.h" #include "intel_sprite.h" +#include "skl_scaler.h" static void intel_plane_state_reset(struct intel_plane_state *plane_state, struct intel_plane *plane) @@ -109,6 +110,7 @@ intel_plane_duplicate_state(struct drm_plane *plane) intel_state->ggtt_vma = NULL; intel_state->dpt_vma = NULL; intel_state->flags = 0; + intel_state->do_async_flip = false; /* add reference to fb */ if (intel_state->hw.fb) @@ -321,6 +323,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); crtc_state->active_planes &= ~BIT(plane->id); + crtc_state->scaled_planes &= ~BIT(plane->id); crtc_state->nv12_planes &= ~BIT(plane->id); crtc_state->c8_planes &= ~BIT(plane->id); crtc_state->data_rate[plane->id] = 0; @@ -329,6 +332,185 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, plane_state->uapi.visible = false; } +/* FIXME nuke when all wm code is atomic */ +static bool intel_wm_need_update(const struct intel_plane_state *cur, + struct intel_plane_state *new) +{ + /* Update watermarks on tiling or size changes. 
*/ + if (new->uapi.visible != cur->uapi.visible) + return true; + + if (!cur->hw.fb || !new->hw.fb) + return false; + + if (cur->hw.fb->modifier != new->hw.fb->modifier || + cur->hw.rotation != new->hw.rotation || + drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || + drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || + drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || + drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) + return true; + + return false; +} + +static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state) +{ + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + int dst_w = drm_rect_width(&plane_state->uapi.dst); + int dst_h = drm_rect_height(&plane_state->uapi.dst); + + return src_w != dst_w || src_h != dst_h; +} + +static bool intel_plane_do_async_flip(struct intel_plane *plane, + const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + if (!plane->async_flip) + return false; + + if (!new_crtc_state->uapi.async_flip) + return false; + + /* + * In platforms after DISPLAY13, we might need to override + * first async flip in order to change watermark levels + * as part of optimization. + * So for those, we are checking if this is a first async flip. + * For platforms earlier than DISPLAY13 we always do async flip. + */ + return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip; +} + +static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state, + const struct intel_plane_state *old_plane_state, + struct intel_plane_state *new_plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + bool mode_changed = intel_crtc_needs_modeset(new_crtc_state); + bool was_crtc_enabled = old_crtc_state->hw.active; + bool is_crtc_enabled = new_crtc_state->hw.active; + bool turn_off, turn_on, visible, was_visible; + int ret; + + if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { + ret = skl_update_scaler_plane(new_crtc_state, new_plane_state); + if (ret) + return ret; + } + + was_visible = old_plane_state->uapi.visible; + visible = new_plane_state->uapi.visible; + + if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) + was_visible = false; + + /* + * Visibility is calculated as if the crtc was on, but + * after scaler setup everything depends on it being off + * when the crtc isn't active. + * + * FIXME this is wrong for watermarks. Watermarks should also + * be computed as if the pipe would be active. Perhaps move + * per-plane wm computation to the .check_plane() hook, and + * only combine the results from all planes in the current place? 
+ */ + if (!is_crtc_enabled) { + intel_plane_set_invisible(new_crtc_state, new_plane_state); + visible = false; + } + + if (!was_visible && !visible) + return 0; + + turn_off = was_visible && (!visible || mode_changed); + turn_on = visible && (!was_visible || mode_changed); + + drm_dbg_atomic(&dev_priv->drm, + "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", + crtc->base.base.id, crtc->base.name, + plane->base.base.id, plane->base.name, + was_visible, visible, + turn_off, turn_on, mode_changed); + + if (turn_on) { + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) + new_crtc_state->update_wm_pre = true; + + /* must disable cxsr around plane enable/disable */ + if (plane->id != PLANE_CURSOR) + new_crtc_state->disable_cxsr = true; + } else if (turn_off) { + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) + new_crtc_state->update_wm_post = true; + + /* must disable cxsr around plane enable/disable */ + if (plane->id != PLANE_CURSOR) + new_crtc_state->disable_cxsr = true; + } else if (intel_wm_need_update(old_plane_state, new_plane_state)) { + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { + /* FIXME bollocks */ + new_crtc_state->update_wm_pre = true; + new_crtc_state->update_wm_post = true; + } + } + + if (visible || was_visible) + new_crtc_state->fb_bits |= plane->frontbuffer_bit; + + /* + * ILK/SNB DVSACNTR/Sprite Enable + * IVB SPR_CTL/Sprite Enable + * "When in Self Refresh Big FIFO mode, a write to enable the + * plane will be internally buffered and delayed while Big FIFO + * mode is exiting." + * + * Which means that enabling the sprite can take an extra frame + * when we start in big FIFO mode (LP1+). Thus we need to drop + * down to LP0 and wait for vblank in order to make sure the + * sprite gets enabled on the next vblank after the register write. + * Doing otherwise would risk enabling the sprite one frame after + * we've already signalled flip completion. We can resume LP1+ + * once the sprite has been enabled. + * + * + * WaCxSRDisabledForSpriteScaling:ivb + * IVB SPR_SCALE/Scaling Enable + * "Low Power watermarks must be disabled for at least one + * frame before enabling sprite scaling, and kept disabled + * until sprite scaling is disabled." + * + * ILK/SNB DVSASCALE/Scaling Enable + * "When in Self Refresh Big FIFO mode, scaling enable will be + * masked off while Big FIFO mode is exiting." + * + * Despite the w/a only being listed for IVB we assume that + * the ILK/SNB note has similar ramifications, hence we apply + * the w/a on all three platforms. + * + * With experimental results seems this is needed also for primary + * plane, not only sprite plane. 
+ */ + if (plane->id != PLANE_CURSOR && + (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || + IS_IVYBRIDGE(dev_priv)) && + (turn_on || (!intel_plane_is_scaled(old_plane_state) && + intel_plane_is_scaled(new_plane_state)))) + new_crtc_state->disable_lp_wm = true; + + if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) + new_plane_state->do_async_flip = true; + + return 0; +} + int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, @@ -356,6 +538,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->active_planes |= BIT(plane->id); if (new_plane_state->uapi.visible && + intel_plane_is_scaled(new_plane_state)) + new_crtc_state->scaled_planes |= BIT(plane->id); + + if (new_plane_state->uapi.visible && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) new_crtc_state->nv12_planes |= BIT(plane->id); @@ -402,10 +588,11 @@ int intel_plane_atomic_check(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (new_crtc_state && new_crtc_state->bigjoiner_slave) { + if (new_crtc_state && intel_crtc_is_bigjoiner_slave(new_crtc_state)) { + struct intel_crtc *master_crtc = + intel_master_crtc(new_crtc_state); struct intel_plane *master_plane = - intel_crtc_get_plane(new_crtc_state->bigjoiner_linked_crtc, - plane->id); + intel_crtc_get_plane(master_crtc, plane->id); new_master_plane_state = intel_atomic_get_new_plane_state(state, master_plane); @@ -491,7 +678,7 @@ void intel_plane_update_arm(struct intel_plane *plane, trace_intel_plane_update_arm(&plane->base, crtc); - if (crtc_state->uapi.async_flip && plane->async_flip) + if (plane_state->do_async_flip) plane->async_flip(plane, crtc_state, plane_state, true); else plane->update_arm(plane, crtc_state, plane_state); @@ -506,8 +693,8 @@ void intel_plane_disable_arm(struct intel_plane *plane, plane->disable_arm(plane, crtc_state); } -void intel_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -535,8 +722,8 @@ void intel_update_planes_on_crtc(struct intel_atomic_state *state, } } -void skl_arm_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void skl_crtc_planes_update_arm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -570,8 +757,8 @@ void skl_arm_planes_on_crtc(struct intel_atomic_state *state, } } -void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -596,11 +783,23 @@ void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, } } +void intel_crtc_planes_update_arm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (DISPLAY_VER(i915) >= 9) + skl_crtc_planes_update_arm(state, crtc); + else + i9xx_crtc_planes_update_arm(state, crtc); +} + int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, 
struct intel_crtc_state *crtc_state, int min_scale, int max_scale, bool can_position) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_rect *src = &plane_state->uapi.src; struct drm_rect *dst = &plane_state->uapi.dst; @@ -619,7 +818,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (hscale < 0 || vscale < 0) { - DRM_DEBUG_KMS("Invalid scaling of plane\n"); + drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n"); drm_rect_debug_print("src: ", src, true); drm_rect_debug_print("dst: ", dst, false); return -ERANGE; @@ -631,7 +830,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, } /* right side of the image is on the slave crtc, adjust dst to match */ - if (crtc_state->bigjoiner_slave) + if (intel_crtc_is_bigjoiner_slave(crtc_state)) drm_rect_translate(dst, -crtc_state->pipe_src_w, 0); /* @@ -644,7 +843,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, if (!can_position && plane_state->uapi.visible && !drm_rect_equals(dst, &clip)) { - DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); + drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n"); drm_rect_debug_print("dst: ", dst, false); drm_rect_debug_print("clip: ", &clip, false); return -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 7907f601598e..f4763a53541e 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -16,6 +16,7 @@ struct intel_crtc; struct intel_crtc_state; struct intel_plane; struct intel_plane_state; +enum plane_id; unsigned int intel_adjusted_rate(const struct drm_rect *src, const struct drm_rect *dst, @@ -43,22 +44,16 @@ void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); -void intel_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); -void skl_arm_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); -void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); +void intel_crtc_planes_update_noarm(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_crtc_planes_update_arm(struct intel_atomic_state *state, + struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state); int intel_plane_atomic_check(struct intel_atomic_state *state, struct intel_plane *plane); -int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *crtc_state, - const struct intel_plane_state *old_plane_state, - struct intel_plane_state *plane_state); int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, struct intel_plane *plane, bool *need_cdclk_calc); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 9523411cddd8..98f7ea44042f 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c 
@@ -13,6 +13,7 @@ #include "intel_dp_aux_backlight.h" #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" +#include "intel_pci_config.h" /** * scale - scale values from one range to another @@ -433,6 +434,8 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; + intel_backlight_set_pwm_level(old_conn_state, level); + panel->backlight.pwm_state.enabled = false; pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 1ff1f1c426b2..40b5e7ed12c2 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -32,6 +32,7 @@ #include "display/intel_gmbus.h" #include "i915_drv.h" +#include "i915_reg.h" #define _INTEL_BIOS_PRIVATE #include "intel_vbt_defs.h" @@ -595,6 +596,12 @@ parse_general_features(struct drm_i915_private *i915, } else { i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; } + + if (bdb->version >= 249 && general->afc_startup_config) { + i915->vbt.override_afc_startup = true; + i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7; + } + drm_dbg_kms(&i915->drm, "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", i915->vbt.int_tv_support, @@ -905,26 +912,6 @@ parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb) i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 : psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames; - switch (psr_table->lines_to_wait) { - case 0: - i915->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT; - break; - case 1: - i915->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT; - break; - case 2: - i915->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT; - break; - case 3: - i915->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT; - break; - default: - drm_dbg_kms(&i915->drm, - "VBT has unknown PSR lines to wait %u\n", - psr_table->lines_to_wait); - break; - } - /* * New psr options 0=500us, 1=100us, 2=2500us, 3=0us * Old decimal value is wake up time in multiples of 100 us. 
@@ -2073,14 +2060,16 @@ static void parse_ddi_port(struct drm_i915_private *i915, i915->vbt.ports[port] = devdata; } +static bool has_ddi_port_info(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) >= 5 || IS_G4X(i915); +} + static void parse_ddi_ports(struct drm_i915_private *i915) { struct intel_bios_encoder_data *devdata; - if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915)) - return; - - if (i915->vbt.version < 155) + if (!has_ddi_port_info(i915)) return; list_for_each_entry(devdata, &i915->vbt.display_devices, node) @@ -2335,6 +2324,63 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) return vbt; } +static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) +{ + u32 count, data, found, store = 0; + u32 static_region, oprom_offset; + u32 oprom_size = 0x200000; + u16 vbt_size; + u32 *vbt; + + static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS); + static_region &= OPTIONROM_SPI_REGIONID_MASK; + intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region); + + oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET); + oprom_offset &= OROM_OFFSET_MASK; + + for (count = 0; count < oprom_size; count += 4) { + intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, oprom_offset + count); + data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); + + if (data == *((const u32 *)"$VBT")) { + found = oprom_offset + count; + break; + } + } + + if (count >= oprom_size) + goto err_not_found; + + /* Get VBT size and allocate space for the VBT */ + intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + + offsetof(struct vbt_header, vbt_size)); + vbt_size = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); + vbt_size &= 0xffff; + + vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); + if (!vbt) + goto err_not_found; + + for (count = 0; count < vbt_size; count += 4) { + intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + count); + data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); + *(vbt + store++) = data; + } + + if (!intel_bios_is_valid_vbt(vbt, vbt_size)) + goto err_free_vbt; + + drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n"); + + return (struct vbt_header *)vbt; + +err_free_vbt: + kfree(vbt); +err_not_found: + return NULL; +} + static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -2384,6 +2430,8 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) pci_unmap_rom(pdev, oprom); + drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n"); + return vbt; err_free_vbt: @@ -2418,17 +2466,23 @@ void intel_bios_init(struct drm_i915_private *i915) init_vbt_defaults(i915); - /* If the OpRegion does not have VBT, look in PCI ROM. 
*/ + /* + * If the OpRegion does not have VBT, look in SPI flash through MMIO or + * PCI mapping + */ + if (!vbt && IS_DGFX(i915)) { + oprom_vbt = spi_oprom_get_vbt(i915); + vbt = oprom_vbt; + } + if (!vbt) { oprom_vbt = oprom_get_vbt(i915); - if (!oprom_vbt) - goto out; - vbt = oprom_vbt; - - drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n"); } + if (!vbt) + goto out; + bdb = get_bdb_header(vbt); i915->vbt.version = bdb->version; @@ -2596,37 +2650,10 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) */ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; - static const struct { - u16 dp, hdmi; - } port_mapping[] = { - [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, - [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, - [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, - [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, - [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, - }; - - if (HAS_DDI(i915)) - return i915->vbt.ports[port]; - - /* FIXME maybe deal with port A as well? */ - if (drm_WARN_ON(&i915->drm, - port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) - return false; - - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - child = &devdata->child; - - if ((child->dvo_port == port_mapping[port].dp || - child->dvo_port == port_mapping[port].hdmi) && - (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | - DEVICE_TYPE_DISPLAYPORT_OUTPUT))) - return true; - } + if (WARN_ON(!has_ddi_port_info(i915))) + return true; - return false; + return i915->vbt.ports[port]; } /** @@ -2638,40 +2665,18 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) */ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; - static const short port_mapping[] = { - [PORT_B] = DVO_PORT_DPB, - [PORT_C] = DVO_PORT_DPC, - [PORT_D] = DVO_PORT_DPD, - [PORT_E] = DVO_PORT_DPE, - [PORT_F] = DVO_PORT_DPF, - }; - - if (HAS_DDI(i915)) { - const struct intel_bios_encoder_data *devdata; - - devdata = intel_bios_encoder_data_lookup(i915, port); + const struct intel_bios_encoder_data *devdata = + intel_bios_encoder_data_lookup(i915, port); - return devdata && intel_bios_encoder_supports_edp(devdata); - } - - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - child = &devdata->child; - - if (child->dvo_port == port_mapping[port] && - (child->device_type & DEVICE_TYPE_eDP_BITS) == - (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) - return true; - } - - return false; + return devdata && intel_bios_encoder_supports_edp(devdata); } -static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) +static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) { - if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != - (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) + const struct child_device_config *child = &devdata->child; + + if (!intel_bios_encoder_supports_dp(devdata) || + !intel_bios_encoder_supports_hdmi(devdata)) return false; if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA) @@ -2688,40 +2693,10 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, enum port port) { - static const struct { - u16 dp, hdmi; - } port_mapping[] = { - /* - * Buggy VBTs may declare 
DP ports as having - * HDMI type dvo_port :( So let's check both. - */ - [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, - [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, - [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, - [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, - [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, - }; - const struct intel_bios_encoder_data *devdata; - - if (HAS_DDI(i915)) { - const struct intel_bios_encoder_data *devdata; - - devdata = intel_bios_encoder_data_lookup(i915, port); + const struct intel_bios_encoder_data *devdata = + intel_bios_encoder_data_lookup(i915, port); - return devdata && child_dev_is_dp_dual_mode(&devdata->child); - } - - if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) - return false; - - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - if ((devdata->child.dvo_port == port_mapping[port].dp || - devdata->child.dvo_port == port_mapping[port].hdmi) && - child_dev_is_dp_dual_mode(&devdata->child)) - return true; - } - - return false; + return devdata && intel_bios_encoder_supports_dp_dual_mode(devdata); } /** diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 2da4aacc956b..ad1564ca7269 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -5,10 +5,12 @@ #include <drm/drm_atomic_state_helper.h> +#include "i915_reg.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_display_types.h" +#include "intel_mchbar_regs.h" #include "intel_pcode.h" #include "intel_pm.h" @@ -75,10 +77,9 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, u16 dclk; int ret; - ret = sandybridge_pcode_read(dev_priv, - ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), - &val, &val2); + ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), + &val, &val2); if (ret) return ret; @@ -102,10 +103,8 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, int ret; int i; - ret = sandybridge_pcode_read(dev_priv, - ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, - &val, NULL); + ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL); if (ret) return ret; @@ -675,6 +674,49 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state) return to_intel_bw_state(bw_state); } +static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe]; + enum plane_id plane_id; + + memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw)); + + if (!crtc_state->hw.active) + return; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + const struct skl_ddb_entry *ddb_uv = + &crtc_state->wm.skl.plane_ddb_uv[plane_id]; + unsigned int data_rate = crtc_state->data_rate[plane_id]; + unsigned int dbuf_mask = 0; + enum dbuf_slice slice; + + dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_y); + dbuf_mask |= skl_ddb_dbuf_slice_mask(i915, ddb_uv); + + /* + * FIXME: To calculate that more properly we probably + * need to split per plane data_rate into data_rate_y + * and data_rate_uv for multiplanar formats in order not + * to get accounted 
those twice if they happen to reside + * on different slices. + * However for pre-icl this would work anyway because + * we have only single slice and for icl+ uv plane has + * non-zero data rate. + * So in worst case those calculation are a bit + * pessimistic, which shouldn't pose any significant + * problem anyway. + */ + for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) + crtc_bw->used_bw[slice] += data_rate; + } +} + int skl_bw_calc_min_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); @@ -687,50 +729,13 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state) int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - enum plane_id plane_id; - struct intel_dbuf_bw *crtc_bw; - new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state); old_bw_state = intel_atomic_get_old_bw_state(state); - crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe]; - - memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw)); - - if (!crtc_state->hw.active) - continue; - - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_ddb_entry *plane_alloc = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - const struct skl_ddb_entry *uv_plane_alloc = - &crtc_state->wm.skl.plane_ddb_uv[plane_id]; - unsigned int data_rate = crtc_state->data_rate[plane_id]; - unsigned int dbuf_mask = 0; - enum dbuf_slice slice; - - dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc); - dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc); - - /* - * FIXME: To calculate that more properly we probably - * need to to split per plane data_rate into data_rate_y - * and data_rate_uv for multiplanar formats in order not - * to get accounted those twice if they happen to reside - * on different slices. - * However for pre-icl this would work anyway because - * we have only single slice and for icl+ uv plane has - * non-zero data rate. - * So in worst case those calculation are a bit - * pessimistic, which shouldn't pose any significant - * problem anyway. 
- */ - for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask) - crtc_bw->used_bw[slice] += data_rate; - } + skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state); } if (!old_bw_state) @@ -811,25 +816,11 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state) return 0; } -int intel_bw_atomic_check(struct intel_atomic_state *state) +static u16 icl_qgv_points_mask(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *new_crtc_state, *old_crtc_state; - struct intel_bw_state *new_bw_state = NULL; - const struct intel_bw_state *old_bw_state = NULL; - unsigned int data_rate; - unsigned int num_active_planes; - struct intel_crtc *crtc; - int i, ret; - u32 allowed_points = 0; - unsigned int max_bw_point = 0, max_bw = 0; - unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; - unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; - u32 mask = 0; - - /* FIXME earlier gens need some checks too */ - if (DISPLAY_VER(dev_priv) < 11) - return 0; + unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points; + unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points; + u16 mask = 0; /* * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects @@ -842,6 +833,16 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) if (num_psf_gv_points > 0) mask |= REG_GENMASK(num_psf_gv_points - 1, 0) << ADLS_PSF_PT_SHIFT; + return mask; +} + +static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; + int i; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { unsigned int old_data_rate = @@ -852,6 +853,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) intel_bw_crtc_num_active_planes(old_crtc_state); unsigned int new_active_planes = intel_bw_crtc_num_active_planes(new_crtc_state); + struct intel_bw_state *new_bw_state; /* * Avoid locking the bw state when @@ -868,14 +870,53 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) new_bw_state->data_rate[crtc->pipe] = new_data_rate; new_bw_state->num_active_planes[crtc->pipe] = new_active_planes; - drm_dbg_kms(&dev_priv->drm, - "pipe %c data rate %u num active planes %u\n", - pipe_name(crtc->pipe), + *changed = true; + + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] data rate %u num active planes %u\n", + crtc->base.base.id, crtc->base.name, new_bw_state->data_rate[crtc->pipe], new_bw_state->num_active_planes[crtc->pipe]); } - if (!new_bw_state) + return 0; +} + +int intel_bw_atomic_check(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_bw_state *old_bw_state; + struct intel_bw_state *new_bw_state; + unsigned int data_rate; + unsigned int num_active_planes; + int i, ret; + u32 allowed_points = 0; + unsigned int max_bw_point = 0, max_bw = 0; + unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; + unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; + bool changed = false; + + /* FIXME earlier gens need some checks too */ + if (DISPLAY_VER(dev_priv) < 11) + return 0; + + ret = intel_bw_check_data_rate(state, &changed); + if (ret) + return ret; + + old_bw_state = intel_atomic_get_old_bw_state(state); + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (new_bw_state && + 
intel_can_enable_sagv(dev_priv, old_bw_state) != + intel_can_enable_sagv(dev_priv, new_bw_state)) + changed = true; + + /* + * If none of our inputs (data rates, number of active + * planes, SAGV yes/no) changed then nothing to do here. + */ + if (!changed) return 0; ret = intel_atomic_lock_global_state(&new_bw_state->base); @@ -959,9 +1000,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) * We store the ones which need to be masked as that is what PCode * actually accepts as a parameter. */ - new_bw_state->qgv_points_mask = ~allowed_points & mask; + new_bw_state->qgv_points_mask = ~allowed_points & + icl_qgv_points_mask(dev_priv); - old_bw_state = intel_atomic_get_old_bw_state(state); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking) diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 46c6eecbd917..0ceaed1c9656 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -30,19 +30,19 @@ struct intel_bw_state { */ u8 pipe_sagv_reject; + /* bitmask of active pipes */ + u8 active_pipes; + /* * Current QGV points mask, which restricts * some particular SAGV states, not to confuse * with pipe_sagv_mask. */ - u8 qgv_points_mask; + u16 qgv_points_mask; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; - /* bitmask of active pipes */ - u8 active_pipes; - int min_cdclk; }; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index c30cf8d2b835..8888fda8b701 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -23,6 +23,7 @@ #include <linux/time.h> +#include "hsw_ips.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_audio.h" @@ -31,6 +32,8 @@ #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_mchbar_regs.h" +#include "intel_pci_config.h" #include "intel_pcode.h" #include "intel_psr.h" #include "vlv_sideband.h" @@ -63,6 +66,17 @@ * dividers can be programmed correctly. 
*/ +struct intel_cdclk_funcs { + void (*get_cdclk)(struct drm_i915_private *i915, + struct intel_cdclk_config *cdclk_config); + void (*set_cdclk)(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config, + enum pipe pipe); + int (*bw_calc_min_cdclk)(struct intel_atomic_state *state); + int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); + u8 (*calc_voltage_level)(int cdclk); +}; + void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { @@ -793,8 +807,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, "trying to change cdclk frequency with cdclk not enabled\n")) return; - ret = sandybridge_pcode_write(dev_priv, - BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); + ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { drm_err(&dev_priv->drm, "failed to inform pcode about cdclk change\n"); @@ -822,8 +835,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); - sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_config->voltage_level); + snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_config->voltage_level); intel_de_write(dev_priv, CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); @@ -1126,8 +1139,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, intel_de_posting_read(dev_priv, CDCLK_CTL); /* inform PCU of the change */ - sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_config->voltage_level); + snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + cdclk_config->voltage_level); intel_update_cdclk(dev_priv); } @@ -1145,7 +1158,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) goto sanitize; intel_update_cdclk(dev_priv); - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? */ if (dev_priv->cdclk.hw.vco == 0 || @@ -1614,7 +1627,7 @@ static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) /* Timeout 200us */ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1)) - DRM_ERROR("timeout waiting for FREQ change request ack\n"); + drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n"); val &= ~BXT_DE_PLL_FREQ_REQ; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); @@ -1705,10 +1718,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * BSpec requires us to wait up to 150usec, but that leads to * timeouts; the 2ms used here is based on experiment. 
*/ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - 0x80000000, 150, 2); - + ret = snb_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + 0x80000000, 150, 2); if (ret) { drm_err(&dev_priv->drm, "Failed to inform PCU about cdclk change (err %d, freq %d)\n", @@ -1769,8 +1781,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); if (DISPLAY_VER(dev_priv) >= 11) { - ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_config->voltage_level); + ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + cdclk_config->voltage_level); } else { /* * The timeout isn't specified, the 2ms used here is based on @@ -1778,10 +1790,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * FIXME: Waiting for the request completion could be delayed * until the next PCODE request based on BSpec. */ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_config->voltage_level, - 150, 2); + ret = snb_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_config->voltage_level, + 150, 2); } if (ret) { @@ -1807,7 +1819,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) int cdclk, clock, vco; intel_update_cdclk(dev_priv); - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); if (dev_priv->cdclk.hw.vco == 0 || dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) @@ -2047,13 +2059,14 @@ static bool intel_cdclk_changed(const struct intel_cdclk_config *a, a->voltage_level != b->voltage_level; } -void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, +void intel_cdclk_dump_config(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config, const char *context) { - DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", - context, cdclk_config->cdclk, cdclk_config->vco, - cdclk_config->ref, cdclk_config->bypass, - cdclk_config->voltage_level); + drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", + context, cdclk_config->cdclk, cdclk_config->vco, + cdclk_config->ref, cdclk_config->bypass, + cdclk_config->voltage_level); } /** @@ -2077,7 +2090,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk)) return; - intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to"); + intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to"); for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2120,8 +2133,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, if (drm_WARN(&dev_priv->drm, intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]"); - intel_dump_cdclk_config(cdclk_config, "[sw state]"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]"); + intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); } } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index fc638522e445..df66f66fbad0 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -8,7 +8,6 @@ #include <linux/types.h> -#include "i915_drv.h" #include 
"intel_display.h" #include "intel_global_state.h" @@ -16,6 +15,11 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; +struct intel_cdclk_config { + unsigned int cdclk, vco, ref, bypass; + u8 voltage_level; +}; + struct intel_cdclk_state { struct intel_global_state base; @@ -58,7 +62,8 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b); void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state); void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); -void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, +void intel_cdclk_dump_config(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config, const char *context); int intel_modeset_calc_cdclk(struct intel_atomic_state *state); void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index de3ded1e327a..e94ec57260f1 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -28,6 +28,25 @@ #include "intel_dpll.h" #include "vlv_dsi_pll.h" +struct intel_color_funcs { + int (*color_check)(struct intel_crtc_state *crtc_state); + /* + * Program double buffered color management registers during + * vblank evasion. The registers should then latch during the + * next vblank start, alongside any other double buffered registers + * involved with the same commit. + */ + void (*color_commit)(const struct intel_crtc_state *crtc_state); + /* + * Load LUTs (and other single buffered color management + * registers). Will (hopefully) be called during the vblank + * following the latching of any double buffered registers + * involved with the same commit. 
+ */ + void (*load_luts)(const struct intel_crtc_state *crtc_state); + void (*read_luts)(struct intel_crtc_state *crtc_state); +}; + #define CTM_COEFF_SIGN (1ULL << 63) #define CTM_COEFF_1_0 (1ULL << 32) @@ -160,29 +179,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]); - intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]); - intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]); + intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]); + intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]); + intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]); - intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe), - coeff[0] << 16 | coeff[1]); - intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); + intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe), + coeff[0] << 16 | coeff[1]); + intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); - intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe), - coeff[3] << 16 | coeff[4]); - intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); + intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe), + coeff[3] << 16 | coeff[4]); + intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); - intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe), - coeff[6] << 16 | coeff[7]); - intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); + intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe), + coeff[6] << 16 | coeff[7]); + intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); if (DISPLAY_VER(dev_priv) >= 7) { - intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe), - postoff[0]); - intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe), - postoff[1]); - intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe), - postoff[2]); + intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_HI(pipe), + postoff[0]); + intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_ME(pipe), + postoff[1]); + intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_LO(pipe), + postoff[2]); } } @@ -194,28 +213,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), - coeff[0] << 16 | coeff[1]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe), - coeff[2] << 16); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), + coeff[0] << 16 | coeff[1]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe), + coeff[2] << 16); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), - coeff[3] << 16 | coeff[4]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe), - coeff[5] << 16); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), + coeff[3] << 16 | coeff[4]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe), + coeff[5] << 16); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), - coeff[6] << 
16 | coeff[7]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe), - coeff[8] << 16); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), + coeff[6] << 16 | coeff[7]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe), + coeff[8] << 16); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); - intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); + intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); } static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) @@ -319,8 +338,8 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) ilk_csc_off_zero); } - intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe), - crtc_state->csc_mode); + intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe), + crtc_state->csc_mode); } static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) @@ -346,8 +365,8 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) ilk_csc_postoff_limited_range); } - intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe), - crtc_state->csc_mode); + intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe), + crtc_state->csc_mode); } static void chv_load_cgm_csc(struct intel_crtc *crtc, @@ -377,16 +396,16 @@ static void chv_load_cgm_csc(struct intel_crtc *crtc, coeffs[i] |= (abs_coeff >> 20) & 0xfff; } - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe), - coeffs[1] << 16 | coeffs[0]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe), - coeffs[3] << 16 | coeffs[2]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe), - coeffs[5] << 16 | coeffs[4]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe), - coeffs[7] << 16 | coeffs[6]); - intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), - coeffs[8]); + intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF01(pipe), + coeffs[1] << 16 | coeffs[0]); + intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF23(pipe), + coeffs[3] << 16 | coeffs[2]); + intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF45(pipe), + coeffs[5] << 16 | coeffs[4]); + intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF67(pipe), + coeffs[7] << 16 | coeffs[6]); + intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), + coeffs[8]); } /* convert hw value with given bit_precision to lut property val */ diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index f628e0542933..4dfe77351b8b 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -4,6 +4,7 @@ */ #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include "intel_de.h" #include "intel_display_types.h" diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h new file mode 100644 index 000000000000..2ed65193ca19 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_COMBO_PHY_REGS__ +#define __INTEL_COMBO_PHY_REGS__ + +#include "i915_reg_defs.h" + +#define _ICL_COMBOPHY_A 0x162000 +#define _ICL_COMBOPHY_B 0x6C000 +#define _EHL_COMBOPHY_C 0x160000 +#define _RKL_COMBOPHY_D 0x161000 +#define _ADL_COMBOPHY_E 0x16B000 + 
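The per-PHY base addresses defined above are selected by the _ICL_COMBOPHY() macro that follows, and each dword register then sits at base + block offset + 4 * (dw), as _ICL_PORT_CL_DW() shows next. A small user-space sketch of that arithmetic, reusing the base values from the defines; the demo_* helper is illustrative, not i915 code:

    #include <stdint.h>
    #include <stdio.h>

    /* Base MMIO offsets copied from the defines above; names are illustrative. */
    static const uint32_t demo_combophy_base[] = {
            0x162000,       /* _ICL_COMBOPHY_A */
            0x06C000,       /* _ICL_COMBOPHY_B */
            0x160000,       /* _EHL_COMBOPHY_C */
            0x161000,       /* _RKL_COMBOPHY_D */
            0x16B000,       /* _ADL_COMBOPHY_E */
    };

    /* Mirrors the base + 4 * (dw) layout used by the CL_DW registers. */
    static uint32_t demo_port_cl_dw(unsigned int phy, unsigned int dw)
    {
            return demo_combophy_base[phy] + 4 * dw;
    }

    int main(void)
    {
            /* ICL_PORT_CL_DW5 on PHY A resolves to 0x162000 + 0x14 = 0x162014 */
            printf("0x%08x\n", demo_port_cl_dw(0, 5));
            return 0;
    }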
+#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \ + _ICL_COMBOPHY_B, \ + _EHL_COMBOPHY_C, \ + _RKL_COMBOPHY_D, \ + _ADL_COMBOPHY_E) + +/* ICL Port CL_DW registers */ +#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ + 4 * (dw)) + +#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy)) +#define CL_POWER_DOWN_ENABLE (1 << 4) +#define SUS_CLOCK_CONFIG (3 << 0) + +#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy)) +#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) +#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 +#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) +#define PWR_UP_ALL_LANES (0x0 << 4) +#define PWR_DOWN_LN_3_2_1 (0xe << 4) +#define PWR_DOWN_LN_3_2 (0xc << 4) +#define PWR_DOWN_LN_3 (0x8 << 4) +#define PWR_DOWN_LN_2_1_0 (0x7 << 4) +#define PWR_DOWN_LN_1_0 (0x3 << 4) +#define PWR_DOWN_LN_3_1 (0xa << 4) +#define PWR_DOWN_LN_3_1_0 (0xb << 4) +#define PWR_DOWN_LN_MASK (0xf << 4) +#define PWR_DOWN_LN_SHIFT 4 +#define EDP4K2K_MODE_OVRD_EN (1 << 3) +#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2) + +#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) +#define ICL_LANE_ENABLE_AUX (1 << 0) + +/* ICL Port COMP_DW registers */ +#define _ICL_PORT_COMP 0x100 +#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_COMP + 4 * (dw)) + +#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy)) +#define COMP_INIT (1 << 31) + +#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy)) + +#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy)) +#define PROCESS_INFO_DOT_0 (0 << 26) +#define PROCESS_INFO_DOT_1 (1 << 26) +#define PROCESS_INFO_DOT_4 (2 << 26) +#define PROCESS_INFO_MASK (7 << 26) +#define PROCESS_INFO_SHIFT 26 +#define VOLTAGE_INFO_0_85V (0 << 24) +#define VOLTAGE_INFO_0_95V (1 << 24) +#define VOLTAGE_INFO_1_05V (2 << 24) +#define VOLTAGE_INFO_MASK (3 << 24) +#define VOLTAGE_INFO_SHIFT 24 + +#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy)) +#define IREFGEN (1 << 24) + +#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy)) + +#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy)) + +/* ICL Port PCS registers */ +#define _ICL_PORT_PCS_AUX 0x300 +#define _ICL_PORT_PCS_GRP 0x600 +#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) +#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_PCS_AUX + 4 * (dw)) +#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_PCS_GRP + 4 * (dw)) +#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_PCS_LN(ln) + 4 * (dw)) +#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) +#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) +#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) +#define DCC_MODE_SELECT_MASK (0x3 << 20) +#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) +#define COMMON_KEEPER_EN (1 << 26) +#define LATENCY_OPTIM_MASK (0x3 << 2) +#define LATENCY_OPTIM_VAL(x) ((x) << 2) + +/* ICL Port TX registers */ +#define _ICL_PORT_TX_AUX 0x380 +#define _ICL_PORT_TX_GRP 0x680 +#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) + +#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_TX_AUX + 4 * (dw)) +#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_TX_GRP + 4 * (dw)) +#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_TX_LN(ln) + 4 * (dw)) + +#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) +#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) 
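Several TX fields pack a value across non-adjacent bits; the DW2 swing-select macros that follow (SWING_SEL_UPPER()/SWING_SEL_LOWER()) put bit 3 of a 4-bit swing selector at register bit 15 and bits 2:0 at bits 13:11. A user-space sketch of the split, with DEMO_* copies of those helpers; only the bit arithmetic is taken from the header, the rest is illustrative:

    #include <stdint.h>
    #include <assert.h>

    /* Copies of the field helpers defined just below, with a demo_ prefix. */
    #define DEMO_SWING_SEL_UPPER(x) ((((uint32_t)(x)) >> 3) << 15)  /* value bit 3 -> register bit 15 */
    #define DEMO_SWING_SEL_LOWER(x) ((((uint32_t)(x)) & 0x7) << 11) /* value bits 2:0 -> register bits 13:11 */

    int main(void)
    {
            uint32_t swing = 0xA;   /* 0b1010, a 4-bit voltage-swing selector */
            uint32_t val = DEMO_SWING_SEL_UPPER(swing) | DEMO_SWING_SEL_LOWER(swing);

            /* 0b1 lands in bit 15, 0b010 in bits 13:11 -> 0x8000 | 0x1000 */
            assert(val == 0x9000);
            return 0;
    }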
+#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy)) +#define SWING_SEL_UPPER(x) (((x) >> 3) << 15) +#define SWING_SEL_UPPER_MASK (1 << 15) +#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) +#define SWING_SEL_LOWER_MASK (0x7 << 11) +#define FRC_LATENCY_OPTIM_MASK (0x7 << 8) +#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) +#define RCOMP_SCALAR(x) ((x) << 0) +#define RCOMP_SCALAR_MASK (0xFF << 0) + +#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) +#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) +#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) +#define LOADGEN_SELECT (1 << 31) +#define POST_CURSOR_1(x) ((x) << 12) +#define POST_CURSOR_1_MASK (0x3F << 12) +#define POST_CURSOR_2(x) ((x) << 6) +#define POST_CURSOR_2_MASK (0x3F << 6) +#define CURSOR_COEFF(x) ((x) << 0) +#define CURSOR_COEFF_MASK (0x3F << 0) + +#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) +#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) +#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy)) +#define TX_TRAINING_EN (1 << 31) +#define TAP2_DISABLE (1 << 30) +#define TAP3_DISABLE (1 << 29) +#define SCALING_MODE_SEL(x) ((x) << 18) +#define SCALING_MODE_SEL_MASK (0x7 << 18) +#define RTERM_SELECT(x) ((x) << 3) +#define RTERM_SELECT_MASK (0x7 << 3) + +#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) +#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) +#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) +#define N_SCALAR(x) ((x) << 24) +#define N_SCALAR_MASK (0x7F << 24) + +#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy)) +#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy)) +#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy)) +#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31) +#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29) +#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1) + +#define _ICL_DPHY_CHKN_REG 0x194 +#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) +#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) + +#endif /* __INTEL_COMBO_PHY_REGS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h index 6c5c44600cbd..c6071efd93ce 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.h +++ b/drivers/gpu/drm/i915/display/intel_crt.h @@ -6,7 +6,7 @@ #ifndef __INTEL_CRT_H__ #define __INTEL_CRT_H__ -#include "i915_reg.h" +#include "i915_reg_defs.h" enum pipe; struct drm_encoder; diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 16c3ca66d9f0..08ee3e17ee5c 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -12,6 +12,7 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_vblank_work.h> +#include "i915_irq.h" #include "i915_vgpu.h" #include "i9xx_plane.h" #include "icl_dsi.h" diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 16d34685d83f..2ade8fdd9bdd 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -51,16 +51,16 @@ static u32 intel_cursor_position(const struct intel_plane_state *plane_state) u32 pos = 0; if (x < 0) { - pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + pos |= CURSOR_POS_X_SIGN; x = -x; } - pos |= x << 
CURSOR_X_SHIFT; + pos |= CURSOR_POS_X(x); if (y < 0) { - pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + pos |= CURSOR_POS_Y_SIGN; y = -y; } - pos |= y << CURSOR_Y_SHIFT; + pos |= CURSOR_POS_Y(y); return pos; } @@ -180,7 +180,7 @@ static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 cntl = 0; if (crtc_state->gamma_enable) - cntl |= CURSOR_GAMMA_ENABLE; + cntl |= CURSOR_PIPE_GAMMA_ENABLE; return cntl; } @@ -264,7 +264,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane, cntl = plane_state->ctl | i845_cursor_ctl_crtc(crtc_state); - size = (height << 12) | width; + size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); @@ -280,7 +280,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane, plane->cursor.cntl != cntl) { intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); - intel_de_write_fw(dev_priv, CURSIZE, size); + intel_de_write_fw(dev_priv, CURSIZE(PIPE_A), size); intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); @@ -340,13 +340,13 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) return cntl; if (crtc_state->gamma_enable) - cntl = MCURSOR_GAMMA_ENABLE; + cntl = MCURSOR_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) cntl |= MCURSOR_PIPE_CSC_ENABLE; if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); + cntl |= MCURSOR_PIPE_SEL(crtc->pipe); return cntl; } @@ -502,7 +502,7 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane, i9xx_cursor_ctl_crtc(crtc_state); if (width != height) - fbc_ctl = CUR_FBC_CTL_EN | (height - 1); + fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); @@ -586,13 +586,12 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); - ret = val & MCURSOR_MODE; + ret = val & MCURSOR_MODE_MASK; if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) *pipe = plane->pipe; else - *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> - MCURSOR_PIPE_SELECT_SHIFT; + *pipe = REG_FIELD_GET(MCURSOR_PIPE_SEL_MASK, val); intel_display_power_put(dev_priv, power_domain, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index cab505277595..e4260806c2a4 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -32,6 +32,7 @@ #include "intel_audio.h" #include "intel_backlight.h" #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" @@ -56,6 +57,7 @@ #include "intel_snps_phy.h" #include "intel_sprite.h" #include "intel_tc.h" +#include "intel_tc_phy_regs.h" #include "intel_vdsc.h" #include "intel_vrr.h" #include "skl_scaler.h" @@ -2287,116 +2289,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) OVERLAP_PIXELS_MASK, dss1); } -static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - bool is_mst = 
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); - - intel_dp_set_link_params(intel_dp, crtc_state->port_clock, - crtc_state->lane_count); - - /* - * We only configure what the register value will be here. Actual - * enabling happens during link training farther down. - */ - intel_ddi_init_dp_buf_reg(encoder, crtc_state); - - /* - * 1. Enable Power Wells - * - * This was handled at the beginning of intel_atomic_commit_tail(), - * before we called down into this function. - */ - - /* 2. Enable Panel Power if PPS is required */ - intel_pps_on(intel_dp); - - /* - * 3. Enable the port PLL. - */ - intel_ddi_enable_clock(encoder, crtc_state); - - /* 4. Enable IO power */ - if (!intel_tc_port_in_tbt_alt_mode(dig_port)) - dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, - dig_port->ddi_io_power_domain); - - /* - * 5. The rest of the below are substeps under the bspec's "Enable and - * Train Display Port" step. Note that steps that are specific to - * MST will be handled by intel_mst_pre_enable_dp() before/after it - * calls into this function. Also intel_mst_pre_enable_dp() only calls - * us when active_mst_links==0, so any steps designated for "single - * stream or multi-stream master transcoder" can just be performed - * unconditionally here. - */ - - /* - * 5.a Configure Transcoder Clock Select to direct the Port clock to the - * Transcoder. - */ - intel_ddi_enable_pipe_clock(encoder, crtc_state); - - /* 5.b Configure transcoder for DP 2.0 128b/132b */ - intel_ddi_config_transcoder_dp2(encoder, crtc_state); - - /* - * 5.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST - * Transport Select - */ - intel_ddi_config_transcoder_func(encoder, crtc_state); - - /* - * 5.d Configure & enable DP_TP_CTL with link training pattern 1 - * selected - * - * This will be handled by the intel_dp_start_link_train() farther - * down this function. 
- */ - - /* 5.e Configure voltage swing and related IO settings */ - encoder->set_signal_levels(encoder, crtc_state); - - if (!is_mst) - intel_dp_set_power(intel_dp, DP_SET_POWER_D0); - - intel_dp_configure_protocol_converter(intel_dp, crtc_state); - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); - /* - * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit - * in the FEC_CONFIGURATION register to 1 before initiating link - * training - */ - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); - intel_dp_check_frl_training(intel_dp); - intel_dp_pcon_dsc_configure(intel_dp, crtc_state); - - /* - * 5.h Follow DisplayPort specification training sequence (see notes for - * failure handling) - * 5.i If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle - * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent) - * (timeout after 800 us) - */ - intel_dp_start_link_train(intel_dp, crtc_state); - - /* 5.j Set DP_TP_CTL link training to Normal */ - if (!is_trans_port_sync_mode(crtc_state)) - intel_dp_stop_link_train(intel_dp, crtc_state); - - /* 5.k Configure and enable FEC if needed */ - intel_ddi_enable_fec(encoder, crtc_state); - - intel_dsc_dp_pps_write(encoder, crtc_state); - - intel_dsc_enable(crtc_state); -} - static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -2470,6 +2362,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, */ intel_ddi_enable_pipe_clock(encoder, crtc_state); + if (HAS_DP20(dev_priv)) + intel_ddi_config_transcoder_dp2(encoder, crtc_state); + /* * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST * Transport Select @@ -2530,9 +2425,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); - - if (!crtc_state->bigjoiner) - intel_dsc_enable(crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2598,9 +2490,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_pipe_clock(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); - - if (!crtc_state->bigjoiner) - intel_dsc_enable(crtc_state); } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2610,9 +2499,7 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (IS_DG2(dev_priv)) - dg2_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); - else if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 12) tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); else hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); @@ -2620,11 +2507,8 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, /* MST will call a setting of MSA after an allocating of Virtual Channel * from MST encoder pre_enable callback. 
*/ - if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) intel_ddi_set_dp_msa(crtc_state, conn_state); - - intel_dp_set_m_n(crtc_state, M1_N1); - } } static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, @@ -2819,6 +2703,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); bool is_tc_port = intel_phy_is_tc(dev_priv, phy); + struct intel_crtc *slave_crtc; if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { intel_crtc_vblank_off(old_crtc_state); @@ -2837,9 +2722,8 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, ilk_pfit_disable(old_crtc_state); } - if (old_crtc_state->bigjoiner_linked_crtc) { - struct intel_crtc *slave_crtc = - old_crtc_state->bigjoiner_linked_crtc; + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) { const struct intel_crtc_state *old_slave_crtc_state = intel_atomic_get_old_crtc_state(state, slave_crtc); @@ -3042,7 +2926,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state, { drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); - if (!crtc_state->bigjoiner_slave) + if (!intel_crtc_is_bigjoiner_slave(crtc_state)) intel_ddi_enable_transcoder_func(encoder, crtc_state); intel_vrr_enable(encoder, crtc_state); @@ -3157,6 +3041,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, struct intel_encoder *encoder, struct intel_crtc *crtc) { + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL; int required_lanes = crtc_state ? 
crtc_state->lane_count : 1; @@ -3166,11 +3051,12 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, intel_tc_port_get_link(enc_to_dig_port(encoder), required_lanes); if (crtc_state && crtc_state->hw.active) { - struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc; + struct intel_crtc *slave_crtc; intel_update_active_dpll(state, crtc, encoder); - if (slave_crtc) + for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(crtc_state)) intel_update_active_dpll(state, slave_crtc, encoder); } } @@ -3215,10 +3101,23 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, crtc_state->lane_lat_optim_mask); } +static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); + int ln; + + for (ln = 0; ln < 2; ln++) { + intel_de_write(i915, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); + intel_de_rmw(i915, DKL_PCS_DW5(tc_port), DKL_PCS_DW5_CORE_SOFTRESET, 0); + } +} + static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; u32 dp_tp_ctl, ddi_buf_ctl; @@ -3254,6 +3153,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); + if (IS_ALDERLAKE_P(dev_priv) && + (intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port))) + adlp_tbt_to_dp_alt_switch_wa(encoder); + intel_dp->DP |= DDI_BUF_CTL_ENABLE; intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); @@ -3471,7 +3374,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; - intel_dp_get_m_n(crtc, pipe_config); + + intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, + &pipe_config->dp_m_n); + intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, + &pipe_config->dp_m2_n2); if (DISPLAY_VER(dev_priv) >= 11) { i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config); @@ -3508,7 +3415,8 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->mst_master_transcoder = REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp); - intel_dp_get_m_n(crtc, pipe_config); + intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, + &pipe_config->dp_m_n); pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); @@ -3797,8 +3705,8 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1, const struct intel_link_m_n *m_n_2) { return m_n_1->tu == m_n_2->tu && - m_n_1->gmch_m == m_n_2->gmch_m && - m_n_1->gmch_n == m_n_2->gmch_n && + m_n_1->data_m == m_n_2->data_m && + m_n_1->data_n == m_n_2->data_n && m_n_1->link_m == m_n_2->link_m && m_n_1->link_n == m_n_2->link_n; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index c2fea6562917..d39076facdce 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h 
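The new adlp_tbt_to_dp_alt_switch_wa() above uses an indexed access pattern: a write to HIP_INDEX_REG selects which Dekel PHY lane's register copy is visible, and only then is DKL_PCS_DW5 read-modify-written to clear the soft-reset bit, once per lane. A generic sketch of that select-then-RMW sequencing with stubbed register I/O; mmio_read/mmio_write and the argument names are placeholders, not i915 APIs:

    #include <stdint.h>

    /* Stubs standing in for MMIO accessors; not real kernel interfaces. */
    extern uint32_t mmio_read(uint32_t reg);
    extern void mmio_write(uint32_t reg, uint32_t val);

    /*
     * Select each lane bank through an index register, then clear @bit in
     * the windowed register. The RMW only touches the lane copy that the
     * preceding index write made visible.
     */
    static void demo_clear_bit_per_lane(uint32_t index_reg, uint32_t banked_reg,
                                        uint32_t bit, unsigned int num_lanes)
    {
            for (unsigned int ln = 0; ln < num_lanes; ln++) {
                    mmio_write(index_reg, ln);      /* pick the lane bank */
                    mmio_write(banked_reg, mmio_read(banked_reg) & ~bit);
            }
    }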
@@ -6,7 +6,7 @@ #ifndef __INTEL_DDI_H__ #define __INTEL_DDI_H__ -#include "i915_reg.h" +#include "i915_reg_defs.h" struct drm_connector_state; struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index e2dfb93a82bd..0c32210bf503 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -985,15 +985,15 @@ static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr2_hbr3 = { }; static const union intel_ddi_buf_trans_entry _dg2_snps_trans[] = { - { .snps = { 26, 0, 0 } }, /* VS 0, pre-emph 0 */ - { .snps = { 33, 0, 6 } }, /* VS 0, pre-emph 1 */ - { .snps = { 38, 0, 12 } }, /* VS 0, pre-emph 2 */ - { .snps = { 43, 0, 19 } }, /* VS 0, pre-emph 3 */ - { .snps = { 39, 0, 0 } }, /* VS 1, pre-emph 0 */ - { .snps = { 44, 0, 8 } }, /* VS 1, pre-emph 1 */ - { .snps = { 47, 0, 15 } }, /* VS 1, pre-emph 2 */ - { .snps = { 52, 0, 0 } }, /* VS 2, pre-emph 0 */ - { .snps = { 51, 0, 10 } }, /* VS 2, pre-emph 1 */ + { .snps = { 25, 0, 0 } }, /* VS 0, pre-emph 0 */ + { .snps = { 32, 0, 6 } }, /* VS 0, pre-emph 1 */ + { .snps = { 35, 0, 10 } }, /* VS 0, pre-emph 2 */ + { .snps = { 43, 0, 17 } }, /* VS 0, pre-emph 3 */ + { .snps = { 35, 0, 0 } }, /* VS 1, pre-emph 0 */ + { .snps = { 45, 0, 8 } }, /* VS 1, pre-emph 1 */ + { .snps = { 48, 0, 14 } }, /* VS 1, pre-emph 2 */ + { .snps = { 47, 0, 0 } }, /* VS 2, pre-emph 0 */ + { .snps = { 55, 0, 7 } }, /* VS 2, pre-emph 1 */ { .snps = { 62, 0, 0 } }, /* VS 3, pre-emph 0 */ }; @@ -1005,21 +1005,21 @@ static const struct intel_ddi_buf_trans dg2_snps_trans = { static const union intel_ddi_buf_trans_entry _dg2_snps_trans_uhbr[] = { { .snps = { 62, 0, 0 } }, /* preset 0 */ - { .snps = { 56, 0, 6 } }, /* preset 1 */ - { .snps = { 51, 0, 11 } }, /* preset 2 */ - { .snps = { 48, 0, 14 } }, /* preset 3 */ - { .snps = { 43, 0, 19 } }, /* preset 4 */ + { .snps = { 55, 0, 7 } }, /* preset 1 */ + { .snps = { 50, 0, 12 } }, /* preset 2 */ + { .snps = { 44, 0, 18 } }, /* preset 3 */ + { .snps = { 35, 0, 21 } }, /* preset 4 */ { .snps = { 59, 3, 0 } }, /* preset 5 */ { .snps = { 53, 3, 6 } }, /* preset 6 */ - { .snps = { 49, 3, 10 } }, /* preset 7 */ - { .snps = { 45, 3, 14 } }, /* preset 8 */ - { .snps = { 42, 3, 17 } }, /* preset 9 */ + { .snps = { 48, 3, 11 } }, /* preset 7 */ + { .snps = { 42, 5, 15 } }, /* preset 8 */ + { .snps = { 37, 5, 20 } }, /* preset 9 */ { .snps = { 56, 6, 0 } }, /* preset 10 */ - { .snps = { 50, 6, 6 } }, /* preset 11 */ - { .snps = { 47, 6, 9 } }, /* preset 12 */ - { .snps = { 42, 6, 14 } }, /* preset 13 */ - { .snps = { 46, 8, 8 } }, /* preset 14 */ - { .snps = { 56, 3, 3 } }, /* preset 15 */ + { .snps = { 48, 7, 7 } }, /* preset 11 */ + { .snps = { 45, 7, 10 } }, /* preset 12 */ + { .snps = { 39, 8, 15 } }, /* preset 13 */ + { .snps = { 48, 14, 0 } }, /* preset 14 */ + { .snps = { 45, 4, 4 } }, /* preset 15 */ }; static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = { diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h index 9d8c177aa228..9c104f65e4c8 100644 --- a/drivers/gpu/drm/i915/display/intel_de.h +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -7,7 +7,6 @@ #define __INTEL_DE_H__ #include "i915_drv.h" -#include "i915_reg.h" #include "i915_trace.h" #include "intel_uncore.h" diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 7d558217ca16..80b19c304c43 100644 --- 
a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -74,6 +74,7 @@ #include "g4x_dp.h" #include "g4x_hdmi.h" +#include "hsw_ips.h" #include "i915_drv.h" #include "icl_dsi.h" #include "intel_acpi.h" @@ -112,15 +113,13 @@ #include "i9xx_plane.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "vlv_dsi.h" #include "vlv_dsi_pll.h" +#include "vlv_dsi_regs.h" #include "vlv_sideband.h" -#include "vlv_dsi.h" static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); -static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, - const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2); static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); @@ -340,10 +339,38 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) is_trans_port_sync_slave(crtc_state); } -static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) +static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state) +{ + return ffs(crtc_state->bigjoiner_pipes) - 1; +} + +u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state)); +} + +bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state) { - if (crtc_state->bigjoiner_slave) - return crtc_state->bigjoiner_linked_crtc; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + return crtc_state->bigjoiner_pipes && + crtc->pipe != bigjoiner_master_pipe(crtc_state); +} + +bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + return crtc_state->bigjoiner_pipes && + crtc->pipe == bigjoiner_master_pipe(crtc_state); +} + +struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + if (intel_crtc_is_bigjoiner_slave(crtc_state)) + return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state)); else return to_intel_crtc(crtc_state->uapi.crtc); } @@ -353,16 +380,10 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, { i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; - u32 line_mask; - - if (DISPLAY_VER(dev_priv) == 2) - line_mask = DSL_LINEMASK_GEN2; - else - line_mask = DSL_LINEMASK_GEN3; - line1 = intel_de_read(dev_priv, reg) & line_mask; + line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; msleep(5); - line2 = intel_de_read(dev_priv, reg) & line_mask; + line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; return line1 != line2; } @@ -397,13 +418,11 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) if (DISPLAY_VER(dev_priv) >= 4) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ - if (intel_de_wait_for_clear(dev_priv, reg, - I965_PIPECONF_ACTIVE, 100)) - drm_WARN(&dev_priv->drm, 1, - "pipe_off wait timed out\n"); + if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder), + PIPECONF_STATE_ENABLE, 100)) + drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n"); } else { 
intel_wait_for_pipe_scanline_stopped(crtc); } @@ -763,8 +782,11 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, crtc_state->data_rate[plane->id] = 0; crtc_state->min_cdclk[plane->id] = 0; - if (plane->id == PLANE_PRIMARY) - hsw_disable_ips(crtc_state); + if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 && + hsw_ips_disable(crtc_state)) { + crtc_state->ips_enabled = false; + intel_crtc_wait_for_next_vblank(crtc); + } /* * Vblank time updates from the shadow to live plane control register @@ -1101,72 +1123,6 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height); } -void hsw_enable_ips(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - if (!crtc_state->ips_enabled) - return; - - /* - * We can only enable IPS after we enable a plane and wait for a vblank - * This function is called from post_plane_update, which is run after - * a vblank wait. - */ - drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); - - if (IS_BROADWELL(dev_priv)) { - drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, - IPS_ENABLE | IPS_PCODE_CONTROL)); - /* Quoting Art Runyan: "its not safe to expect any particular - * value in IPS_CTL bit 31 after enabling IPS through the - * mailbox." Moreover, the mailbox may return a bogus state, - * so we need to just enable it and continue on. - */ - } else { - intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE); - /* The bit only becomes 1 in the next vblank, so this wait here - * is essentially intel_wait_for_vblank. If we don't have this - * and don't wait for vblanks until the end of crtc_enable, then - * the HW state readout code will complain that the expected - * IPS_CTL value is not the one we read. */ - if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50)) - drm_err(&dev_priv->drm, - "Timed out waiting for IPS enable\n"); - } -} - -void hsw_disable_ips(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - if (!crtc_state->ips_enabled) - return; - - if (IS_BROADWELL(dev_priv)) { - drm_WARN_ON(dev, - sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); - /* - * Wait for PCODE to finish disabling IPS. The BSpec specified - * 42ms timeout value leads to occasional timeouts so use 100ms - * instead. - */ - if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100)) - drm_err(&dev_priv->drm, - "Timed out waiting for IPS disable\n"); - } else { - intel_de_write(dev_priv, IPS_CTL, 0); - intel_de_posting_read(dev_priv, IPS_CTL); - } - - /* We need to wait for a vblank before we can disable the plane. 
*/ - intel_crtc_wait_for_next_vblank(crtc); -} - static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) { if (crtc->overlay) @@ -1177,67 +1133,6 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) */ } -static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state, - const struct intel_crtc_state *new_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (!old_crtc_state->ips_enabled) - return false; - - if (intel_crtc_needs_modeset(new_crtc_state)) - return true; - - /* - * Workaround : Do not read or write the pipe palette/gamma data while - * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. - * - * Disable IPS before we program the LUT. - */ - if (IS_HASWELL(dev_priv) && - (new_crtc_state->uapi.color_mgmt_changed || - new_crtc_state->update_pipe) && - new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) - return true; - - return !new_crtc_state->ips_enabled; -} - -static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state, - const struct intel_crtc_state *new_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (!new_crtc_state->ips_enabled) - return false; - - if (intel_crtc_needs_modeset(new_crtc_state)) - return true; - - /* - * Workaround : Do not read or write the pipe palette/gamma data while - * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. - * - * Re-enable IPS after the LUT has been programmed. - */ - if (IS_HASWELL(dev_priv) && - (new_crtc_state->uapi.color_mgmt_changed || - new_crtc_state->update_pipe) && - new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) - return true; - - /* - * We can't read out IPS on broadwell, assume the worst and - * forcibly enable IPS on the first fastset. 
- */ - if (new_crtc_state->update_pipe && old_crtc_state->inherited) - return true; - - return !old_crtc_state->ips_enabled; -} - static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -1332,9 +1227,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state, if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) intel_update_watermarks(dev_priv); - if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) - hsw_enable_ips(new_crtc_state); - + hsw_ips_post_update(state, crtc); intel_fbc_post_update(state, crtc); intel_drrs_page_flip(state, crtc); @@ -1369,7 +1262,8 @@ static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->enable_flip_done && plane->pipe == crtc->pipe && - update_planes & BIT(plane->id)) + update_planes & BIT(plane->id) && + plane_state->do_async_flip) plane->enable_flip_done(plane); } } @@ -1387,7 +1281,8 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->disable_flip_done && plane->pipe == crtc->pipe && - update_planes & BIT(plane->id)) + update_planes & BIT(plane->id) && + plane_state->do_async_flip) plane->disable_flip_done(plane); } } @@ -1435,8 +1330,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_psr_pre_plane_update(state, crtc); - if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) - hsw_disable_ips(old_crtc_state); + if (hsw_ips_pre_update(state, crtc)) + intel_crtc_wait_for_next_vblank(crtc); if (intel_fbc_pre_update(state, crtc)) intel_crtc_wait_for_next_vblank(crtc); @@ -1817,6 +1712,26 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat plane->disable_arm(plane, crtc_state); } +static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (crtc_state->has_pch_encoder) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->fdi_m_n); + } else if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, + &crtc_state->dp_m2_n2); + } + + intel_set_transcoder_timings(crtc_state); + + ilk_set_pipeconf(crtc_state); +} + static void ilk_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -1841,27 +1756,16 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_dp_set_m_n(new_crtc_state, M1_N1); + ilk_configure_cpu_transcoder(new_crtc_state); - intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); - if (new_crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->fdi_m_n, NULL); - - ilk_set_pipeconf(new_crtc_state); - crtc->active = true; intel_encoders_pre_enable(state, crtc); if (new_crtc_state->has_pch_encoder) { - /* Note: FDI PLL enabling _must_ be done before we enable the - * cpu pipes, hence this is separate from all the other fdi/pch - * enabling. 
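With the IPS code moving behind the hsw_ips_* helpers (note the hsw_ips.h include added earlier in this file), the helpers no longer wait for a vblank themselves: hsw_ips_disable() and hsw_ips_pre_update() return whether a wait is needed and the caller issues it, keeping the stall policy next to the other pre/post-update waits. A hedged sketch of that calling convention with placeholder demo_* names:

    #include <stdbool.h>

    /* Placeholder types and helpers; only the calling convention matters here. */
    struct demo_state;
    struct demo_crtc;

    extern bool demo_feature_pre_update(struct demo_state *state, struct demo_crtc *crtc);
    extern void demo_wait_for_next_vblank(struct demo_crtc *crtc);

    static void demo_pre_plane_update(struct demo_state *state, struct demo_crtc *crtc)
    {
            /*
             * The helper only decides and programs the hardware; whether and
             * when to stall for a vblank stays with the caller.
             */
            if (demo_feature_pre_update(state, crtc))
                    demo_wait_for_next_vblank(crtc);
    }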
*/ - ilk_fdi_pll_enable(new_crtc_state); + ilk_pch_pre_enable(state, crtc); } else { assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); @@ -1905,12 +1809,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } -/* IPS only exists on ULT machines and is tied to pipe A. */ -static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) -{ - return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; -} - static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, enum pipe pipe, bool apply) { @@ -1974,42 +1872,46 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *master_crtc_state; - struct intel_crtc *master_crtc; - struct drm_connector_state *conn_state; - struct drm_connector *conn; - struct intel_encoder *encoder = NULL; - int i; - - master_crtc = intel_master_crtc(crtc_state); - master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); - - for_each_new_connector_in_state(&state->base, conn, conn_state, i) { - if (conn_state->crtc != &master_crtc->base) - continue; - - encoder = to_intel_encoder(conn_state->best_encoder); - break; - } + struct intel_crtc *master_crtc = intel_master_crtc(crtc_state); /* * Enable sequence steps 1-7 on bigjoiner master */ - if (crtc_state->bigjoiner_slave) + if (intel_crtc_is_bigjoiner_slave(crtc_state)) intel_encoders_pre_pll_enable(state, master_crtc); if (crtc_state->shared_dpll) intel_enable_shared_dpll(crtc_state); - if (crtc_state->bigjoiner_slave) + if (intel_crtc_is_bigjoiner_slave(crtc_state)) intel_encoders_pre_enable(state, master_crtc); +} - /* need to enable VDSC, which we skipped in pre-enable */ - intel_dsc_enable(crtc_state); +static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (DISPLAY_VER(dev_priv) >= 13) - intel_uncompressed_joiner_enable(crtc_state); + if (crtc_state->has_pch_encoder) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->fdi_m_n); + } else if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, + &crtc_state->dp_m2_n2); + } + + intel_set_transcoder_timings(crtc_state); + + if (cpu_transcoder != TRANSCODER_EDP) + intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), + crtc_state->pixel_multiplier - 1); + + hsw_set_frame_start_delay(crtc_state); + + hsw_set_transconf(crtc_state); } static void hsw_crtc_enable(struct intel_atomic_state *state, @@ -2036,25 +1938,18 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, icl_ddi_bigjoiner_pre_enable(state, new_crtc_state); } + intel_dsc_enable(new_crtc_state); + + if (DISPLAY_VER(dev_priv) >= 13) + intel_uncompressed_joiner_enable(new_crtc_state); + intel_set_pipe_src_size(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) bdw_set_pipemisc(new_crtc_state); - if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) { - intel_set_transcoder_timings(new_crtc_state); - - if (cpu_transcoder 
!= TRANSCODER_EDP) - intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), - new_crtc_state->pixel_multiplier - 1); - - if (new_crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->fdi_m_n, NULL); - - hsw_set_frame_start_delay(new_crtc_state); - - hsw_set_transconf(new_crtc_state); - } + if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) && + !transcoder_is_dsi(cpu_transcoder)) + hsw_configure_cpu_transcoder(new_crtc_state); crtc->active = true; @@ -2093,7 +1988,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus); } - if (new_crtc_state->bigjoiner_slave) + if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) intel_crtc_vblank_on(new_crtc_state); intel_encoders_enable(state, crtc); @@ -2178,7 +2073,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, * FIXME collapse everything to one hook. * Need care with mst->ddi interactions. */ - if (!old_crtc_state->bigjoiner_slave) { + if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { intel_encoders_disable(state, crtc); intel_encoders_post_disable(state, crtc); } @@ -2441,6 +2336,23 @@ static void modeset_put_crtc_power_domains(struct intel_crtc *crtc, domains); } +static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, + &crtc_state->dp_m2_n2); + } + + intel_set_transcoder_timings(crtc_state); + + i9xx_set_pipeconf(crtc_state); +} + static void valleyview_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -2452,10 +2364,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_dp_set_m_n(new_crtc_state, M1_N1); + i9xx_configure_cpu_transcoder(new_crtc_state); - intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { @@ -2463,8 +2373,6 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); } - i9xx_set_pipeconf(new_crtc_state); - crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); @@ -2504,14 +2412,10 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_dp_set_m_n(new_crtc_state, M1_N1); + i9xx_configure_cpu_transcoder(new_crtc_state); - intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); - i9xx_set_pipeconf(new_crtc_state); - crtc->active = true; if (DISPLAY_VER(dev_priv) != 2) @@ -2757,77 +2661,6 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, } } -bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - /* IPS only exists on ULT machines and is tied to pipe A. 
*/ - if (!hsw_crtc_supports_ips(crtc)) - return false; - - if (!dev_priv->params.enable_ips) - return false; - - if (crtc_state->pipe_bpp > 24) - return false; - - /* - * We compare against max which means we must take - * the increased cdclk requirement into account when - * calculating the new cdclk. - * - * Should measure whether using a lower cdclk w/o IPS - */ - if (IS_BROADWELL(dev_priv) && - crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100) - return false; - - return true; -} - -static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = - to_i915(crtc_state->uapi.crtc->dev); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); - - crtc_state->ips_enabled = false; - - if (!hsw_crtc_state_ips_capable(crtc_state)) - return 0; - - /* - * When IPS gets enabled, the pipe CRC changes. Since IPS gets - * enabled and disabled dynamically based on package C states, - * user space can't make reliable use of the CRCs, so let's just - * completely disable it. - */ - if (crtc_state->crc_enabled) - return 0; - - /* IPS should be fine as long as at least one plane is enabled. */ - if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) - return 0; - - if (IS_BROADWELL(dev_priv)) { - const struct intel_cdclk_state *cdclk_state; - - cdclk_state = intel_atomic_get_cdclk_state(state); - if (IS_ERR(cdclk_state)) - return PTR_ERR(cdclk_state); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100) - return 0; - } - - crtc_state->ips_enabled = true; - - return 0; -} - static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) { const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -3085,7 +2918,7 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, m_n->tu = 64; compute_m_n(data_clock, link_clock * nlanes * 8, - &m_n->gmch_m, &m_n->gmch_n, + &m_n->data_m, &m_n->data_n, constant_n); compute_m_n(pixel_clock, link_clock, @@ -3116,99 +2949,66 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) } } -static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, - const struct intel_link_m_n *m_n) +void intel_zero_m_n(struct intel_link_m_n *m_n) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; + /* corresponds to 0 register value */ + memset(m_n, 0, sizeof(*m_n)); + m_n->tu = 1; +} - intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe), - TU_SIZE(m_n->tu) | m_n->gmch_m); - intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); - intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m); - intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n); +void intel_set_m_n(struct drm_i915_private *i915, + const struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg) +{ + intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); + intel_de_write(i915, data_n_reg, m_n->data_n); + intel_de_write(i915, link_m_reg, m_n->link_m); + /* + * On BDW+ writing LINK_N arms the double buffered update + * of all the M/N registers, so it must be written last. 
+ */ + intel_de_write(i915, link_n_reg, m_n->link_n); } -static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, - enum transcoder transcoder) +bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, + enum transcoder transcoder) { if (IS_HASWELL(dev_priv)) return transcoder == TRANSCODER_EDP; - /* - * Strictly speaking some registers are available before - * gen7, but we only support DRRS on gen7+ - */ - return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv); + return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); } -static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, - const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2) +void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, + enum transcoder transcoder, + const struct intel_link_m_n *m_n) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - enum transcoder transcoder = crtc_state->cpu_transcoder; - if (DISPLAY_VER(dev_priv) >= 5) { - intel_de_write(dev_priv, PIPE_DATA_M1(transcoder), - TU_SIZE(m_n->tu) | m_n->gmch_m); - intel_de_write(dev_priv, PIPE_DATA_N1(transcoder), - m_n->gmch_n); - intel_de_write(dev_priv, PIPE_LINK_M1(transcoder), - m_n->link_m); - intel_de_write(dev_priv, PIPE_LINK_N1(transcoder), - m_n->link_n); - /* - * M2_N2 registers are set only if DRRS is supported - * (to make sure the registers are not unnecessarily accessed). - */ - if (m2_n2 && crtc_state->has_drrs && - transcoder_has_m2_n2(dev_priv, transcoder)) { - intel_de_write(dev_priv, PIPE_DATA_M2(transcoder), - TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); - intel_de_write(dev_priv, PIPE_DATA_N2(transcoder), - m2_n2->gmch_n); - intel_de_write(dev_priv, PIPE_LINK_M2(transcoder), - m2_n2->link_m); - intel_de_write(dev_priv, PIPE_LINK_N2(transcoder), - m2_n2->link_n); - } - } else { - intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe), - TU_SIZE(m_n->tu) | m_n->gmch_m); - intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n); - intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m); - intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n); - } + if (DISPLAY_VER(dev_priv) >= 5) + intel_set_m_n(dev_priv, m_n, + PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), + PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); + else + intel_set_m_n(dev_priv, m_n, + PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), + PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); } -void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) +void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, + enum transcoder transcoder, + const struct intel_link_m_n *m_n) { - const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - - if (m_n == M1_N1) { - dp_m_n = &crtc_state->dp_m_n; - dp_m2_n2 = &crtc_state->dp_m2_n2; - } else if (m_n == M2_N2) { + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - /* - * M2_N2 registers are not supported. Hence m2_n2 divider value - * needs to be programmed into M1_N1. 
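Two details of the M/N rework above are worth noting: intel_set_m_n() deliberately writes LINK_N last, since on BDW+ that write arms the double-buffered update of the whole M/N set, and the M2/N2 registers (per the removed comments, programmed only when DRRS is supported and the transcoder has them) carry a second set of link parameters used for the downclocked refresh mode. A simplified sketch of the "arm on last write" ordering with stubbed register I/O; the TU field packing is omitted and the names are placeholders, not the i915 functions:

    #include <stdint.h>

    struct demo_m_n { uint32_t data_m, data_n, link_m, link_n; };

    extern void mmio_write(uint32_t reg, uint32_t val);     /* stub, not a kernel API */

    /*
     * Mirrors the write ordering in intel_set_m_n() above: the data and
     * link M values are staged first, and the LINK_N write goes last
     * because it is the write that arms the double-buffered update.
     */
    static void demo_set_m_n(const struct demo_m_n *m_n,
                             uint32_t data_m_reg, uint32_t data_n_reg,
                             uint32_t link_m_reg, uint32_t link_n_reg)
    {
            mmio_write(data_m_reg, m_n->data_m);
            mmio_write(data_n_reg, m_n->data_n);
            mmio_write(link_m_reg, m_n->link_m);
            mmio_write(link_n_reg, m_n->link_n);    /* must stay last: arms the latch */
    }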
- */ - dp_m_n = &crtc_state->dp_m2_n2; - } else { - drm_err(&i915->drm, "Unsupported divider value\n"); + if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) return; - } - if (crtc_state->has_pch_encoder) - intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); - else - intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); + intel_set_m_n(dev_priv, m_n, + PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), + PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) @@ -3279,7 +3079,8 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) * always be the user's requested size. */ intel_de_write(dev_priv, PIPESRC(pipe), - ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1)); + PIPESRC_WIDTH(crtc_state->pipe_src_w - 1) | + PIPESRC_HEIGHT(crtc_state->pipe_src_h - 1)); } static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) @@ -3350,21 +3151,19 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc, u32 tmp; tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); - pipe_config->pipe_src_h = (tmp & 0xffff) + 1; - pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; + pipe_config->pipe_src_w = REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1; + pipe_config->pipe_src_h = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1; } static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 pipeconf; - - pipeconf = 0; + u32 pipeconf = 0; /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) - pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; + pipeconf |= PIPECONF_ENABLE; if (crtc_state->double_wide) pipeconf |= PIPECONF_DOUBLE_WIDE; @@ -3379,13 +3178,13 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - pipeconf |= PIPECONF_6BPC; + pipeconf |= PIPECONF_BPC_6; break; case 24: - pipeconf |= PIPECONF_8BPC; + pipeconf |= PIPECONF_BPC_8; break; case 30: - pipeconf |= PIPECONF_10BPC; + pipeconf |= PIPECONF_BPC_10; break; default: /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ @@ -3400,7 +3199,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; } else { - pipeconf |= PIPECONF_PROGRESSIVE; + pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE; } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && @@ -3543,11 +3342,11 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); - if (tmp & DISPPLANE_GAMMA_ENABLE) + if (tmp & DISP_PIPE_GAMMA_ENABLE) crtc_state->gamma_enable = true; if (!HAS_GMCH(dev_priv) && - tmp & DISPPLANE_PIPE_CSC_ENABLE) + tmp & DISP_PIPE_CSC_ENABLE) crtc_state->csc_enable = true; } @@ -3578,16 +3377,17 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_6BPC: + case PIPECONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_8BPC: + case PIPECONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_10BPC: + case PIPECONF_BPC_10: pipe_config->pipe_bpp = 30; break; default: + MISSING_CASE(tmp); break; } } @@ -3596,8 +3396,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, (tmp & PIPECONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; - pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> - PIPECONF_GAMMA_MODE_SHIFT; + pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp); if (IS_CHERRYVIEW(dev_priv)) pipe_config->cgm_mode = intel_de_read(dev_priv, @@ -3684,16 +3483,16 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - val |= PIPECONF_6BPC; + val |= PIPECONF_BPC_6; break; case 24: - val |= PIPECONF_8BPC; + val |= PIPECONF_BPC_8; break; case 30: - val |= PIPECONF_10BPC; + val |= PIPECONF_BPC_10; break; case 36: - val |= PIPECONF_12BPC; + val |= PIPECONF_BPC_12; break; default: /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ @@ -3701,12 +3500,12 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) } if (crtc_state->dither) - val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); + val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACED_ILK; + val |= PIPECONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_PROGRESSIVE; + val |= PIPECONF_INTERLACE_PF_PD_ILK; /* * This would end up with an odd purple hue over @@ -3738,12 +3537,12 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) u32 val = 0; if (IS_HASWELL(dev_priv) && crtc_state->dither) - val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); + val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACED_ILK; + val |= PIPECONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_PROGRESSIVE; + val |= PIPECONF_INTERLACE_PF_PD_ILK; if (IS_HASWELL(dev_priv) && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) @@ -3765,18 +3564,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - val |= PIPEMISC_6_BPC; + val |= PIPEMISC_BPC_6; break; case 24: - val |= PIPEMISC_8_BPC; + val |= PIPEMISC_BPC_8; break; case 30: - val |= PIPEMISC_10_BPC; + val |= PIPEMISC_BPC_10; break; case 36: /* Port output 12BPC defined for ADLP+ */ if (DISPLAY_VER(dev_priv) > 12) - val |= PIPEMISC_12_BPC_ADLP; + val |= PIPEMISC_BPC_12_ADLP; break; default: MISSING_CASE(crtc_state->pipe_bpp); @@ -3812,7 +3611,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) } intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe), - PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK, + PIPE_MISC2_BUBBLE_COUNTER_MASK, scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN : PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS); } @@ -3828,11 +3627,11 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); switch (tmp & PIPEMISC_BPC_MASK) { - case PIPEMISC_6_BPC: + case PIPEMISC_BPC_6: return 18; - case PIPEMISC_8_BPC: + case PIPEMISC_BPC_8: return 24; - case PIPEMISC_10_BPC: + case PIPEMISC_BPC_10: return 30; /* * PORT OUTPUT 12 BPC defined for ADLP+. @@ -3844,7 +3643,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) * on older platforms, need to find a workaround for 12 BPC * MIPI DSI HW readout. 
*/ - case PIPEMISC_12_BPC_ADLP: + case PIPEMISC_BPC_12_ADLP: if (DISPLAY_VER(dev_priv) > 12) return 36; fallthrough; @@ -3865,83 +3664,47 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) return DIV_ROUND_UP(bps, link_bw * 8); } -static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n) +void intel_get_m_n(struct drm_i915_private *i915, + struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - - m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe)); - m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe)); - m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) - & ~TU_SIZE_MASK; - m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe)); - m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; + m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; + m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; + m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; + m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; } -static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, - enum transcoder transcoder, - struct intel_link_m_n *m_n, - struct intel_link_m_n *m2_n2) +void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, + enum transcoder transcoder, + struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (DISPLAY_VER(dev_priv) >= 5) { - m_n->link_m = intel_de_read(dev_priv, - PIPE_LINK_M1(transcoder)); - m_n->link_n = intel_de_read(dev_priv, - PIPE_LINK_N1(transcoder)); - m_n->gmch_m = intel_de_read(dev_priv, - PIPE_DATA_M1(transcoder)) - & ~TU_SIZE_MASK; - m_n->gmch_n = intel_de_read(dev_priv, - PIPE_DATA_N1(transcoder)); - m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; - - if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { - m2_n2->link_m = intel_de_read(dev_priv, - PIPE_LINK_M2(transcoder)); - m2_n2->link_n = intel_de_read(dev_priv, - PIPE_LINK_N2(transcoder)); - m2_n2->gmch_m = intel_de_read(dev_priv, - PIPE_DATA_M2(transcoder)) - & ~TU_SIZE_MASK; - m2_n2->gmch_n = intel_de_read(dev_priv, - PIPE_DATA_N2(transcoder)); - m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; - } - } else { - m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe)); - m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe)); - m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) - & ~TU_SIZE_MASK; - m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe)); - m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; - } -} - -void intel_dp_get_m_n(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - if (pipe_config->has_pch_encoder) - intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); + if (DISPLAY_VER(dev_priv) >= 5) + intel_get_m_n(dev_priv, m_n, + PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), + PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); else - intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, - &pipe_config->dp_m_n, - 
&pipe_config->dp_m2_n2); + intel_get_m_n(dev_priv, m_n, + PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), + PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); } -void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, + enum transcoder transcoder, + struct intel_link_m_n *m_n) { - intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, - &pipe_config->fdi_m_n, NULL); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) + return; + + intel_get_m_n(dev_priv, m_n, + PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), + PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, @@ -4037,16 +3800,16 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, goto out; switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_6BPC: + case PIPECONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_8BPC: + case PIPECONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_10BPC: + case PIPECONF_BPC_10: pipe_config->pipe_bpp = 30; break; - case PIPECONF_12BPC: + case PIPECONF_BPC_12: pipe_config->pipe_bpp = 36; break; default: @@ -4066,8 +3829,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, break; } - pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> - PIPECONF_GAMMA_MODE_SHIFT; + pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp); pipe_config->csc_mode = intel_de_read(dev_priv, PIPE_CSC_MODE(crtc->pipe)); @@ -4117,19 +3879,20 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, return tmp & TRANS_DDI_FUNC_ENABLE; } -static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv) +static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv, + u8 *master_pipes, u8 *slave_pipes) { - u8 master_pipes = 0, slave_pipes = 0; struct intel_crtc *crtc; - for_each_intel_crtc(&dev_priv->drm, crtc) { + *master_pipes = 0; + *slave_pipes = 0; + + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, + bigjoiner_pipes(dev_priv)) { enum intel_display_power_domain power_domain; enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; - if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0) - continue; - power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); @@ -4138,9 +3901,9 @@ static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv) continue; if (tmp & MASTER_BIG_JOINER_ENABLE) - master_pipes |= BIT(pipe); + *master_pipes |= BIT(pipe); else - slave_pipes |= BIT(pipe); + *slave_pipes |= BIT(pipe); } if (DISPLAY_VER(dev_priv) < 13) @@ -4151,18 +3914,47 @@ static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv) u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); if (tmp & UNCOMPRESSED_JOINER_MASTER) - master_pipes |= BIT(pipe); + *master_pipes |= BIT(pipe); if (tmp & UNCOMPRESSED_JOINER_SLAVE) - slave_pipes |= BIT(pipe); + *slave_pipes |= BIT(pipe); } } /* Bigjoiner pipes should always be consecutive master and slave */ - drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1, + drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1, "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", - master_pipes, slave_pipes); + *master_pipes, *slave_pipes); +} + +static enum pipe 
get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes) +{ + if ((slave_pipes & BIT(pipe)) == 0) + return pipe; - return slave_pipes; + /* ignore everything above our pipe */ + master_pipes &= ~GENMASK(7, pipe); + + /* highest remaining bit should be our master pipe */ + return fls(master_pipes) - 1; +} + +static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes) +{ + enum pipe master_pipe, next_master_pipe; + + master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes); + + if ((master_pipes & BIT(master_pipe)) == 0) + return 0; + + /* ignore our master pipe and everything below it */ + master_pipes &= ~GENMASK(master_pipe, 0); + /* make sure a high bit is set for the ffs() */ + master_pipes |= BIT(7); + /* lowest remaining bit should be the next master pipe */ + next_master_pipe = ffs(master_pipes) - 1; + + return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe); } static u8 hsw_panel_transcoders(struct drm_i915_private *i915) @@ -4181,6 +3973,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(dev); u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); enum transcoder cpu_transcoder; + u8 master_pipes, slave_pipes; u8 enabled_transcoders = 0; /* @@ -4232,8 +4025,10 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) enabled_transcoders |= BIT(cpu_transcoder); /* bigjoiner slave -> consider the master pipe's transcoder as well */ - if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) { - cpu_transcoder = (enum transcoder) crtc->pipe - 1; + enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes); + if (slave_pipes & BIT(crtc->pipe)) { + cpu_transcoder = (enum transcoder) + get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes); if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) enabled_transcoders |= BIT(cpu_transcoder); } @@ -4358,6 +4153,24 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, return transcoder_is_dsi(pipe_config->cpu_transcoder); } +static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + u8 master_pipes, slave_pipes; + enum pipe pipe = crtc->pipe; + + enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes); + + if (((master_pipes | slave_pipes) & BIT(pipe)) == 0) + return; + + crtc_state->bigjoiner = true; + crtc_state->bigjoiner_pipes = + BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) | + get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes); +} + static bool hsw_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -4380,13 +4193,12 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, active = true; } - intel_dsc_get_config(pipe_config); - if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable) - intel_uncompressed_joiner_get_config(pipe_config); - if (!active) goto out; + intel_dsc_get_config(pipe_config); + intel_bigjoiner_get_config(pipe_config); + if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || DISPLAY_VER(dev_priv) >= 11) intel_get_transcoder_timings(crtc, pipe_config); @@ -4443,19 +4255,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, ilk_get_pfit_config(pipe_config); } - if (hsw_crtc_supports_ips(crtc)) { - if (IS_HASWELL(dev_priv)) - pipe_config->ips_enabled = intel_de_read(dev_priv, - IPS_CTL) & IPS_ENABLE; - else { - /* - * We 
cannot readout IPS state on broadwell, set to - * true so we can set it to a defined state on first - * commit. - */ - pipe_config->ips_enabled = true; - } - } + hsw_ips_get_config(pipe_config); if (pipe_config->cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(pipe_config->cpu_transcoder)) { @@ -4867,169 +4667,6 @@ intel_encoder_current_mode(struct intel_encoder *encoder) return mode; } -/** - * intel_wm_need_update - Check whether watermarks need updating - * @cur: current plane state - * @new: new plane state - * - * Check current plane state versus the new one to determine whether - * watermarks need to be recalculated. - * - * Returns true or false. - */ -static bool intel_wm_need_update(const struct intel_plane_state *cur, - struct intel_plane_state *new) -{ - /* Update watermarks on tiling or size changes. */ - if (new->uapi.visible != cur->uapi.visible) - return true; - - if (!cur->hw.fb || !new->hw.fb) - return false; - - if (cur->hw.fb->modifier != new->hw.fb->modifier || - cur->hw.rotation != new->hw.rotation || - drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) || - drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) || - drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) || - drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst)) - return true; - - return false; -} - -static bool needs_scaling(const struct intel_plane_state *state) -{ - int src_w = drm_rect_width(&state->uapi.src) >> 16; - int src_h = drm_rect_height(&state->uapi.src) >> 16; - int dst_w = drm_rect_width(&state->uapi.dst); - int dst_h = drm_rect_height(&state->uapi.dst); - - return (src_w != dst_w || src_h != dst_h); -} - -int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state, - const struct intel_plane_state *old_plane_state, - struct intel_plane_state *new_plane_state) -{ - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - bool mode_changed = intel_crtc_needs_modeset(new_crtc_state); - bool was_crtc_enabled = old_crtc_state->hw.active; - bool is_crtc_enabled = new_crtc_state->hw.active; - bool turn_off, turn_on, visible, was_visible; - int ret; - - if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { - ret = skl_update_scaler_plane(new_crtc_state, new_plane_state); - if (ret) - return ret; - } - - was_visible = old_plane_state->uapi.visible; - visible = new_plane_state->uapi.visible; - - if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) - was_visible = false; - - /* - * Visibility is calculated as if the crtc was on, but - * after scaler setup everything depends on it being off - * when the crtc isn't active. - * - * FIXME this is wrong for watermarks. Watermarks should also - * be computed as if the pipe would be active. Perhaps move - * per-plane wm computation to the .check_plane() hook, and - * only combine the results from all planes in the current place? 
- */ - if (!is_crtc_enabled) { - intel_plane_set_invisible(new_crtc_state, new_plane_state); - visible = false; - } - - if (!was_visible && !visible) - return 0; - - turn_off = was_visible && (!visible || mode_changed); - turn_on = visible && (!was_visible || mode_changed); - - drm_dbg_atomic(&dev_priv->drm, - "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", - crtc->base.base.id, crtc->base.name, - plane->base.base.id, plane->base.name, - was_visible, visible, - turn_off, turn_on, mode_changed); - - if (turn_on) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - new_crtc_state->update_wm_pre = true; - - /* must disable cxsr around plane enable/disable */ - if (plane->id != PLANE_CURSOR) - new_crtc_state->disable_cxsr = true; - } else if (turn_off) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - new_crtc_state->update_wm_post = true; - - /* must disable cxsr around plane enable/disable */ - if (plane->id != PLANE_CURSOR) - new_crtc_state->disable_cxsr = true; - } else if (intel_wm_need_update(old_plane_state, new_plane_state)) { - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { - /* FIXME bollocks */ - new_crtc_state->update_wm_pre = true; - new_crtc_state->update_wm_post = true; - } - } - - if (visible || was_visible) - new_crtc_state->fb_bits |= plane->frontbuffer_bit; - - /* - * ILK/SNB DVSACNTR/Sprite Enable - * IVB SPR_CTL/Sprite Enable - * "When in Self Refresh Big FIFO mode, a write to enable the - * plane will be internally buffered and delayed while Big FIFO - * mode is exiting." - * - * Which means that enabling the sprite can take an extra frame - * when we start in big FIFO mode (LP1+). Thus we need to drop - * down to LP0 and wait for vblank in order to make sure the - * sprite gets enabled on the next vblank after the register write. - * Doing otherwise would risk enabling the sprite one frame after - * we've already signalled flip completion. We can resume LP1+ - * once the sprite has been enabled. - * - * - * WaCxSRDisabledForSpriteScaling:ivb - * IVB SPR_SCALE/Scaling Enable - * "Low Power watermarks must be disabled for at least one - * frame before enabling sprite scaling, and kept disabled - * until sprite scaling is disabled." - * - * ILK/SNB DVSASCALE/Scaling Enable - * "When in Self Refresh Big FIFO mode, scaling enable will be - * masked off while Big FIFO mode is exiting." - * - * Despite the w/a only being listed for IVB we assume that - * the ILK/SNB note has similar ramifications, hence we apply - * the w/a on all three platforms. - * - * With experimental results seems this is needed also for primary - * plane, not only sprite plane. 
- */ - if (plane->id != PLANE_CURSOR && - (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || - IS_IVYBRIDGE(dev_priv)) && - (turn_on || (!needs_scaling(old_plane_state) && - needs_scaling(new_plane_state)))) - new_crtc_state->disable_lp_wm = true; - - return 0; -} - static bool encoders_cloneable(const struct intel_encoder *a, const struct intel_encoder *b) { @@ -5289,7 +4926,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, if (mode_changed && crtc_state->hw.enable && !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { - ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state); + ret = intel_dpll_crtc_compute_clock(crtc_state); if (ret) return ret; } @@ -5340,7 +4977,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, } if (HAS_IPS(dev_priv)) { - ret = hsw_compute_ips_config(crtc_state); + ret = hsw_ips_compute_config(state, crtc); if (ret) return ret; } @@ -5491,9 +5128,9 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); drm_dbg_kms(&i915->drm, - "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", + "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n", id, lane_count, - m_n->gmch_m, m_n->gmch_n, + m_n->data_m, m_n->data_n, m_n->link_m, m_n->link_n, m_n->tu); } @@ -5642,9 +5279,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, transcoder_name(pipe_config->master_transcoder), pipe_config->sync_mode_slaves_mask); - drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n", - pipe_config->bigjoiner_slave ? "slave" : - pipe_config->bigjoiner ? "master" : "no"); + drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n", + intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" : + intel_crtc_is_bigjoiner_master(pipe_config) ? 
"master" : "no", + pipe_config->bigjoiner_pipes); drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n", enableddisabled(pipe_config->splitter.enable), @@ -5658,11 +5296,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, if (intel_crtc_has_dp_encoder(pipe_config)) { intel_dump_m_n_config(pipe_config, "dp m_n", - pipe_config->lane_count, &pipe_config->dp_m_n); - if (pipe_config->has_drrs) - intel_dump_m_n_config(pipe_config, "dp m2_n2", - pipe_config->lane_count, - &pipe_config->dp_m2_n2); + pipe_config->lane_count, + &pipe_config->dp_m_n); + intel_dump_m_n_config(pipe_config, "dp m2_n2", + pipe_config->lane_count, + &pipe_config->dp_m2_n2); } drm_dbg_kms(&dev_priv->drm, @@ -5841,35 +5479,42 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) static void intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - const struct intel_crtc_state *master_crtc_state; - struct intel_crtc *master_crtc; + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); - master_crtc = intel_master_crtc(crtc_state); - master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); + WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); - /* No need to copy state if the master state is unchanged */ - if (master_crtc_state) - intel_crtc_copy_color_blobs(crtc_state, master_crtc_state); + drm_property_replace_blob(&crtc_state->hw.degamma_lut, + crtc_state->uapi.degamma_lut); + drm_property_replace_blob(&crtc_state->hw.gamma_lut, + crtc_state->uapi.gamma_lut); + drm_property_replace_blob(&crtc_state->hw.ctm, + crtc_state->uapi.ctm); } static void -intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) +intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); + crtc_state->hw.enable = crtc_state->uapi.enable; crtc_state->hw.active = crtc_state->uapi.active; crtc_state->hw.mode = crtc_state->uapi.mode; crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; - intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state); + intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); } static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) { - if (crtc_state->bigjoiner_slave) + if (intel_crtc_is_bigjoiner_slave(crtc_state)) return; crtc_state->uapi.enable = crtc_state->hw.enable; @@ -5880,7 +5525,6 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode; crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter; - /* copy color blobs to uapi */ drm_property_replace_blob(&crtc_state->uapi.degamma_lut, crtc_state->hw.degamma_lut); drm_property_replace_blob(&crtc_state->uapi.gamma_lut, @@ -5889,51 +5533,79 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state crtc_state->hw.ctm); } +static void +copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state, + struct intel_crtc *slave_crtc) +{ + struct intel_crtc_state *slave_crtc_state = + intel_atomic_get_new_crtc_state(state, slave_crtc); + struct intel_crtc *master_crtc = 
intel_master_crtc(slave_crtc_state); + const struct intel_crtc_state *master_crtc_state = + intel_atomic_get_new_crtc_state(state, master_crtc); + + drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut, + master_crtc_state->hw.degamma_lut); + drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut, + master_crtc_state->hw.gamma_lut); + drm_property_replace_blob(&slave_crtc_state->hw.ctm, + master_crtc_state->hw.ctm); + + slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed; +} + static int -copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, - const struct intel_crtc_state *from_crtc_state) +copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, + struct intel_crtc *slave_crtc) { + struct intel_crtc_state *slave_crtc_state = + intel_atomic_get_new_crtc_state(state, slave_crtc); + struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); + const struct intel_crtc_state *master_crtc_state = + intel_atomic_get_new_crtc_state(state, master_crtc); struct intel_crtc_state *saved_state; - saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL); + saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL); if (!saved_state) return -ENOMEM; - saved_state->uapi = crtc_state->uapi; - saved_state->scaler_state = crtc_state->scaler_state; - saved_state->shared_dpll = crtc_state->shared_dpll; - saved_state->dpll_hw_state = crtc_state->dpll_hw_state; - saved_state->crc_enabled = crtc_state->crc_enabled; + /* preserve some things from the slave's original crtc state */ + saved_state->uapi = slave_crtc_state->uapi; + saved_state->scaler_state = slave_crtc_state->scaler_state; + saved_state->shared_dpll = slave_crtc_state->shared_dpll; + saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state; + saved_state->crc_enabled = slave_crtc_state->crc_enabled; - intel_crtc_free_hw_state(crtc_state); - memcpy(crtc_state, saved_state, sizeof(*crtc_state)); + intel_crtc_free_hw_state(slave_crtc_state); + memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); kfree(saved_state); /* Re-init hw state */ - memset(&crtc_state->hw, 0, sizeof(saved_state->hw)); - crtc_state->hw.enable = from_crtc_state->hw.enable; - crtc_state->hw.active = from_crtc_state->hw.active; - crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode; - crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode; + memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw)); + slave_crtc_state->hw.enable = master_crtc_state->hw.enable; + slave_crtc_state->hw.active = master_crtc_state->hw.active; + slave_crtc_state->hw.mode = master_crtc_state->hw.mode; + slave_crtc_state->hw.pipe_mode = master_crtc_state->hw.pipe_mode; + slave_crtc_state->hw.adjusted_mode = master_crtc_state->hw.adjusted_mode; + slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; + + copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); /* Some fixups */ - crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed; - crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed; - crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed; - crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0; - crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc); - crtc_state->bigjoiner_slave = true; - crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder; - crtc_state->has_audio = from_crtc_state->has_audio; + 
slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed; + slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed; + slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed; + slave_crtc_state->cpu_transcoder = master_crtc_state->cpu_transcoder; + slave_crtc_state->has_audio = master_crtc_state->has_audio; return 0; } static int intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, - struct intel_crtc_state *crtc_state) + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *saved_state; @@ -5963,7 +5635,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, memcpy(crtc_state, saved_state, sizeof(*crtc_state)); kfree(saved_state); - intel_crtc_copy_uapi_to_hw_state(state, crtc_state); + intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); return 0; } @@ -6189,8 +5861,8 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, bool exact) { return m_n->tu == m2_n2->tu && - intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, - m2_n2->gmch_m, m2_n2->gmch_n, exact) && + intel_compare_m_n(m_n->data_m, m_n->data_n, + m2_n2->data_m, m2_n2->data_n, exact) && intel_compare_m_n(m_n->link_m, m_n->link_n, m2_n2->link_m, m2_n2->link_n, exact); } @@ -6389,16 +6061,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, &pipe_config->name,\ !fastset)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ - "(expected tu %i gmch %i/%i link %i/%i, " \ - "found tu %i, gmch %i/%i link %i/%i)", \ + "(expected tu %i data %i/%i link %i/%i, " \ + "found tu %i, data %i/%i link %i/%i)", \ current_config->name.tu, \ - current_config->name.gmch_m, \ - current_config->name.gmch_n, \ + current_config->name.data_m, \ + current_config->name.data_n, \ current_config->name.link_m, \ current_config->name.link_n, \ pipe_config->name.tu, \ - pipe_config->name.gmch_m, \ - pipe_config->name.gmch_n, \ + pipe_config->name.data_m, \ + pipe_config->name.data_n, \ pipe_config->name.link_m, \ pipe_config->name.link_n); \ ret = false; \ @@ -6416,22 +6088,22 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, !intel_compare_link_m_n(¤t_config->alt_name, \ &pipe_config->name, !fastset)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ - "(expected tu %i gmch %i/%i link %i/%i, " \ - "or tu %i gmch %i/%i link %i/%i, " \ - "found tu %i, gmch %i/%i link %i/%i)", \ + "(expected tu %i data %i/%i link %i/%i, " \ + "or tu %i data %i/%i link %i/%i, " \ + "found tu %i, data %i/%i link %i/%i)", \ current_config->name.tu, \ - current_config->name.gmch_m, \ - current_config->name.gmch_n, \ + current_config->name.data_m, \ + current_config->name.data_n, \ current_config->name.link_m, \ current_config->name.link_n, \ current_config->alt_name.tu, \ - current_config->alt_name.gmch_m, \ - current_config->alt_name.gmch_n, \ + current_config->alt_name.data_m, \ + current_config->alt_name.data_n, \ current_config->alt_name.link_m, \ current_config->alt_name.link_n, \ pipe_config->name.tu, \ - pipe_config->name.gmch_m, \ - pipe_config->name.gmch_n, \ + pipe_config->name.data_m, \ + pipe_config->name.data_n, \ pipe_config->name.link_m, \ pipe_config->name.link_n); \ ret = false; \ @@ -6510,13 +6182,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, 
PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); - if (DISPLAY_VER(dev_priv) < 8) { - PIPE_CONF_CHECK_M_N(dp_m_n); - - if (current_config->has_drrs) - PIPE_CONF_CHECK_M_N(dp_m2_n2); - } else + if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) { PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); + } else { + PIPE_CONF_CHECK_M_N(dp_m_n); + PIPE_CONF_CHECK_M_N(dp_m2_n2); + } PIPE_CONF_CHECK_X(output_types); @@ -6642,6 +6313,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); + PIPE_CONF_CHECK_X(dpll_hw_state.div0); PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); PIPE_CONF_CHECK_X(dpll_hw_state.pll0); @@ -6693,8 +6365,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(sync_mode_slaves_mask); PIPE_CONF_CHECK_I(master_transcoder); PIPE_CONF_CHECK_BOOL(bigjoiner); - PIPE_CONF_CHECK_BOOL(bigjoiner_slave); - PIPE_CONF_CHECK_P(bigjoiner_linked_crtc); + PIPE_CONF_CHECK_X(bigjoiner_pipes); PIPE_CONF_CHECK_I(dsc.compression_enable); PIPE_CONF_CHECK_I(dsc.dsc_split); @@ -7480,20 +7151,25 @@ static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - int ret; + struct intel_crtc *other; - if (!crtc_state->bigjoiner) - continue; + for_each_intel_crtc_in_pipe_mask(&i915->drm, other, + crtc_state->bigjoiner_pipes) { + int ret; - ret = intel_crtc_add_bigjoiner_planes(state, crtc, - crtc_state->bigjoiner_linked_crtc); - if (ret) - return ret; + if (crtc == other) + continue; + + ret = intel_crtc_add_bigjoiner_planes(state, crtc, other); + if (ret) + return ret; + } } return 0; @@ -7595,67 +7271,123 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, return false; } -static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) +static bool intel_pipes_need_modeset(struct intel_atomic_state *state, + u8 pipes) { - struct intel_crtc_state *slave_crtc_state, *master_crtc_state; - struct intel_crtc *slave_crtc, *master_crtc; + const struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i; - /* slave being enabled, is master is still claiming this crtc? 
*/ - if (old_crtc_state->bigjoiner_slave) { - slave_crtc = crtc; - master_crtc = old_crtc_state->bigjoiner_linked_crtc; - master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); - if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state)) - goto claimed; + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (new_crtc_state->hw.enable && + pipes & BIT(crtc->pipe) && + intel_crtc_needs_modeset(new_crtc_state)) + return true; } - if (!new_crtc_state->bigjoiner) + return false; +} + +static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, + struct intel_crtc *master_crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *master_crtc_state = + intel_atomic_get_new_crtc_state(state, master_crtc); + struct intel_crtc *slave_crtc; + u8 slave_pipes; + + /* + * TODO: encoder.compute_config() may be the best + * place to populate the bitmask for the master crtc. + * For now encoder.compute_config() just flags things + * as needing bigjoiner and we populate the bitmask + * here. + */ + WARN_ON(master_crtc_state->bigjoiner_pipes); + + if (!master_crtc_state->bigjoiner) return 0; - slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc); - if (!slave_crtc) { - DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires " - "CRTC + 1 to be used, doesn't exist\n", - crtc->base.base.id, crtc->base.name); + slave_pipes = BIT(master_crtc->pipe + 1); + + if (slave_pipes & ~bigjoiner_pipes(i915)) { + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] Cannot act as big joiner master " + "(need 0x%x as slave pipes, only 0x%x possible)\n", + master_crtc->base.base.id, master_crtc->base.name, + slave_pipes, bigjoiner_pipes(i915)); return -EINVAL; } - new_crtc_state->bigjoiner_linked_crtc = slave_crtc; - slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); - master_crtc = crtc; - if (IS_ERR(slave_crtc_state)) - return PTR_ERR(slave_crtc_state); + for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, slave_pipes) { + struct intel_crtc_state *slave_crtc_state; + int ret; + + slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); + if (IS_ERR(slave_crtc_state)) + return PTR_ERR(slave_crtc_state); - /* master being enabled, slave was already configured? */ - if (slave_crtc_state->uapi.enable) - goto claimed; + /* master being enabled, slave was already configured? */ + if (slave_crtc_state->uapi.enable) { + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] Slave is enabled as normal CRTC, but " + "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", + slave_crtc->base.base.id, slave_crtc->base.name, + master_crtc->base.base.id, master_crtc->base.name); + return -EINVAL; + } + + /* + * The state copy logic assumes the master crtc gets processed + * before the slave crtc during the main compute_config loop. + * This works because the crtcs are created in pipe order, + * and the hardware requires master pipe < slave pipe as well. + * Should that change we need to rethink the logic. 
+ */ + if (WARN_ON(drm_crtc_index(&master_crtc->base) > + drm_crtc_index(&slave_crtc->base))) + return -EINVAL; - DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n", - slave_crtc->base.base.id, slave_crtc->base.name); + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n", + slave_crtc->base.base.id, slave_crtc->base.name, + master_crtc->base.base.id, master_crtc->base.name); - return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state); + master_crtc_state->bigjoiner_pipes = + BIT(master_crtc->pipe) | BIT(slave_crtc->pipe); + slave_crtc_state->bigjoiner_pipes = + BIT(master_crtc->pipe) | BIT(slave_crtc->pipe); -claimed: - DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but " - "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", - slave_crtc->base.base.id, slave_crtc->base.name, - master_crtc->base.base.id, master_crtc->base.name); - return -EINVAL; + ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc); + if (ret) + return ret; + } + + return 0; } static void kill_bigjoiner_slave(struct intel_atomic_state *state, - struct intel_crtc_state *master_crtc_state) + struct intel_crtc *master_crtc) { - struct intel_crtc_state *slave_crtc_state = - intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc); + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *master_crtc_state = + intel_atomic_get_new_crtc_state(state, master_crtc); + struct intel_crtc *slave_crtc; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { + struct intel_crtc_state *slave_crtc_state = + intel_atomic_get_new_crtc_state(state, slave_crtc); - slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false; - slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false; - slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL; - intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state); + slave_crtc_state->bigjoiner = false; + slave_crtc_state->bigjoiner_pipes = 0; + + intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc); + } + + master_crtc_state->bigjoiner = false; + master_crtc_state->bigjoiner_pipes = 0; } /** @@ -7805,34 +7537,37 @@ static int intel_atomic_check_async(struct intel_atomic_state *state, struct int static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; + u8 affected_pipes = 0; + u8 modeset_pipes = 0; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - struct intel_crtc_state *linked_crtc_state; - struct intel_crtc *linked_crtc; - int ret; + affected_pipes |= crtc_state->bigjoiner_pipes; + if (intel_crtc_needs_modeset(crtc_state)) + modeset_pipes |= crtc_state->bigjoiner_pipes; + } - if (!crtc_state->bigjoiner) - continue; + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) { + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } - linked_crtc = crtc_state->bigjoiner_linked_crtc; - linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc); - if (IS_ERR(linked_crtc_state)) - return PTR_ERR(linked_crtc_state); + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) { + int ret; - if (!intel_crtc_needs_modeset(crtc_state)) - continue; + crtc_state = 
intel_atomic_get_new_crtc_state(state, crtc); - linked_crtc_state->uapi.mode_changed = true; + crtc_state->uapi.mode_changed = true; - ret = drm_atomic_add_affected_connectors(&state->base, - &linked_crtc->base); + ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); if (ret) return ret; - ret = intel_atomic_add_affected_planes(state, linked_crtc); + ret = intel_atomic_add_affected_planes(state, crtc); if (ret) return ret; } @@ -7840,8 +7575,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { /* Kill old bigjoiner link, we may re-establish afterwards */ if (intel_crtc_needs_modeset(crtc_state) && - crtc_state->bigjoiner && !crtc_state->bigjoiner_slave) - kill_bigjoiner_slave(state, crtc_state); + intel_crtc_is_bigjoiner_master(crtc_state)) + kill_bigjoiner_slave(state, crtc); } return 0; @@ -7866,6 +7601,10 @@ static int intel_atomic_check(struct drm_device *dev, new_crtc_state, i) { if (new_crtc_state->inherited != old_crtc_state->inherited) new_crtc_state->uapi.mode_changed = true; + + if (new_crtc_state->uapi.scaling_filter != + old_crtc_state->uapi.scaling_filter) + new_crtc_state->uapi.mode_changed = true; } intel_vrr_check_modeset(state); @@ -7881,30 +7620,30 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state)) { - /* Light copy */ - intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state); - + if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) + copy_bigjoiner_crtc_state_nomodeset(state, crtc); + else + intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); continue; } - if (!new_crtc_state->uapi.enable) { - if (!new_crtc_state->bigjoiner_slave) { - intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state); - any_ms = true; - } + if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) { + drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable); continue; } - ret = intel_crtc_prepare_cleared_state(state, new_crtc_state); + ret = intel_crtc_prepare_cleared_state(state, crtc); if (ret) goto fail; + if (!new_crtc_state->hw.enable) + continue; + ret = intel_modeset_pipe_config(state, new_crtc_state); if (ret) goto fail; - ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state, - new_crtc_state); + ret = intel_atomic_check_bigjoiner(state, crtc); if (ret) goto fail; } @@ -7958,10 +7697,7 @@ static int intel_atomic_check(struct drm_device *dev, } if (new_crtc_state->bigjoiner) { - struct intel_crtc_state *linked_crtc_state = - intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc); - - if (intel_crtc_needs_modeset(linked_crtc_state)) { + if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) { new_crtc_state->uapi.mode_changed = true; new_crtc_state->update_pipe = false; } @@ -8141,9 +7877,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) hsw_set_linetime_wm(new_crtc_state); - - if (DISPLAY_VER(dev_priv) >= 11) - icl_set_pipe_chicken(new_crtc_state); } static void commit_pipe_pre_planes(struct intel_atomic_state *state, @@ -8208,7 +7941,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state, dev_priv->display->crtc_enable(state, crtc); - if (new_crtc_state->bigjoiner_slave) + if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) return; /* vblanks work again, re-enable 
pipe CRC. */ @@ -8218,7 +7951,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state, static void intel_update_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -8235,21 +7968,22 @@ static void intel_update_crtc(struct intel_atomic_state *state, if (new_crtc_state->update_pipe) intel_encoders_update_pipe(state, crtc); + + if (DISPLAY_VER(i915) >= 11 && + new_crtc_state->update_pipe) + icl_set_pipe_chicken(new_crtc_state); } intel_fbc_update(state, crtc); - intel_update_planes_on_crtc(state, crtc); + intel_crtc_planes_update_noarm(state, crtc); /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); commit_pipe_pre_planes(state, crtc); - if (DISPLAY_VER(dev_priv) >= 9) - skl_arm_planes_on_crtc(state, crtc); - else - i9xx_arm_planes_on_crtc(state, crtc); + intel_crtc_planes_update_arm(state, crtc); commit_pipe_post_planes(state, crtc); @@ -8325,7 +8059,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) */ if (!is_trans_port_sync_slave(old_crtc_state) && !intel_dp_mst_is_slave_trans(old_crtc_state) && - !old_crtc_state->bigjoiner_slave) + !intel_crtc_is_bigjoiner_slave(old_crtc_state)) continue; intel_old_crtc_state_disables(state, old_crtc_state, @@ -8440,7 +8174,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if (intel_dp_mst_is_slave_trans(new_crtc_state) || is_trans_port_sync_master(new_crtc_state) || - (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave)) + intel_crtc_is_bigjoiner_master(new_crtc_state)) continue; modeset_pipes &= ~BIT(pipe); @@ -8967,10 +8701,8 @@ static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) struct intel_crtc *crtc; u32 possible_crtcs = 0; - for_each_intel_crtc(dev, crtc) { - if (encoder->pipe_mask & BIT(crtc->pipe)) - possible_crtcs |= drm_crtc_mask(&crtc->base); - } + for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) + possible_crtcs |= drm_crtc_mask(&crtc->base); return possible_crtcs; } @@ -9026,6 +8758,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); intel_ddi_init(dev_priv, PORT_D_XELPD); + intel_ddi_init(dev_priv, PORT_TC1); } else if (IS_ALDERLAKE_P(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); @@ -9478,7 +9211,7 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state); intel_update_cdclk(i915); - intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK"); cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw; } @@ -9980,8 +9713,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) udelay(150); /* wait for warmup */ } - intel_de_write(dev_priv, PIPECONF(pipe), - PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); + intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE); intel_de_posting_read(dev_priv, PIPECONF(pipe)); intel_wait_for_pipe_scanline_moving(crtc); @@ -9995,18 +9727,15 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) pipe_name(pipe)); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & - 
DISPLAY_PLANE_ENABLE); + intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & - DISPLAY_PLANE_ENABLE); + intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & - DISPLAY_PLANE_ENABLE); + intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE); + intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE); + intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); intel_de_write(dev_priv, PIPECONF(pipe), 0); intel_de_posting_read(dev_priv, PIPECONF(pipe)); @@ -10156,7 +9885,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. */ if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) && - !crtc_state->bigjoiner_slave) + !intel_crtc_is_bigjoiner_slave(crtc_state)) intel_crtc_disable_noatomic(crtc, ctx); if (crtc_state->hw.active || HAS_GMCH(dev_priv)) { @@ -10369,12 +10098,18 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) /* read out to slave crtc as well for bigjoiner */ if (crtc_state->bigjoiner) { + struct intel_crtc *slave_crtc; + /* encoder should read be linked to bigjoiner master */ - WARN_ON(crtc_state->bigjoiner_slave); + WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); - crtc = crtc_state->bigjoiner_linked_crtc; - crtc_state = to_intel_crtc_state(crtc->base.state); - intel_encoder_get_config(encoder, crtc_state); + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(crtc_state)) { + struct intel_crtc_state *slave_crtc_state; + + slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state); + intel_encoder_get_config(encoder, slave_crtc_state); + } } } else { encoder->base.crtc = NULL; @@ -10673,6 +10408,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, vlv_wm_sanitize(dev_priv); } else if (DISPLAY_VER(dev_priv) >= 9) { skl_wm_get_hw_state(dev_priv); + skl_wm_sanitize(dev_priv); } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_wm_get_hw_state(dev_priv); } @@ -10688,6 +10424,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, } intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); + + intel_power_domains_sanitize_state(dev_priv); } void intel_display_resume(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index b61b75248ded..11d6134c53c8 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -27,7 +27,8 @@ #include <drm/drm_util.h> -enum link_m_n_set; +#include "i915_reg_defs.h" + enum drm_scaling_filter; struct dpll; struct drm_connector; @@ -317,8 +318,8 @@ enum aux_ch { /* Used by dp and fdi links */ struct intel_link_m_n { u32 tu; - u32 gmch_m; - u32 gmch_n; + u32 data_m; + u32 data_n; u32 link_m; u32 link_n; }; @@ -429,11 +430,11 @@ enum hpd_pin { &(dev)->mode_config.crtc_list, \ base.head) -#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ +#define for_each_intel_crtc_in_pipe_mask(dev, intel_crtc, pipe_mask) \ list_for_each_entry(intel_crtc, \ &(dev)->mode_config.crtc_list, \ base.head) \ - for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base)) + for_each_if((pipe_mask) & BIT(intel_crtc->pipe)) 
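/*
 * Illustrative usage sketch (hypothetical helper, not part of this patch):
 * the renamed iterator takes a bitmask of hardware pipes
 * (BIT(PIPE_A) | BIT(PIPE_B) | ...), e.g. encoder->pipe_mask or
 * crtc_state->bigjoiner_pipes, instead of the mask of DRM CRTC indices
 * used by the old for_each_intel_crtc_mask().  The helper below only
 * demonstrates converting between the two mask types, mirroring the
 * intel_encoder_possible_crtcs() loop earlier in this diff.
 */
static inline u32 intel_pipe_mask_to_crtc_mask(struct drm_device *dev,
					       u8 pipe_mask)
{
	struct intel_crtc *crtc;
	u32 crtc_mask = 0;

	/* visit only the CRTCs whose hardware pipe is set in pipe_mask */
	for_each_intel_crtc_in_pipe_mask(dev, crtc, pipe_mask)
		crtc_mask |= drm_crtc_mask(&crtc->base);

	return crtc_mask;
}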
#define for_each_intel_encoder(dev, intel_encoder) \ list_for_each_entry(intel_encoder, \ @@ -554,6 +555,10 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, bool bigjoiner); enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); +bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state); +bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state); +u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state); +struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state); void intel_plane_destroy(struct drm_plane *plane); void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state); @@ -605,18 +610,32 @@ bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); void intel_display_finish_reset(struct drm_i915_private *dev_priv); -void intel_dp_get_m_n(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, - enum link_m_n_set m_n); -void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); +void intel_zero_m_n(struct intel_link_m_n *m_n); +void intel_set_m_n(struct drm_i915_private *i915, + const struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg); +void intel_get_m_n(struct drm_i915_private *i915, + struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg); +bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, + enum transcoder transcoder); +void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, + const struct intel_link_m_n *m_n); +void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, + const struct intel_link_m_n *m_n); +void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, + struct intel_link_m_n *m_n); +void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, + struct intel_link_m_n *m_n); void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); -bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); -void hsw_enable_ips(const struct intel_crtc_state *crtc_state); -void hsw_disable_ips(const struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_port_to_power_domain(enum port port); enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 572445299b04..ffe6822d7414 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -16,6 +16,7 @@ #include "intel_dp_mst.h" #include "intel_drrs.h" #include "intel_fbc.h" +#include "intel_fbdev.h" #include "intel_hdcp.h" #include "intel_hdmi.h" #include "intel_pm.h" @@ -78,7 +79,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) if (DISPLAY_VER(dev_priv) >= 9) /* no global SR status; inspect per-plane WM */; else if (HAS_PCH_SPLIT(dev_priv)) - sr_enabled = 
intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN; + sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE; else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || IS_I945G(dev_priv) || IS_I945GM(dev_priv)) sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN; @@ -124,9 +125,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) struct drm_framebuffer *drm_fb; #ifdef CONFIG_DRM_FBDEV_EMULATION - if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { - fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); - + fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev); + if (fbdev_fb) { seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fbdev_fb->base.width, fbdev_fb->base.height, @@ -474,8 +474,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused) * reg for DC3CO debugging and validation, * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. */ - seq_printf(m, "DC3CO count: %d\n", - intel_de_read(dev_priv, DMC_DEBUG3)); + seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ? + DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3)); } else { dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT : SKL_DMC_DC3_DC5_COUNT; @@ -923,23 +923,23 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) yesno(crtc_state->uapi.active), DRM_MODE_ARG(&crtc_state->uapi.mode)); - if (crtc_state->hw.enable) { - seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n", - yesno(crtc_state->hw.active), - DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); + seq_printf(m, "\thw: enable=%s, active=%s\n", + yesno(crtc_state->hw.enable), yesno(crtc_state->hw.active)); + seq_printf(m, "\tadjusted_mode=" DRM_MODE_FMT "\n", + DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); + seq_printf(m, "\tpipe__mode=" DRM_MODE_FMT "\n", + DRM_MODE_ARG(&crtc_state->hw.pipe_mode)); - seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n", - crtc_state->pipe_src_w, crtc_state->pipe_src_h, - yesno(crtc_state->dither), crtc_state->pipe_bpp); + seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n", + crtc_state->pipe_src_w, crtc_state->pipe_src_h, + yesno(crtc_state->dither), crtc_state->pipe_bpp); - intel_scaler_info(m, crtc); - } + intel_scaler_info(m, crtc); if (crtc_state->bigjoiner) - seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n", - crtc_state->bigjoiner_linked_crtc->base.base.id, - crtc_state->bigjoiner_linked_crtc->base.name, - crtc_state->bigjoiner_slave ? "slave" : "master"); + seq_printf(m, "\tLinked to 0x%x pipes as a %s\n", + crtc_state->bigjoiner_pipes, + intel_crtc_is_bigjoiner_slave(crtc_state) ? 
"slave" : "master"); for_each_intel_encoder_mask(&dev_priv->drm, encoder, crtc_state->uapi.encoder_mask) @@ -1015,6 +1015,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); + seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0); seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", pll->state.hw_state.mg_refclkin_ctl); seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", @@ -2402,6 +2403,9 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector) */ void intel_crtc_debugfs_add(struct drm_crtc *crtc) { - if (crtc->debugfs_entry) - crtc_updates_add(crtc); + if (!crtc->debugfs_entry) + return; + + crtc_updates_add(crtc); + intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc)); } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 05babdcf5f2e..9ebae7ac3235 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -7,6 +7,7 @@ #include "i915_irq.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include "intel_crt.h" #include "intel_de.h" #include "intel_display_power.h" @@ -15,6 +16,7 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_hotplug.h" +#include "intel_mchbar_regs.h" #include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pm.h" @@ -682,9 +684,8 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915) int ret, tries = 0; while (1) { - ret = sandybridge_pcode_write_timeout(i915, - ICL_PCODE_EXIT_TCCOLD, - 0, 250, 1); + ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, + 250, 1); if (ret != -EAGAIN || ++tries == 3) break; msleep(1); @@ -4052,8 +4053,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block) * Spec states that we should timeout the request after 200us * but the function below will timeout after 500us */ - ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, - &high_val); + ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); if (ret == 0) { if (block && (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) @@ -5468,8 +5468,7 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) { if (IS_HASWELL(dev_priv)) { - if (sandybridge_pcode_write(dev_priv, - GEN6_PCODE_WRITE_D_COMP, val)) + if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) drm_dbg_kms(&dev_priv->drm, "Failed to write to D_COMP\n"); } else { @@ -5582,7 +5581,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); } /* @@ -6216,6 +6215,37 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915) } /** + * intel_power_domains_sanitize_state - sanitize power domains state + * @i915: i915 device instance + * + * Sanitize the power domains state during driver loading and system resume. 
+ * The function will disable all display power wells that BIOS has enabled + * without a user for it (any user for a power well has taken a reference + * on it by the time this function is called, after the state of all the + * pipe, encoder, etc. HW resources have been sanitized). + */ +void intel_power_domains_sanitize_state(struct drm_i915_private *i915) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_well *power_well; + + mutex_lock(&power_domains->lock); + + for_each_power_well_reverse(i915, power_well) { + if (power_well->desc->always_on || power_well->count || + !power_well->desc->ops->is_enabled(i915, power_well)) + continue; + + drm_dbg_kms(&i915->drm, + "BIOS left unused %s power well enabled, disabling it\n", + power_well->desc->name); + intel_power_well_disable(i915, power_well); + } + + mutex_unlock(&power_domains->lock); +} + +/** * intel_power_domains_enable - enable toggling of display power wells * @i915: i915 device instance * diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 686d18eaa24c..f6d0e6e73c6d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -7,7 +7,6 @@ #define __INTEL_DISPLAY_POWER_H__ #include "intel_runtime_pm.h" -#include "i915_reg.h" enum dpio_channel; enum dpio_phy; @@ -219,6 +218,7 @@ void intel_power_domains_disable(struct drm_i915_private *dev_priv); void intel_power_domains_suspend(struct drm_i915_private *dev_priv, enum i915_drm_suspend_mode); void intel_power_domains_resume(struct drm_i915_private *dev_priv); +void intel_power_domains_sanitize_state(struct drm_i915_private *dev_priv); void intel_display_power_suspend_late(struct drm_i915_private *i915); void intel_display_power_resume_early(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h index 4043e1276383..f05f0f9b5103 100644 --- a/drivers/gpu/drm/i915/display/intel_display_trace.h +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -13,6 +13,7 @@ #include <linux/tracepoint.h> #include "i915_drv.h" +#include "i915_irq.h" #include "intel_crtc.h" #include "intel_display_types.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 41e3dd25a78f..b50d0e6efe21 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -26,7 +26,6 @@ #ifndef __INTEL_DISPLAY_TYPES_H__ #define __INTEL_DISPLAY_TYPES_H__ -#include <linux/async.h> #include <linux/i2c.h> #include <linux/pm_qos.h> #include <linux/pwm.h> @@ -38,7 +37,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_dsc.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> @@ -145,25 +143,6 @@ struct intel_framebuffer { struct i915_address_space *dpt_vm; }; -struct intel_fbdev { - struct drm_fb_helper helper; - struct intel_framebuffer *fb; - struct i915_vma *vma; - unsigned long vma_flags; - async_cookie_t cookie; - int preferred_bpp; - - /* Whether or not fbdev hpd processing is temporarily suspended */ - bool hpd_suspended : 1; - /* Set when a hotplug was received while HPD processing was - * suspended - */ - bool hpd_waiting : 1; - - /* Protects hpd_suspended */ - struct mutex hpd_lock; -}; - enum intel_hotplug_state { 
INTEL_HOTPLUG_UNCHANGED, INTEL_HOTPLUG_CHANGED, @@ -634,6 +613,9 @@ struct intel_plane_state { struct intel_fb_view view; + /* Indicates if async flip is required */ + bool do_async_flip; + /* Plane pxp decryption state */ bool decrypt; @@ -1165,6 +1147,7 @@ struct intel_crtc_state { /* bitmask of actually visible planes (enum plane_id) */ u8 active_planes; + u8 scaled_planes; u8 nv12_planes; u8 c8_planes; @@ -1199,11 +1182,8 @@ struct intel_crtc_state { /* enable pipe big joiner? */ bool bigjoiner; - /* big joiner slave crtc? */ - bool bigjoiner_slave; - - /* linked crtc for bigjoiner, either slave or master */ - struct intel_crtc *bigjoiner_linked_crtc; + /* big joiner pipe bitmask */ + u8 bigjoiner_pipes; /* Display Stream compression state */ struct { @@ -1442,25 +1422,6 @@ struct intel_hdmi { }; struct intel_dp_mst_encoder; -/* - * enum link_m_n_set: - * When platform provides two set of M_N registers for dp, we can - * program them and switch between them incase of DRRS. - * But When only one such register is provided, we have to program the - * required divider value on that registers itself based on the DRRS state. - * - * M1_N1 : Program dp_m_n on M1_N1 registers - * dp_m2_n2 on M2_N2 registers (If supported) - * - * M2_N2 : Program dp_m2_n2 on M1_N1 registers - * M2_N2 registers are not supported - */ - -enum link_m_n_set { - /* Sets the m1_n1 and m2_n2 */ - M1_N1 = 0, - M2_N2 -}; struct intel_dp_compliance_data { unsigned long edid; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index a69b28d65a9b..7616a3906b9e 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -43,9 +43,9 @@ __stringify(major) "_" \ __stringify(minor) ".bin" -#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 -#define GEN13_DMC_MAX_FW_SIZE 0x20000 +#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE #define ADLP_DMC_PATH DMC_PATH(adlp, 2, 14) #define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 14) @@ -684,23 +684,23 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) if (IS_ALDERLAKE_P(dev_priv)) { dmc->fw_path = ADLP_DMC_PATH; dmc->required_version = ADLP_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN13_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_S(dev_priv)) { dmc->fw_path = ADLS_DMC_PATH; dmc->required_version = ADLS_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (IS_DG1(dev_priv)) { dmc->fw_path = DG1_DMC_PATH; dmc->required_version = DG1_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (IS_ROCKETLAKE(dev_priv)) { dmc->fw_path = RKL_DMC_PATH; dmc->required_version = RKL_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (DISPLAY_VER(dev_priv) >= 12) { dmc->fw_path = TGL_DMC_PATH; dmc->required_version = TGL_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (DISPLAY_VER(dev_priv) == 11) { dmc->fw_path = ICL_DMC_PATH; dmc->required_version = ICL_DMC_VERSION_REQUIRED; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index b20f3441ca60..7c590309a3a9 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ 
b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -6,7 +6,7 @@ #ifndef __INTEL_DMC_H__ #define __INTEL_DMC_H__ -#include "i915_reg.h" +#include "i915_reg_defs.h" #include "intel_wakeref.h" #include <linux/workqueue.h> diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 36ab58c25b64..1046e7fe310a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -46,6 +46,7 @@ #include "intel_atomic.h" #include "intel_audio.h" #include "intel_backlight.h" +#include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" @@ -72,8 +73,6 @@ #include "intel_vdsc.h" #include "intel_vrr.h" -#define DP_DPRX_ESI_LEN 14 - /* DP DSC throughput values used for slice count calculations KPixels/s */ #define DP_DSC_PEAK_PIXEL_RATE 2720000 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 @@ -705,7 +704,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, i915->max_cdclk_freq * 48 / intel_dp_mode_to_fec_clock(mode_clock); - DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner); + drm_dbg_kms(&i915->drm, "Max big joiner bpp: %u\n", max_bpp_bigjoiner); bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); } @@ -887,9 +886,8 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, return MODE_CLOCK_HIGH; /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ - tmds_clock = target_clock; - if (drm_mode_is_420_only(info, mode)) - tmds_clock /= 2; + tmds_clock = intel_hdmi_tmds_clock(target_clock, 8, + drm_mode_is_420_only(info, mode)); if (intel_dp->dfp.min_tmds_clock && tmds_clock < intel_dp->dfp.min_tmds_clock) @@ -1140,21 +1138,12 @@ static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, intel_dp->dfp.ycbcr_444_to_420); } -static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, int bpc) -{ - int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; - - if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) - clock /= 2; - - return clock; -} - static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, int bpc) { - int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc); + int clock = crtc_state->hw.adjusted_mode.crtc_clock; + int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, + intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)); if (intel_dp->dfp.min_tmds_clock && tmds_clock < intel_dp->dfp.min_tmds_clock) @@ -1167,14 +1156,13 @@ static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, return true; } -static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - int bpc) +static bool intel_dp_hdmi_bpc_possible(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int bpc) { - return intel_hdmi_deep_color_possible(crtc_state, bpc, - intel_dp->has_hdmi_sink, - intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && + return intel_hdmi_bpc_possible(crtc_state, bpc, intel_dp->has_hdmi_sink, + intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); } @@ -1192,7 +1180,7 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp, if (intel_dp->dfp.min_tmds_clock) { for (; bpc >= 10; bpc -= 2) { - if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) + if (intel_dp_hdmi_bpc_possible(intel_dp, crtc_state, bpc)) break; } } @@ -1897,7 +1885,7 @@ intel_dp_compute_config(struct intel_encoder 
*encoder, /* FIXME: abstract this better */ if (pipe_config->splitter.enable) - pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count; + pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count; if (!HAS_DDI(dev_priv)) g4x_dp_set_clock(encoder, pipe_config); @@ -2813,11 +2801,22 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) } static bool -intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) +intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) +{ + return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; +} + +static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) { - return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, - sink_irq_vector, DP_DPRX_ESI_LEN) == - DP_DPRX_ESI_LEN; + int retry; + + for (retry = 0; retry < 3; retry++) { + if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1, + &esi[1], 3) == 3) + return true; + } + + return false; } bool @@ -2909,7 +2908,8 @@ out: } static ssize_t -intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, +intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, + const struct hdmi_drm_infoframe *drm_infoframe, struct dp_sdp *sdp, size_t size) { @@ -2925,12 +2925,12 @@ intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_in len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); if (len < 0) { - DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); + drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n"); return -ENOSPC; } if (len != infoframe_size) { - DRM_DEBUG_KMS("wrong static hdr metadata size\n"); + drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n"); return -ENOSPC; } @@ -3003,7 +3003,8 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, sizeof(sdp)); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: - len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, + len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv, + &crtc_state->infoframes.drm.drm, &sdp, sizeof(sdp)); break; default: @@ -3411,22 +3412,22 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, switch (data->phy_pattern) { case DP_PHY_TEST_PATTERN_NONE: - DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); break; case DP_PHY_TEST_PATTERN_D10_2: - DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); break; case DP_PHY_TEST_PATTERN_ERROR_COUNT: - DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_SCRAMBLED_0); break; case DP_PHY_TEST_PATTERN_PRBS7: - DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); break; @@ -3436,7 +3437,8 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, * current firmware of DPR-100 could not set it, so hardcoding * now for complaince test. 
*/ - DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); + drm_dbg_kms(&dev_priv->drm, + "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); pattern_val = 0x3e0f83e0; intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); pattern_val = 0x0f83e0f8; @@ -3453,7 +3455,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, * current firmware of DPR-100 could not set it, so hardcoding * now for complaince test. */ - DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n"); pattern_val = 0xFB; intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | @@ -3522,13 +3524,14 @@ intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, static void intel_dp_process_phy_request(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; u8 link_status[DP_LINK_STATUS_SIZE]; if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, link_status) < 0) { - DRM_DEBUG_KMS("failed to get link status\n"); + drm_dbg_kms(&i915->drm, "failed to get link status\n"); return; } @@ -3553,11 +3556,12 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { - DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); + drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n"); return DP_TEST_NAK; } @@ -3614,15 +3618,63 @@ update_status: "Could not write test response to sink\n"); } +static bool intel_dp_link_ok(struct intel_dp *intel_dp, + u8 link_status[DP_LINK_STATUS_SIZE]) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + bool uhbr = intel_dp->link_rate >= 1000000; + bool ok; + + if (uhbr) + ok = drm_dp_128b132b_lane_channel_eq_done(link_status, + intel_dp->lane_count); + else + ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); + + if (ok) + return true; + + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] %s link not ok, retraining\n", + encoder->base.base.id, encoder->base.name, + uhbr ? 
"128b/132b" : "8b/10b"); + + return false; +} + static void -intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) +intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack) { - drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); + bool handled = false; - if (esi[1] & DP_CP_IRQ) { - intel_hdcp_handle_cp_irq(intel_dp->attached_connector); - *handled = true; - } + drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); + if (handled) + ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); + + if (esi[1] & DP_CP_IRQ) { + intel_hdcp_handle_cp_irq(intel_dp->attached_connector); + ack[1] |= DP_CP_IRQ; + } +} + +static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u8 link_status[DP_LINK_STATUS_SIZE] = {}; + const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status, + esi_link_status_size) != esi_link_status_size) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to read link status\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + return intel_dp_link_ok(intel_dp, link_status); } /** @@ -3647,20 +3699,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); for (;;) { - /* - * The +2 is because DP_DPRX_ESI_LEN is 14, but we then - * pass in "esi+10" to drm_dp_channel_eq_ok(), which - * takes a 6-byte array. So we actually need 16 bytes - * here. - * - * Somebody who knows what the limits actually are - * should check this, but for now this is at least - * harmless and avoids a valid compiler warning about - * using more of the array than we have allocated. 
- */ - u8 esi[DP_DPRX_ESI_LEN+2] = {}; - bool handled; - int retry; + u8 esi[4] = {}; + u8 ack[4] = {}; if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { drm_dbg_kms(&i915->drm, @@ -3670,30 +3710,22 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) break; } - /* check link status - esi[10] = 0x200c */ + drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi); + if (intel_dp->active_mst_links > 0 && link_ok && - !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { - drm_dbg_kms(&i915->drm, - "channel EQ not ok, retraining\n"); - link_ok = false; + esi[3] & LINK_STATUS_CHANGED) { + if (!intel_dp_mst_link_status(intel_dp)) + link_ok = false; + ack[3] |= LINK_STATUS_CHANGED; } - drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); + intel_dp_mst_hpd_irq(intel_dp, esi, ack); - intel_dp_mst_hpd_irq(intel_dp, esi, &handled); - - if (!handled) + if (!memchr_inv(ack, 0, sizeof(ack))) break; - for (retry = 0; retry < 3; retry++) { - int wret; - - wret = drm_dp_dpcd_write(&intel_dp->aux, - DP_SINK_COUNT_ESI+1, - &esi[1], 3); - if (wret == 3) - break; - } + if (!intel_dp_ack_sink_irq_esi(intel_dp, ack)) + drm_dbg_kms(&i915->drm, "Failed to ack ESI\n"); } return link_ok; @@ -3756,8 +3788,8 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) intel_dp->lane_count)) return false; - /* Retrain if Channel EQ or CR not ok */ - return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); + /* Retrain if link not ok */ + return !intel_dp_link_ok(intel_dp, link_status); } static bool intel_dp_has_connector(struct intel_dp *intel_dp, @@ -3787,14 +3819,14 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp, static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, - u32 *crtc_mask) + u8 *pipe_mask) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; int ret = 0; - *crtc_mask = 0; + *pipe_mask = 0; if (!intel_dp_needs_link_retrain(intel_dp)) return 0; @@ -3828,12 +3860,12 @@ static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, !try_wait_for_completion(&conn_state->commit->hw_done)) continue; - *crtc_mask |= drm_crtc_mask(&crtc->base); + *pipe_mask |= BIT(crtc->pipe); } drm_connector_list_iter_end(&conn_iter); if (!intel_dp_needs_link_retrain(intel_dp)) - *crtc_mask = 0; + *pipe_mask = 0; return ret; } @@ -3852,7 +3884,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc; - u32 crtc_mask; + u8 pipe_mask; int ret; if (!intel_dp_is_connected(intel_dp)) @@ -3863,17 +3895,17 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, if (ret) return ret; - ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); + ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask); if (ret) return ret; - if (crtc_mask == 0) + if (pipe_mask == 0) return 0; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", encoder->base.base.id, encoder->base.name); - for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); @@ -3884,7 +3916,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, intel_crtc_pch_transcoder(crtc), false); } - for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { 
const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); @@ -3901,7 +3933,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, break; } - for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); @@ -3919,14 +3951,14 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, - u32 *crtc_mask) + u8 *pipe_mask) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; int ret = 0; - *crtc_mask = 0; + *pipe_mask = 0; drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { @@ -3957,7 +3989,7 @@ static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, !try_wait_for_completion(&conn_state->commit->hw_done)) continue; - *crtc_mask |= drm_crtc_mask(&crtc->base); + *pipe_mask |= BIT(crtc->pipe); } drm_connector_list_iter_end(&conn_iter); @@ -3970,7 +4002,7 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc; - u32 crtc_mask; + u8 pipe_mask; int ret; ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, @@ -3978,17 +4010,17 @@ static int intel_dp_do_phy_test(struct intel_encoder *encoder, if (ret) return ret; - ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask); + ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask); if (ret) return ret; - if (crtc_mask == 0) + if (pipe_mask == 0) return 0; drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", encoder->base.base.id, encoder->base.name); - for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) { const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); @@ -4974,6 +5006,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, mutex_lock(&dev->mode_config.mutex); edid = drm_get_edid(connector, &intel_dp->aux.ddc); + if (!edid) { + /* Fallback to EDID from ACPI OpRegion, if any */ + edid = intel_opregion_get_edid(intel_connector); + if (edid) + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] Using OpRegion EDID\n", + connector->base.id, connector->name); + } if (edid) { if (drm_add_edid_modes(connector, edid)) { drm_connector_update_edid_property(connector, edid); @@ -5048,8 +5088,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) intel_connector = container_of(work, typeof(*intel_connector), modeset_retry_work); connector = &intel_connector->base; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, - connector->name); + drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id, + connector->name); /* Grab the locks before changing connector property*/ mutex_lock(&connector->dev->mode_config.mutex); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index b64145a3869a..d457e17bdc57 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -8,8 +8,6 @@ #include <linux/types.h> -#include "i915_reg.h" - enum intel_output_format; enum pipe; enum port; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c 
b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 5fbb767fcd63..2bc119374555 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -10,7 +10,7 @@ #include "intel_pps.h" #include "intel_tc.h" -u32 intel_dp_pack_aux(const u8 *src, int src_bytes) +static u32 intel_dp_aux_pack(const u8 *src, int src_bytes) { int i; u32 v = 0; @@ -22,7 +22,7 @@ u32 intel_dp_pack_aux(const u8 *src, int src_bytes) return v; } -static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes) +static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes) { int i; @@ -267,7 +267,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (i = 0; i < send_bytes; i += 4) intel_uncore_write(uncore, ch_data[i >> 2], - intel_dp_pack_aux(send + i, + intel_dp_aux_pack(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ @@ -352,7 +352,7 @@ done: recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) - intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]), + intel_dp_aux_unpack(intel_uncore_read(uncore, ch_data[i >> 2]), recv + i, recv_bytes - i); ret = recv_bytes; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 4afbe76217b9..738577537bc7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -6,12 +6,8 @@ #ifndef __INTEL_DP_AUX_H__ #define __INTEL_DP_AUX_H__ -#include <linux/types.h> - struct intel_dp; -u32 intel_dp_pack_aux(const u8 *src, int src_bytes); - void intel_dp_aux_fini(struct intel_dp *intel_dp); void intel_dp_aux_init(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 9451f336f28f..5d98773efd1b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -712,7 +712,7 @@ static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_ return false; } -static void +void intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]) { @@ -996,6 +996,23 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp, return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1; } +static int +intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 sink_status; + int ret; + + ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status); + if (ret != 1) { + drm_dbg_kms(&i915->drm, "Failed to read sink status\n"); + return ret < 0 ? ret : -EIO; + } + + return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 
1 : 0; +} + /** * intel_dp_stop_link_train - stop link training * @intel_dp: DP struct @@ -1015,11 +1032,21 @@ static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp, void intel_dp_stop_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + intel_dp->link_trained = true; intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_DISABLE); + + if (intel_dp_is_uhbr(crtc_state) && + wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] 128b/132b intra-hop not clearing\n", + encoder->base.base.id, encoder->base.name); + } } static bool @@ -1083,8 +1110,6 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp, bool ret = true; int i; - intel_dp_prepare_link_train(intel_dp, crtc_state); - for (i = lttpr_count - 1; i >= 0; i--) { enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i); @@ -1104,6 +1129,272 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp, return ret; } +/* + * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1) + */ +static bool +intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u8 link_status[DP_LINK_STATUS_SIZE]; + int delay_us; + int try, max_tries = 20; + unsigned long deadline; + bool timeout = false; + + /* + * Reset signal levels. Start transmitting 128b/132b TPS1. + * + * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1 + * in DP_TRAINING_PATTERN_SET. + */ + if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX, + DP_TRAINING_PATTERN_1)) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to start 128b/132b TPS1\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux); + + /* Read the initial TX FFE settings. */ + if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to read TX FFE presets\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + /* Update signal levels and training set as requested. */ + intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); + if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to set initial TX FFE settings\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + /* Start transmitting 128b/132b TPS2. */ + if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX, + DP_TRAINING_PATTERN_2)) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to start 128b/132b TPS2\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + /* Time budget for the LANEx_EQ_DONE Sequence */ + deadline = jiffies + msecs_to_jiffies_timeout(400); + + for (try = 0; try < max_tries; try++) { + usleep_range(delay_us, 2 * delay_us); + + /* + * The delay may get updated. The transmitter shall read the + * delay before link status during link training. 
+ */ + delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux); + + if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to read link status\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (drm_dp_128b132b_link_training_failed(link_status)) { + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_err(&i915->drm, + "[ENCODER:%d:%s] Downstream link training failure\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) { + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] Lane channel eq done\n", + encoder->base.base.id, encoder->base.name); + break; + } + + if (timeout) { + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_err(&i915->drm, + "[ENCODER:%d:%s] Lane channel eq timeout\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (time_after(jiffies, deadline)) + timeout = true; /* try one last time after deadline */ + + /* Update signal levels and training set as requested. */ + intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); + if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to update TX FFE settings\n", + encoder->base.base.id, encoder->base.name); + return false; + } + } + + if (try == max_tries) { + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_err(&i915->drm, + "[ENCODER:%d:%s] Max loop count reached\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + for (;;) { + if (time_after(jiffies, deadline)) + timeout = true; /* try one last time after deadline */ + + if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to read link status\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (drm_dp_128b132b_link_training_failed(link_status)) { + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_err(&i915->drm, + "[ENCODER:%d:%s] Downstream link training failure\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (drm_dp_128b132b_eq_interlane_align_done(link_status)) { + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] Interlane align done\n", + encoder->base.base.id, encoder->base.name); + break; + } + + if (timeout) { + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_err(&i915->drm, + "[ENCODER:%d:%s] Interlane align timeout\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + usleep_range(2000, 3000); + } + + return true; +} + +/* + * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2) + */ +static bool +intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int lttpr_count) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u8 link_status[DP_LINK_STATUS_SIZE]; + unsigned long deadline; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_2_CDS) != 1) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to start 128b/132b TPS2 CDS\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + /* Time budget for the LANEx_CDS_DONE Sequence */ + deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20); + + for (;;) { + bool timeout = false; + + if 
(time_after(jiffies, deadline)) + timeout = true; /* try one last time after deadline */ + + usleep_range(2000, 3000); + + if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to read link status\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (drm_dp_128b132b_eq_interlane_align_done(link_status) && + drm_dp_128b132b_cds_interlane_align_done(link_status) && + drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) { + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] CDS interlane align done\n", + encoder->base.base.id, encoder->base.name); + break; + } + + if (drm_dp_128b132b_link_training_failed(link_status)) { + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_err(&i915->drm, + "[ENCODER:%d:%s] Downstream link training failure\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (timeout) { + intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status); + drm_err(&i915->drm, + "[ENCODER:%d:%s] CDS timeout\n", + encoder->base.base.id, encoder->base.name); + return false; + } + } + + /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */ + if (intel_dp->set_idle_link_train) + intel_dp->set_idle_link_train(intel_dp, crtc_state); + + return true; +} + +/* + * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.) + */ +static bool +intel_dp_128b132b_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int lttpr_count) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + bool passed = false; + + if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] 128b/132b intra-hop not clear\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) && + intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count)) + passed = true; + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] 128b/132b Link Training %s at link rate = %d, lane count = %d\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + passed ? "passed" : "failed", + crtc_state->port_clock, crtc_state->lane_count); + + return passed; +} + /** * intel_dp_start_link_train - start link training * @intel_dp: DP struct @@ -1117,6 +1408,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp, void intel_dp_start_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + bool passed; /* * TODO: Reiniting LTTPRs here won't be needed once proper connector * HW state readout is added. @@ -1127,6 +1419,13 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, /* Still continue with enabling the port and link training. 
*/ lttpr_count = 0; - if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count)) + intel_dp_prepare_link_train(intel_dp, crtc_state); + + if (intel_dp_is_uhbr(crtc_state)) + passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count); + else + passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count); + + if (!passed) intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index dbfb15705aaa..dc1556b46b85 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -29,6 +29,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, void intel_dp_stop_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); +void +intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, + const u8 link_status[DP_LINK_STATUS_SIZE]); + /* Get the TPSx symbol type of the value programmed to DP_TRAINING_PATTERN_SET */ static inline u8 intel_dp_training_pattern_symbol(u8 pattern) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index b8bc7d397c81..e30e698aa684 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -99,6 +99,29 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, return 0; } +static int intel_dp_mst_update_slots(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr; + struct drm_dp_mst_topology_state *topology_state; + u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ? + DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B; + + topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr); + if (IS_ERR(topology_state)) { + drm_dbg_kms(&i915->drm, "slot update failed\n"); + return PTR_ERR(topology_state); + } + + drm_dp_mst_update_slots(topology_state, link_coding_cap); + + return 0; +} + static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -155,6 +178,10 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, if (ret) return ret; + ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state); + if (ret) + return ret; + pipe_config->limited_color_range = intel_dp_limited_color_range(pipe_config, conn_state); @@ -357,6 +384,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); + int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 
0 : 1; int ret; drm_dbg_kms(&i915->drm, "active links %d\n", @@ -366,7 +394,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot); if (ret) { drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); } @@ -475,6 +503,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector = to_intel_connector(conn_state->connector); + int start_slot = intel_dp_is_uhbr(pipe_config) ? 0 : 1; int ret; bool first_mst_stream; @@ -509,7 +538,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_dp->active_mst_links++; - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot); /* * Before Gen 12 this is not done as part of @@ -522,8 +551,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_pipe_clock(encoder, pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); - - intel_dp_set_m_n(pipe_config, M1_N1); } static void intel_mst_enable_dp(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 1ce0c171f4fb..14f5ffe27d05 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -16,6 +16,10 @@ #include "intel_snps_phy.h" #include "vlv_sideband.h" +struct intel_dpll_funcs { + int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state); +}; + struct intel_limit { struct { int min, max; @@ -1400,6 +1404,14 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = { .crtc_compute_clock = i8xx_crtc_compute_clock, }; +int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + return i915->dpll_funcs->crtc_compute_clock(crtc_state); +} + void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index 1af0ac43cca4..69b06a9e473e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -15,6 +15,7 @@ struct intel_crtc_state; enum pipe; void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); +int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state); int vlv_calc_dpll_params(int refclk, struct dpll *clock); int pnv_calc_dpll_params(int refclk, struct dpll *clock); int i9xx_calc_dpll_params(int refclk, struct dpll *clock); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index fc8fda77483a..569903d47aea 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -28,6 +28,7 @@ #include "intel_dpll_mgr.h" #include "intel_pch_refclk.h" #include "intel_tc.h" +#include "intel_tc_phy_regs.h" /** * DOC: Display PLLs @@ -49,6 +50,41 @@ * commit phase. */ +/* platform specific hooks for managing DPLLs */ +struct intel_shared_dpll_funcs { + /* + * Hook for enabling the pll, called from intel_enable_shared_dpll() if + * the pll is not already enabled. 
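struct intel_shared_dpll_funcs, whose hooks are documented in the members that follow, is consumed as a per-platform table of function pointers; each platform's PLL descriptor typically points at one such table. A rough sketch of that pattern, using hypothetical handler names rather than the ones in this patch, with prototypes matching the hook signatures shown in this hunk:

    static void example_pll_enable(struct drm_i915_private *i915,
                                   struct intel_shared_dpll *pll);
    static void example_pll_disable(struct drm_i915_private *i915,
                                    struct intel_shared_dpll *pll);
    static bool example_pll_get_hw_state(struct drm_i915_private *i915,
                                         struct intel_shared_dpll *pll,
                                         struct intel_dpll_hw_state *hw_state);
    static int example_pll_get_freq(struct drm_i915_private *i915,
                                    const struct intel_shared_dpll *pll,
                                    const struct intel_dpll_hw_state *pll_state);

    /* Hypothetical per-platform hook table; the real tables in
     * intel_dpll_mgr.c follow this shape. */
    static const struct intel_shared_dpll_funcs example_pll_funcs = {
        .enable = example_pll_enable,
        .disable = example_pll_disable,
        .get_hw_state = example_pll_get_hw_state,
        .get_freq = example_pll_get_freq,
    };

Moving the struct out of the public header keeps these hooks private to the DPLL manager, which is why only the .c file defines them after this change.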
+ */ + void (*enable)(struct drm_i915_private *i915, + struct intel_shared_dpll *pll); + + /* + * Hook for disabling the pll, called from intel_disable_shared_dpll() + * only when it is safe to disable the pll, i.e., there are no more + * tracked users for it. + */ + void (*disable)(struct drm_i915_private *i915, + struct intel_shared_dpll *pll); + + /* + * Hook for reading the values currently programmed to the DPLL + * registers. This is used for initial hw state readout and state + * verification after a mode set. + */ + bool (*get_hw_state)(struct drm_i915_private *i915, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state); + + /* + * Hook for calculating the pll's output frequency based on its passed + * in state. + */ + int (*get_freq)(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state); +}; + struct intel_dpll_mgr { const struct dpll_info *dpll_info; @@ -2712,6 +2748,9 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; else pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; + + if (i915->vbt.override_afc_startup) + pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val); } static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, @@ -2913,6 +2952,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | DKL_PLL_DIV0_FBPREDIV(m1div) | DKL_PLL_DIV0_FBDIV_INT(m2div_int); + if (dev_priv->vbt.override_afc_startup) { + u8 val = dev_priv->vbt.override_afc_startup_val; + + pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val); + } pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) | DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt); @@ -3412,10 +3456,10 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); - hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK | - DKL_PLL_DIV0_PROP_COEFF_MASK | - DKL_PLL_DIV0_FBPREDIV_MASK | - DKL_PLL_DIV0_FBDIV_INT_MASK); + val = DKL_PLL_DIV0_MASK; + if (dev_priv->vbt.override_afc_startup) + val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; + hw_state->mg_pll_div0 &= val; hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | @@ -3477,6 +3521,10 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, TGL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, TGL_DPLL_CFGCR1(id)); + if (dev_priv->vbt.override_afc_startup) { + hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id)); + hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK; + } } else { if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) { hw_state->cfgcr0 = intel_de_read(dev_priv, @@ -3518,7 +3566,7 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; const enum intel_dpll_id id = pll->info->id; - i915_reg_t cfgcr0_reg, cfgcr1_reg; + i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG; if (IS_ALDERLAKE_S(dev_priv)) { cfgcr0_reg = ADLS_DPLL_CFGCR0(id); @@ -3532,6 +3580,7 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, } else if (DISPLAY_VER(dev_priv) >= 12) { cfgcr0_reg = TGL_DPLL_CFGCR0(id); cfgcr1_reg = TGL_DPLL_CFGCR1(id); + div0_reg = TGL_DPLL0_DIV0(id); } else { if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) 
{ cfgcr0_reg = ICL_DPLL_CFGCR0(4); @@ -3544,6 +3593,12 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0); intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1); + drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup && + !i915_mmio_reg_valid(div0_reg)); + if (dev_priv->vbt.override_afc_startup && + i915_mmio_reg_valid(div0_reg)) + intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, + hw_state->div0); intel_de_posting_read(dev_priv, cfgcr1_reg); } @@ -3631,13 +3686,11 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv, val |= hw_state->mg_clktop2_hsclkctl; intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); - val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); - val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK | - DKL_PLL_DIV0_PROP_COEFF_MASK | - DKL_PLL_DIV0_FBPREDIV_MASK | - DKL_PLL_DIV0_FBDIV_INT_MASK); - val |= hw_state->mg_pll_div0; - intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val); + val = DKL_PLL_DIV0_MASK; + if (dev_priv->vbt.override_afc_startup) + val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; + intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, + hw_state->mg_pll_div0); val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | @@ -3876,13 +3929,14 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { drm_dbg_kms(&dev_priv->drm, - "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, " + "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, " "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, " "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, " "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, " "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n", hw_state->cfgcr0, hw_state->cfgcr1, + hw_state->div0, hw_state->mg_refclkin_ctl, hw_state->mg_clktop2_coreclkctl1, hw_state->mg_clktop2_hsclkctl, diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index ef2889753807..ba2fdfce1579 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -44,6 +44,7 @@ struct intel_crtc; struct intel_crtc_state; struct intel_encoder; struct intel_shared_dpll; +struct intel_shared_dpll_funcs; /** * enum intel_dpll_id - possible DPLL ids @@ -207,6 +208,9 @@ struct intel_dpll_hw_state { /* icl */ u32 cfgcr0; + /* tgl */ + u32 div0; + /* bxt */ u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12; @@ -252,51 +256,6 @@ struct intel_shared_dpll_state { }; /** - * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs - */ -struct intel_shared_dpll_funcs { - /** - * @enable: - * - * Hook for enabling the pll, called from intel_enable_shared_dpll() - * if the pll is not already enabled. - */ - void (*enable)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - - /** - * @disable: - * - * Hook for disabling the pll, called from intel_disable_shared_dpll() - * only when it is safe to disable the pll, i.e., there are no more - * tracked users for it. - */ - void (*disable)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - - /** - * @get_hw_state: - * - * Hook for reading the values currently programmed to the DPLL - * registers. This is used for initial hw state readout and state - * verification after a mode set. 
- */ - bool (*get_hw_state)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state); - - /** - * @get_freq: - * - * Hook for calculating the pll's output frequency based on its - * passed in state. - */ - int (*get_freq)(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state); -}; - -/** * struct dpll_info - display PLL platform specific info */ struct dpll_info { diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index c2f8f853db90..05dd7dba3a5c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -3,11 +3,13 @@ * Copyright © 2021 Intel Corporation */ +#include "gem/i915_gem_domain.h" +#include "gt/gen8_ppgtt.h" + #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" -#include "gt/gen8_ppgtt.h" struct i915_dpt { struct i915_address_space vm; diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index c1439fcb5a95..fa715b8ea310 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -47,17 +47,13 @@ * requested by userspace. */ -void -intel_drrs_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - int output_bpp, bool constant_n) +static bool can_enable_drrs(struct intel_connector *connector, + const struct intel_crtc_state *pipe_config) { - struct intel_connector *intel_connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - int pixel_clock; + const struct drm_i915_private *i915 = to_i915(connector->base.dev); if (pipe_config->vrr.enable) - return; + return false; /* * DRRS and PSR can't be enable together, so giving preference to PSR @@ -66,15 +62,30 @@ intel_drrs_compute_config(struct intel_dp *intel_dp, * after intel_psr_compute_config(). 
*/ if (pipe_config->has_psr) - return; + return false; - if (!intel_connector->panel.downclock_mode || - dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return connector->panel.downclock_mode && + i915->drrs.type == SEAMLESS_DRRS_SUPPORT; +} + +void +intel_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n) +{ + struct intel_connector *connector = intel_dp->attached_connector; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + int pixel_clock; + + if (!can_enable_drrs(connector, pipe_config)) { + if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder)) + intel_zero_m_n(&pipe_config->dp_m2_n2); return; + } pipe_config->has_drrs = true; - pixel_clock = intel_connector->panel.downclock_mode->clock; + pixel_clock = connector->panel.downclock_mode->clock; if (pipe_config->splitter.enable) pixel_clock /= pipe_config->splitter.link_count; @@ -84,7 +95,42 @@ intel_drrs_compute_config(struct intel_dp *intel_dp, /* FIXME: abstract this better */ if (pipe_config->splitter.enable) - pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; + pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count; +} + +static void +intel_drrs_set_refresh_rate_pipeconf(const struct intel_crtc_state *crtc_state, + enum drrs_refresh_rate_type refresh_type) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val, bit; + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + bit = PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + bit = PIPECONF_EDP_RR_MODE_SWITCH; + + val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); + + if (refresh_type == DRRS_LOW_RR) + val |= bit; + else + val &= ~bit; + + intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); +} + +static void +intel_drrs_set_refresh_rate_m_n(const struct intel_crtc_state *crtc_state, + enum drrs_refresh_rate_type refresh_type) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + intel_cpu_transcoder_set_m1_n1(crtc, crtc_state->cpu_transcoder, + refresh_type == DRRS_LOW_RR ? 
+ &crtc_state->dp_m2_n2 : &crtc_state->dp_m_n); } static void intel_drrs_set_state(struct drm_i915_private *dev_priv, @@ -120,37 +166,10 @@ static void intel_drrs_set_state(struct drm_i915_private *dev_priv, return; } - if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { - switch (refresh_type) { - case DRRS_HIGH_RR: - intel_dp_set_m_n(crtc_state, M1_N1); - break; - case DRRS_LOW_RR: - intel_dp_set_m_n(crtc_state, M2_N2); - break; - case DRRS_MAX_RR: - default: - drm_err(&dev_priv->drm, - "Unsupported refreshrate type\n"); - } - } else if (DISPLAY_VER(dev_priv) > 6) { - i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); - u32 val; - - val = intel_de_read(dev_priv, reg); - if (refresh_type == DRRS_LOW_RR) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val |= PIPECONF_EDP_RR_MODE_SWITCH; - } else { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val &= ~PIPECONF_EDP_RR_MODE_SWITCH; - } - intel_de_write(dev_priv, reg, val); - } + if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) + intel_drrs_set_refresh_rate_m_n(crtc_state, refresh_type); + else if (DISPLAY_VER(dev_priv) > 6) + intel_drrs_set_refresh_rate_pipeconf(crtc_state, refresh_type); dev_priv->drrs.refresh_rate_type = refresh_type; @@ -405,6 +424,7 @@ intel_drrs_init(struct intel_connector *connector, struct drm_display_mode *fixed_mode) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_encoder *encoder = connector->encoder; struct drm_display_mode *downclock_mode = NULL; INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work); @@ -416,6 +436,13 @@ intel_drrs_init(struct intel_connector *connector, return NULL; } + if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) && + encoder->port != PORT_A) { + drm_dbg_kms(&dev_priv->drm, + "DRRS only supported on eDP port A\n"); + return NULL; + } + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); return NULL; diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 83a69a4a4fea..b34a67309976 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -4,6 +4,8 @@ * */ +#include "gem/i915_gem_internal.h" + #include "i915_drv.h" #include "intel_de.h" #include "intel_display_types.h" diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 654a11f24b80..6cb9c580cdca 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -8,7 +8,7 @@ #include <linux/types.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" struct intel_crtc_state; struct i915_vma; diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index a3a906cb097e..eafef0a87fea 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -79,8 +79,8 @@ struct intel_dsi { */ enum mipi_dsi_pixel_format pixel_format; - /* video mode format for MIPI_VIDEO_MODE_FORMAT register */ - u32 video_mode_format; + /* NON_BURST_SYNC_PULSE, NON_BURST_SYNC_EVENTS, or BURST_MODE */ + int video_mode; /* eot for MIPI_EOT_DISABLE register */ u8 eotp_pkt; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 0da91849efde..6b4a27372c82 100644 --- 
a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -39,10 +39,12 @@ #include <video/mipi_display.h> #include "i915_drv.h" +#include "i915_reg.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" #include "vlv_dsi.h" +#include "vlv_dsi_regs.h" #include "vlv_sideband.h" #define MIPI_TRANSFER_MODE_SHIFT 0 @@ -426,24 +428,16 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, const u16 slave_addr) { struct drm_device *drm_dev = intel_dsi->base.base.dev; - struct device *dev = drm_dev->dev; - struct acpi_device *acpi_dev; - struct list_head resource_list; - struct i2c_adapter_lookup lookup; - - acpi_dev = ACPI_COMPANION(dev); - if (acpi_dev) { - memset(&lookup, 0, sizeof(lookup)); - lookup.slave_addr = slave_addr; - lookup.intel_dsi = intel_dsi; - lookup.dev_handle = acpi_device_handle(acpi_dev); - - INIT_LIST_HEAD(&resource_list); - acpi_dev_get_resources(acpi_dev, &resource_list, - i2c_adapter_lookup, - &lookup); - acpi_dev_free_resource_list(&resource_list); - } + struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev); + struct i2c_adapter_lookup lookup = { + .slave_addr = slave_addr, + .intel_dsi = intel_dsi, + .dev_handle = acpi_device_handle(adev), + }; + LIST_HEAD(resource_list); + + acpi_dev_get_resources(adev, &resource_list, i2c_adapter_lookup, &lookup); + acpi_dev_free_resource_list(&resource_list); } #else static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, @@ -682,11 +676,11 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi) drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count); drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg); drm_dbg_kms(&i915->drm, "Video mode format %s\n", - intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ? + intel_dsi->video_mode == NON_BURST_SYNC_PULSE ? "non-burst with sync pulse" : - intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ? + intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ? "non-burst with sync events" : - intel_dsi->video_mode_format == VIDEO_MODE_BURST ? + intel_dsi->video_mode == BURST_MODE ? 
"burst" : "<unknown>"); drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio); @@ -746,7 +740,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->dual_link = mipi_config->dual_link; intel_dsi->pixel_overlap = mipi_config->pixel_overlap; intel_dsi->operation_mode = mipi_config->is_cmd_mode; - intel_dsi->video_mode_format = mipi_config->video_transfer_mode; + intel_dsi->video_mode = mipi_config->video_transfer_mode; intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout; intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout; @@ -777,7 +771,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) * Target ddr frequency from VBT / non burst ddr freq * multiply by 100 to preserve remainder */ - if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { + if (intel_dsi->video_mode == BURST_MODE) { if (mipi_config->target_burst_mode_freq) { u32 bitrate = intel_dsi_bitrate(intel_dsi); diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h index 94a6ae1e0292..d96c3cc46e50 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h +++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h @@ -27,7 +27,7 @@ #include <drm/drm_crtc.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" struct intel_dvo_device { const char *name; diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index c4b3d76341f3..a307b4993bcf 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -7,6 +7,7 @@ * DOC: display pinning helpers */ +#include "gem/i915_gem_domain.h" #include "gem/i915_gem_object.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index c0a973eeb405..87f4af3fd523 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -49,6 +49,14 @@ #include "intel_fbc.h" #include "intel_frontbuffer.h" +#define for_each_fbc_id(__dev_priv, __fbc_id) \ + for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ + for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id)) + +#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ + for_each_fbc_id((__dev_priv), (__fbc_id)) \ + for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)]) + struct intel_fbc_funcs { void (*activate)(struct intel_fbc *fbc); void (*deactivate)(struct intel_fbc *fbc); @@ -85,6 +93,8 @@ struct intel_fbc { struct drm_mm_node compressed_fb; struct drm_mm_node compressed_llb; + enum intel_fbc_id id; + u8 limit; bool false_color; @@ -454,10 +464,10 @@ static void ilk_fbc_activate(struct intel_fbc *fbc) struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; - intel_de_write(i915, ILK_DPFC_FENCE_YOFF, + intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id), fbc_state->fence_y_offset); - intel_de_write(i915, ILK_DPFC_CONTROL, + intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } @@ -467,28 +477,28 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc) u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL); + dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id)); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl); + intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); } } static bool 
ilk_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN; } static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK; + return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK; } static void ilk_fbc_program_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; - intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start); + intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start); } static const struct intel_fbc_funcs ilk_fbc_funcs = { @@ -524,8 +534,8 @@ static void snb_fbc_nuke(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; - intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE); - intel_de_posting_read(i915, MSG_FBC_REND_STATE); + intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE); + intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id)); } static const struct intel_fbc_funcs snb_fbc_funcs = { @@ -547,7 +557,7 @@ static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) val |= FBC_STRIDE_OVERRIDE | FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); - intel_de_write(i915, GLK_FBC_STRIDE, val); + intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val); } static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) @@ -598,19 +608,19 @@ static void ivb_fbc_activate(struct intel_fbc *fbc) if (to_gt(i915)->ggtt->num_fences) snb_fbc_program_fence(fbc); - intel_de_write(i915, ILK_DPFC_CONTROL, + intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); } static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB; + return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB; } static void ivb_fbc_set_false_color(struct intel_fbc *fbc, bool enable) { - intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL, + intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_FALSE_COLOR, enable ? 
DPFC_CTL_FALSE_COLOR : 0); } @@ -810,16 +820,16 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) void intel_fbc_cleanup(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - mutex_lock(&fbc->lock); - __intel_fbc_cleanup_cfb(fbc); - mutex_unlock(&fbc->lock); + for_each_intel_fbc(i915, fbc, fbc_id) { + mutex_lock(&fbc->lock); + __intel_fbc_cleanup_cfb(fbc); + mutex_unlock(&fbc->lock); - kfree(fbc); + kfree(fbc); + } } static bool stride_is_valid(const struct intel_plane_state *plane_state) @@ -1115,7 +1125,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ if (DISPLAY_VER(i915) >= 11 && - (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) { + (plane_state->view.color_plane[0].y + + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane end Y offset misaligned"; return false; } @@ -1305,15 +1316,10 @@ static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) return fbc->possible_framebuffer_bits; } -void intel_fbc_invalidate(struct drm_i915_private *i915, - unsigned int frontbuffer_bits, - enum fb_op_origin origin) +static void __intel_fbc_invalidate(struct intel_fbc *fbc, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; - if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) return; @@ -1327,14 +1333,22 @@ void intel_fbc_invalidate(struct drm_i915_private *i915, mutex_unlock(&fbc->lock); } -void intel_fbc_flush(struct drm_i915_private *i915, - unsigned int frontbuffer_bits, enum fb_op_origin origin) +void intel_fbc_invalidate(struct drm_i915_private *i915, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) { - struct intel_fbc *fbc = i915->fbc; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - if (!fbc) - return; + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_invalidate(fbc, frontbuffer_bits, origin); + +} +static void __intel_fbc_flush(struct intel_fbc *fbc, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ mutex_lock(&fbc->lock); fbc->busy_bits &= ~frontbuffer_bits; @@ -1354,6 +1368,17 @@ out: mutex_unlock(&fbc->lock); } +void intel_fbc_flush(struct drm_i915_private *i915, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; + + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_flush(fbc, frontbuffer_bits, origin); +} + int intel_fbc_atomic_check(struct intel_atomic_state *state) { struct intel_plane_state *plane_state; @@ -1483,15 +1508,15 @@ void intel_fbc_update(struct intel_atomic_state *state, */ void intel_fbc_global_disable(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - mutex_lock(&fbc->lock); - if (fbc->state.plane) - __intel_fbc_disable(fbc); - mutex_unlock(&fbc->lock); + for_each_intel_fbc(i915, fbc, fbc_id) { + mutex_lock(&fbc->lock); + if (fbc->state.plane) + __intel_fbc_disable(fbc); + mutex_unlock(&fbc->lock); + } } static void intel_fbc_underrun_work_fn(struct work_struct *work) @@ -1516,19 +1541,9 @@ out: mutex_unlock(&fbc->lock); } -/* - * intel_fbc_reset_underrun - reset FBC fifo underrun status. - * @i915: the i915 device - * - * See intel_fbc_handle_fifo_underrun_irq(). 
For automated testing we - * want to re-enable FBC after an underrun to increase test coverage. - */ -void intel_fbc_reset_underrun(struct drm_i915_private *i915) +static void __intel_fbc_reset_underrun(struct intel_fbc *fbc) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; + struct drm_i915_private *i915 = fbc->i915; cancel_work_sync(&fbc->underrun_work); @@ -1544,6 +1559,38 @@ void intel_fbc_reset_underrun(struct drm_i915_private *i915) mutex_unlock(&fbc->lock); } +/* + * intel_fbc_reset_underrun - reset FBC fifo underrun status. + * @i915: the i915 device + * + * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we + * want to re-enable FBC after an underrun to increase test coverage. + */ +void intel_fbc_reset_underrun(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; + + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_reset_underrun(fbc); +} + +static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) +{ + /* + * There's no guarantee that underrun_detected won't be set to true + * right after this check and before the work is scheduled, but that's + * not a problem since we'll check it again under the work function + * while FBC is locked. This check here is just to prevent us from + * unnecessarily scheduling the work, and it relies on the fact that we + * never switch underrun_detect back to false after it's true. + */ + if (READ_ONCE(fbc->underrun_detected)) + return; + + schedule_work(&fbc->underrun_work); +} + /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun * @i915: i915 device @@ -1560,21 +1607,11 @@ void intel_fbc_reset_underrun(struct drm_i915_private *i915) */ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; - - /* There's no guarantee that underrun_detected won't be set to true - * right after this check and before the work is scheduled, but that's - * not a problem since we'll check it again under the work function - * while FBC is locked. This check here is just to prevent us from - * unnecessarily scheduling the work, and it relies on the fact that we - * never switch underrun_detect back to false after it's true. 
*/ - if (READ_ONCE(fbc->underrun_detected)) - return; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - schedule_work(&fbc->underrun_work); + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_handle_fifo_underrun_irq(fbc); } /* @@ -1622,7 +1659,8 @@ void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; } -static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) +static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915, + enum intel_fbc_id fbc_id) { struct intel_fbc *fbc; @@ -1630,6 +1668,7 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) if (!fbc) return NULL; + fbc->id = fbc_id; fbc->i915 = i915; INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); @@ -1658,32 +1697,35 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) */ void intel_fbc_init(struct drm_i915_private *i915) { - struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; if (!drm_mm_initialized(&i915->mm.stolen)) - mkwrite_device_info(i915)->display.has_fbc = false; + mkwrite_device_info(i915)->display.fbc_mask = 0; if (need_fbc_vtd_wa(i915)) - mkwrite_device_info(i915)->display.has_fbc = false; + mkwrite_device_info(i915)->display.fbc_mask = 0; i915->params.enable_fbc = intel_sanitize_fbc_option(i915); drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", i915->params.enable_fbc); - if (!HAS_FBC(i915)) - return; + for_each_fbc_id(i915, fbc_id) { + struct intel_fbc *fbc; - fbc = intel_fbc_create(i915); - if (!fbc) - return; + fbc = intel_fbc_create(i915, fbc_id); + if (!fbc) + continue; - /* We still don't have any sort of hardware state readout for FBC, so - * deactivate it in case the BIOS activated it to make sure software - * matches the hardware state. */ - if (intel_fbc_hw_is_active(fbc)) - intel_fbc_hw_deactivate(fbc); + /* + * We still don't have any sort of hardware state readout + * for FBC, so deactivate it in case the BIOS activated it + * to make sure software matches the hardware state. 
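(Editorial aside.) The FBC conversion above follows one pattern throughout: driver-wide entry points stop dereferencing a single i915->fbc pointer and instead walk every instance advertised in the platform's fbc_mask. A minimal sketch of that pattern, using only helpers visible in this patch (the function name intel_fbc_deactivate_all is invented purely for illustration):

        static void intel_fbc_deactivate_all(struct drm_i915_private *i915)
        {
                struct intel_fbc *fbc;
                enum intel_fbc_id fbc_id;

                /* for_each_intel_fbc() skips ids missing from display.fbc_mask */
                for_each_intel_fbc(i915, fbc, fbc_id) {
                        mutex_lock(&fbc->lock);
                        if (intel_fbc_hw_is_active(fbc))
                                intel_fbc_hw_deactivate(fbc);
                        mutex_unlock(&fbc->lock);
                }
        }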
+ */ + if (intel_fbc_hw_is_active(fbc)) + intel_fbc_hw_deactivate(fbc); - i915->fbc = fbc; + i915->fbc[fbc->id] = fbc; + } } static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) @@ -1759,25 +1801,32 @@ DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops, intel_fbc_debugfs_false_color_set, "%llu\n"); -static void intel_fbc_debugfs_add(struct intel_fbc *fbc) +static void intel_fbc_debugfs_add(struct intel_fbc *fbc, + struct dentry *parent) { - struct drm_i915_private *i915 = fbc->i915; - struct drm_minor *minor = i915->drm.primary; - - debugfs_create_file("i915_fbc_status", 0444, - minor->debugfs_root, fbc, - &intel_fbc_debugfs_status_fops); + debugfs_create_file("i915_fbc_status", 0444, parent, + fbc, &intel_fbc_debugfs_status_fops); if (fbc->funcs->set_false_color) - debugfs_create_file("i915_fbc_false_color", 0644, - minor->debugfs_root, fbc, - &intel_fbc_debugfs_false_color_fops); + debugfs_create_file("i915_fbc_false_color", 0644, parent, + fbc, &intel_fbc_debugfs_false_color_fops); } +void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc) +{ + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + + if (plane->fbc) + intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry); +} + +/* FIXME: remove this once igt is on board with per-crtc stuff */ void intel_fbc_debugfs_register(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; + struct drm_minor *minor = i915->drm.primary; + struct intel_fbc *fbc; + fbc = i915->fbc[INTEL_FBC_A]; if (fbc) - intel_fbc_debugfs_add(fbc); + intel_fbc_debugfs_add(fbc, minor->debugfs_root); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index 07ad0411fcc3..8c5a7339a27f 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -17,6 +17,12 @@ struct intel_fbc; struct intel_plane; struct intel_plane_state; +enum intel_fbc_id { + INTEL_FBC_A, + + I915_MAX_FBCS, +}; + int intel_fbc_atomic_check(struct intel_atomic_state *state); bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); @@ -36,6 +42,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane); void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915); void intel_fbc_reset_underrun(struct drm_i915_private *i915); +void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc); void intel_fbc_debugfs_register(struct drm_i915_private *i915); #endif /* __INTEL_FBC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 41d279db2be6..fd5bc7acf08d 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -50,6 +50,23 @@ #include "intel_fbdev.h" #include "intel_frontbuffer.h" +struct intel_fbdev { + struct drm_fb_helper helper; + struct intel_framebuffer *fb; + struct i915_vma *vma; + unsigned long vma_flags; + async_cookie_t cookie; + int preferred_bpp; + + /* Whether or not fbdev hpd processing is temporarily suspended */ + bool hpd_suspended: 1; + /* Set when a hotplug was received while HPD processing was suspended */ + bool hpd_waiting: 1; + + /* Protects hpd_suspended */ + struct mutex hpd_lock; +}; + static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev) { return ifbdev->fb->frontbuffer; @@ -680,3 +697,11 @@ void intel_fbdev_restore_mode(struct drm_device *dev) if 
(drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0) intel_fbdev_invalidate(ifbdev); } + +struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev) +{ + if (!fbdev || !fbdev->helper.fb) + return NULL; + + return to_intel_framebuffer(fbdev->helper.fb); +} diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h index de7c84250eb5..0e95e9472fa3 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev.h @@ -10,6 +10,8 @@ struct drm_device; struct drm_i915_private; +struct intel_fbdev; +struct intel_framebuffer; #ifdef CONFIG_DRM_FBDEV_EMULATION int intel_fbdev_init(struct drm_device *dev); @@ -19,6 +21,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv); void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); void intel_fbdev_output_poll_changed(struct drm_device *dev); void intel_fbdev_restore_mode(struct drm_device *dev); +struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev); #else static inline int intel_fbdev_init(struct drm_device *dev) { @@ -48,6 +51,10 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) static inline void intel_fbdev_restore_mode(struct drm_device *dev) { } +static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev) +{ + return NULL; +} #endif #endif /* __INTEL_FBDEV_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 3d6e22923601..4e4b43669b14 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -10,6 +10,11 @@ #include "intel_display_types.h" #include "intel_fdi.h" +struct intel_fdi_funcs { + void (*fdi_link_train)(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state); +}; + static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 3b8b84177085..2fad03250661 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -98,11 +98,21 @@ static const struct gmbus_pin gmbus_pins_dg1[] = { [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, }; +static const struct gmbus_pin gmbus_pins_dg2[] = { + [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, + [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, + [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, + [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, +}; + /* pin is expected to be valid */ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, unsigned int pin) { - if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG2) + return &gmbus_pins_dg2[pin]; + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) return &gmbus_pins_dg1[pin]; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) return &gmbus_pins_icp[pin]; @@ -123,7 +133,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, { unsigned int size; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG2) + size = ARRAY_SIZE(gmbus_pins_dg2); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) size = ARRAY_SIZE(gmbus_pins_dg1); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) size = ARRAY_SIZE(gmbus_pins_icp); @@ -931,13 +943,6 @@ struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, return &dev_priv->gmbus[pin].adapter; } -void 
intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) -{ - struct intel_gmbus *bus = to_intel_gmbus(adapter); - - bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed; -} - void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h index b96212b85425..8edc2e99cf53 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus.h @@ -41,7 +41,6 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter); struct i2c_adapter * intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); -void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter); void intel_gmbus_reset(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 4509fe7438e8..e1ecf38db0ef 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -297,8 +297,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) * Mailbox interface. */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { - ret = sandybridge_pcode_write(dev_priv, - SKL_PCODE_LOAD_HDCP_KEYS, 1); + ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { drm_err(&dev_priv->drm, "Failed to initiate HDCP key load (%d)\n", diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 52f6dc248453..1aa5bdc7b0dc 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1869,7 +1869,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, return MODE_OK; } -static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) +int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) { /* YCBCR420 TMDS rate requirement is half the pixel clock */ if (ycbcr420_output) @@ -1935,25 +1935,30 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); - enum drm_mode_status status; + enum drm_mode_status status = MODE_OK; + int bpc; + + /* + * Try all color depths since valid port clock range + * can have holes. Any mode that can be used with at + * least one color depth is accepted. 
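(Editorial aside.) The 12→10→8 bpc loop shown next leans entirely on intel_hdmi_tmds_clock(). A rough sketch of the math involved: the 4:2:0 halving is visible in the hunk above, while the per-bpc scaling shown here is the standard HDMI deep-colour ratio and is assumed, not copied from this patch:

        /* TMDS character rate in kHz for a given pixel clock and bpc (sketch) */
        static int example_tmds_clock_khz(int clock, int bpc, bool ycbcr420_output)
        {
                if (ycbcr420_output)
                        clock /= 2;             /* 4:2:0 needs half the TMDS rate */

                return clock * bpc / 8;         /* 1.25x at 10 bpc, 1.5x at 12 bpc */
        }

For example, a 297000 kHz RGB mode needs 445500 kHz at 12 bpc, 371250 kHz at 10 bpc and 297000 kHz at 8 bpc, which is why a mode can be valid at one colour depth but not another when the valid port clock range has holes.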
+ */ + for (bpc = 12; bpc >= 8; bpc -= 2) { + int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output); - /* check if we can do 8bpc */ - status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output), - true, has_hdmi_sink); + if (!intel_hdmi_source_bpc_possible(i915, bpc)) + continue; + + if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) + continue; - /* if we can't do 8bpc we may still be able to do 12bpc */ - if (status != MODE_OK && - intel_hdmi_source_bpc_possible(i915, 12) && - intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output), - true, has_hdmi_sink); + status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink); + if (status == MODE_OK) + return MODE_OK; + } - /* if we can't do 8,12bpc we may still be able to do 10bpc */ - if (status != MODE_OK && - intel_hdmi_source_bpc_possible(i915, 10) && - intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output), - true, has_hdmi_sink); + /* can never happen */ + drm_WARN_ON(&i915->drm, status == MODE_OK); return status; } @@ -2002,17 +2007,14 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return intel_mode_valid_max_plane_size(dev_priv, mode, false); } -bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, - int bpc, bool has_hdmi_sink, bool ycbcr420_output) +bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, + int bpc, bool has_hdmi_sink, bool ycbcr420_output) { struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_connector_state *connector_state; struct drm_connector *connector; int i; - if (crtc_state->pipe_bpp < bpc * 3) - return false; - for_each_new_connector_in_state(state, connector, connector_state, i) { if (connector_state->crtc != crtc_state->uapi.crtc) continue; @@ -2024,8 +2026,7 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return true; } -static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, - int bpc) +static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -2039,7 +2040,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, * HDMI deep color affects the clocks, so it's only possible * when not cloning with other encoder types. 
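(Editorial aside on the relaxed check that follows: making the cloning restriction conditional on bpc > 8 means a plain 8 bpc HDMI configuration can still be cloned with another encoder type; only the 10/12 bpc deep-colour cases, which change the clocks, remain limited to uncloned HDMI.)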
*/ - if (crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI)) + if (bpc > 8 && crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI)) return false; /* Display Wa_1405510057:icl,ehl */ @@ -2049,35 +2050,50 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, adjusted_mode->crtc_hblank_start) % 8 == 2) return false; - return intel_hdmi_deep_color_possible(crtc_state, bpc, - crtc_state->has_hdmi_sink, - intel_hdmi_is_ycbcr420(crtc_state)); + return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink, + intel_hdmi_is_ycbcr420(crtc_state)); } static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, - int clock) + int clock, bool respect_downstream_limits) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state); int bpc; - for (bpc = 12; bpc >= 10; bpc -= 2) { - if (hdmi_deep_color_possible(crtc_state, bpc) && - hdmi_port_clock_valid(intel_hdmi, - intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output), - true, crtc_state->has_hdmi_sink) == MODE_OK) + /* + * pipe_bpp could already be below 8bpc due to FDI + * bandwidth constraints. HDMI minimum is 8bpc however. + */ + bpc = max(crtc_state->pipe_bpp / 3, 8); + + /* + * We will never exceed downstream TMDS clock limits while + * attempting deep color. If the user insists on forcing an + * out of spec mode they will have to be satisfied with 8bpc. + */ + if (!respect_downstream_limits) + bpc = 8; + + for (; bpc >= 8; bpc -= 2) { + int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output); + + if (hdmi_bpc_possible(crtc_state, bpc) && + hdmi_port_clock_valid(intel_hdmi, tmds_clock, + respect_downstream_limits, + crtc_state->has_hdmi_sink) == MODE_OK) return bpc; } - return 8; + return -EINVAL; } static int intel_hdmi_compute_clock(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state) + struct intel_crtc_state *crtc_state, + bool respect_downstream_limits) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpc, clock = adjusted_mode->crtc_clock; @@ -2085,31 +2101,25 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; - bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock); + bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock, + respect_downstream_limits); + if (bpc < 0) + return bpc; - crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc, - intel_hdmi_is_ycbcr420(crtc_state)); + crtc_state->port_clock = + intel_hdmi_tmds_clock(clock, bpc, intel_hdmi_is_ycbcr420(crtc_state)); /* * pipe_bpp could already be below 8bpc due to * FDI bandwidth constraints. We shouldn't bump it - * back up to 8bpc in that case. + * back up to the HDMI minimum 8bpc in that case. 
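(Editorial aside.) A quick worked example of the pipe_bpp interplay between the max(pipe_bpp / 3, 8) floor above and the min(pipe_bpp, bpc * 3) clamp that follows, with illustrative numbers: if FDI bandwidth already forced pipe_bpp down to 18 (6 bpc per channel), the search still starts at max(18 / 3, 8) = 8 because 8 bpc is the HDMI floor, and afterwards min(18, 8 * 3) = 18 keeps pipe_bpp at 18 rather than bumping it back up.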
*/ - if (crtc_state->pipe_bpp > bpc * 3) - crtc_state->pipe_bpp = bpc * 3; + crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3); drm_dbg_kms(&i915->drm, "picking %d bpc for HDMI output (pipe bpp: %d)\n", bpc, crtc_state->pipe_bpp); - if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock, - false, crtc_state->has_hdmi_sink) != MODE_OK) { - drm_dbg_kms(&i915->drm, - "unsupported HDMI clock (%d kHz), rejecting mode\n", - crtc_state->port_clock); - return -EINVAL; - } - return 0; } @@ -2170,7 +2180,8 @@ intel_hdmi_output_format(struct intel_connector *connector, static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, + bool respect_downstream_limits) { struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -2187,7 +2198,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; } - ret = intel_hdmi_compute_clock(encoder, crtc_state); + ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); if (ret) { if (intel_hdmi_is_ycbcr420(crtc_state) || !connector->base.ycbcr_420_allowed || @@ -2195,7 +2206,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, return ret; crtc_state->output_format = intel_hdmi_output_format(connector, true); - ret = intel_hdmi_compute_clock(encoder, crtc_state); + ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); } return ret; @@ -2231,9 +2242,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, pipe_config->has_audio = intel_hdmi_has_audio(encoder, pipe_config, conn_state); - ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state); + /* + * Try to respect downstream TMDS clock limits first, if + * that fails assume the user might know something we don't. + */ + ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, true); if (ret) + ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "unsupported HDMI clock (%d kHz), rejecting mode\n", + pipe_config->hw.adjusted_mode.crtc_clock); return ret; + } if (intel_hdmi_is_ycbcr420(pipe_config)) { ret = intel_panel_fitting(pipe_config, conn_state); @@ -2359,6 +2380,14 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", drm_dp_get_dual_mode_type_name(type), hdmi->dp_dual_mode.max_tmds_clock); + + /* Older VBTs are often buggy and can't be trusted :( Play it safe. 
*/ + if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && + !intel_bios_is_port_dp_dual_mode(dev_priv, port)) { + drm_dbg_kms(&dev_priv->drm, + "Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n"); + hdmi->dp_dual_mode.max_tmds_clock = 0; + } } static bool diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 2bf440eb400a..93f65a917c36 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -9,8 +9,6 @@ #include <linux/hdmi.h> #include <linux/types.h> -#include "i915_reg.h" - struct drm_connector; struct drm_encoder; struct drm_i915_private; @@ -46,8 +44,9 @@ void intel_read_infoframe(struct intel_encoder *encoder, union hdmi_infoframe *frame); bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc, - bool has_hdmi_sink, bool ycbcr420_output); +bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, + int bpc, bool has_hdmi_sink, bool ycbcr420_output); +int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output); int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices, int output_format, bool hdmi_all_bpp, int hdmi_max_chunk_bytes); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 955f6d07b0e1..8204126d17f9 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -24,6 +24,7 @@ #include <linux/kernel.h> #include "i915_drv.h" +#include "i915_irq.h" #include "intel_display_types.h" #include "intel_hotplug.h" @@ -213,12 +214,6 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) } } -static void intel_hpd_irq_setup(struct drm_i915_private *i915) -{ - if (i915->display_irqs_enabled && i915->hotplug_funcs) - i915->hotplug_funcs->hpd_irq_setup(i915); -} - static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -281,13 +276,13 @@ intel_encoder_hotplug(struct intel_encoder *encoder, ret = true; if (ret) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n", - connector->base.base.id, - connector->base.name, - drm_get_connector_status_name(old_status), - drm_get_connector_status_name(connector->base.status), - old_epoch_counter, - connector->base.epoch_counter); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n", + connector->base.base.id, + connector->base.name, + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->base.status), + old_epoch_counter, + connector->base.epoch_counter); return INTEL_HOTPLUG_CHANGED; } return INTEL_HOTPLUG_UNCHANGED; diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 012f13e034bf..76357c9b76e4 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -78,11 +78,12 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode) static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) { struct intel_dp *dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(dp); struct drm_dp_dpcd_ident *ident; u32 vendor_oui; if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) { - 
DRM_ERROR("Can't read description\n"); + drm_err(&i915->drm, "Can't read description\n"); return false; } @@ -93,16 +94,16 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) switch (vendor_oui) { case LSPCON_VENDOR_MCA_OUI: lspcon->vendor = LSPCON_VENDOR_MCA; - DRM_DEBUG_KMS("Vendor: Mega Chips\n"); + drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n"); break; case LSPCON_VENDOR_PARADE_OUI: lspcon->vendor = LSPCON_VENDOR_PARADE; - DRM_DEBUG_KMS("Vendor: Parade Tech\n"); + drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n"); break; default: - DRM_ERROR("Invalid/Unknown vendor OUI\n"); + drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n"); return false; } @@ -119,21 +120,19 @@ static u32 get_hdr_status_reg(struct intel_lspcon *lspcon) void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) { - struct intel_digital_port *dig_port = - container_of(lspcon, struct intel_digital_port, lspcon); - struct drm_device *dev = dig_port->base.base.dev; - struct intel_dp *dp = lspcon_to_intel_dp(lspcon); + struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 hdr_caps; int ret; - ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon), + ret = drm_dp_dpcd_read(&intel_dp->aux, get_hdr_status_reg(lspcon), &hdr_caps, 1); if (ret < 0) { - drm_dbg_kms(dev, "HDR capability detection failed\n"); + drm_dbg_kms(&i915->drm, "HDR capability detection failed\n"); lspcon->hdr_supported = false; } else if (hdr_caps & 0x1) { - drm_dbg_kms(dev, "LSPCON capable of HDR\n"); + drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n"); lspcon->hdr_supported = true; } } @@ -141,11 +140,12 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum drm_lspcon_mode current_mode; struct i2c_adapter *adapter = &intel_dp->aux.ddc; if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, ¤t_mode)) { - DRM_DEBUG_KMS("Error reading LSPCON mode\n"); + drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n"); return DRM_LSPCON_MODE_INVALID; } return current_mode; @@ -154,22 +154,24 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { + struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum drm_lspcon_mode current_mode; current_mode = lspcon_get_current_mode(lspcon); if (current_mode == mode) goto out; - DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", - lspcon_mode_name(mode)); + drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n", + lspcon_mode_name(mode)); wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); if (current_mode != mode) - DRM_ERROR("LSPCON mode hasn't settled\n"); + drm_err(&i915->drm, "LSPCON mode hasn't settled\n"); out: - DRM_DEBUG_KMS("Current LSPCON mode %s\n", - lspcon_mode_name(current_mode)); + drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n", + lspcon_mode_name(current_mode)); return current_mode; } @@ -178,44 +180,47 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int err; enum drm_lspcon_mode current_mode; struct i2c_adapter *adapter = 
&intel_dp->aux.ddc; err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, ¤t_mode); if (err) { - DRM_ERROR("Error reading LSPCON mode\n"); + drm_err(&i915->drm, "Error reading LSPCON mode\n"); return err; } if (current_mode == mode) { - DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n"); + drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n"); return 0; } err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, adapter, mode); if (err < 0) { - DRM_ERROR("LSPCON mode change failed\n"); + drm_err(&i915->drm, "LSPCON mode change failed\n"); return err; } lspcon->mode = mode; - DRM_DEBUG_KMS("LSPCON mode changed done\n"); + drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n"); return 0; } static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) { + struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 rev; if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV, &rev) != 1) { - DRM_DEBUG_KMS("Native AUX CH down\n"); + drm_dbg_kms(&i915->drm, "Native AUX CH down\n"); return false; } - DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n", - rev >> 4, rev & 0xf); + drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n", + rev >> 4, rev & 0xf); return true; } @@ -225,6 +230,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) int retry; enum drm_dp_dual_mode_type adaptor_type; struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct i2c_adapter *adapter = &intel_dp->aux.ddc; enum drm_lspcon_mode expected_mode; @@ -242,13 +248,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) } if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) { - DRM_DEBUG_KMS("No LSPCON detected, found %s\n", - drm_dp_get_dual_mode_type_name(adaptor_type)); + drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n", + drm_dp_get_dual_mode_type_name(adaptor_type)); return false; } /* Yay ... 
got a LSPCON device */ - DRM_DEBUG_KMS("LSPCON detected\n"); + drm_dbg_kms(&i915->drm, "LSPCON detected\n"); lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); /* @@ -258,7 +264,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) */ if (lspcon->mode != DRM_LSPCON_MODE_PCON) { if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { - DRM_ERROR("LSPCON mode change to PCON failed\n"); + drm_err(&i915->drm, "LSPCON mode change to PCON failed\n"); return false; } } @@ -268,13 +274,14 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); unsigned long start = jiffies; while (1) { if (intel_digital_port_connected(&dig_port->base)) { - DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n", - jiffies_to_msecs(jiffies - start)); + drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n", + jiffies_to_msecs(jiffies - start)); return; } @@ -284,7 +291,7 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) usleep_range(10000, 15000); } - DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n"); + drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n"); } static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) @@ -301,7 +308,7 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL, &avi_if_ctrl, 1); if (ret < 0) { - DRM_ERROR("Failed to read AVI IF control\n"); + drm_err(aux->drm_dev, "Failed to read AVI IF control\n"); return false; } @@ -309,7 +316,7 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) return true; } - DRM_ERROR("Parade FW not ready to accept AVI IF\n"); + drm_err(aux->drm_dev, "Parade FW not ready to accept AVI IF\n"); return false; } @@ -324,8 +331,8 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, while (block_count < 4) { if (!lspcon_parade_fw_ready(aux)) { - DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n", - block_count); + drm_dbg_kms(aux->drm_dev, "LSPCON FW not ready, block %d\n", + block_count); return false; } @@ -333,8 +340,8 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, data = avi_buf + block_count * 8; ret = drm_dp_dpcd_write(aux, reg, data, 8); if (ret < 0) { - DRM_ERROR("Failed to write AVI IF block %d\n", - block_count); + drm_err(aux->drm_dev, "Failed to write AVI IF block %d\n", + block_count); return false; } @@ -348,15 +355,15 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count; ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1); if (ret < 0) { - DRM_ERROR("Failed to update (0x%x), block %d\n", - reg, block_count); + drm_err(aux->drm_dev, "Failed to update (0x%x), block %d\n", + reg, block_count); return false; } block_count++; } - DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n"); + drm_dbg_kms(aux->drm_dev, "Wrote AVI IF blocks successfully\n"); return true; } @@ -378,14 +385,14 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, */ if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) { - DRM_ERROR("Invalid length of infoframes\n"); + drm_err(aux->drm_dev, "Invalid length of infoframes\n"); return false; } memcpy(&avi_if[1], frame, len); if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) { - DRM_DEBUG_KMS("Failed to 
write infoframe blocks\n"); + drm_dbg_kms(aux->drm_dev, "Failed to write infoframe blocks\n"); return false; } @@ -412,7 +419,7 @@ static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, mdelay(50); continue; } else { - DRM_ERROR("DPCD write failed at:0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD write failed at:0x%x\n", reg); return false; } } @@ -423,7 +430,7 @@ static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, reg = LSPCON_MCA_AVI_IF_CTRL; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } @@ -433,19 +440,19 @@ static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, ret = drm_dp_dpcd_write(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } val = 0; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } if (val == LSPCON_MCA_AVI_IF_HANDLED) - DRM_DEBUG_KMS("AVI IF handled by FW\n"); + drm_dbg_kms(aux->drm_dev, "AVI IF handled by FW\n"); return true; } @@ -457,6 +464,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, { bool ret = true; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); switch (type) { @@ -469,7 +477,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, frame, len); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: - drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n"); + drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n"); /* It uses the legacy hsw implementation for the same */ hsw_write_infoframe(encoder, crtc_state, type, frame, len); break; @@ -478,7 +486,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, } if (!ret) { - DRM_ERROR("Failed to write infoframes\n"); + drm_err(&i915->drm, "Failed to write infoframes\n"); return; } } @@ -504,11 +512,12 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, u8 buf[VIDEO_DIP_DATA_SIZE]; struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_lspcon *lspcon = &dig_port->lspcon; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; if (!lspcon->active) { - DRM_ERROR("Writing infoframes while LSPCON disabled ?\n"); + drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n"); return; } @@ -518,7 +527,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, conn_state->connector, adjusted_mode); if (ret < 0) { - DRM_ERROR("couldn't fill AVI infoframe\n"); + drm_err(&i915->drm, "couldn't fill AVI infoframe\n"); return; } @@ -559,7 +568,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); if (ret < 0) { - DRM_ERROR("Failed to pack AVI IF\n"); + drm_err(&i915->drm, "Failed to pack AVI IF\n"); return; } @@ -575,7 +584,7 @@ static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux) ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } @@ -590,7 +599,7 @@ static bool 
_lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux) ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } @@ -634,31 +643,32 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) bool lspcon_init(struct intel_digital_port *dig_port) { - struct intel_dp *dp = &dig_port->dp; + struct intel_dp *intel_dp = &dig_port->dp; struct intel_lspcon *lspcon = &dig_port->lspcon; - struct drm_connector *connector = &dp->attached_connector->base; + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct drm_connector *connector = &intel_dp->attached_connector->base; lspcon->active = false; lspcon->mode = DRM_LSPCON_MODE_INVALID; if (!lspcon_probe(lspcon)) { - DRM_ERROR("Failed to probe lspcon\n"); + drm_err(&i915->drm, "Failed to probe lspcon\n"); return false; } - if (drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd) != 0) { - DRM_ERROR("LSPCON DPCD read failed\n"); + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) { + drm_err(&i915->drm, "LSPCON DPCD read failed\n"); return false; } if (!lspcon_detect_vendor(lspcon)) { - DRM_ERROR("LSPCON vendor detection failed\n"); + drm_err(&i915->drm, "LSPCON vendor detection failed\n"); return false; } connector->ycbcr_420_allowed = true; lspcon->active = true; - DRM_DEBUG_KMS("Success: LSPCON init\n"); + drm_dbg_kms(&i915->drm, "Success: LSPCON init\n"); return true; } @@ -674,16 +684,16 @@ void lspcon_resume(struct intel_digital_port *dig_port) { struct intel_lspcon *lspcon = &dig_port->lspcon; struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); enum drm_lspcon_mode expected_mode; - if (!intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) + if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) return; if (!lspcon->active) { if (!lspcon_init(dig_port)) { - DRM_ERROR("LSPCON init failed on port %c\n", - port_name(dig_port->base.port)); + drm_err(&i915->drm, "LSPCON init failed on port %c\n", + port_name(dig_port->base.port)); return; } } @@ -699,7 +709,7 @@ void lspcon_resume(struct intel_digital_port *dig_port) return; if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) - DRM_ERROR("LSPCON resume failed\n"); + drm_err(&i915->drm, "LSPCON resume failed\n"); else - DRM_DEBUG_KMS("LSPCON resume success\n"); + drm_dbg_kms(&i915->drm, "LSPCON resume success\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h index bc9c8b84ba2f..9d3372dc503f 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.h +++ b/drivers/gpu/drm/i915/display/intel_lvds.h @@ -8,7 +8,7 @@ #include <linux/types.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" enum pipe; struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 0065111593a6..f31e8c3f8ce0 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -35,6 +35,7 @@ #include "intel_backlight.h" #include "intel_display_types.h" #include "intel_opregion.h" +#include "intel_pci_config.h" #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 @@ -46,10 +47,11 @@ #define OPREGION_ASLE_EXT_OFFSET 0x1C00 #define OPREGION_SIGNATURE "IntelGraphicsMem" -#define MBOX_ACPI (1<<0) -#define MBOX_SWSCI (1<<1) -#define MBOX_ASLE (1<<2) -#define MBOX_ASLE_EXT (1<<4) 
+#define MBOX_ACPI BIT(0) /* Mailbox #1 */ +#define MBOX_SWSCI BIT(1) /* Mailbox #2 (obsolete from v2.x) */ +#define MBOX_ASLE BIT(2) /* Mailbox #3 */ +#define MBOX_ASLE_EXT BIT(4) /* Mailbox #5 */ +#define MBOX_BACKLIGHT BIT(5) /* Mailbox #2 (valid from v3.x) */ struct opregion_header { u8 signature[16]; @@ -195,6 +197,8 @@ struct opregion_asle_ext { #define ASLE_IUER_WINDOWS_BTN (1 << 1) #define ASLE_IUER_POWER_BTN (1 << 0) +#define ASLE_PHED_EDID_VALID_MASK 0x3 + /* Software System Control Interrupt (SWSCI) */ #define SWSCI_SCIC_INDICATOR (1 << 0) #define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1 @@ -242,14 +246,10 @@ struct opregion_asle_ext { #define MAX_DSLP 1500 -static int swsci(struct drm_i915_private *dev_priv, - u32 function, u32 parm, u32 *parm_out) +static int check_swsci_function(struct drm_i915_private *i915, u32 function) { - struct opregion_swsci *swsci = dev_priv->opregion.swsci; - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - u32 main_function, sub_function, scic; - u16 swsci_val; - u32 dslp; + struct opregion_swsci *swsci = i915->opregion.swsci; + u32 main_function, sub_function; if (!swsci) return -ENODEV; @@ -261,15 +261,31 @@ static int swsci(struct drm_i915_private *dev_priv, /* Check if we can call the function. See swsci_setup for details. */ if (main_function == SWSCI_SBCB) { - if ((dev_priv->opregion.swsci_sbcb_sub_functions & + if ((i915->opregion.swsci_sbcb_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } else if (main_function == SWSCI_GBDA) { - if ((dev_priv->opregion.swsci_gbda_sub_functions & + if ((i915->opregion.swsci_gbda_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } + return 0; +} + +static int swsci(struct drm_i915_private *dev_priv, + u32 function, u32 parm, u32 *parm_out) +{ + struct opregion_swsci *swsci = dev_priv->opregion.swsci; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + u32 scic, dslp; + u16 swsci_val; + int ret; + + ret = check_swsci_function(dev_priv, function); + if (ret) + return ret; + /* Driver sleep timeout in ms. */ dslp = swsci->dslp; if (!dslp) { @@ -343,11 +359,17 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, u32 parm = 0; u32 type = 0; u32 port; + int ret; /* don't care about old stuff for now */ if (!HAS_DDI(dev_priv)) return 0; + /* Avoid port out of bounds checks if SWSCI isn't there. */ + ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE); + if (ret) + return ret; + if (intel_encoder->type == INTEL_OUTPUT_DSI) port = 0; else @@ -360,6 +382,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, port++; } + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. 
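
For illustration only: the capability test that the new check_swsci_function() factors out boils down to a sub-function bitmask probe against the masks cached at setup, which is what lets intel_opregion_notify_encoder() bail out before computing a port index on machines without SWSCI. A minimal standalone sketch with made-up names, not the driver's actual helper:

#include <stdbool.h>
#include <stdint.h>

/* cached_caps is the GBDA or SBCB sub-function mask read at setup time */
static bool swsci_subfn_supported(uint32_t cached_caps, unsigned int sub_fn)
{
	return (cached_caps & (1u << sub_fn)) != 0;
}
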
+ */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + if (!enable) parm |= 4 << 8; @@ -896,9 +933,17 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) } if (mboxes & MBOX_SWSCI) { - drm_dbg(&dev_priv->drm, "SWSCI supported\n"); - opregion->swsci = base + OPREGION_SWSCI_OFFSET; - swsci_setup(dev_priv); + u8 major = opregion->header->over.major; + + if (major >= 3) { + drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n"); + } else { + if (major >= 2) + drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n"); + drm_dbg(&dev_priv->drm, "SWSCI supported\n"); + opregion->swsci = base + OPREGION_SWSCI_OFFSET; + swsci_setup(dev_priv); + } } if (mboxes & MBOX_ASLE) { @@ -908,8 +953,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) opregion->asle->ardy = ASLE_ARDY_NOT_READY; } - if (mboxes & MBOX_ASLE_EXT) + if (mboxes & MBOX_ASLE_EXT) { drm_dbg(&dev_priv->drm, "ASLE extension supported\n"); + opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET; + } + + if (mboxes & MBOX_BACKLIGHT) { + drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n"); + } if (intel_load_vbt_firmware(dev_priv) == 0) goto out; @@ -1036,6 +1087,54 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) return ret - 1; } +/** + * intel_opregion_get_edid - Fetch EDID from ACPI OpRegion mailbox #5 + * @intel_connector: eDP connector + * + * This reads the ACPI Opregion mailbox #5 to extract the EDID that is passed + * to it. + * + * Returns: + * The EDID in the OpRegion, or NULL if there is none or it's invalid. 
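
For illustration only: the validity rule described in the comment above (the PHED field gives the number of valid 128-byte blocks, and the buffer must cover the base block plus all advertised extensions) can be sketched standalone. Buffer and field names below are made up, not the driver's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EDID_BLOCK_SIZE	128
#define PHED_VALID_MASK	0x3	/* mirrors ASLE_PHED_EDID_VALID_MASK */

static bool opregion_edid_len_ok(const uint8_t *buf, uint32_t phed)
{
	size_t len = (size_t)(phed & PHED_VALID_MASK) * EDID_BLOCK_SIZE;

	if (len == 0)
		return false;

	/* byte 126 of the base EDID block holds the extension count */
	return len >= (size_t)EDID_BLOCK_SIZE * (1 + buf[126]);
}
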
+ * + */ +struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector) +{ + struct drm_connector *connector = &intel_connector->base; + struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_opregion *opregion = &i915->opregion; + const void *in_edid; + const struct edid *edid; + struct edid *new_edid; + int len; + + if (!opregion->asle_ext) + return NULL; + + in_edid = opregion->asle_ext->bddc; + + /* Validity corresponds to number of 128-byte blocks */ + len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128; + if (!len || !memchr_inv(in_edid, 0, len)) + return NULL; + + edid = in_edid; + + if (len < EDID_LENGTH * (1 + edid->extensions)) { + drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5): too short\n"); + return NULL; + } + new_edid = drm_edid_duplicate(edid); + if (!new_edid) + return NULL; + if (!drm_edid_is_valid(new_edid)) { + kfree(new_edid); + drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n"); + return NULL; + } + return new_edid; +} + void intel_opregion_register(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->opregion; @@ -1129,6 +1228,7 @@ void intel_opregion_unregister(struct drm_i915_private *i915) opregion->acpi = NULL; opregion->swsci = NULL; opregion->asle = NULL; + opregion->asle_ext = NULL; opregion->vbt = NULL; opregion->lid_state = NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h index 4aa68ffbd30e..82cc0ba34af7 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.h +++ b/drivers/gpu/drm/i915/display/intel_opregion.h @@ -29,12 +29,14 @@ #include <linux/pci.h> struct drm_i915_private; +struct intel_connector; struct intel_encoder; struct opregion_header; struct opregion_acpi; struct opregion_swsci; struct opregion_asle; +struct opregion_asle_ext; struct intel_opregion { struct opregion_header *header; @@ -43,6 +45,7 @@ struct intel_opregion { u32 swsci_gbda_sub_functions; u32 swsci_sbcb_sub_functions; struct opregion_asle *asle; + struct opregion_asle_ext *asle_ext; void *rvda; void *vbt_firmware; const void *vbt; @@ -71,6 +74,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state); int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); +struct edid *intel_opregion_get_edid(struct intel_connector *connector); #else /* CONFIG_ACPI*/ @@ -117,6 +121,12 @@ static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) return -ENODEV; } +static inline struct edid * +intel_opregion_get_edid(struct intel_connector *connector) +{ + return NULL; +} + #endif /* CONFIG_ACPI */ #endif diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 1a376e9a1ff3..76845d34ad0c 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -28,6 +28,7 @@ #include <drm/drm_fourcc.h> +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" @@ -38,6 +39,7 @@ #include "intel_display_types.h" #include "intel_frontbuffer.h" #include "intel_overlay.h" +#include "intel_pci_config.h" /* Limits for overlay size. 
According to intel doc, the real limits are: * Y width: 4095, UV width (planar): 2047, Y height: 2047, @@ -959,6 +961,9 @@ static int check_overlay_dst(struct intel_overlay *overlay, const struct intel_crtc_state *pipe_config = overlay->crtc->config; + if (rec->dst_height == 0 || rec->dst_width == 0) + return -EINVAL; + if (rec->dst_x < pipe_config->pipe_src_w && rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w && rec->dst_y < pipe_config->pipe_src_h && diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index a55c4bfacd0d..9192769e3337 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -88,6 +88,50 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); } +static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc, + const struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_set_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), + PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); +} + +static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc, + const struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_set_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe), + PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe)); +} + +void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_get_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), + PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); +} + +void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_get_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe), + PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe)); +} + static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, enum pipe pch_transcoder) { @@ -157,20 +201,20 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) */ val &= ~PIPECONF_BPC_MASK; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - val |= PIPECONF_8BPC; + val |= PIPECONF_BPC_8; else val |= pipeconf_val & PIPECONF_BPC_MASK; } val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - val |= TRANS_LEGACY_INTERLACED_ILK; + val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX; else - val |= TRANS_INTERLACED; + val |= TRANS_INTERLACE_INTERLACED; } else { - val |= TRANS_PROGRESSIVE; + val |= TRANS_INTERLACE_PROGRESSIVE; } intel_de_write(dev_priv, reg, val | TRANS_ENABLE); @@ -211,6 +255,20 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) } } +void ilk_pch_pre_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + /* + * Note: FDI PLL enabling _must_ be done before we enable the + * cpu pipes, hence this is separate from all the other 
fdi/pch + * enabling. + */ + ilk_fdi_pll_enable(crtc_state); +} + /* * Enable PCH resources required for PCH ports: * - PCH PLLs @@ -264,6 +322,10 @@ void ilk_pch_enable(struct intel_atomic_state *state, /* set transcoder timing, panel must allow it */ assert_pps_unlocked(dev_priv, pipe); + if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n); + intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2); + } ilk_pch_transcoder_set_timings(crtc_state, pipe); intel_fdi_normal_train(crtc); @@ -279,7 +341,8 @@ void ilk_pch_enable(struct intel_atomic_state *state, temp = intel_de_read(dev_priv, reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK | + TRANS_DP_VSYNC_ACTIVE_HIGH | + TRANS_DP_HSYNC_ACTIVE_HIGH | TRANS_DP_BPC_MASK); temp |= TRANS_DP_OUTPUT_ENABLE; temp |= bpc << 9; /* same format but at 11:9 */ @@ -371,7 +434,8 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state) crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ilk_get_fdi_m_n_config(crtc, crtc_state); + intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, + &crtc_state->fdi_m_n); if (HAS_PCH_IBX(dev_priv)) { /* @@ -422,11 +486,10 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, val = TRANS_ENABLE; pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == - PIPECONF_INTERLACED_ILK) - val |= TRANS_INTERLACED; + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK) + val |= TRANS_INTERLACE_INTERLACED; else - val |= TRANS_PROGRESSIVE; + val |= TRANS_INTERLACE_PROGRESSIVE; intel_de_write(dev_priv, LPT_TRANSCONF, val); if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, @@ -495,7 +558,8 @@ void lpt_pch_get_config(struct intel_crtc_state *crtc_state) crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ilk_get_fdi_m_n_config(crtc, crtc_state); + intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, + &crtc_state->fdi_m_n); crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h index 2c387fe3a467..749473d99320 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.h +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -9,7 +9,10 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_link_m_n; +void ilk_pch_pre_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc); void ilk_pch_enable(struct intel_atomic_state *state, struct intel_crtc *crtc); void ilk_pch_disable(struct intel_atomic_state *state, @@ -24,4 +27,9 @@ void lpt_pch_disable(struct intel_atomic_state *state, struct intel_crtc *crtc); void lpt_pch_get_config(struct intel_crtc_state *crtc_state); +void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); +void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); + #endif diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index 543877998078..d7b1de4cc205 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -166,8 +166,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = 
to_i915(dev); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = @@ -204,11 +202,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, * pretend the BIOS never had it enabled. */ intel_plane_disable_noatomic(crtc, plane); - if (crtc_state->bigjoiner) { - struct intel_crtc *slave = - crtc_state->bigjoiner_linked_crtc; - intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary)); - } return; diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index e9c679bb1b2e..9c986e8932f8 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -1131,16 +1131,20 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) } static void -intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) +intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name, + const struct edp_power_seq *seq) { - DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - state_name, - seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + state_name, + seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); } static void intel_pps_verify_state(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct edp_power_seq hw; struct edp_power_seq *sw = &intel_dp->pps.pps_delays; @@ -1148,9 +1152,9 @@ intel_pps_verify_state(struct intel_dp *intel_dp) if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { - DRM_ERROR("PPS state mismatch\n"); - intel_pps_dump_state("sw", sw); - intel_pps_dump_state("hw", &hw); + drm_err(&i915->drm, "PPS state mismatch\n"); + intel_pps_dump_state(intel_dp, "sw", sw); + intel_pps_dump_state(intel_dp, "hw", &hw); } } @@ -1168,7 +1172,7 @@ static void pps_init_delays(struct intel_dp *intel_dp) intel_pps_readout_hw_state(intel_dp, &cur); - intel_pps_dump_state("cur", &cur); + intel_pps_dump_state(intel_dp, "cur", &cur); vbt = dev_priv->vbt.edp.pps; /* On Toshiba Satellite P50-C-18C system the VBT T12 delay @@ -1200,7 +1204,7 @@ static void pps_init_delays(struct intel_dp *intel_dp) * too. */ spec.t11_t12 = (510 + 100) * 10; - intel_pps_dump_state("vbt", &vbt); + intel_pps_dump_state(intel_dp, "vbt", &vbt); /* Use the max of the register settings and vbt. If both are * unset, fall back to the spec limits. 
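
For illustration only: the selection rule stated in the comment above (take the max of the readout and VBT values, and fall back to the spec limit only when both are unset) is simple enough to sketch standalone; the helper name is made up:

#include <stdint.h>

static uint16_t pps_pick_delay(uint16_t cur, uint16_t vbt, uint16_t spec)
{
	uint16_t val = cur > vbt ? cur : vbt;	/* max of readout and VBT */

	return val ? val : spec;		/* both unset: spec limit */
}
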
*/ diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index a1a663f362e7..2e0b092f4b6b 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1063,31 +1063,28 @@ static void intel_psr_activate(struct intel_dp *intel_dp) intel_dp->psr.active = true; } -static void intel_psr_enable_source(struct intel_dp *intel_dp) +static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp) +{ + switch (intel_dp->psr.pipe) { + case PIPE_A: + return LATENCY_REPORTING_REMOVED_PIPE_A; + case PIPE_B: + return LATENCY_REPORTING_REMOVED_PIPE_B; + case PIPE_C: + return LATENCY_REPORTING_REMOVED_PIPE_C; + default: + MISSING_CASE(intel_dp->psr.pipe); + return 0; + } +} + +static void intel_psr_enable_source(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask; - if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) { - i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); - u32 chicken = intel_de_read(dev_priv, reg); - - chicken |= PSR2_VSC_ENABLE_PROG_HEADER | - PSR2_ADD_VERTICAL_LINE_COUNT; - intel_de_write(dev_priv, reg, chicken); - } - - /* - * Wa_16014451276:adlp - * All supported adlp panels have 1-based X granularity, this may - * cause issues if non-supported panels are used. - */ - if (IS_ALDERLAKE_P(dev_priv) && - intel_dp->psr.psr2_enabled) - intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, - ADLP_1_BASED_X_GRANULARITY); - /* * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also * mask LPSP to avoid dependency on other drivers that might block @@ -1126,18 +1123,47 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp) intel_dp->psr.psr2_sel_fetch_enabled ? IGNORE_PSR2_HW_TRACKING : 0); - /* Wa_16011168373:adl-p */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) && - intel_dp->psr.psr2_enabled) - intel_de_rmw(dev_priv, - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), - TRANS_SET_CONTEXT_LATENCY_MASK, - TRANS_SET_CONTEXT_LATENCY_VALUE(1)); + if (intel_dp->psr.psr2_enabled) { + if (DISPLAY_VER(dev_priv) == 9) + intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, + PSR2_VSC_ENABLE_PROG_HEADER | + PSR2_ADD_VERTICAL_LINE_COUNT); - /* Wa_16012604467:adlp */ - if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled) - intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0, - CLKGATE_DIS_MISC_DMASC_GATING_DIS); + /* + * Wa_16014451276:adlp + * All supported adlp panels have 1-based X granularity, this may + * cause issues if non-supported panels are used. 
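
For illustration only: further down in this hunk the Wa_16013835468 block compares the vblank length against the non-active part of the frame before applying the pipe bit returned by wa_16013835468_bit_get(). A standalone sketch of that condition, with made-up names:

#include <stdbool.h>

struct vtimings_sketch {
	int vdisplay, vtotal, vblank_start, vblank_end;
};

static bool wa_16013835468_needed(const struct vtimings_sketch *t)
{
	int nonactive = t->vtotal - t->vdisplay;	/* lines outside the active area */
	int vblank = t->vblank_end - t->vblank_start;	/* blanking length */

	return vblank > nonactive;
}
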
+ */ + if (IS_ALDERLAKE_P(dev_priv)) + intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, + ADLP_1_BASED_X_GRANULARITY); + + /* Wa_16011168373:adl-p */ + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) + intel_de_rmw(dev_priv, + TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), + TRANS_SET_CONTEXT_LATENCY_MASK, + TRANS_SET_CONTEXT_LATENCY_VALUE(1)); + + /* Wa_16012604467:adlp */ + if (IS_ALDERLAKE_P(dev_priv)) + intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0, + CLKGATE_DIS_MISC_DMASC_GATING_DIS); + + /* Wa_16013835468:tgl[b0+], dg1 */ + if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) || + IS_DG1(dev_priv)) { + u16 vtotal, vblank; + + vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal - + crtc_state->uapi.adjusted_mode.crtc_vdisplay; + vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end - + crtc_state->uapi.adjusted_mode.crtc_vblank_start; + if (vblank > vtotal) + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, + wa_16013835468_bit_get(intel_dp)); + } + } } static bool psr_interrupt_error_check(struct intel_dp *intel_dp) @@ -1202,7 +1228,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc); intel_snps_phy_update_psr_power_state(dev_priv, phy, true); intel_psr_enable_sink(intel_dp); - intel_psr_enable_source(intel_dp); + intel_psr_enable_source(intel_dp, crtc_state); intel_dp->psr.enabled = true; intel_dp->psr.paused = false; @@ -1290,17 +1316,24 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); - /* Wa_16011168373:adl-p */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) && - intel_dp->psr.psr2_enabled) - intel_de_rmw(dev_priv, - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), - TRANS_SET_CONTEXT_LATENCY_MASK, 0); - - /* Wa_16012604467:adlp */ - if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled) - intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, - CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0); + if (intel_dp->psr.psr2_enabled) { + /* Wa_16011168373:adl-p */ + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) + intel_de_rmw(dev_priv, + TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), + TRANS_SET_CONTEXT_LATENCY_MASK, 0); + + /* Wa_16012604467:adlp */ + if (IS_ALDERLAKE_P(dev_priv)) + intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, + CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0); + + /* Wa_16013835468:tgl[b0+], dg1 */ + if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) || + IS_DG1(dev_priv)) + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + wa_16013835468_bit_get(intel_dp), 0); + } intel_snps_phy_update_psr_power_state(dev_priv, phy, false); diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h index 72065e4360d5..2868852c85f2 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.h +++ b/drivers/gpu/drm/i915/display/intel_sdvo.h @@ -8,7 +8,7 @@ #include <linux/types.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" struct drm_i915_private; enum pipe; diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 09f405e4d363..7e6245b97fed 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -10,6 +10,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_snps_phy.h" +#include "intel_snps_phy_regs.h" /** * DOC: Synopsis PHY support @@ -23,18 +24,18 @@ * since it is not handled by the shared DPLL framework as on other 
platforms. */ -void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv) +void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915) { enum phy phy; for_each_phy_masked(phy, ~0) { - if (!intel_phy_is_snps(dev_priv, phy)) + if (!intel_phy_is_snps(i915, phy)) continue; - if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy), + if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy), DG2_PHY_DP_TX_ACK_MASK, 25)) - DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n", - phy); + drm_err(&i915->drm, "SNPS PHY %c failed to calibrate after 25ms.\n", + phy_name(phy)); } } @@ -250,197 +251,6 @@ static const struct intel_mpllb_state * const dg2_dp_100_tables[] = { }; /* - * Basic DP link rates with 38.4 MHz reference clock. - */ - -static const struct intel_mpllb_state dg2_dp_rbr_38_4 = { - .clock = 162000, - .ref_control = - REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), - .mpllb_cp = - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), - .mpllb_div = - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), - .mpllb_div2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 304), - .mpllb_fracn1 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), - .mpllb_fracn2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 49152), -}; - -static const struct intel_mpllb_state dg2_dp_hbr1_38_4 = { - .clock = 270000, - .ref_control = - REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), - .mpllb_cp = - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), - .mpllb_div = - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), - .mpllb_div2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 248), - .mpllb_fracn1 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), - .mpllb_fracn2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40960), -}; - -static const struct intel_mpllb_state dg2_dp_hbr2_38_4 = { - .clock = 540000, - .ref_control = - REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), - .mpllb_cp = - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), - .mpllb_div = - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), - .mpllb_div2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 248), - .mpllb_fracn1 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), - .mpllb_fracn2 = - 
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40960), -}; - -static const struct intel_mpllb_state dg2_dp_hbr3_38_4 = { - .clock = 810000, - .ref_control = - REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), - .mpllb_cp = - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), - .mpllb_div = - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), - .mpllb_div2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 388), - .mpllb_fracn1 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), - .mpllb_fracn2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 61440), -}; - -static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = { - .clock = 1000000, - .ref_control = - REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), - .mpllb_cp = - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), - .mpllb_div = - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), - .mpllb_div2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 488), - .mpllb_fracn1 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 3), - .mpllb_fracn2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 27306), - - /* - * SSC will be enabled, DP UHBR has a minimum SSC requirement. - */ - .mpllb_sscen = - REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 76800), - .mpllb_sscstep = - REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 129024), -}; - -static const struct intel_mpllb_state dg2_dp_uhbr13_38_4 = { - .clock = 1350000, - .ref_control = - REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), - .mpllb_cp = - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 56) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), - .mpllb_div = - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3), - .mpllb_div2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 670), - .mpllb_fracn1 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), - .mpllb_fracn2 = - REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36864), - - /* - * SSC will be enabled, DP UHBR has a minimum SSC requirement. 
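
For illustration only: the removed tables above (and the 100 MHz ones that remain) are built entirely from REG_FIELD_PREP(), which places a value into the position of a register mask. Roughly, in plain C, assuming a nonzero mask and a made-up helper name:

#include <stdint.h>

static inline uint32_t field_prep(uint32_t mask, uint32_t val)
{
	/* shift val to the mask's lowest set bit, then clamp to the mask */
	return (val << __builtin_ctz(mask)) & mask;
}

/* e.g. a 12-bit multiplier field at bits 11:0: field_prep(0xfff, 304) == 304 */
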
- */ - .mpllb_sscen = - REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | - REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 103680), - .mpllb_sscstep = - REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 174182), -}; - -static const struct intel_mpllb_state * const dg2_dp_38_4_tables[] = { - &dg2_dp_rbr_38_4, - &dg2_dp_hbr1_38_4, - &dg2_dp_hbr2_38_4, - &dg2_dp_hbr3_38_4, - &dg2_dp_uhbr10_38_4, - &dg2_dp_uhbr13_38_4, - NULL, -}; - -/* * eDP link rates with 100 MHz reference clock. */ @@ -748,22 +558,7 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { return dg2_edp_tables; } else if (intel_crtc_has_dp_encoder(crtc_state)) { - /* - * FIXME: Initially we're just enabling the "combo" outputs on - * port A-D. The MPLLB for those ports takes an input from the - * "Display Filter PLL" which always has an output frequency - * of 100 MHz, hence the use of the _100 tables below. - * - * Once we enable port TC1 it will either use the same 100 MHz - * "Display Filter PLL" (when strapped to support a native - * display connection) or different 38.4 MHz "Filter PLL" when - * strapped to support a USB connection, so we'll need to check - * that to determine which table to use. - */ - if (0) - return dg2_dp_38_4_tables; - else - return dg2_dp_100_tables; + return dg2_dp_100_tables; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { return dg2_hdmi_tables; } @@ -775,6 +570,7 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state, int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_mpllb_state * const *tables; int i; @@ -786,8 +582,8 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, * until we have a proper algorithm under a valid * license. */ - DRM_DEBUG_KMS("Can't support HDMI link rate %d\n", - crtc_state->port_clock); + drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d\n", + crtc_state->port_clock); return -EINVAL; } } @@ -854,7 +650,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder, * dp_mpllb_state interface signal. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 5)) - DRM_ERROR("Port %c PLL not locked\n", phy_name(phy)); + drm_dbg_kms(&dev_priv->drm, "Port %c PLL not locked\n", phy_name(phy)); /* * 11. If the frequency will result in a change to the voltage @@ -867,8 +663,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder, void intel_mpllb_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); i915_reg_t enable_reg = (phy <= PHY_D ? DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0)); @@ -881,21 +677,21 @@ void intel_mpllb_disable(struct intel_encoder *encoder) */ /* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */ - intel_uncore_rmw(&dev_priv->uncore, enable_reg, PLL_ENABLE, 0); + intel_uncore_rmw(&i915->uncore, enable_reg, PLL_ENABLE, 0); /* * 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0". * This will allow the PLL to stop running. */ - intel_uncore_rmw(&dev_priv->uncore, SNPS_PHY_MPLLB_DIV(phy), + intel_uncore_rmw(&i915->uncore, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0); /* * 5. 
Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment * (dp_txX_ack) that the new transmitter setting request is completed. */ - if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 5)) - DRM_ERROR("Port %c PLL not locked\n", phy_name(phy)); + if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 5)) + drm_err(&i915->drm, "Port %c PLL not locked\n", phy_name(phy)); /* * 6. If the frequency will result in a change to the voltage diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h b/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h new file mode 100644 index 000000000000..0543465aaf14 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_SNPS_PHY_REGS__ +#define __INTEL_SNPS_PHY_REGS__ + +#include "i915_reg_defs.h" + +#define _SNPS_PHY_A_BASE 0x168000 +#define _SNPS_PHY_B_BASE 0x169000 +#define _SNPS_PHY(phy) _PHY(phy, \ + _SNPS_PHY_A_BASE, \ + _SNPS_PHY_B_BASE) +#define _SNPS2(phy, reg) (_SNPS_PHY(phy) - \ + _SNPS_PHY_A_BASE + (reg)) +#define _MMIO_SNPS(phy, reg) _MMIO(_SNPS2(phy, reg)) +#define _MMIO_SNPS_LN(ln, phy, reg) _MMIO(_SNPS2(phy, \ + (reg) + (ln) * 0x10)) + +#define SNPS_PHY_MPLLB_CP(phy) _MMIO_SNPS(phy, 0x168000) +#define SNPS_PHY_MPLLB_CP_INT REG_GENMASK(31, 25) +#define SNPS_PHY_MPLLB_CP_INT_GS REG_GENMASK(23, 17) +#define SNPS_PHY_MPLLB_CP_PROP REG_GENMASK(15, 9) +#define SNPS_PHY_MPLLB_CP_PROP_GS REG_GENMASK(7, 1) + +#define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004) +#define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31) +#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30) +#define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29) +#define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26) +#define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24) +#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16) +#define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10) +#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9) +#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8) +#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5) +#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0) + +#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008) +#define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31) +#define SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN REG_BIT(30) +#define SNPS_PHY_MPLLB_FRACN_DEN REG_GENMASK(15, 0) + +#define SNPS_PHY_MPLLB_FRACN2(phy) _MMIO_SNPS(phy, 0x16800C) +#define SNPS_PHY_MPLLB_FRACN_REM REG_GENMASK(31, 16) +#define SNPS_PHY_MPLLB_FRACN_QUOT REG_GENMASK(15, 0) + +#define SNPS_PHY_MPLLB_SSCEN(phy) _MMIO_SNPS(phy, 0x168014) +#define SNPS_PHY_MPLLB_SSC_EN REG_BIT(31) +#define SNPS_PHY_MPLLB_SSC_UP_SPREAD REG_BIT(30) +#define SNPS_PHY_MPLLB_SSC_PEAK REG_GENMASK(29, 10) + +#define SNPS_PHY_MPLLB_SSCSTEP(phy) _MMIO_SNPS(phy, 0x168018) +#define SNPS_PHY_MPLLB_SSC_STEPSIZE REG_GENMASK(31, 11) + +#define SNPS_PHY_MPLLB_DIV2(phy) _MMIO_SNPS(phy, 0x16801C) +#define SNPS_PHY_MPLLB_HDMI_PIXEL_CLK_DIV REG_GENMASK(19, 18) +#define SNPS_PHY_MPLLB_HDMI_DIV REG_GENMASK(17, 15) +#define SNPS_PHY_MPLLB_REF_CLK_DIV REG_GENMASK(14, 12) +#define SNPS_PHY_MPLLB_MULTIPLIER REG_GENMASK(11, 0) + +#define SNPS_PHY_REF_CONTROL(phy) _MMIO_SNPS(phy, 0x168188) +#define SNPS_PHY_REF_CONTROL_REF_RANGE REG_GENMASK(31, 27) + +#define SNPS_PHY_TX_REQ(phy) _MMIO_SNPS(phy, 0x168200) +#define SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR REG_GENMASK(31, 30) + +#define SNPS_PHY_TX_EQ(ln, phy) _MMIO_SNPS_LN(ln, phy, 0x168300) +#define SNPS_PHY_TX_EQ_MAIN REG_GENMASK(23, 18) +#define SNPS_PHY_TX_EQ_POST REG_GENMASK(15, 10) +#define 
SNPS_PHY_TX_EQ_PRE REG_GENMASK(7, 2) + +#endif /* __INTEL_SNPS_PHY_REGS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 2357a1301f48..2d71294aaceb 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -53,6 +53,7 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_rect *src = &plane_state->uapi.src; u32 src_x, src_y, src_w, src_h, hsub, vsub; @@ -94,14 +95,14 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) hsub = vsub = max(hsub, vsub); if (src_x % hsub || src_w % hsub) { - DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", - src_x, src_w, hsub, yesno(rotated)); + drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_x, src_w, hsub, yesno(rotated)); return -EINVAL; } if (src_y % vsub || src_h % vsub) { - DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", - src_y, src_h, vsub, yesno(rotated)); + drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_y, src_h, vsub, yesno(rotated)); return -EINVAL; } @@ -313,7 +314,7 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 sprctl = 0; if (crtc_state->gamma_enable) - sprctl |= SP_GAMMA_ENABLE; + sprctl |= SP_PIPE_GAMMA_ENABLE; return sprctl; } @@ -436,9 +437,9 @@ vlv_sprite_update_noarm(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), - (crtc_y << 16) | crtc_x); + SP_POS_Y(crtc_y) | SP_POS_X(crtc_x)); intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), - ((crtc_h - 1) << 16) | (crtc_w - 1)); + SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -479,7 +480,8 @@ vlv_sprite_update_arm(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); - intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x); + intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), + SP_OFFSET_Y(y) | SP_OFFSET_X(x)); /* * The control register self-arms if the plane was previously @@ -700,7 +702,7 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 sprctl = 0; if (crtc_state->gamma_enable) - sprctl |= SPRITE_GAMMA_ENABLE; + sprctl |= SPRITE_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) sprctl |= SPRITE_PIPE_CSC_ENABLE; @@ -770,7 +772,7 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, } if (!ivb_need_sprite_gamma(plane_state)) - sprctl |= SPRITE_INT_GAMMA_DISABLE; + sprctl |= SPRITE_PLANE_GAMMA_DISABLE; if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709; @@ -863,14 +865,18 @@ ivb_sprite_update_noarm(struct intel_plane *plane, unsigned long irqflags; if (crtc_w != src_w || crtc_h != src_h) - sprscale = SPRITE_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); + sprscale = SPRITE_SCALE_ENABLE | + SPRITE_SRC_WIDTH(src_w - 1) | + SPRITE_SRC_HEIGHT(src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); - 
intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, SPRSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); + intel_de_write_fw(dev_priv, SPRPOS(pipe), + SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x)); + intel_de_write_fw(dev_priv, SPRSIZE(pipe), + SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1)); if (IS_IVYBRIDGE(dev_priv)) intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); @@ -907,10 +913,12 @@ ivb_sprite_update_arm(struct intel_plane *plane, /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - intel_de_write_fw(dev_priv, SPROFFSET(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, SPROFFSET(pipe), + SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } else { intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset); - intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), + SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } /* @@ -1047,7 +1055,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 dvscntr = 0; if (crtc_state->gamma_enable) - dvscntr |= DVS_GAMMA_ENABLE; + dvscntr |= DVS_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) dvscntr |= DVS_PIPE_CSC_ENABLE; @@ -1199,14 +1207,18 @@ g4x_sprite_update_noarm(struct intel_plane *plane, unsigned long irqflags; if (crtc_w != src_w || crtc_h != src_h) - dvsscale = DVS_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); + dvsscale = DVS_SCALE_ENABLE | + DVS_SRC_WIDTH(src_w - 1) | + DVS_SRC_HEIGHT(src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); - intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, DVSSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); + intel_de_write_fw(dev_priv, DVSPOS(pipe), + DVS_POS_Y(crtc_y) | DVS_POS_X(crtc_x)); + intel_de_write_fw(dev_priv, DVSSIZE(pipe), + DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1)); intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -1321,6 +1333,7 @@ static int g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; const struct drm_rect *src = &plane_state->uapi.src; const struct drm_rect *dst = &plane_state->uapi.dst; @@ -1346,7 +1359,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { if (src_h & 1) { - DRM_DEBUG_KMS("Source height must be even with interlaced modes\n"); + drm_dbg_kms(&i915->drm, "Source height must be even with interlaced modes\n"); return -EINVAL; } min_height = 6; @@ -1358,20 +1371,20 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, if (src_w < min_width || src_h < min_height || src_w > 2048 || src_h > 2048) { - DRM_DEBUG_KMS("Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n", - src_w, src_h, min_width, min_height, 2048, 2048); + drm_dbg_kms(&i915->drm, "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n", + src_w, src_h, min_width, min_height, 2048, 2048); return -EINVAL; } if (width_bytes > 4096) { - DRM_DEBUG_KMS("Fetch width (%d) exceeds hardware max with scaling (%u)\n", - width_bytes, 4096); + drm_dbg_kms(&i915->drm, "Fetch width (%d) exceeds hardware max 
with scaling (%u)\n", + width_bytes, 4096); return -EINVAL; } if (stride > 4096) { - DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n", - stride, 4096); + drm_dbg_kms(&i915->drm, "Stride (%u) exceeds hardware max with scaling (%u)\n", + stride, 4096); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 40faa18947c9..fc037c027ea5 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -4,10 +4,12 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_dp_mst.h" #include "intel_tc.h" +#include "intel_tc_phy_regs.h" static const char *tc_port_mode_name(enum tc_port_mode mode) { @@ -345,10 +347,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; - val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx)); + val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, assuming not complete\n", @@ -690,6 +693,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_encoder *encoder = &dig_port->base; + intel_wakeref_t tc_cold_wref; + enum intel_display_power_domain domain; int active_links = 0; mutex_lock(&dig_port->tc_lock); @@ -701,12 +706,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED); drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); - if (active_links) { - enum intel_display_power_domain domain; - intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain); - dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + tc_cold_wref = tc_cold_block(dig_port, &domain); + dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + if (active_links) { if (!icl_tc_phy_is_connected(dig_port)) drm_dbg_kms(&i915->drm, "Port %s: PHY disconnected with %d active link(s)\n", @@ -715,10 +719,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); - - tc_cold_unblock(dig_port, domain, tc_cold_wref); + } else { + /* + * TBT-alt is the default mode in any case the PHY ownership is not + * held (regardless of the sink's connected live state), so + * we'll just switch to disconnected mode from it here without + * a note. 
+ */ + if (dig_port->tc_mode != TC_PORT_TBT_ALT) + drm_dbg_kms(&i915->drm, + "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", + dig_port->tc_port_name, + tc_port_mode_name(dig_port->tc_mode)); + icl_tc_phy_disconnect(dig_port); } + tc_cold_unblock(dig_port, domain, tc_cold_wref); + drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); diff --git a/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h new file mode 100644 index 000000000000..5a545086f959 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_TC_PHY_REGS__ +#define __INTEL_TC_PHY_REGS__ + +#include "i915_reg_defs.h" + +#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \ + _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) + +#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C +#define MG_TX1_LINK_PARAMS(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX1LN1_PORT1) + +#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC +#define MG_TX2_LINK_PARAMS(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX2LN1_PORT1) +#define CRI_USE_FS32 (1 << 5) + +#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C +#define MG_TX1_PISO_READLOAD(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX1LN1_PORT1) + +#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC +#define MG_TX2_PISO_READLOAD(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX2LN1_PORT1) +#define CRI_CALCINIT (1 << 1) + +#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 +#define MG_TX1_SWINGCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ + MG_TX_SWINGCTRL_TX1LN0_PORT2, \ + MG_TX_SWINGCTRL_TX1LN1_PORT1) + +#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 +#define MG_TX2_SWINGCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ + MG_TX_SWINGCTRL_TX2LN0_PORT2, \ + MG_TX_SWINGCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) +#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) + +#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144 +#define 
MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 +#define MG_TX1_DRVCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ + MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ + MG_TX_DRVCTRL_TX1LN1_TXPORT1) + +#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 +#define MG_TX2_DRVCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ + MG_TX_DRVCTRL_TX2LN0_PORT2, \ + MG_TX_DRVCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) +#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) +#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) +#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) +#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) +#define CRI_LOADGEN_SEL(x) ((x) << 12) +#define CRI_LOADGEN_SEL_MASK (0x3 << 12) + +#define MG_CLKHUB_LN0_PORT1 0x16839C +#define MG_CLKHUB_LN1_PORT1 0x16879C +#define MG_CLKHUB_LN0_PORT2 0x16939C +#define MG_CLKHUB_LN1_PORT2 0x16979C +#define MG_CLKHUB(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \ + MG_CLKHUB_LN0_PORT2, \ + MG_CLKHUB_LN1_PORT1) +#define CFG_LOW_RATE_LKREN_EN (1 << 11) + +#define MG_TX_DCC_TX1LN0_PORT1 0x168110 +#define MG_TX_DCC_TX1LN1_PORT1 0x168510 +#define MG_TX_DCC_TX1LN0_PORT2 0x169110 +#define MG_TX_DCC_TX1LN1_PORT2 0x169510 +#define MG_TX1_DCC(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \ + MG_TX_DCC_TX1LN0_PORT2, \ + MG_TX_DCC_TX1LN1_PORT1) +#define MG_TX_DCC_TX2LN0_PORT1 0x168090 +#define MG_TX_DCC_TX2LN1_PORT1 0x168490 +#define MG_TX_DCC_TX2LN0_PORT2 0x169090 +#define MG_TX_DCC_TX2LN1_PORT2 0x169490 +#define MG_TX2_DCC(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \ + MG_TX_DCC_TX2LN0_PORT2, \ + MG_TX_DCC_TX2LN1_PORT1) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) + +#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0 +#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0 +#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0 +#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0 +#define MG_DP_MODE(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \ + MG_DP_MODE_LN0_ACU_PORT2, \ + MG_DP_MODE_LN1_ACU_PORT1) +#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) +#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) + +#define FIA1_BASE 0x163000 +#define FIA2_BASE 0x16E000 +#define FIA3_BASE 0x16F000 +#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) +#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) + +/* ICL PHY DFLEX registers */ +#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) +#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx))) + +#define _MG_REFCLKIN_CTL_PORT1 0x16892C +#define _MG_REFCLKIN_CTL_PORT2 0x16992C +#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) +#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) +#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \ + _MG_REFCLKIN_CTL_PORT1, \ + _MG_REFCLKIN_CTL_PORT2) + +#define 
_MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8 +#define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8 +#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) +#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) +#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) +#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) +#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \ + _MG_CLKTOP2_CORECLKCTL1_PORT1, \ + _MG_CLKTOP2_CORECLKCTL1_PORT2) + +#define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4 +#define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4 +#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) +#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) +#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) +#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8 +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) +#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \ + _MG_CLKTOP2_HSCLKCTL_PORT1, \ + _MG_CLKTOP2_HSCLKCTL_PORT2) + +#define _MG_PLL_DIV0_PORT1 0x168A00 +#define _MG_PLL_DIV0_PORT2 0x169A00 +#define MG_PLL_DIV0_FRACNEN_H (1 << 30) +#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8) +#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8 +#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8) +#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0) +#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0) +#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \ + _MG_PLL_DIV0_PORT2) + +#define _MG_PLL_DIV1_PORT1 0x168A04 +#define _MG_PLL_DIV1_PORT2 0x169A04 +#define MG_PLL_DIV1_IREF_NDIVRATIO(x) ((x) << 16) +#define MG_PLL_DIV1_DITHER_DIV_1 (0 << 12) +#define MG_PLL_DIV1_DITHER_DIV_2 (1 << 12) +#define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12) +#define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12) +#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4) +#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0) +#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0) +#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \ + _MG_PLL_DIV1_PORT2) + +#define _MG_PLL_LF_PORT1 0x168A08 +#define _MG_PLL_LF_PORT2 0x169A08 +#define MG_PLL_LF_TDCTARGETCNT(x) ((x) << 24) +#define MG_PLL_LF_AFCCNTSEL_256 (0 << 20) +#define MG_PLL_LF_AFCCNTSEL_512 (1 << 20) +#define MG_PLL_LF_GAINCTRL(x) ((x) << 16) +#define MG_PLL_LF_INT_COEFF(x) ((x) << 8) +#define MG_PLL_LF_PROP_COEFF(x) ((x) << 0) +#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \ + _MG_PLL_LF_PORT2) + +#define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C +#define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C +#define MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 (1 << 18) +#define MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 (1 << 16) +#define MG_PLL_FRAC_LOCK_LOCKTHRESH(x) ((x) << 11) +#define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10) +#define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8) +#define MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0) +#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \ + _MG_PLL_FRAC_LOCK_PORT1, \ + _MG_PLL_FRAC_LOCK_PORT2) + +#define _MG_PLL_SSC_PORT1 0x168A10 +#define _MG_PLL_SSC_PORT2 0x169A10 +#define MG_PLL_SSC_EN (1 << 28) +#define MG_PLL_SSC_TYPE(x) ((x) << 26) +#define MG_PLL_SSC_STEPLENGTH(x) ((x) << 16) +#define MG_PLL_SSC_STEPNUM(x) ((x) << 10) +#define MG_PLL_SSC_FLLEN (1 << 9) +#define 
MG_PLL_SSC_STEPSIZE(x) ((x) << 0) +#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \ + _MG_PLL_SSC_PORT2) + +#define _MG_PLL_BIAS_PORT1 0x168A14 +#define _MG_PLL_BIAS_PORT2 0x169A14 +#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) +#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) +#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) +#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24) +#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) +#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16) +#define MG_PLL_BIAS_BIASCAL_EN (1 << 15) +#define MG_PLL_BIAS_CTRIM(x) ((x) << 8) +#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8) +#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) +#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) +#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) +#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) +#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \ + _MG_PLL_BIAS_PORT2) + +#define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18 +#define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18 +#define MG_PLL_TDC_COLDST_IREFINT_EN (1 << 27) +#define MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x) ((x) << 17) +#define MG_PLL_TDC_COLDST_COLDSTART (1 << 16) +#define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2) +#define MG_PLL_TDC_TDCSEL(x) ((x) << 0) +#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \ + _MG_PLL_TDC_COLDST_BIAS_PORT1, \ + _MG_PLL_TDC_COLDST_BIAS_PORT2) + +#endif /* __INTEL_TC_PHY_REGS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index f043d85ba64d..b9397d9363c5 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -162,6 +162,14 @@ struct bdb_general_features { u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ u8 dp_ssc_dongle_supported:1; u8 rsvd11:2; /* finish byte */ + + /* bits 6 */ + u8 tc_hpd_retry_timeout:7; /* 242 */ + u8 rsvd12:1; + + /* bits 7 */ + u8 afc_startup_config:2;/* 249 */ + u8 rsvd13:6; } __packed; /* @@ -226,32 +234,6 @@ struct bdb_general_features { #define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) #define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) -/* - * Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the - * system, the other bits may or may not be set for eDP outputs. 
- */ -#define DEVICE_TYPE_eDP_BITS \ - (DEVICE_TYPE_INTERNAL_CONNECTOR | \ - DEVICE_TYPE_MIPI_OUTPUT | \ - DEVICE_TYPE_COMPOSITE_OUTPUT | \ - DEVICE_TYPE_DUAL_CHANNEL | \ - DEVICE_TYPE_LVDS_SIGNALING | \ - DEVICE_TYPE_TMDS_DVI_SIGNALING | \ - DEVICE_TYPE_VIDEO_SIGNALING | \ - DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ - DEVICE_TYPE_ANALOG_OUTPUT) - -#define DEVICE_TYPE_DP_DUAL_MODE_BITS \ - (DEVICE_TYPE_INTERNAL_CONNECTOR | \ - DEVICE_TYPE_MIPI_OUTPUT | \ - DEVICE_TYPE_COMPOSITE_OUTPUT | \ - DEVICE_TYPE_LVDS_SIGNALING | \ - DEVICE_TYPE_TMDS_DVI_SIGNALING | \ - DEVICE_TYPE_VIDEO_SIGNALING | \ - DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ - DEVICE_TYPE_DIGITAL_OUTPUT | \ - DEVICE_TYPE_ANALOG_OUTPUT) - #define DEVICE_CFG_NONE 0x00 #define DEVICE_CFG_12BIT_DVOB 0x01 #define DEVICE_CFG_12BIT_DVOC 0x02 diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 9b05f93ed8bc..545eff5bf158 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -341,19 +341,14 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - enum pipe pipe = crtc->pipe; if (!INTEL_INFO(i915)->display.has_dsc) return false; - /* On TGL, DSC is supported on all Pipes */ if (DISPLAY_VER(i915) >= 12) return true; - if (DISPLAY_VER(i915) >= 11 && - (pipe != PIPE_A || cpu_transcoder == TRANSCODER_EDP || - cpu_transcoder == TRANSCODER_DSI_0 || - cpu_transcoder == TRANSCODER_DSI_1)) + if (DISPLAY_VER(i915) >= 11 && cpu_transcoder != TRANSCODER_A) return true; return false; @@ -1112,18 +1107,6 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2; } -struct intel_crtc * -intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc) -{ - return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1); -} - -static struct intel_crtc * -intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc) -{ - return intel_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1); -} - void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1131,7 +1114,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) u32 dss_ctl1_val = 0; if (crtc_state->bigjoiner && !crtc_state->dsc.compression_enable) { - if (crtc_state->bigjoiner_slave) + if (intel_crtc_is_bigjoiner_slave(crtc_state)) dss_ctl1_val |= UNCOMPRESSED_JOINER_SLAVE; else dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER; @@ -1159,7 +1142,7 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state) } if (crtc_state->bigjoiner) { dss_ctl1_val |= BIG_JOINER_ENABLE; - if (!crtc_state->bigjoiner_slave) + if (!intel_crtc_is_bigjoiner_slave(crtc_state)) dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE; } intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val); @@ -1179,25 +1162,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) } } -void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 dss_ctl1; - - dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, 
crtc_state->cpu_transcoder)); - if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) { - crtc_state->bigjoiner = true; - crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc); - drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc); - } else if (dss_ctl1 & UNCOMPRESSED_JOINER_SLAVE) { - crtc_state->bigjoiner = true; - crtc_state->bigjoiner_slave = true; - crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc); - drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc); - } -} - void intel_dsc_get_config(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1228,18 +1192,6 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state) crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) && (dss_ctl1 & JOINER_ENABLE); - if (dss_ctl1 & BIG_JOINER_ENABLE) { - crtc_state->bigjoiner = true; - - if (!(dss_ctl1 & MASTER_BIG_JOINER_ENABLE)) { - crtc_state->bigjoiner_slave = true; - crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_primary(crtc); - } else { - crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc); - } - drm_WARN_ON(&dev_priv->drm, !crtc_state->bigjoiner_linked_crtc); - } - /* FIXME: add more state readout as needed */ /* PPS1 */ diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index 4ec75f715986..8763f00fa7e2 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -18,7 +18,6 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) void intel_dsc_enable(const struct intel_crtc_state *crtc_state); void intel_dsc_disable(const struct intel_crtc_state *crtc_state); int intel_dsc_compute_params(struct intel_crtc_state *pipe_config); -void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state); void intel_dsc_get_config(struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder); diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index fa779f7ea415..b5d058404c14 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -7,6 +7,7 @@ #include <linux/vgaarb.h> #include <drm/i915_drm.h> +#include <video/vga.h> #include "i915_drv.h" #include "intel_de.h" @@ -34,9 +35,9 @@ void intel_vga_disable(struct drm_i915_private *dev_priv) /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); - outb(SR01, VGA_SR_INDEX); - sr1 = inb(VGA_SR_DATA); - outb(sr1 | 1 << 5, VGA_SR_DATA); + outb(0x01, VGA_SEQ_I); + sr1 = inb(VGA_SEQ_D); + outb(sr1 | VGA_SR01_SCREEN_OFF, VGA_SEQ_D); vga_put(pdev, VGA_RSRC_LEGACY_IO); udelay(300); @@ -92,7 +93,7 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915) * and error messages. 
*/ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); - outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); + outb(inb(VGA_MIS_R), VGA_MIS_W); vga_put(pdev, VGA_RSRC_LEGACY_IO); } diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 93a385396512..1223075595ff 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -961,6 +961,7 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, static u32 skl_surf_address(const struct intel_plane_state *plane_state, int color_plane) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; u32 offset = plane_state->view.color_plane[color_plane].offset; @@ -969,11 +970,11 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, * The DPT object contains only one vma, so the VMA's offset * within the DPT is always 0. */ - WARN_ON(plane_state->dpt_vma->node.start); - WARN_ON(offset & 0x1fffff); + drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start); + drm_WARN_ON(&i915->drm, offset & 0x1fffff); return offset >> 9; } else { - WARN_ON(offset & 0xfff); + drm_WARN_ON(&i915->drm, offset & 0xfff); return offset; } } @@ -992,6 +993,54 @@ static u32 skl_plane_surf(const struct intel_plane_state *plane_state, return plane_surf; } +static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = skl_main_to_aux_plane(fb, color_plane); + u32 aux_dist; + + if (!aux_plane) + return 0; + + aux_dist = skl_surf_address(plane_state, aux_plane) - + skl_surf_address(plane_state, color_plane); + + if (DISPLAY_VER(i915) < 12) + aux_dist |= PLANE_AUX_STRIDE(skl_plane_stride(plane_state, aux_plane)); + + return aux_dist; +} + +static u32 skl_plane_keyval(const struct intel_plane_state *plane_state) +{ + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + + return key->min_value; +} + +static u32 skl_plane_keymax(const struct intel_plane_state *plane_state) +{ + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u8 alpha = plane_state->hw.alpha >> 8; + + return (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); +} + +static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state) +{ + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u8 alpha = plane_state->hw.alpha >> 8; + u32 keymsk; + + keymsk = key->channel_mask & 0x7ffffff; + if (alpha < 0xff) + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; + + return keymsk; +} + static void icl_plane_csc_load_black(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); @@ -1016,15 +1065,24 @@ static void icl_plane_csc_load_black(struct intel_plane *plane) intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } +static int skl_plane_color_plane(const struct intel_plane_state *plane_state) +{ + /* Program the UV plane on planar master */ + if (plane_state->planar_linked_plane && !plane_state->planar_slave) + return 1; + else + return 0; +} + static void -skl_program_plane_noarm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +skl_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct 
intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; + int color_plane = skl_plane_color_plane(plane_state); u32 stride = skl_plane_stride(plane_state, color_plane); const struct drm_framebuffer *fb = plane_state->hw.fb; int crtc_x = plane_state->uapi.dst.x1; @@ -1048,11 +1106,12 @@ skl_program_plane_noarm(struct intel_plane *plane, if (plane_state->force_black) icl_plane_csc_load_black(plane); - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); + intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - (crtc_y << 16) | crtc_x); + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - ((src_h - 1) << 16) | (src_w - 1)); + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), @@ -1076,21 +1135,17 @@ skl_program_plane_noarm(struct intel_plane *plane, } static void -skl_program_plane_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +skl_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - const struct drm_framebuffer *fb = plane_state->hw.fb; - int aux_plane = skl_main_to_aux_plane(fb, color_plane); + int color_plane = skl_plane_color_plane(plane_state); u32 x = plane_state->view.color_plane[color_plane].x; u32 y = plane_state->view.color_plane[color_plane].y; - u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0; - u8 alpha = plane_state->hw.alpha >> 8; + u32 plane_color_ctl = 0; u32 plane_ctl = plane_state->ctl; unsigned long irqflags; @@ -1100,36 +1155,22 @@ skl_program_plane_arm(struct intel_plane *plane, plane_color_ctl = plane_state->color_ctl | glk_plane_color_ctl_crtc(crtc_state); - keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - - keymsk = key->channel_mask & 0x7ffffff; - if (alpha < 0xff) - keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; - - if (aux_plane) { - aux_dist = skl_surf_address(plane_state, aux_plane) - - skl_surf_address(plane_state, color_plane); - - if (DISPLAY_VER(dev_priv) < 12) - aux_dist |= skl_plane_stride(plane_state, aux_plane); - } - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), - key->min_value); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax); + intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); + intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); + intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - (y << 16) | x); + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, color_plane)); if (DISPLAY_VER(dev_priv) < 11) 
intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), - (plane_state->view.color_plane[1].y << 16) | - plane_state->view.color_plane[1].x); + PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | + PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); if (DISPLAY_VER(dev_priv) >= 10) intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); @@ -1182,34 +1223,6 @@ skl_plane_async_flip(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void -skl_plane_update_noarm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - int color_plane = 0; - - if (plane_state->planar_linked_plane && !plane_state->planar_slave) - /* Program the UV plane on planar master */ - color_plane = 1; - - skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane); -} - -static void -skl_plane_update_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - int color_plane = 0; - - if (plane_state->planar_linked_plane && !plane_state->planar_slave) - /* Program the UV plane on planar master */ - color_plane = 1; - - skl_program_plane_arm(plane, crtc_state, plane_state, color_plane); -} - static bool intel_format_is_p01x(u32 format) { switch (format) { @@ -1338,6 +1351,7 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; @@ -1347,7 +1361,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s src_w & 3 && (rotation == DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { - DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); + drm_dbg_kms(&i915->drm, "src width must be multiple of 4 for rotated planar YUV\n"); return -EINVAL; } @@ -1816,20 +1830,27 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, return 0; } +static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) +{ + return pipe - PIPE_A + INTEL_FBC_A; +} + static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) + enum intel_fbc_id fbc_id, enum plane_id plane_id) { - if (!HAS_FBC(dev_priv)) + if ((INTEL_INFO(dev_priv)->display.fbc_mask & BIT(fbc_id)) == 0) return false; - return pipe == PIPE_A && plane_id == PLANE_PRIMARY; + return plane_id == PLANE_PRIMARY; } static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { - if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) - return dev_priv->fbc; + enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe); + + if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id)) + return dev_priv->fbc[fbc_id]; else return NULL; } @@ -2282,16 +2303,17 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id)); if (DISPLAY_VER(dev_priv) >= 11) - pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; + pixel_format = val & PLANE_CTL_FORMAT_MASK_ICL; else - pixel_format = val & PLANE_CTL_FORMAT_MASK; + pixel_format = val & PLANE_CTL_FORMAT_MASK_SKL; if (DISPLAY_VER(dev_priv) >= 10) { - alpha = intel_de_read(dev_priv, - 
PLANE_COLOR_CTL(pipe, plane_id)); - alpha &= PLANE_COLOR_ALPHA_MASK; + u32 color_ctl; + + color_ctl = intel_de_read(dev_priv, PLANE_COLOR_CTL(pipe, plane_id)); + alpha = REG_FIELD_GET(PLANE_COLOR_ALPHA_MASK, color_ctl); } else { - alpha = val & PLANE_CTL_ALPHA_MASK; + alpha = REG_FIELD_GET(PLANE_CTL_ALPHA_MASK, val); } fourcc = skl_format_to_fourcc(pixel_format, @@ -2355,22 +2377,19 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, if (drm_rotation_90_or_270(plane_config->rotation)) goto error; - base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000; + base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & PLANE_SURF_ADDR_MASK; plane_config->base = base; offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); - fb->height = ((val >> 16) & 0xffff) + 1; - fb->width = ((val >> 0) & 0xffff) + 1; + fb->height = REG_FIELD_GET(PLANE_HEIGHT_MASK, val) + 1; + fb->width = REG_FIELD_GET(PLANE_WIDTH_MASK, val) + 1; val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); - if (DISPLAY_VER(dev_priv) >= 13) - fb->pitches[0] = (val & PLANE_STRIDE_MASK_XELPD) * stride_mult; - else - fb->pitches[0] = (val & PLANE_STRIDE_MASK) * stride_mult; + fb->pitches[0] = REG_FIELD_GET(PLANE_STRIDE__MASK, val) * stride_mult; aligned_height = intel_fb_align_height(fb, 0, fb->height); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 20141f33ed64..0d936f658b3f 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -44,6 +44,7 @@ #include "skl_scaler.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" +#include "vlv_dsi_regs.h" #include "vlv_sideband.h" /* return pixels in terms of txbyteclkhs */ @@ -1492,7 +1493,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, */ if (is_vid_mode(intel_dsi) && - intel_dsi->video_mode_format == VIDEO_MODE_BURST) { + intel_dsi->video_mode == BURST_MODE) { intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port), txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1); } else { @@ -1568,12 +1569,33 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port), intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); - if (is_vid_mode(intel_dsi)) - /* Some panels might have resolution which is not a + if (is_vid_mode(intel_dsi)) { + u32 fmt = intel_dsi->video_frmt_cfg_bits | IP_TG_CONFIG; + + /* + * Some panels might have resolution which is not a * multiple of 64 like 1366 x 768. Enable RANDOM - * resolution support for such panels by default */ - intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), - intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format | IP_TG_CONFIG | RANDOM_DPI_DISPLAY_RESOLUTION); + * resolution support for such panels by default. 
+ */ + fmt |= RANDOM_DPI_DISPLAY_RESOLUTION; + + switch (intel_dsi->video_mode) { + default: + MISSING_CASE(intel_dsi->video_mode); + fallthrough; + case NON_BURST_SYNC_EVENTS: + fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS; + break; + case NON_BURST_SYNC_PULSE: + fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE; + break; + case BURST_MODE: + fmt |= VIDEO_MODE_BURST; + break; + } + + intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt); + } } } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 1b81797dd02e..df880f44700a 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -32,6 +32,7 @@ #include "intel_display_types.h" #include "intel_dsi.h" #include "vlv_dsi_pll.h" +#include "vlv_dsi_pll_regs.h" #include "vlv_sideband.h" static const u16 lfsr_converts[] = { diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h new file mode 100644 index 000000000000..45590e14e54b --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __VLV_DSI_PLL_REGS_H__ +#define __VLV_DSI_PLL_REGS_H__ + +#include "vlv_dsi_regs.h" + +#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) +#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF +#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) +#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF + +#define BXT_MAX_VAR_OUTPUT_KHZ 39500 + +#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090) +#define BXT_MIPI1_DIV_SHIFT 26 +#define BXT_MIPI2_DIV_SHIFT 10 +#define BXT_MIPI_DIV_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \ + BXT_MIPI2_DIV_SHIFT) + +/* TX control divider to select actual TX clock output from (8x/var) */ +#define BXT_MIPI1_TX_ESCLK_SHIFT 26 +#define BXT_MIPI2_TX_ESCLK_SHIFT 10 +#define BXT_MIPI_TX_ESCLK_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \ + BXT_MIPI2_TX_ESCLK_SHIFT) +#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26) +#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10) +#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \ + _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \ + BXT_MIPI2_TX_ESCLK_FIXDIV_MASK) +#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \ + (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port)) +/* RX upper control divider to select actual RX clock output from 8x */ +#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21 +#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5 +#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \ + BXT_MIPI2_RX_ESCLK_UPPER_SHIFT) +#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21) +#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5) +#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \ + BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK) +#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \ + (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port)) +/* 8/3X divider to select the actual 8/3X clock output from 8x */ +#define BXT_MIPI1_8X_BY3_SHIFT 19 +#define BXT_MIPI2_8X_BY3_SHIFT 3 +#define BXT_MIPI_8X_BY3_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \ + BXT_MIPI2_8X_BY3_SHIFT) +#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19) +#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3) +#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \ + _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \ + BXT_MIPI2_8X_BY3_DIVIDER_MASK) +#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \ + (((val) & 3) << 
BXT_MIPI_8X_BY3_SHIFT(port)) +/* RX lower control divider to select actual RX clock output from 8x */ +#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16 +#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0 +#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \ + BXT_MIPI2_RX_ESCLK_LOWER_SHIFT) +#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16) +#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0) +#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \ + BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK) +#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \ + (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port)) + +#define RX_DIVIDER_BIT_1_2 0x3 +#define RX_DIVIDER_BIT_3_4 0xC + +#define BXT_DSI_PLL_CTL _MMIO(0x161000) +#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16 +#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT) +#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT) +#define BXT_DSIC_16X_BY1 (0 << 10) +#define BXT_DSIC_16X_BY2 (1 << 10) +#define BXT_DSIC_16X_BY3 (2 << 10) +#define BXT_DSIC_16X_BY4 (3 << 10) +#define BXT_DSIC_16X_MASK (3 << 10) +#define BXT_DSIA_16X_BY1 (0 << 8) +#define BXT_DSIA_16X_BY2 (1 << 8) +#define BXT_DSIA_16X_BY3 (2 << 8) +#define BXT_DSIA_16X_BY4 (3 << 8) +#define BXT_DSIA_16X_MASK (3 << 8) +#define BXT_DSI_FREQ_SEL_SHIFT 8 +#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT) + +#define BXT_DSI_PLL_RATIO_MAX 0x7D +#define BXT_DSI_PLL_RATIO_MIN 0x22 +#define GLK_DSI_PLL_RATIO_MAX 0x6F +#define GLK_DSI_PLL_RATIO_MIN 0x22 +#define BXT_DSI_PLL_RATIO_MASK 0xFF +#define BXT_REF_CLOCK_KHZ 19200 + +#define BXT_DSI_PLL_ENABLE _MMIO(0x46080) +#define BXT_DSI_PLL_DO_ENABLE (1 << 31) +#define BXT_DSI_PLL_LOCKED (1 << 30) + +#endif /* __VLV_DSI_PLL_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h new file mode 100644 index 000000000000..356e51515346 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h @@ -0,0 +1,480 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __VLV_DSI_REGS_H__ +#define __VLV_DSI_REGS_H__ + +#include "i915_reg_defs.h" + +#define VLV_MIPI_BASE VLV_DISPLAY_BASE +#define BXT_MIPI_BASE 0x60000 + +#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? 
a : c) /* ports A and C only */ +#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) + +/* BXT MIPI mode configure */ +#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 +#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8 +#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \ + _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE) + +#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC +#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC +#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \ + _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE) + +#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100 +#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900 +#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \ + _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL) + +#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) +#define STAP_SELECT (1 << 0) + +#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054) +#define HS_IO_CTRL_SELECT (1 << 0) + +#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) +#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) +#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) + + /* BXT port control */ +#define _BXT_MIPIA_PORT_CTRL 0x6B0C0 +#define _BXT_MIPIC_PORT_CTRL 0x6B8C0 +#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) + +#define DPI_ENABLE (1 << 31) /* A + C */ +#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 +#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) +#define DUAL_LINK_MODE_SHIFT 26 +#define DUAL_LINK_MODE_MASK (1 << 26) +#define DUAL_LINK_MODE_FRONT_BACK (0 << 26) +#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26) +#define DITHERING_ENABLE (1 << 25) /* A + C */ +#define FLOPPED_HSTX (1 << 23) +#define DE_INVERT (1 << 19) /* XXX */ +#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18 +#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18) +#define AFE_LATCHOUT (1 << 17) +#define LP_OUTPUT_HOLD (1 << 16) +#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15 +#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15) +#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11 +#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11) +#define CSB_SHIFT 9 +#define CSB_MASK (3 << 9) +#define CSB_20MHZ (0 << 9) +#define CSB_10MHZ (1 << 9) +#define CSB_40MHZ (2 << 9) +#define BANDGAP_MASK (1 << 8) +#define BANDGAP_PNW_CIRCUIT (0 << 8) +#define BANDGAP_LNC_CIRCUIT (1 << 8) +#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5 +#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5) +#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */ +#define TEARING_EFFECT_SHIFT 2 /* A + C */ +#define TEARING_EFFECT_MASK (3 << 2) +#define TEARING_EFFECT_OFF (0 << 2) +#define TEARING_EFFECT_DSI (1 << 2) +#define TEARING_EFFECT_GPIO (2 << 2) +#define LANE_CONFIGURATION_SHIFT 0 +#define LANE_CONFIGURATION_MASK (3 << 0) +#define LANE_CONFIGURATION_4LANE (0 << 0) +#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0) +#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0) + +#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) +#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) +#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) +#define TEARING_EFFECT_DELAY_SHIFT 0 +#define TEARING_EFFECT_DELAY_MASK (0xffff << 0) + +/* XXX: all bits reserved */ +#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) + +/* MIPI DSI Controller and D-PHY registers */ + +#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) +#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) +#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) +#define BUS_POSSESSION (1 << 3) /* 
set to give bus to receiver */ +#define ULPS_STATE_MASK (3 << 1) +#define ULPS_STATE_ENTER (2 << 1) +#define ULPS_STATE_EXIT (1 << 1) +#define ULPS_STATE_NORMAL_OPERATION (0 << 1) +#define DEVICE_READY (1 << 0) + +#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) +#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) +#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) +#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) +#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) +#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) +#define TEARING_EFFECT (1 << 31) +#define SPL_PKT_SENT_INTERRUPT (1 << 30) +#define GEN_READ_DATA_AVAIL (1 << 29) +#define LP_GENERIC_WR_FIFO_FULL (1 << 28) +#define HS_GENERIC_WR_FIFO_FULL (1 << 27) +#define RX_PROT_VIOLATION (1 << 26) +#define RX_INVALID_TX_LENGTH (1 << 25) +#define ACK_WITH_NO_ERROR (1 << 24) +#define TURN_AROUND_ACK_TIMEOUT (1 << 23) +#define LP_RX_TIMEOUT (1 << 22) +#define HS_TX_TIMEOUT (1 << 21) +#define DPI_FIFO_UNDERRUN (1 << 20) +#define LOW_CONTENTION (1 << 19) +#define HIGH_CONTENTION (1 << 18) +#define TXDSI_VC_ID_INVALID (1 << 17) +#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16) +#define TXCHECKSUM_ERROR (1 << 15) +#define TXECC_MULTIBIT_ERROR (1 << 14) +#define TXECC_SINGLE_BIT_ERROR (1 << 13) +#define TXFALSE_CONTROL_ERROR (1 << 12) +#define RXDSI_VC_ID_INVALID (1 << 11) +#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10) +#define RXCHECKSUM_ERROR (1 << 9) +#define RXECC_MULTIBIT_ERROR (1 << 8) +#define RXECC_SINGLE_BIT_ERROR (1 << 7) +#define RXFALSE_CONTROL_ERROR (1 << 6) +#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5) +#define RX_LP_TX_SYNC_ERROR (1 << 4) +#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3) +#define RXEOT_SYNC_ERROR (1 << 2) +#define RXSOT_SYNC_ERROR (1 << 1) +#define RXSOT_ERROR (1 << 0) + +#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) +#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) +#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) +#define CMD_MODE_DATA_WIDTH_MASK (7 << 13) +#define CMD_MODE_NOT_SUPPORTED (0 << 13) +#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) +#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13) +#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13) +#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13) +#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13) +#define VID_MODE_FORMAT_MASK (0xf << 7) +#define VID_MODE_NOT_SUPPORTED (0 << 7) +#define VID_MODE_FORMAT_RGB565 (1 << 7) +#define VID_MODE_FORMAT_RGB666_PACKED (2 << 7) +#define VID_MODE_FORMAT_RGB666 (3 << 7) +#define VID_MODE_FORMAT_RGB888 (4 << 7) +#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5 +#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5) +#define VID_MODE_CHANNEL_NUMBER_SHIFT 3 +#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3) +#define DATA_LANES_PRG_REG_SHIFT 0 +#define DATA_LANES_PRG_REG_MASK (7 << 0) + +#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) +#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) +#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) +#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff + +#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) +#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) +#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) +#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff + +#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) 
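[Editor's note, not part of the patch: the new vlv_dsi_regs.h above leans heavily on the paired-register convention where each MIPI register exists once for port A (0xb0xx offsets) and once for port C (0xb8xx offsets), with _MIPI_PORT()/_MMIO_MIPI() selecting between them. The following stand-alone C sketch is a toy model of that address selection only; the macro and register names here are illustrative, it drops the _MMIO()/i915_reg_t wrapping and the mipi_mmio_base offset, and it is not the kernel's code.]

    #include <stdio.h>

    /* Toy model of the A/C register pairing used by _MIPI_PORT()/_MMIO_MIPI(). */
    enum port { PORT_A, PORT_C };

    #define TOY_MIPI_PORT(port, a, c)   (((port) == PORT_A) ? (a) : (c))

    /* Example pair mirroring the 0xb0xx (port A) vs 0xb8xx (port C) layout. */
    #define TOY_MIPIA_DEVICE_READY      0xb000u
    #define TOY_MIPIC_DEVICE_READY      0xb800u
    #define TOY_MIPI_DEVICE_READY(port) \
            TOY_MIPI_PORT(port, TOY_MIPIA_DEVICE_READY, TOY_MIPIC_DEVICE_READY)

    int main(void)
    {
            /* Prints 0xb000 for port A and 0xb800 for port C. */
            printf("port A DEVICE_READY offset: 0x%x\n", TOY_MIPI_DEVICE_READY(PORT_A));
            printf("port C DEVICE_READY offset: 0x%x\n", TOY_MIPI_DEVICE_READY(PORT_C));
            return 0;
    }

[End of editor's note; the patch continues below.]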
+#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) +#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) +#define TURN_AROUND_TIMEOUT_MASK 0x3f + +#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) +#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) +#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) +#define DEVICE_RESET_TIMER_MASK 0xffff + +#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) +#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) +#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) +#define VERTICAL_ADDRESS_SHIFT 16 +#define VERTICAL_ADDRESS_MASK (0xffff << 16) +#define HORIZONTAL_ADDRESS_SHIFT 0 +#define HORIZONTAL_ADDRESS_MASK 0xffff + +#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) +#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) +#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) +#define DBI_FIFO_EMPTY_HALF (0 << 0) +#define DBI_FIFO_EMPTY_QUARTER (1 << 0) +#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) + +/* regs below are bits 15:0 */ +#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) +#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) +#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) + +#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) +#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) +#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) + +#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) +#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) +#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) + +#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) +#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) +#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) + +#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) +#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) +#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) + +#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) +#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) +#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) + +#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) +#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) +#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) + +#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) +#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) +#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) + +#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) +#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) +#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL) +#define DPI_LP_MODE (1 << 6) +#define BACKLIGHT_OFF (1 << 5) +#define BACKLIGHT_ON (1 << 4) +#define COLOR_MODE_OFF (1 << 3) +#define 
COLOR_MODE_ON (1 << 2) +#define TURN_ON (1 << 1) +#define SHUTDOWN (1 << 0) + +#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) +#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) +#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) +#define COMMAND_BYTE_SHIFT 0 +#define COMMAND_BYTE_MASK (0x3f << 0) + +#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) +#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) +#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) +#define MASTER_INIT_TIMER_SHIFT 0 +#define MASTER_INIT_TIMER_MASK (0xffff << 0) + +#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) +#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) +#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \ + _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) +#define MAX_RETURN_PKT_SIZE_SHIFT 0 +#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) + +#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) +#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) +#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) +#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) +#define DISABLE_VIDEO_BTA (1 << 3) +#define IP_TG_CONFIG (1 << 2) +#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0) +#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) +#define VIDEO_MODE_BURST (3 << 0) + +#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) +#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) +#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) +#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) +#define BXT_DPHY_DEFEATURE_EN (1 << 8) +#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) +#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) +#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) +#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4) +#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3) +#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2) +#define CLOCKSTOP (1 << 1) +#define EOT_DISABLE (1 << 0) + +#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) +#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) +#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) +#define LP_BYTECLK_SHIFT 0 +#define LP_BYTECLK_MASK (0xffff << 0) + +#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4) +#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4) +#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT) + +#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098) +#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898) +#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING) + +/* bits 31:0 */ +#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) +#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) +#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) + +/* bits 31:0 */ +#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) +#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) +#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) + +#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) +#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 
0xb86c) +#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) +#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) +#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) +#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) +#define LONG_PACKET_WORD_COUNT_SHIFT 8 +#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) +#define SHORT_PACKET_PARAM_SHIFT 8 +#define SHORT_PACKET_PARAM_MASK (0xffff << 8) +#define VIRTUAL_CHANNEL_SHIFT 6 +#define VIRTUAL_CHANNEL_MASK (3 << 6) +#define DATA_TYPE_SHIFT 0 +#define DATA_TYPE_MASK (0x3f << 0) +/* data type values, see include/video/mipi_display.h */ + +#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) +#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) +#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) +#define DPI_FIFO_EMPTY (1 << 28) +#define DBI_FIFO_EMPTY (1 << 27) +#define LP_CTRL_FIFO_EMPTY (1 << 26) +#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25) +#define LP_CTRL_FIFO_FULL (1 << 24) +#define HS_CTRL_FIFO_EMPTY (1 << 18) +#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17) +#define HS_CTRL_FIFO_FULL (1 << 16) +#define LP_DATA_FIFO_EMPTY (1 << 10) +#define LP_DATA_FIFO_HALF_EMPTY (1 << 9) +#define LP_DATA_FIFO_FULL (1 << 8) +#define HS_DATA_FIFO_EMPTY (1 << 2) +#define HS_DATA_FIFO_HALF_EMPTY (1 << 1) +#define HS_DATA_FIFO_FULL (1 << 0) + +#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) +#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) +#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) +#define DBI_HS_LP_MODE_MASK (1 << 0) +#define DBI_LP_MODE (1 << 0) +#define DBI_HS_MODE (0 << 0) + +#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) +#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) +#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) +#define EXIT_ZERO_COUNT_SHIFT 24 +#define EXIT_ZERO_COUNT_MASK (0x3f << 24) +#define TRAIL_COUNT_SHIFT 16 +#define TRAIL_COUNT_MASK (0x1f << 16) +#define CLK_ZERO_COUNT_SHIFT 8 +#define CLK_ZERO_COUNT_MASK (0xff << 8) +#define PREPARE_COUNT_SHIFT 0 +#define PREPARE_COUNT_MASK (0x3f << 0) + +#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) +#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) +#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) + +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088) +#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888) +#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) +#define LP_HS_SSW_CNT_SHIFT 16 +#define LP_HS_SSW_CNT_MASK (0xffff << 16) +#define HS_LP_PWR_SW_CNT_SHIFT 0 +#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) + +#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) +#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) +#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) +#define STOP_STATE_STALL_COUNTER_SHIFT 0 +#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) + +#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) +#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) +#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) +#define _MIPIA_INTR_EN_REG_1 
(dev_priv->mipi_mmio_base + 0xb094) +#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) +#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) +#define RX_CONTENTION_DETECTED (1 << 0) + +/* XXX: only pipe A ?!? */ +#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100) +#define DBI_TYPEC_ENABLE (1 << 31) +#define DBI_TYPEC_WIP (1 << 30) +#define DBI_TYPEC_OPTION_SHIFT 28 +#define DBI_TYPEC_OPTION_MASK (3 << 28) +#define DBI_TYPEC_FREQ_SHIFT 24 +#define DBI_TYPEC_FREQ_MASK (0xf << 24) +#define DBI_TYPEC_OVERRIDE (1 << 8) +#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0 +#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0) + +/* MIPI adapter registers */ + +#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) +#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) +#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL) +#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ +#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) +#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) +#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5) +#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5) +#define READ_REQUEST_PRIORITY_SHIFT 3 +#define READ_REQUEST_PRIORITY_MASK (3 << 3) +#define READ_REQUEST_PRIORITY_LOW (0 << 3) +#define READ_REQUEST_PRIORITY_HIGH (3 << 3) +#define RGB_FLIP_TO_BGR (1 << 2) + +#define BXT_PIPE_SELECT_SHIFT 7 +#define BXT_PIPE_SELECT_MASK (7 << 7) +#define BXT_PIPE_SELECT(pipe) ((pipe) << 7) +#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */ +#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */ +#define GLK_MIPIIO_RESET_RELEASED (1 << 28) +#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */ +#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */ +#define GLK_LP_WAKE (1 << 22) +#define GLK_LP11_LOW_PWR_MODE (1 << 21) +#define GLK_LP00_LOW_PWR_MODE (1 << 20) +#define GLK_FIREWALL_ENABLE (1 << 16) +#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10) +#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10 +#define BXT_DSC_ENABLE (1 << 3) +#define BXT_RGB_FLIP (1 << 2) +#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */ +#define GLK_MIPIIO_ENABLE (1 << 0) + +#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) +#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) +#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) +#define DATA_MEM_ADDRESS_SHIFT 5 +#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) +#define DATA_VALID (1 << 0) + +#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) +#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) +#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) +#define DATA_LENGTH_SHIFT 0 +#define DATA_LENGTH_MASK (0xfffff << 0) + +#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) +#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) +#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) +#define COMMAND_MEM_ADDRESS_SHIFT 5 +#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) +#define AUTO_PWG_ENABLE (1 << 2) +#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) +#define COMMAND_VALID (1 << 0) + +#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) +#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) +#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) +#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ +#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) + +#define _MIPIA_READ_DATA_RETURN0 
(dev_priv->mipi_mmio_base + 0xb118) +#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) +#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ + +#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) +#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) +#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) +#define READ_DATA_VALID(n) (1 << (n)) + +#endif /* __VLV_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index 8a248003dfae..ce91b23385cf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -4,6 +4,8 @@ * Copyright © 2016 Intel Corporation */ +#include <drm/drm_cache.h> + #include "display/intel_frontbuffer.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index ebbac2ea0833..bc6d59df064d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -67,6 +67,7 @@ #include <linux/log2.h> #include <linux/nospec.h> +#include <drm/drm_cache.h> #include <drm/drm_syncobj.h> #include "gt/gen6_ppgtt.h" @@ -79,6 +80,7 @@ #include "pxp/intel_pxp.h" +#include "i915_file_private.h" #include "i915_gem_context.h" #include "i915_trace.h" #include "i915_user_extensions.h" @@ -343,6 +345,20 @@ static int proto_context_register(struct drm_i915_file_private *fpriv, return ret; } +static struct i915_address_space * +i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) +{ + struct i915_address_space *vm; + + xa_lock(&file_priv->vm_xa); + vm = xa_load(&file_priv->vm_xa, id); + if (vm) + kref_get(&vm->ref); + xa_unlock(&file_priv->vm_xa); + + return vm; +} + static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, const struct drm_i915_gem_context_param *args) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 9402d4bf4ffc..c6eb023d3d86 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -3,12 +3,15 @@ * Copyright © 2020 Intel Corporation */ +#include <drm/drm_fourcc.h> + #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "pxp/intel_pxp.h" #include "i915_drv.h" +#include "i915_gem_create.h" #include "i915_trace.h" #include "i915_user_extensions.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.h b/drivers/gpu/drm/i915/gem/i915_gem_create.h new file mode 100644 index 000000000000..9536aa906001 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_GEM_CREATE_H__ +#define __I915_GEM_CREATE_H__ + +struct drm_file; +struct drm_device; +struct drm_mode_create_dumb; + +int i915_gem_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +#endif /* __I915_GEM_CREATE_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 1b526039a60d..13917231ae81 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -11,6 +11,7 @@ #include <asm/smp.h> +#include "gem/i915_gem_dmabuf.h" #include 
"i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" @@ -74,7 +75,8 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, kfree(sg); } -static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, + struct iosys_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); void *vaddr; @@ -83,12 +85,13 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map if (IS_ERR(vaddr)) return PTR_ERR(vaddr); - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } -static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, + struct iosys_map *map) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h new file mode 100644 index 000000000000..6e0405d47ce1 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_GEM_DMABUF_H__ +#define __I915_GEM_DMABUF_H__ + +struct drm_gem_object; +struct drm_device; +struct dma_buf; + +struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags); + +#endif /* __I915_GEM_DMABUF_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 26532c07d467..3e5d6057b3ef 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -9,12 +9,13 @@ #include "i915_drv.h" #include "i915_gem_clflush.h" +#include "i915_gem_domain.h" #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" -#include "i915_gem_object.h" -#include "i915_vma.h" #include "i915_gem_lmem.h" #include "i915_gem_mman.h" +#include "i915_gem_object.h" +#include "i915_vma.h" static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.h b/drivers/gpu/drm/i915/gem/i915_gem_domain.h new file mode 100644 index 000000000000..9622df962bfc --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_GEM_DOMAIN_H__ +#define __I915_GEM_DOMAIN_H__ + +struct drm_i915_gem_object; +enum i915_cache_level; + +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level); + +#endif /* __I915_GEM_DOMAIN_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index ae6805b37806..d42f437149c9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -23,9 +23,12 @@ #include "pxp/intel_pxp.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" +#include "i915_file_private.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" +#include "i915_gem_evict.h" #include "i915_gem_ioctls.h" #include "i915_trace.h" #include "i915_user_extensions.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index c5150a1ee3d2..c698f95af15f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -10,6 +10,7 @@ #include "i915_drv.h" #include "i915_gem.h" +#include "i915_gem_internal.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" #include "i915_utils.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.h b/drivers/gpu/drm/i915/gem/i915_gem_internal.h new file mode 100644 index 000000000000..6664e06112fc --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_GEM_INTERNAL_H__ +#define __I915_GEM_INTERNAL_H__ + +#include <linux/types.h> + +struct drm_i915_gem_object; +struct drm_i915_gem_object_ops; +struct drm_i915_private; + +struct drm_i915_gem_object * +i915_gem_object_create_internal(struct drm_i915_private *i915, + phys_addr_t size); +struct drm_i915_gem_object * +__i915_gem_object_create_internal(struct drm_i915_private *i915, + const struct drm_i915_gem_object_ops *ops, + phys_addr_t size); + +#endif /* __I915_GEM_INTERNAL_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 4afad1604a6a..efe69d6b86f4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -9,10 +9,13 @@ #include <linux/pfn_t.h> #include <linux/sizes.h> +#include <drm/drm_cache.h> + #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "i915_drv.h" +#include "i915_gem_evict.h" #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index e03e362d320b..2d593d573ef1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -24,11 +24,16 @@ #include <linux/sched/mm.h> +#include <drm/drm_cache.h> + #include "display/intel_frontbuffer.h" #include "pxp/intel_pxp.h" + #include "i915_drv.h" +#include "i915_file_private.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" +#include "i915_gem_dmabuf.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" #include "i915_gem_ttm.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 060fe29f5929..183b861620b8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -4,6 +4,8 @@ * Copyright © 2014-2016 Intel Corporation */ +#include <drm/drm_cache.h> + #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 6da68b38f00f..00359ec9d58b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -10,6 +10,7 @@ #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" +#include "i915_driver.h" #include "i915_drv.h" #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 6c57b0a79c8a..4efa821f3cb1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -5,8 +5,11 @@ */ #include <linux/pagevec.h> +#include <linux/shmem_fs.h> #include <linux/swap.h> +#include <drm/drm_cache.h> + #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_gemfs.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 26975d857776..b9c3196b91ca 100644 
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -14,7 +14,9 @@ #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_gem_stolen.h" +#include "i915_reg.h" #include "i915_vgpu.h" +#include "intel_mchbar_regs.h" /* * The BIOS typically reserves some of the system's memory for the exclusive diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c index 75501db71041..af85d0c28168 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -9,6 +9,7 @@ #include <drm/drm_file.h> #include "i915_drv.h" +#include "i915_file_private.h" #include "i915_gem_context.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index c3d432e314c9..d6adda5bf96b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -12,6 +12,8 @@ #include "i915_gem_ioctls.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" +#include "i915_gem_tiling.h" +#include "i915_reg.h" /** * DOC: buffer object tiling diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.h b/drivers/gpu/drm/i915/gem/i915_gem_tiling.h new file mode 100644 index 000000000000..9924196a8139 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_GEM_TILING_H__ +#define __I915_GEM_TILING_H__ + +#include <linux/types.h> + +struct drm_i915_private; + +u32 i915_gem_fence_size(struct drm_i915_private *i915, u32 size, + unsigned int tiling, unsigned int stride); +u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size, + unsigned int tiling, unsigned int stride); + +#endif /* __I915_GEM_TILING_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 1eb2fd81c5b6..8419096d4056 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include <linux/shmem_fs.h> + #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 3cc01c30dd62..6d1a71d6404c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -42,6 +42,7 @@ #include "i915_drv.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" +#include "i915_gem_userptr.h" #include "i915_scatterlist.h" #ifdef CONFIG_MMU_NOTIFIER diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h new file mode 100644 index 000000000000..8dadb2f8436d --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_GEM_USERPTR_H__ +#define __I915_GEM_USERPTR_H__ + +struct drm_i915_private; + +int i915_gem_init_userptr(struct drm_i915_private *dev_priv); +void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); + +#endif /* __I915_GEM_USERPTR_H__ */ diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index f36191ebf964..8424ee8c5eb8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ 
b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -8,9 +8,10 @@ #include "i915_selftest.h" -#include "gem/i915_gem_region.h" +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_pm.h" +#include "gem/i915_gem_region.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index c8ff8bf0986d..8f28e46e8ee5 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -7,8 +7,9 @@ #include "gt/intel_context.h" #include "gt/intel_engine_user.h" -#include "gt/intel_gt.h" #include "gt/intel_gpu_commands.h" +#include "gt/intel_gt.h" +#include "gt/intel_gt_regs.h" #include "gem/i915_gem_lmem.h" #include "selftests/igt_flush_test.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 80d99b9c694f..bd60d42238fb 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -6,8 +6,10 @@ #include <linux/prime_numbers.h> +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" @@ -894,7 +896,7 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *v return PTR_ERR(cmd); *cmd++ = MI_STORE_REGISTER_MEM_GEN8; - *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); + *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE)); *cmd++ = lower_32_bits(vma->node.start); *cmd++ = upper_32_bits(vma->node.start); *cmd = MI_BATCH_BUFFER_END; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index 3cc74b0fed06..b071a58dd6da 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -266,7 +266,7 @@ static int igt_dmabuf_import(void *arg) struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; void *obj_map, *dma_map; - struct dma_buf_map map; + struct iosys_map map; u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff }; int err, i; @@ -349,7 +349,7 @@ static int igt_dmabuf_import_ownership(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; - struct dma_buf_map map; + struct iosys_map map; void *ptr; int err; @@ -400,7 +400,7 @@ static int igt_dmabuf_export_vmap(void *arg) struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; - struct dma_buf_map map; + struct iosys_map map; void *ptr; int err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index ba29767348be..8ae1a1530bd8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -6,11 +6,13 @@ #include <linux/prime_numbers.h> +#include "gem/i915_gem_internal.h" +#include "gem/i915_gem_region.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" -#include "gem/i915_gem_region.h" + #include "huge_gem_object.h" #include "i915_selftest.h" #include "selftests/i915_random.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index 
b35c1219c852..3c55e77b0f1b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -7,6 +7,7 @@ #include "igt_gem_utils.h" #include "gem/i915_gem_context.h" +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #include "gt/intel_gpu_commands.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index c0a8ef368044..6d6082b5f31f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -4,6 +4,7 @@ * Copyright © 2016 Intel Corporation */ +#include "i915_file_private.h" #include "mock_context.h" #include "selftests/mock_drm.h" #include "selftests/mock_gtt.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index 2855d11c7a51..b2a5882b8f81 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -61,7 +61,7 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf) kfree(mock); } -static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); void *vaddr; @@ -69,12 +69,12 @@ static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) vaddr = vm_map_ram(mock->pages, mock->npages, 0); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } -static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) +static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct mock_dmabuf *mock = to_mock(dma_buf); diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 61383830505e..1c82caf525c3 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -5,7 +5,9 @@ #include "gen2_engine_cs.h" #include "i915_drv.h" +#include "i915_reg.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_irq.h" diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c index b388ceeeb1c9..5e65550b4dfb 100644 --- a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c @@ -5,6 +5,7 @@ #include "gen6_engine_cs.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_irq.h" diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index d657ffd6c86a..871fe7bda0e0 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -5,10 +5,14 @@ #include <linux/log2.h> +#include "gem/i915_gem_internal.h" + #include "gen6_ppgtt.h" #include "i915_scatterlist.h" #include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_gt_regs.h" +#include "intel_engine_regs.h" #include "intel_gt.h" /* Write pde (index) from the page directory @pd to the page table @pt */ diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index 21f08e53889c..317efb145787 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -6,6 +6,7 @@ #include "gen7_renderclear.h" #include 
"i915_drv.h" #include "intel_gpu_commands.h" +#include "intel_gt_regs.h" #define GT3_INLINE_DATA_DELAYS 0x1E00 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS)) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index e320610dd0b8..1f8cf4f790b2 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -5,8 +5,9 @@ #include "gen8_engine_cs.h" #include "i915_drv.h" -#include "intel_lrc.h" #include "intel_gpu_commands.h" +#include "intel_gt_regs.h" +#include "intel_lrc.h" #include "intel_ring.h" int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c index e86d8255feec..ece16c2b5b8e 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c @@ -9,6 +9,7 @@ #include "intel_engine_pm.h" #include "intel_gpu_commands.h" #include "intel_lrc.h" +#include "intel_lrc_reg.h" #include "intel_ring.h" #include "intel_sseu.h" diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 08559ace0ada..be4b1e65442f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -11,7 +11,6 @@ #include <linux/seqlock.h> #include "i915_pmu.h" -#include "i915_reg.h" #include "i915_request.h" #include "i915_selftest.h" #include "intel_engine_types.h" @@ -183,6 +182,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_HWS_CSB_BUF0_INDEX 0x10 #define I915_HWS_CSB_WRITE_INDEX 0x1f #define ICL_HWS_CSB_WRITE_INDEX 0x2f +#define INTEL_HWS_CSB_WRITE_INDEX(__i915) \ + (GRAPHICS_VER(__i915) >= 11 ? ICL_HWS_CSB_WRITE_INDEX : I915_HWS_CSB_WRITE_INDEX) void intel_engine_stop(struct intel_engine_cs *engine); void intel_engine_cleanup(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index d1daa4cc2895..e53008b4dd05 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -6,13 +6,16 @@ #include <drm/drm_print.h> #include "gem/i915_gem_context.h" +#include "gem/i915_gem_internal.h" +#include "gt/intel_gt_regs.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" - #include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_engine_user.h" #include "intel_execlists_submission.h" #include "intel_gt.h" @@ -1227,17 +1230,6 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); } -const char *i915_cache_level_str(struct drm_i915_private *i915, int type) -{ - switch (type) { - case I915_CACHE_NONE: return " uncached"; - case I915_CACHE_LLC: return HAS_LLC(i915) ? 
" LLC" : " snooped"; - case I915_CACHE_L3_LLC: return " L3+LLC"; - case I915_CACHE_WT: return " WT"; - default: return ""; - } -} - static u32 read_subslice_reg(const struct intel_engine_cs *engine, int slice, int subslice, i915_reg_t reg) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h new file mode 100644 index 000000000000..0bf8b45c9319 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_ENGINE_REGS__ +#define __INTEL_ENGINE_REGS__ + +#include "i915_reg_defs.h" + +#define RING_TAIL(base) _MMIO((base) + 0x30) +#define TAIL_ADDR 0x001FFFF8 +#define RING_HEAD(base) _MMIO((base) + 0x34) +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START(base) _MMIO((base) + 0x38) +#define RING_CTL(base) _MMIO((base) + 0x3c) +#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ +#define RING_NR_PAGES 0x001FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 +#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ +#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ +#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ +#define RING_SYNC_0(base) _MMIO((base) + 0x40) +#define RING_SYNC_1(base) _MMIO((base) + 0x44) +#define RING_SYNC_2(base) _MMIO((base) + 0x48) +#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) +#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) +#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE)) +#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) +#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) +#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE)) +#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) +#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) +#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE)) +#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE)) +#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) +#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) +#define RING_PSMI_CTL(base) _MMIO((base) + 0x50) +#define GEN8_RC_SEMA_IDLE_MSG_DISABLE REG_BIT(12) +#define GEN8_FF_DOP_CLOCK_GATE_DISABLE REG_BIT(10) +#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) +#define GEN6_BSD_GO_INDICATOR REG_BIT(4) +#define GEN6_BSD_SLEEP_INDICATOR REG_BIT(3) +#define GEN6_BSD_SLEEP_FLUSH_DISABLE REG_BIT(2) +#define GEN6_PSMI_SLEEP_MSG_DISABLE REG_BIT(0) +#define RING_MAX_IDLE(base) _MMIO((base) + 0x54) +#define PWRCTX_MAXCNT(base) _MMIO((base) + 0x54) +#define IDLE_TIME_MASK 0xFFFFF +#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c) +#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */ +#define RING_IPEIR(base) _MMIO((base) + 0x64) +#define RING_IPEHR(base) _MMIO((base) + 0x68) +#define RING_INSTDONE(base) _MMIO((base) + 0x6c) +#define RING_INSTPS(base) _MMIO((base) + 0x70) +#define RING_DMA_FADD(base) _MMIO((base) + 0x78) +#define RING_ACTHD(base) _MMIO((base) + 0x74) +#define RING_HWS_PGA(base) _MMIO((base) + 0x80) +#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84) +#define IPEIR(base) _MMIO((base) + 0x88) +#define IPEHR(base) _MMIO((base) + 0x8c) +#define RING_ID(base) _MMIO((base) + 0x8c) +#define RING_NOPID(base) _MMIO((base) + 0x94) +#define RING_HWSTAM(base) _MMIO((base) + 0x98) +#define RING_MI_MODE(base) 
_MMIO((base) + 0x9c) +#define ASYNC_FLIP_PERF_DISABLE REG_BIT(14) +#define MI_FLUSH_ENABLE REG_BIT(12) +#define TGL_NESTED_BB_EN REG_BIT(12) +#define MODE_IDLE REG_BIT(9) +#define STOP_RING REG_BIT(8) +#define VS_TIMER_DISPATCH REG_BIT(6) +#define RING_IMR(base) _MMIO((base) + 0xa8) +#define RING_EIR(base) _MMIO((base) + 0xb0) +#define RING_EMR(base) _MMIO((base) + 0xb4) +#define RING_ESR(base) _MMIO((base) + 0xb8) +#define RING_INSTPM(base) _MMIO((base) + 0xc0) +#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4) +#define ACTHD(base) _MMIO((base) + 0xc8) +#define GEN8_R_PWR_CLK_STATE(base) _MMIO((base) + 0xc8) +#define GEN8_RPCS_ENABLE (1 << 31) +#define GEN8_RPCS_S_CNT_ENABLE (1 << 18) +#define GEN8_RPCS_S_CNT_SHIFT 15 +#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT) +#define GEN11_RPCS_S_CNT_SHIFT 12 +#define GEN11_RPCS_S_CNT_MASK (0x3f << GEN11_RPCS_S_CNT_SHIFT) +#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11) +#define GEN8_RPCS_SS_CNT_SHIFT 8 +#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT) +#define GEN8_RPCS_EU_MAX_SHIFT 4 +#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT) +#define GEN8_RPCS_EU_MIN_SHIFT 0 +#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT) + +#define RING_RESET_CTL(base) _MMIO((base) + 0xd0) +#define RESET_CTL_CAT_ERROR REG_BIT(2) +#define RESET_CTL_READY_TO_RESET REG_BIT(1) +#define RESET_CTL_REQUEST_RESET REG_BIT(0) +#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0) +#define RING_BBSTATE(base) _MMIO((base) + 0x110) +#define RING_BB_PPGTT (1 << 5) +#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */ +#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ +#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ +#define RING_BBADDR(base) _MMIO((base) + 0x140) +#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ +#define CCID(base) _MMIO((base) + 0x180) +#define CCID_EN BIT(0) +#define CCID_EXTENDED_STATE_RESTORE BIT(2) +#define CCID_EXTENDED_STATE_SAVE BIT(3) +#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */ +#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */ +#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */ +#define ECOSKPD(base) _MMIO((base) + 0x1d0) +#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4) +#define ECO_GATING_CX_ONLY REG_BIT(3) +#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3) +#define ECO_FLIP_DONE REG_BIT(0) +#define GEN6_BLITTER_LOCK_SHIFT 16 + +#define BLIT_CCTL(base) _MMIO((base) + 0x204) +#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8) +#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0) +#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \ + BLIT_CCTL_SRC_MOCS_MASK) +#define BLIT_CCTL_MOCS(dst, src) \ + (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \ + REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1)) + +/* + * CMD_CCTL read/write fields take a MOCS value and _not_ a table index. + * The lsb of each can be considered a separate enabling bit for encryption. + * 6:0 == default MOCS value for reads => 6:1 == table index for reads. + * 13:7 == default MOCS value for writes => 13:8 == table index for writes. + * 15:14 == Reserved => 31:30 are set to 0. 
+ */ +#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7) +#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0) +#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \ + CMD_CCTL_READ_OVERRIDE_MASK) +#define CMD_CCTL_MOCS_OVERRIDE(write, read) \ + (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \ + REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1)) + +#define MI_PREDICATE_RESULT_2(base) _MMIO((base) + 0x3bc) +#define LOWER_SLICE_ENABLED (1 << 0) +#define LOWER_SLICE_DISABLED (0 << 0) +#define MI_PREDICATE_SRC0(base) _MMIO((base) + 0x400) +#define MI_PREDICATE_SRC0_UDW(base) _MMIO((base) + 0x400 + 4) +#define MI_PREDICATE_SRC1(base) _MMIO((base) + 0x408) +#define MI_PREDICATE_SRC1_UDW(base) _MMIO((base) + 0x408 + 4) +#define MI_PREDICATE_DATA(base) _MMIO((base) + 0x410) +#define MI_PREDICATE_RESULT(base) _MMIO((base) + 0x418) +#define MI_PREDICATE_RESULT_1(base) _MMIO((base) + 0x41c) + +#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220) +#define PP_DIR_DCLV_2G 0xffffffff +#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) +#define RING_ELSP(base) _MMIO((base) + 0x230) +#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) +#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) +#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) +#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0) +#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1) +#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2) +#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3) +#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8) +#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c) +#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4) +#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8) +#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c) +#define GFX_RUN_LIST_ENABLE (1 << 15) +#define GFX_INTERRUPT_STEERING (1 << 14) +#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13) +#define GFX_SURFACE_FAULT_ENABLE (1 << 12) +#define GFX_REPLAY_MODE (1 << 11) +#define GFX_PSMI_GRANULARITY (1 << 10) +#define GFX_PPGTT_ENABLE (1 << 9) +#define GEN8_GFX_PPGTT_48B (1 << 7) +#define GFX_FORWARD_VBLANK_MASK (3 << 5) +#define GFX_FORWARD_VBLANK_NEVER (0 << 5) +#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5) +#define GFX_FORWARD_VBLANK_COND (2 << 5) +#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3) +#define RING_TIMESTAMP(base) _MMIO((base) + 0x358) +#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4) +#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) +#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */ +#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) +#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2) +#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ +#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28) +#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */ +#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0) +#define RING_FORCE_TO_NONPRIV_MASK_VALID \ + (RING_FORCE_TO_NONPRIV_RANGE_MASK | RING_FORCE_TO_NONPRIV_ACCESS_MASK) +#define RING_MAX_NONPRIV_SLOTS 12 + +#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) +#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) 
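/*
 * Illustrative sketch only, not part of this patch: how the CMD_CCTL
 * override encoding described in the comment above could be built and
 * written for an engine. The helper name and index values are
 * hypothetical; CMD_CCTL_MOCS_OVERRIDE() shifts each MOCS table index
 * left by one so that bit 0, the per-field encryption enable, stays
 * clear.
 */
static void __maybe_unused set_cmd_cctl_mocs(struct intel_engine_cs *engine,
					     u32 wr_idx, u32 rd_idx)
{
	u32 cctl = CMD_CCTL_MOCS_OVERRIDE(wr_idx, rd_idx);

	/* Program the engine's default command-stream MOCS override. */
	intel_uncore_write(engine->uncore, RING_CMD_CCTL(engine->mmio_base),
			   cctl);
}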
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) +#define EL_CTRL_LOAD REG_BIT(0) + +/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ +#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) +#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) + +#define GEN11_VCS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x88c) +#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0) +#define GEN11_VCS_SFC_LOCK_STATUS(base) _MMIO((base) + 0x890) +#define GEN11_VCS_SFC_USAGE_BIT (1 << 0) +#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1) + +#define GEN11_VECS_SFC_FORCED_LOCK(base) _MMIO((base) + 0x201c) +#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0) +#define GEN11_VECS_SFC_LOCK_ACK(base) _MMIO((base) + 0x2018) +#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0) +#define GEN11_VECS_SFC_USAGE(base) _MMIO((base) + 0x2014) +#define GEN11_VECS_SFC_USAGE_BIT (1 << 0) + +#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080) + +#define GEN12_HCP_SFC_LOCK_STATUS(base) _MMIO((base) + 0x2914) +#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1) +#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0) + +#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10) +#define IECPUNIT_CLKGATE_DIS REG_BIT(22) + +#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18) +#define ALNUNIT_CLKGATE_DIS REG_BIT(13) + + +#endif /* __INTEL_ENGINE_REGS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index be56d0b41892..961d795220a3 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -116,11 +116,13 @@ #include "intel_context.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_engine_stats.h" #include "intel_execlists_submission.h" #include "intel_gt.h" #include "intel_gt_irq.h" #include "intel_gt_pm.h" +#include "intel_gt_regs.h" #include "intel_gt_requests.h" #include "intel_lrc.h" #include "intel_lrc_reg.h" @@ -3501,7 +3503,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; execlists->csb_write = - &engine->status_page.addr[intel_hws_csb_write_index(i915)]; + &engine->status_page.addr[INTEL_HWS_CSB_WRITE_INDEX(i915)]; if (GRAPHICS_VER(i915) < 11) execlists->csb_size = GEN8_CSB_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 536b0995b595..8850d4e0f9cc 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -15,6 +15,7 @@ #include "gem/i915_gem_lmem.h" #include "intel_gt.h" +#include "intel_gt_regs.h" #include "i915_drv.h" #include "i915_scatterlist.h" #include "i915_vgpu.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index c52d255e8ef3..76880fb8fc19 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -4,9 +4,12 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "i915_scatterlist.h" #include "i915_pvinfo.h" #include "i915_vgpu.h" +#include "intel_gt_regs.h" +#include "intel_mchbar_regs.h" /** * DOC: fence register handling diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 3a355b50082d..e8403fa53909 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -6,15 +6,19 @@ #include <drm/drm_managed.h> #include <drm/intel-gtt.h> 
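/*
 * Illustrative sketch only, not part of this patch: the RING_*
 * definitions gathered in intel_engine_regs.h above are parameterized
 * by the engine's MMIO base, so they are normally accessed through the
 * existing ENGINE_READ()/ENGINE_WRITE() helpers. The function name
 * below is hypothetical.
 */
static u32 __maybe_unused ring_head_offset(struct intel_engine_cs *engine)
{
	/* Expands to an uncore read of RING_HEAD(engine->mmio_base). */
	return ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR;
}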
-#include "intel_gt_debugfs.h" - +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" +#include "pxp/intel_pxp.h" + #include "i915_drv.h" #include "intel_context.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_buffer_pool.h" #include "intel_gt_clock_utils.h" +#include "intel_gt_debugfs.h" #include "intel_gt_pm.h" +#include "intel_gt_regs.h" #include "intel_gt_requests.h" #include "intel_migrate.h" #include "intel_mocs.h" @@ -24,7 +28,6 @@ #include "intel_rps.h" #include "intel_uncore.h" #include "shmem_utils.h" -#include "pxp/intel_pxp.h" void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { @@ -210,7 +213,7 @@ int intel_gt_init_hw(struct intel_gt *gt) if (IS_HASWELL(i915)) intel_uncore_write(uncore, - MI_PREDICATE_RESULT_2, + HSW_MI_PREDICATE_RESULT_2, IS_HSW_GT3(i915) ? LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index 9db3dcbd917f..cadfd85785b1 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -3,6 +3,7 @@ * Copyright © 2014-2018 Intel Corporation */ +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_object.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index 3513d6f90747..0db822c3b7e5 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -4,8 +4,10 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" +#include "intel_gt_regs.h" static u32 read_reference_ts_freq(struct intel_uncore *uncore) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 699a74582d32..983264e10e0a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -10,7 +10,7 @@ #include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_irq.h" -#include "intel_lrc_reg.h" +#include "intel_gt_regs.h" #include "intel_uncore.h" #include "intel_rps.h" #include "pxp/intel_pxp_irq.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 404dfa7673c6..37765919fe32 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -7,12 +7,15 @@ #include <linux/seq_file.h> #include "i915_drv.h" +#include "i915_reg.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" #include "intel_gt_debugfs.h" #include "intel_gt_pm.h" #include "intel_gt_pm_debugfs.h" +#include "intel_gt_regs.h" #include "intel_llc.h" +#include "intel_mchbar_regs.h" #include "intel_pcode.h" #include "intel_rc6.h" #include "intel_rps.h" @@ -134,8 +137,7 @@ static int gen6_drpc(struct seq_file *m) } if (GRAPHICS_VER(i915) <= 7) - sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, - &rc6vids, NULL); + snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); seq_printf(m, "RC1e Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); @@ -557,9 +559,8 @@ static int llc_show(struct seq_file *m, void *data) wakeref = intel_runtime_pm_get(gt->uncore->rpm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { ia_freq = gpu_freq; - sandybridge_pcode_read(i915, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq, NULL); + snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + &ia_freq, NULL); 
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", intel_gpu_freq(rps, (gpu_freq * diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c index fe51f894b073..11060f5a4c89 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c @@ -8,6 +8,7 @@ #include "intel_gt.h" #include "intel_gt_irq.h" #include "intel_gt_pm_irq.h" +#include "intel_gt_regs.h" static void write_pm_imr(struct intel_gt *gt) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h new file mode 100644 index 000000000000..18d158d77aba --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -0,0 +1,1526 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_GT_REGS__ +#define __INTEL_GT_REGS__ + +#include "i915_reg_defs.h" + +/* RPM unit config (Gen8+) */ +#define RPM_CONFIG0 _MMIO(0xd00) +#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3 +#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT) +#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0 +#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT) +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3 +#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1 +#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT) + +#define RPM_CONFIG1 _MMIO(0xd04) +#define GEN10_GT_NOA_ENABLE (1 << 9) + +/* RCP unit config (Gen8+) */ +#define RCP_CONFIG _MMIO(0xd08) + +#define RC6_LOCATION _MMIO(0xd40) +#define RC6_CTX_IN_DRAM (1 << 0) +#define RC6_CTX_BASE _MMIO(0xd48) +#define RC6_CTX_BASE_MASK 0xFFFFFFF0 + +#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0xd50 + (n) * 4) +#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0xd70 + (n) * 4) +#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0xd84) +#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0xd88) + +#define MCFG_MCR_SELECTOR _MMIO(0xfd0) +#define SF_MCR_SELECTOR _MMIO(0xfd8) +#define GEN8_MCR_SELECTOR _MMIO(0xfdc) +#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26) +#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3) +#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24) +#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3) +#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27) +#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf) +#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24) +#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7) + +#define IPEIR_I965 _MMIO(0x2064) +#define IPEHR_I965 _MMIO(0x2068) + +/* + * On GEN4, only the render ring INSTDONE exists and has a different + * layout than the GEN7+ version. + * The GEN2 counterpart of this register is GEN2_INSTDONE. 
+ */ +#define INSTPS _MMIO(0x2070) /* 965+ only */ +#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */ +#define ACTHD_I965 _MMIO(0x2074) +#define HWS_PGA _MMIO(0x2080) +#define HWS_ADDRESS_MASK 0xfffff000 +#define HWS_START_ADDRESS_SHIFT 4 + +#define _3D_CHICKEN _MMIO(0x2084) +#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) + +#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */ +#define PWRCTX_EN (1 << 0) + +#define FF_SLICE_CHICKEN _MMIO(0x2088) +#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1) + +/* GM45+ chicken bits -- debug workaround bits that may be required + * for various sorts of correct behavior. The top 16 bits of each are + * the enables for writing to the corresponding low bit. + */ +#define _3D_CHICKEN2 _MMIO(0x208c) +/* Disables pipelining of read flushes past the SF-WIZ interface. + * Required on all Ironlake steppings according to the B-Spec, but the + * particular danger of not doing so is not specified. + */ +#define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) + +#define _3D_CHICKEN3 _MMIO(0x2090) +#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12) +#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) +#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) +#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) +#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */ +#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ + +#define GEN2_INSTDONE _MMIO(0x2090) +#define NOPID _MMIO(0x2094) +#define HWSTAM _MMIO(0x2098) + +#define WAIT_FOR_RC6_EXIT _MMIO(0x20cc) +/* HSW only */ +#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2 +#define HSW_SELECTIVE_READ_ADDRESSING_MASK (0x3 << HSW_SLECTIVE_READ_ADDRESSING_SHIFT) +#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4 +#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT) +/* HSW+ */ +#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0) +#define HSW_RCS_CONTEXT_ENABLE (1 << 7) +#define HSW_RCS_INHIBIT (1 << 8) +/* Gen8 */ +#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4 +#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT) +#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4 +#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT) +#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6) +#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9 +#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT) +#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11 +#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT) +#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13) + +#define GEN6_GT_MODE _MMIO(0x20d0) +#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7)) +#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) +#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) +#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) +#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) +#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) + +/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ +#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20d4) +#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) +#define GEN11_ENABLE_32_PLANE_MODE (1 << 7) + +#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) +#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14) + +#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) +#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8) +#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10) + +#define 
GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) +#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1) +#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20ec) +#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0) + +/* WaClearTdlStateAckDirtyBits */ +#define GEN8_STATE_ACK _MMIO(0x20f0) +#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20f8) +#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100) +#define GEN9_STATE_ACK_TDL0 (1 << 12) +#define GEN9_STATE_ACK_TDL1 (1 << 13) +#define GEN9_STATE_ACK_TDL2 (1 << 14) +#define GEN9_STATE_ACK_TDL3 (1 << 15) +#define GEN9_SUBSLICE_TDL_ACK_BITS \ + (GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \ + GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0) + +#define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */ +#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8) +#define CM0_IZ_OPT_DISABLE (1 << 6) +#define CM0_ZR_OPT_DISABLE (1 << 5) +#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5) +#define CM0_DEPTH_EVICT_DISABLE (1 << 4) +#define CM0_COLOR_EVICT_DISABLE (1 << 3) +#define CM0_DEPTH_WRITE_DISABLE (1 << 1) +#define CM0_RC_OP_FLUSH_DISABLE (1 << 0) + +#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */ + +/* + * Logical Context regs + */ +/* + * Notes on SNB/IVB/VLV context size: + * - Power context is saved elsewhere (LLC or stolen) + * - Ring/execlist context is saved on SNB, not on IVB + * - Extended context size already includes render context size + * - We always need to follow the extended context size. + * SNB BSpec has comments indicating that we should use the + * render context size instead if execlists are disabled, but + * based on empirical testing that's just nonsense. + * - Pipelined/VF state is saved on SNB/IVB respectively + * - GT1 size just indicates how much of render context + * doesn't need saving on GT1 + */ +#define CXT_SIZE _MMIO(0x21a0) +#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f) +#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f) +#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f) +#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f) +#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f) +#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ + GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ + GEN6_CXT_PIPELINE_SIZE(cxt_reg)) +#define GEN7_CXT_SIZE _MMIO(0x21a8) +#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f) +#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7) +#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f) +#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f) +#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7) +#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f) +#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ + GEN7_CXT_VFSTATE_SIZE(ctx_reg)) + +#define HSW_MI_PREDICATE_RESULT_2 _MMIO(0x2214) + +#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) +#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11) + +#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) +#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) + +#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) +#define GEN6_RCS_PWR_FSM _MMIO(0x22ac) + +#define HS_INVOCATION_COUNT _MMIO(0x2300) +#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4) +#define DS_INVOCATION_COUNT _MMIO(0x2308) +#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4) +#define IA_VERTICES_COUNT _MMIO(0x2310) +#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4) +#define IA_PRIMITIVES_COUNT _MMIO(0x2318) +#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4) +#define VS_INVOCATION_COUNT _MMIO(0x2320) +#define 
VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4) +#define GS_INVOCATION_COUNT _MMIO(0x2328) +#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4) +#define GS_PRIMITIVES_COUNT _MMIO(0x2330) +#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4) +#define CL_INVOCATION_COUNT _MMIO(0x2338) +#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4) +#define CL_PRIMITIVES_COUNT _MMIO(0x2340) +#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4) +#define PS_INVOCATION_COUNT _MMIO(0x2348) +#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4) +#define PS_DEPTH_COUNT _MMIO(0x2350) +#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4) +#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420) +#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430) +#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434) +#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438) +#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243c) +#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440) +#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500) +#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504) +#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508) + +#define GFX_MODE _MMIO(0x2520) + +#define GEN8_CS_CHICKEN1 _MMIO(0x2580) +#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0) +#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) +#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0) +#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1) +#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0) +#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1) + +#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */ + +#define RENDER_HWS_PGA_GEN7 _MMIO(0x4080) + +#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080) +#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF +#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7) + +#define GAM_ECOCHK _MMIO(0x4090) +#define BDW_DISABLE_HDC_INVALIDATION (1 << 25) +#define ECOCHK_SNB_BIT (1 << 10) +#define ECOCHK_DIS_TLB (1 << 8) +#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6) +#define ECOCHK_PPGTT_CACHE64B (0x3 << 3) +#define ECOCHK_PPGTT_CACHE4B (0x0 << 3) +#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4) +#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3) +#define ECOCHK_PPGTT_UC_HSW (0x1 << 3) +#define ECOCHK_PPGTT_WT_HSW (0x2 << 3) +#define ECOCHK_PPGTT_WB_HSW (0x3 << 3) + +#define GEN8_RING_FAULT_REG _MMIO(0x4094) +#define _RING_FAULT_REG_RCS 0x4094 +#define _RING_FAULT_REG_VCS 0x4194 +#define _RING_FAULT_REG_BCS 0x4294 +#define _RING_FAULT_REG_VECS 0x4394 +#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \ + _RING_FAULT_REG_RCS, \ + _RING_FAULT_REG_VCS, \ + _RING_FAULT_REG_VECS, \ + _RING_FAULT_REG_BCS)) + +#define ERROR_GEN6 _MMIO(0x40a0) + +#define DONE_REG _MMIO(0x40b0) +#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) +#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) +#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) +#define BSD_HWS_PGA_GEN7 _MMIO(0x4180) +#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208) +#define GEN12_VD0_AUX_NV _MMIO(0x4218) +#define GEN12_VD1_AUX_NV _MMIO(0x4228) + +#define GEN8_RTCR _MMIO(0x4260) +#define GEN8_M1TCR _MMIO(0x4264) +#define GEN8_M2TCR _MMIO(0x4268) +#define GEN8_BTCR _MMIO(0x426c) +#define GEN8_VTCR _MMIO(0x4270) + +#define GEN12_VD2_AUX_NV _MMIO(0x4298) +#define GEN12_VD3_AUX_NV _MMIO(0x42a8) +#define GEN12_VE0_AUX_NV _MMIO(0x4238) + +#define BLT_HWS_PGA_GEN7 _MMIO(0x4280) + +#define GEN12_VE1_AUX_NV _MMIO(0x42b8) +#define AUX_INV REG_BIT(0) +#define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380) + +#define GEN12_AUX_ERR_DBG _MMIO(0x43f4) + +#define GEN7_TLB_RD_ADDR _MMIO(0x4700) + +#define 
GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4) + +#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910) +#define XEHPSDV_CCS_BASE_SHIFT 8 + +#define GAMTARBMODE _MMIO(0x4a08) +#define ARB_MODE_BWGTLB_DISABLE (1 << 9) +#define ARB_MODE_SWIZZLE_BDW (1 << 1) + +#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0) +#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18) + +#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) +#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31) +#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28) +#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24) + +#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10) +#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14) + +#define GEN11_GACB_PERF_CTRL _MMIO(0x4b80) +#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0) +#define GEN11_HASH_CTRL_BIT0 (1 << 0) +#define GEN11_HASH_CTRL_BIT4 (1 << 12) + +/* gamt regs */ +#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) +#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ +#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */ +#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ +#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ + +#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */ +#define MMCD_PCLA (1 << 31) +#define MMCD_HOTSPOT_EN (1 << 27) + +/* There are the 4 64-bit counter registers, one for each stream output */ +#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8) +#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4) + +#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8) +#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4) + +#define GEN9_WM_CHICKEN3 _MMIO(0x5588) +#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9) + +#define VFLSKPD _MMIO(0x62a8) +#define DIS_OVER_FETCH_CACHE REG_BIT(1) +#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0) + +#define FF_MODE2 _MMIO(0x6604) +#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24) +#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224) +#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16) +#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4) + +#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c) + +#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ +#define RC_OP_FLUSH_ENABLE (1 << 0) +#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2) +#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */ +#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6) +#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6) +#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1) + +#define GEN7_GT_MODE _MMIO(0x7008) +#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) +#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) + +/* GEN7 chicken */ +#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) +#define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10) +#define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14) + +#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) +#define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13) +#define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12) +#define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8) +#define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0) + +#define HIZ_CHICKEN _MMIO(0x7018) +#define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15) +#define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14) +#define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3) + +#define GEN8_L3CNTLREG _MMIO(0x7034) +#define GEN8_ERRDETBCTRL (1 << 9) + +#define GEN7_SC_INSTDONE _MMIO(0x7100) +#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104) +#define 
GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108) + +/* GEN8 chicken */ +#define HDC_CHICKEN0 _MMIO(0x7300) +#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15) +#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14) +#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11) +#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5) +#define HDC_FORCE_NON_COHERENT (1 << 4) +#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10) + +#define GEN8_HDC_CHICKEN1 _MMIO(0x7304) + +#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) +#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) +#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12) +#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) +#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) + +/* GEN9 chicken */ +#define SLICE_ECO_CHICKEN0 _MMIO(0x7308) +#define PIXEL_MASK_CAMMING_DISABLE (1 << 14) + +#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) +#define DISABLE_PIXEL_MASK_CAMMING (1 << 14) + +#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) +#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) + +#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) +#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14) + +#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4) +#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \ + ((slice) % 3) * 0x4) +#define GEN9_PGCTL_SLICE_ACK (1 << 0) +#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2)) +#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F) + +#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8) +#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \ + ((slice) % 3) * 0x8) +#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8) +#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \ + ((slice) % 3) * 0x8) +#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0) +#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2) +#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4) +#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6) +#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8) +#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10) +#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12) +#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) + +#define GEN8_RC6_CTX_INFO _MMIO(0x8504) + +#define GEN12_SQCM _MMIO(0x8724) +#define EN_32B_ACCESS REG_BIT(30) + +#define HSW_IDICR _MMIO(0x9008) +#define IDIHASHMSK(x) (((x) & 0x3f) << 16) + +#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */ +#define GEN6_MBC_SNPCR_SHIFT 21 +#define GEN6_MBC_SNPCR_MASK (3 << 21) +#define GEN6_MBC_SNPCR_MAX (0 << 21) +#define GEN6_MBC_SNPCR_MED (1 << 21) +#define GEN6_MBC_SNPCR_LOW (2 << 21) +#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */ + +#define VLV_G3DCTL _MMIO(0x9024) +#define VLV_GSCKGCTL _MMIO(0x9028) + +/* WaCatErrorRejectionIssue */ +#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) +#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11) + +#define FBC_LLC_READ_CTRL _MMIO(0x9044) +#define FBC_LLC_FULLY_OPEN REG_BIT(30) + +#define GEN6_MBCTL _MMIO(0x907c) +#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) +#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) +#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) +#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) +#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) + +/* Fuse readout registers for GT */ +#define GEN10_MIRROR_FUSE3 _MMIO(0x9118) +#define GEN10_L3BANK_PAIR_COUNT 4 +#define GEN10_L3BANK_MASK 0x0F +/* on Xe_HP the same fuses indicates mslices instead of L3 banks */ +#define GEN12_MAX_MSLICES 4 +#define GEN12_MEML3_EN_MASK 
0x0F + +#define HSW_PAVP_FUSE1 _MMIO(0x911c) +#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24) +#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16) +#define HSW_F1_EU_DIS_10EUS 0 +#define HSW_F1_EU_DIS_8EUS 1 +#define HSW_F1_EU_DIS_6EUS 2 + +#define GEN8_FUSE2 _MMIO(0x9120) +#define GEN8_F2_SS_DIS_SHIFT 21 +#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT) +#define GEN8_F2_S_ENA_SHIFT 25 +#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT) +#define GEN9_F2_SS_DIS_SHIFT 20 +#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) +#define GEN10_F2_S_ENA_SHIFT 22 +#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT) +#define GEN10_F2_SS_DIS_SHIFT 18 +#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT) + +#define GEN8_EU_DISABLE0 _MMIO(0x9134) +#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4) +#define GEN11_EU_DISABLE _MMIO(0x9134) +#define GEN8_EU_DIS0_S0_MASK 0xffffff +#define GEN8_EU_DIS0_S1_SHIFT 24 +#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT) +#define GEN11_EU_DIS_MASK 0xFF +#define XEHP_EU_ENABLE _MMIO(0x9134) +#define XEHP_EU_ENA_MASK 0xFF + +#define GEN8_EU_DISABLE1 _MMIO(0x9138) +#define GEN8_EU_DIS1_S1_MASK 0xffff +#define GEN8_EU_DIS1_S2_SHIFT 16 +#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT) + +#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138) +#define GEN11_GT_S_ENA_MASK 0xFF + +#define GEN8_EU_DISABLE2 _MMIO(0x913c) +#define GEN8_EU_DIS2_S2_MASK 0xff + +#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913c) +#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913c) + +#define GEN10_EU_DISABLE3 _MMIO(0x9140) +#define GEN10_EU_DIS_SS_MASK 0xff +#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) +#define GEN11_GT_VDBOX_DISABLE_MASK 0xff +#define GEN11_GT_VEBOX_DISABLE_SHIFT 16 +#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT) + +#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144) + +#define GEN6_UCGCTL1 _MMIO(0x9400) +#define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22) +#define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) +#define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) +#define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) + +#define GEN6_UCGCTL2 _MMIO(0x9404) +#define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31) +#define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) +#define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) +#define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) +#define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) +#define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) + +#define GEN6_UCGCTL3 _MMIO(0x9408) +#define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20) + +#define GEN7_UCGCTL4 _MMIO(0x940c) +#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25) +#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14) + +#define GEN6_RCGCTL1 _MMIO(0x9410) +#define GEN6_RCGCTL2 _MMIO(0x9414) + +#define GEN6_GDRST _MMIO(0x941c) +#define GEN6_GRDOM_FULL (1 << 0) +#define GEN6_GRDOM_RENDER (1 << 1) +#define GEN6_GRDOM_MEDIA (1 << 2) +#define GEN6_GRDOM_BLT (1 << 3) +#define GEN6_GRDOM_VECS (1 << 4) +#define GEN9_GRDOM_GUC (1 << 5) +#define GEN8_GRDOM_MEDIA2 (1 << 7) +/* GEN11 changed all bit defs except for FULL & RENDER */ +#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL +#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER +#define GEN11_GRDOM_BLT (1 << 2) +#define GEN11_GRDOM_GUC (1 << 3) +#define GEN11_GRDOM_MEDIA (1 << 5) +#define GEN11_GRDOM_MEDIA2 (1 << 6) +#define GEN11_GRDOM_MEDIA3 (1 << 7) +#define GEN11_GRDOM_MEDIA4 (1 << 8) +#define GEN11_GRDOM_MEDIA5 (1 << 9) +#define GEN11_GRDOM_MEDIA6 (1 << 10) +#define GEN11_GRDOM_MEDIA7 (1 << 11) +#define 
GEN11_GRDOM_MEDIA8 (1 << 12) +#define GEN11_GRDOM_VECS (1 << 13) +#define GEN11_GRDOM_VECS2 (1 << 14) +#define GEN11_GRDOM_VECS3 (1 << 15) +#define GEN11_GRDOM_VECS4 (1 << 16) +#define GEN11_GRDOM_SFC0 (1 << 17) +#define GEN11_GRDOM_SFC1 (1 << 18) +#define GEN11_GRDOM_SFC2 (1 << 19) +#define GEN11_GRDOM_SFC3 (1 << 20) +#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1)) +#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance)) + +#define GEN6_RSTCTL _MMIO(0x9420) + +#define GEN7_MISCCPCTL _MMIO(0x9424) +#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0) +#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2) +#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4) +#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6) + +#define GEN8_UCGCTL6 _MMIO(0x9430) +#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24) +#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14) +#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28) + +#define UNSLCGCTL9430 _MMIO(0x9430) +#define MSQDUNIT_CLKGATE_DIS REG_BIT(3) + +#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) +#define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */ +#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */ +#define GAMEDIA_CLKGATE_DIS REG_BIT(11) +#define HSUNIT_CLKGATE_DIS REG_BIT(8) +#define VSUNIT_CLKGATE_DIS REG_BIT(3) + +#define UNSLCGCTL9440 _MMIO(0x9440) +#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28) +#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27) +#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26) +#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24) +#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23) +#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22) +#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21) +#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17) +#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16) +#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15) +#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14) +#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6) + +#define UNSLCGCTL9444 _MMIO(0x9444) +#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30) +#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29) +#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28) +#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27) +#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26) +#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25) +#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24) +#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23) +#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22) +#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21) +#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20) +#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19) +#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18) +#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17) +#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) +#define LTCDD_CLKGATE_DIS REG_BIT(10) + +#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) +#define SARBUNIT_CLKGATE_DIS (1 << 5) +#define RCCUNIT_CLKGATE_DIS (1 << 7) +#define MSCUNIT_CLKGATE_DIS (1 << 10) +#define NODEDSS_CLKGATE_DIS REG_BIT(12) +#define L3_CLKGATE_DIS REG_BIT(16) +#define L3_CR2X_CLKGATE_DIS REG_BIT(17) + +#define SCCGCTL94DC _MMIO(0x94dc) +#define CG3DDISURB REG_BIT(14) + +#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) +#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) +#define PSDUNIT_CLKGATE_DIS REG_BIT(5) + +#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524) +#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28) +#define GWUNIT_CLKGATE_DIS REG_BIT(16) + +#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528) +#define CPSSUNIT_CLKGATE_DIS REG_BIT(9) + +#define SSMCGCTL9530 _MMIO(0x9530) +#define RTFUNIT_CLKGATE_DIS REG_BIT(18) + +#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550) 
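/*
 * Illustrative sketch (editor's aside, not part of the patch): the GEN6_GDRST
 * domain bits above select which engines a soft reset targets, and the
 * (instance >> 1) in GEN11_VCS_SFC_RESET_BIT reflects that two video decode
 * engines share one SFC. A minimal example of composing such a reset mask,
 * assuming the conventional VCSn -> GEN11_GRDOM_MEDIA(n+1) mapping; the helper
 * name below is hypothetical and not the driver's actual code.
 */
static inline u32 gen11_vcs_reset_mask(unsigned int instance, bool with_sfc)
{
	/* VCS0..VCS7 occupy consecutive GDRST bits starting at GEN11_GRDOM_MEDIA */
	u32 mask = GEN11_GRDOM_MEDIA << instance;

	/* VCS0/1 share SFC0, VCS2/3 share SFC1, ...: hence instance >> 1 in the macro */
	if (with_sfc)
		mask |= GEN11_VCS_SFC_RESET_BIT(instance);

	return mask;
}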
+#define DFR_DISABLE (1 << 9) + +#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560) +#define CGPSF_CLKGATE_DIS (1 << 3) + +#define MICRO_BP0_0 _MMIO(0x9800) +#define MICRO_BP0_2 _MMIO(0x9804) +#define MICRO_BP0_1 _MMIO(0x9808) +#define MICRO_BP1_0 _MMIO(0x980c) +#define MICRO_BP1_2 _MMIO(0x9810) +#define MICRO_BP1_1 _MMIO(0x9814) +#define MICRO_BP2_0 _MMIO(0x9818) +#define MICRO_BP2_2 _MMIO(0x981c) +#define MICRO_BP2_1 _MMIO(0x9820) +#define MICRO_BP3_0 _MMIO(0x9824) +#define MICRO_BP3_2 _MMIO(0x9828) +#define MICRO_BP3_1 _MMIO(0x982c) +#define MICRO_BP_TRIGGER _MMIO(0x9830) +#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834) +#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838) +#define MICRO_BP_FIRED_ARMED _MMIO(0x983c) + +#define GEN6_GFXPAUSE _MMIO(0xa000) +#define GEN6_RPNSWREQ _MMIO(0xa008) +#define GEN6_TURBO_DISABLE (1 << 31) +#define GEN6_FREQUENCY(x) ((x) << 25) +#define HSW_FREQUENCY(x) ((x) << 24) +#define GEN9_FREQUENCY(x) ((x) << 23) +#define GEN6_OFFSET(x) ((x) << 19) +#define GEN6_AGGRESSIVE_TURBO (0 << 15) +#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23 +#define GEN9_IGNORE_SLICE_RATIO (0 << 0) + +#define GEN6_RC_VIDEO_FREQ _MMIO(0xa00c) +#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16) +#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17) +#define GEN6_RC_CTL_RC6_ENABLE (1 << 18) +#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20) +#define GEN6_RC_CTL_RC7_ENABLE (1 << 22) +#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24) +#define GEN7_RC_CTL_TO_MODE (1 << 28) +#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27) +#define GEN6_RC_CTL_HW_ENABLE (1 << 31) +#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xa010) +#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xa014) +#define GEN6_RPSTAT1 _MMIO(0xa01c) +#define GEN6_CAGF_SHIFT 8 +#define HSW_CAGF_SHIFT 7 +#define GEN9_CAGF_SHIFT 23 +#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) +#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) +#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) +#define GEN6_RP_CONTROL _MMIO(0xa024) +#define GEN6_RP_MEDIA_TURBO (1 << 11) +#define GEN6_RP_MEDIA_MODE_MASK (3 << 9) +#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9) +#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9) +#define GEN6_RP_MEDIA_HW_MODE (1 << 9) +#define GEN6_RP_MEDIA_SW_MODE (0 << 9) +#define GEN6_RP_MEDIA_IS_GFX (1 << 8) +#define GEN6_RP_ENABLE (1 << 7) +#define GEN6_RP_UP_IDLE_MIN (0x1 << 3) +#define GEN6_RP_UP_BUSY_AVG (0x2 << 3) +#define GEN6_RP_UP_BUSY_CONT (0x4 << 3) +#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0) +#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0) +#define GEN6_RPSWCTL_SHIFT 9 +#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT) +#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT) +#define GEN6_RP_UP_THRESHOLD _MMIO(0xa02c) +#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xa030) +#define GEN6_RP_CUR_UP_EI _MMIO(0xa050) +#define GEN6_RP_EI_MASK 0xffffff +#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK +#define GEN6_RP_CUR_UP _MMIO(0xa054) +#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK +#define GEN6_RP_PREV_UP _MMIO(0xa058) +#define GEN6_RP_CUR_DOWN_EI _MMIO(0xa05c) +#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK +#define GEN6_RP_CUR_DOWN _MMIO(0xa060) +#define GEN6_RP_PREV_DOWN _MMIO(0xa064) +#define GEN6_RP_UP_EI _MMIO(0xa068) +#define GEN6_RP_DOWN_EI _MMIO(0xa06c) +#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xa070) +#define GEN6_RPDEUHWTC _MMIO(0xa080) +#define GEN6_RPDEUC _MMIO(0xa084) +#define GEN6_RPDEUCSW _MMIO(0xa088) +#define GEN6_RC_CONTROL _MMIO(0xa090) +#define GEN6_RC_STATE _MMIO(0xa094) +#define RC_SW_TARGET_STATE_SHIFT 16 +#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT) +#define GEN6_RC1_WAKE_RATE_LIMIT 
_MMIO(0xa098) +#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xa09c) +#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xa0a0) +#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xa0a0) +#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xa0a8) +#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xa0ac) +#define GEN6_RC_SLEEP _MMIO(0xa0b0) +#define GEN6_RCUBMABDTMR _MMIO(0xa0b0) +#define GEN6_RC1e_THRESHOLD _MMIO(0xa0b4) +#define GEN6_RC6_THRESHOLD _MMIO(0xa0b8) +#define GEN6_RC6p_THRESHOLD _MMIO(0xa0bc) +#define VLV_RCEDATA _MMIO(0xa0bc) +#define GEN6_RC6pp_THRESHOLD _MMIO(0xa0c0) +#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xa0c4) +#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xa0c8) + +#define GEN6_PMINTRMSK _MMIO(0xa168) +#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31) +#define ARAT_EXPIRED_INTRMSK (1 << 9) + +#define GEN8_MISC_CTRL0 _MMIO(0xa180) + +#define ECOBUS _MMIO(0xa180) +#define FORCEWAKE_MT_ENABLE (1 << 5) + +#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */ +#define FORCEWAKE_GT_GEN9 _MMIO(0xa188) +#define FORCEWAKE _MMIO(0xa18c) + +#define VLV_SPAREG2H _MMIO(0xa194) + +#define GEN9_PG_ENABLE _MMIO(0xa210) +#define GEN9_RENDER_PG_ENABLE REG_BIT(0) +#define GEN9_MEDIA_PG_ENABLE REG_BIT(1) +#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2) +#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n)) +#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n)) + +#define GEN8_PUSHBUS_CONTROL _MMIO(0xa248) +#define GEN8_PUSHBUS_ENABLE _MMIO(0xa250) +#define GEN8_PUSHBUS_SHIFT _MMIO(0xa25c) + +/* GPM unit config (Gen9+) */ +#define CTC_MODE _MMIO(0xa26c) +#define CTC_SOURCE_PARAMETER_MASK 1 +#define CTC_SOURCE_CRYSTAL_CLOCK 0 +#define CTC_SOURCE_DIVIDE_LOGIC 1 +#define CTC_SHIFT_PARAMETER_SHIFT 1 +#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT) + +#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270) +#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278) + +#define VLV_PWRDWNUPCTL _MMIO(0xa294) + +#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xa2a0) +#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) +#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) + +#define MISC_STATUS0 _MMIO(0xa500) +#define MISC_STATUS1 _MMIO(0xa504) + +#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4) +#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4) + +#define CHV_POWER_SS0_SIG1 _MMIO(0xa720) +#define CHV_POWER_SS0_SIG2 _MMIO(0xa724) +#define CHV_POWER_SS1_SIG1 _MMIO(0xa728) +#define CHV_SS_PG_ENABLE (1 << 1) +#define CHV_EU08_PG_ENABLE (1 << 9) +#define CHV_EU19_PG_ENABLE (1 << 17) +#define CHV_EU210_PG_ENABLE (1 << 25) +#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c) +#define CHV_EU311_PG_ENABLE (1 << 1) + +#define GEN7_SARCHKMD _MMIO(0xb000) +#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31) +#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30) + +#define GEN8_GARBCNTL _MMIO(0xb004) +#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7) +#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22) +#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0) +#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0) + +#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008) +#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0) + +#define GEN7_L3SQCREG1 _MMIO(0xb010) +#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 + +#define GEN7_L3CNTLREG1 _MMIO(0xb01c) +#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C +#define GEN7_L3AGDIS (1 << 19) +#define GEN7_L3CNTLREG2 _MMIO(0xb020) + +/* MOCS (Memory Object Control State) registers */ +#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ +#define GEN9_LNCFCMOCS_REG_COUNT 32 + +#define GEN7_L3CNTLREG3 _MMIO(0xb024) + +#define 
GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xb030) +#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 + +#define GEN7_L3SQCREG4 _MMIO(0xb034) +#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27) + +#define HSW_SCRATCH1 _MMIO(0xb038) +#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27) + +#define GEN7_L3LOG(slice, i) _MMIO(0xb070 + (slice) * 0x200 + (i) * 4) +#define GEN7_L3LOG_SIZE 0x80 + +#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0) +#define PMFLUSHDONE_LNICRSDROP (1 << 20) +#define PMFLUSH_GAPL3UNBLOCK (1 << 21) +#define PMFLUSHDONE_LNEBLK (1 << 22) + +#define XEHP_L3NODEARBCFG _MMIO(0xb0b4) +#define XEHP_LNESPARE REG_BIT(19) + +#define GEN8_L3SQCREG1 _MMIO(0xb100) +/* + * Note that on CHV the following has an off-by-one error wrt. to BSpec. + * Using the formula in BSpec leads to a hang, while the formula here works + * fine and matches the formulas for all other platforms. A BSpec change + * request has been filed to clarify this. + */ +#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) +#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) +#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14)) + +#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xb114) +#define GEN11_I2M_WRITE_DISABLE (1 << 28) + +#define GEN8_L3SQCREG4 _MMIO(0xb118) +#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6) +#define GEN8_LQSC_RO_PERF_DIS (1 << 27) +#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21) +#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22) + +#define GEN9_SCRATCH1 _MMIO(0xb11c) +#define EVICTION_PERF_FIX_ENABLE REG_BIT(8) + +#define BDW_SCRATCH1 _MMIO(0xb11c) +#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) + +#define GEN11_SCRATCH2 _MMIO(0xb140) +#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19) + +#define GEN11_L3SQCREG5 _MMIO(0xb158) +#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) + +#define MLTICTXCTL _MMIO(0xb170) +#define TDONRENDER REG_BIT(2) + +#define XEHP_L3SCQREG7 _MMIO(0xb188) +#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3) + +#define L3SQCREG1_CCS0 _MMIO(0xb200) +#define FLUSHALLNONCOH REG_BIT(5) + +#define GEN11_GLBLINVL _MMIO(0xb404) +#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5) +#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5) + +#define GEN11_LSN_UNSLCVC _MMIO(0xb43c) +#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9) +#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) + +#define __GEN9_RCS0_MOCS0 0xc800 +#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4) +#define __GEN9_VCS0_MOCS0 0xc900 +#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4) +#define __GEN9_VCS1_MOCS0 0xca00 +#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4) +#define __GEN9_VECS0_MOCS0 0xcb00 +#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4) +#define __GEN9_BCS0_MOCS0 0xcc00 +#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4) + +#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8) +#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc) +#define FAULT_VA_HIGH_BITS (0xf << 0) +#define FAULT_GTT_SEL (1 << 4) + +#define GEN12_RING_FAULT_REG _MMIO(0xcec4) +#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7) +#define RING_FAULT_GTTSEL_MASK (1 << 11) +#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) +#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) +#define RING_FAULT_VALID (1 << 0) + +#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8) +#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc) +#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0) +#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4) + +#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28) +#define RENDER_MOD_CTRL _MMIO(0xcf2c) +#define 
COMP_MOD_CTRL _MMIO(0xcf30) +#define VDBX_MOD_CTRL _MMIO(0xcf34) +#define VEBX_MOD_CTRL _MMIO(0xcf38) +#define FORCE_MISS_FTLB REG_BIT(3) + +#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c) +#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12) +#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11) +#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7) + +#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54) +#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12) +#define GLOBAL_INVALIDATION_MODE REG_BIT(2) + +#define GEN12_GAM_DONE _MMIO(0xcf68) + +#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */ +#define GEN7_MAX_PS_THREAD_DEP (8 << 12) +#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10) +#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4) +#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3) + +#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160) +#define GEN7_ROW_INSTDONE _MMIO(0xe164) + +#define HALF_SLICE_CHICKEN2 _MMIO(0xe180) +#define GEN8_ST_PO_DISABLE (1 << 13) + +#define HALF_SLICE_CHICKEN3 _MMIO(0xe184) +#define HSW_SAMPLE_C_PERFORMANCE (1 << 9) +#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8) +#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5) +#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1) + +#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188) +#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5) +#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) + +#define GEN10_SAMPLER_MODE _MMIO(0xe18c) +#define ENABLE_SMALLPL REG_BIT(15) +#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5) + +#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) +#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8) +#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4) +#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2) + +#define GEN10_CACHE_MODE_SS _MMIO(0xe420) +#define ENABLE_PREFETCH_INTO_IC REG_BIT(3) +#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4) + +#define EU_PERF_CNTL0 _MMIO(0xe458) +#define EU_PERF_CNTL4 _MMIO(0xe45c) + +#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) +#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13) +#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11) +#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) +#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) +#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4) + +#define HSW_ROW_CHICKEN3 _MMIO(0xe49c) +#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) + +#define GEN8_ROW_CHICKEN _MMIO(0xe4f0) +#define FLOW_CONTROL_ENABLE REG_BIT(15) +#define UGM_BACKUP_MODE REG_BIT(13) +#define MDQ_ARBITRATION_MODE REG_BIT(12) +#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8) +#define STALL_DOP_GATING_DISABLE REG_BIT(5) +#define THROTTLE_12_5 REG_GENMASK(4, 2) +#define DISABLE_EARLY_EOT REG_BIT(1) + +#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) +#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15) +#define GEN12_DISABLE_EARLY_READ REG_BIT(14) +#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12) +#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8) + +#define RT_CTRL _MMIO(0xe530) +#define DIS_NULL_QUERY REG_BIT(10) + +#define EU_PERF_CNTL1 _MMIO(0xe558) +#define EU_PERF_CNTL5 _MMIO(0xe55c) + +#define GEN12_HDC_CHICKEN0 _MMIO(0xe5f0) +#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11) +#define ICL_HDC_MODE _MMIO(0xe5f4) + +#define EU_PERF_CNTL2 _MMIO(0xe658) +#define EU_PERF_CNTL6 _MMIO(0xe65c) +#define EU_PERF_CNTL3 _MMIO(0xe758) + +#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8) +#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15) +#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4) +#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32) +#define 
FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32) +#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32) +#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32) +#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32) + +#define SARB_CHICKEN1 _MMIO(0xe90c) +#define COMP_CKN_IN REG_GENMASK(30, 29) + +#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100) + +#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) +#define DOP_CLOCK_GATING_DISABLE (1 << 0) +#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8) +#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1) + +#define __GEN11_VCS2_MOCS0 0x10000 +#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4) + +#define CRSTANDVID _MMIO(0x11100) +#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ +#define PXVFREQ_PX_MASK 0x7f000000 +#define PXVFREQ_PX_SHIFT 24 +#define VIDFREQ_BASE _MMIO(0x11110) +#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */ +#define VIDFREQ2 _MMIO(0x11114) +#define VIDFREQ3 _MMIO(0x11118) +#define VIDFREQ4 _MMIO(0x1111c) +#define VIDFREQ_P0_MASK 0x1f000000 +#define VIDFREQ_P0_SHIFT 24 +#define VIDFREQ_P0_CSCLK_MASK 0x00f00000 +#define VIDFREQ_P0_CSCLK_SHIFT 20 +#define VIDFREQ_P0_CRCLK_MASK 0x000f0000 +#define VIDFREQ_P0_CRCLK_SHIFT 16 +#define VIDFREQ_P1_MASK 0x00001f00 +#define VIDFREQ_P1_SHIFT 8 +#define VIDFREQ_P1_CSCLK_MASK 0x000000f0 +#define VIDFREQ_P1_CSCLK_SHIFT 4 +#define VIDFREQ_P1_CRCLK_MASK 0x0000000f +#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */ +#define INTTOEXT_MAP3_SHIFT 24 +#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) +#define INTTOEXT_MAP2_SHIFT 16 +#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) +#define INTTOEXT_MAP1_SHIFT 8 +#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) +#define INTTOEXT_MAP0_SHIFT 0 +#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) +#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */ +#define MEMCTL_CMD_MASK 0xe000 +#define MEMCTL_CMD_SHIFT 13 +#define MEMCTL_CMD_RCLK_OFF 0 +#define MEMCTL_CMD_RCLK_ON 1 +#define MEMCTL_CMD_CHFREQ 2 +#define MEMCTL_CMD_CHVID 3 +#define MEMCTL_CMD_VMMOFF 4 +#define MEMCTL_CMD_VMMON 5 +#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears + when command complete */ +#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ +#define MEMCTL_FREQ_SHIFT 8 +#define MEMCTL_SFCAVM (1 << 7) +#define MEMCTL_TGT_VID_MASK 0x007f +#define MEMIHYST _MMIO(0x1117c) +#define MEMINTREN _MMIO(0x11180) /* 16 bits */ +#define MEMINT_RSEXIT_EN (1 << 8) +#define MEMINT_CX_SUPR_EN (1 << 7) +#define MEMINT_CONT_BUSY_EN (1 << 6) +#define MEMINT_AVG_BUSY_EN (1 << 5) +#define MEMINT_EVAL_CHG_EN (1 << 4) +#define MEMINT_MON_IDLE_EN (1 << 3) +#define MEMINT_UP_EVAL_EN (1 << 2) +#define MEMINT_DOWN_EVAL_EN (1 << 1) +#define MEMINT_SW_CMD_EN (1 << 0) +#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */ +#define MEM_RSEXIT_MASK 0xc000 +#define MEM_RSEXIT_SHIFT 14 +#define MEM_CONT_BUSY_MASK 0x3000 +#define MEM_CONT_BUSY_SHIFT 12 +#define MEM_AVG_BUSY_MASK 0x0c00 +#define MEM_AVG_BUSY_SHIFT 10 +#define MEM_EVAL_CHG_MASK 0x0300 +#define MEM_EVAL_BUSY_SHIFT 8 +#define MEM_MON_IDLE_MASK 0x00c0 +#define MEM_MON_IDLE_SHIFT 6 +#define MEM_UP_EVAL_MASK 0x0030 +#define MEM_UP_EVAL_SHIFT 4 +#define MEM_DOWN_EVAL_MASK 0x000c +#define MEM_DOWN_EVAL_SHIFT 2 +#define MEM_SW_CMD_MASK 0x0003 +#define MEM_INT_STEER_GFX 0 +#define MEM_INT_STEER_CMR 1 +#define MEM_INT_STEER_SMI 2 +#define MEM_INT_STEER_SCI 3 +#define MEMINTRSTS _MMIO(0x11184) +#define MEMINT_RSEXIT (1 << 7) +#define 
MEMINT_CONT_BUSY (1 << 6) +#define MEMINT_AVG_BUSY (1 << 5) +#define MEMINT_EVAL_CHG (1 << 4) +#define MEMINT_MON_IDLE (1 << 3) +#define MEMINT_UP_EVAL (1 << 2) +#define MEMINT_DOWN_EVAL (1 << 1) +#define MEMINT_SW_CMD (1 << 0) +#define MEMMODECTL _MMIO(0x11190) +#define MEMMODE_BOOST_EN (1 << 31) +#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ +#define MEMMODE_BOOST_FREQ_SHIFT 24 +#define MEMMODE_IDLE_MODE_MASK 0x00030000 +#define MEMMODE_IDLE_MODE_SHIFT 16 +#define MEMMODE_IDLE_MODE_EVAL 0 +#define MEMMODE_IDLE_MODE_CONT 1 +#define MEMMODE_HWIDLE_EN (1 << 15) +#define MEMMODE_SWMODE_EN (1 << 14) +#define MEMMODE_RCLK_GATE (1 << 13) +#define MEMMODE_HW_UPDATE (1 << 12) +#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ +#define MEMMODE_FSTART_SHIFT 8 +#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ +#define MEMMODE_FMAX_SHIFT 4 +#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ +#define RCBMAXAVG _MMIO(0x1119c) +#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */ +#define SWMEMCMD_RENDER_OFF (0 << 13) +#define SWMEMCMD_RENDER_ON (1 << 13) +#define SWMEMCMD_SWFREQ (2 << 13) +#define SWMEMCMD_TARVID (3 << 13) +#define SWMEMCMD_VRM_OFF (4 << 13) +#define SWMEMCMD_VRM_ON (5 << 13) +#define CMDSTS (1 << 12) +#define SFCAVM (1 << 11) +#define SWFREQ_MASK 0x0380 /* P0-7 */ +#define SWFREQ_SHIFT 7 +#define TARVID_MASK 0x001f +#define MEMSTAT_CTG _MMIO(0x111a0) +#define RCBMINAVG _MMIO(0x111a0) +#define RCUPEI _MMIO(0x111b0) +#define RCDNEI _MMIO(0x111b4) +#define RSTDBYCTL _MMIO(0x111b8) +#define RS1EN (1 << 31) +#define RS2EN (1 << 30) +#define RS3EN (1 << 29) +#define D3RS3EN (1 << 28) /* Display D3 imlies RS3 */ +#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */ +#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */ +#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */ +#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */ +#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */ +#define RSX_STATUS_MASK (7 << 20) +#define RSX_STATUS_ON (0 << 20) +#define RSX_STATUS_RC1 (1 << 20) +#define RSX_STATUS_RC1E (2 << 20) +#define RSX_STATUS_RS1 (3 << 20) +#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */ +#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */ +#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */ +#define RSX_STATUS_RSVD2 (7 << 20) +#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */ +#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */ +#define JRSC (1 << 17) /* rsx coupled to cpu c-state */ +#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */ +#define RS1CONTSAV_MASK (3 << 14) +#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */ +#define RS1CONTSAV_RSVD (1 << 14) +#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */ +#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */ +#define NORMSLEXLAT_MASK (3 << 12) +#define SLOW_RS123 (0 << 12) +#define SLOW_RS23 (1 << 12) +#define SLOW_RS3 (2 << 12) +#define NORMAL_RS123 (3 << 12) +#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */ +#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ +#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */ +#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */ +#define RS_CSTATE_MASK (3 << 4) +#define RS_CSTATE_C367_RS1 (0 << 4) +#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4) +#define RS_CSTATE_RSVD (2 << 
4) +#define RS_CSTATE_C367_RS2 (3 << 4) +#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */ +#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */ +#define VIDCTL _MMIO(0x111c0) +#define VIDSTS _MMIO(0x111c8) +#define VIDSTART _MMIO(0x111cc) /* 8 bits */ +#define MEMSTAT_ILK _MMIO(0x111f8) +#define MEMSTAT_VID_MASK 0x7f00 +#define MEMSTAT_VID_SHIFT 8 +#define MEMSTAT_PSTATE_MASK 0x00f8 +#define MEMSTAT_PSTATE_SHIFT 3 +#define MEMSTAT_MON_ACTV (1 << 2) +#define MEMSTAT_SRC_CTL_MASK 0x0003 +#define MEMSTAT_SRC_CTL_CORE 0 +#define MEMSTAT_SRC_CTL_TRB 1 +#define MEMSTAT_SRC_CTL_THM 2 +#define MEMSTAT_SRC_CTL_STDBY 3 +#define PMMISC _MMIO(0x11214) +#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */ +#define SDEW _MMIO(0x1124c) +#define CSIEW0 _MMIO(0x11250) +#define CSIEW1 _MMIO(0x11254) +#define CSIEW2 _MMIO(0x11258) +#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */ +#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */ +#define MCHAFE _MMIO(0x112c0) +#define CSIEC _MMIO(0x112e0) +#define DMIEC _MMIO(0x112e4) +#define DDREC _MMIO(0x112e8) +#define PEG0EC _MMIO(0x112ec) +#define PEG1EC _MMIO(0x112f0) +#define GFXEC _MMIO(0x112f4) +#define INTTOEXT_BASE_ILK _MMIO(0x11300) +#define RPPREVBSYTUPAVG _MMIO(0x113b8) +#define RCPREVBSYTUPAVG _MMIO(0x113b8) +#define RCPREVBSYTDNAVG _MMIO(0x113bc) +#define RPPREVBSYTDNAVG _MMIO(0x113bc) +#define ECR _MMIO(0x11600) +#define ECR_GPFE (1 << 31) +#define ECR_IMONE (1 << 30) +#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ +#define OGW0 _MMIO(0x11608) +#define OGW1 _MMIO(0x1160c) +#define EG0 _MMIO(0x11610) +#define EG1 _MMIO(0x11614) +#define EG2 _MMIO(0x11618) +#define EG3 _MMIO(0x1161c) +#define EG4 _MMIO(0x11620) +#define EG5 _MMIO(0x11624) +#define EG6 _MMIO(0x11628) +#define EG7 _MMIO(0x1162c) +#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */ +#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */ +#define LCFUSE02 _MMIO(0x116c0) +#define LCFUSE_HIV_MASK 0x000000ff + +#define GAC_ECO_BITS _MMIO(0x14090) +#define ECOBITS_SNB_BIT (1 << 13) +#define ECOBITS_PPGTT_CACHE64B (3 << 8) +#define ECOBITS_PPGTT_CACHE4B (0 << 8) + +#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168) +#define CHV_FGT_DISABLE_SS0 (1 << 10) +#define CHV_FGT_DISABLE_SS1 (1 << 11) +#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 +#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT) +#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20 +#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT) +#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24 +#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT) +#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28 +#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) + +#define BCS_SWCTRL _MMIO(0x22200) +#define BCS_SRC_Y REG_BIT(0) +#define BCS_DST_Y REG_BIT(1) + +#define GAB_CTL _MMIO(0x24000) +#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8) + +#define GEN6_PMISR _MMIO(0x44020) +#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */ +#define GEN6_PMIIR _MMIO(0x44028) +#define GEN6_PMIER _MMIO(0x4402c) +#define GEN6_PM_MBOX_EVENT (1 << 25) +#define GEN6_PM_THERMAL_EVENT (1 << 24) +/* + * For Gen11 these are in the upper word of the GPM_WGBOXPERF + * registers. Shifting is handled on accessing the imr and ier. 
+ */ +#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6) +#define GEN6_PM_RP_UP_THRESHOLD (1 << 5) +#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4) +#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2) +#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1) +#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \ + GEN6_PM_RP_UP_THRESHOLD | \ + GEN6_PM_RP_DOWN_EI_EXPIRED | \ + GEN6_PM_RP_DOWN_THRESHOLD | \ + GEN6_PM_RP_DOWN_TIMEOUT) + +#define GEN7_GT_SCRATCH(i) _MMIO(0x4f100 + (i) * 4) +#define GEN7_GT_SCRATCH_REG_NUM 8 + +#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008) +#define GFX_FLSH_CNTL_EN (1 << 0) + +#define GTFIFODBG _MMIO(0x120000) +#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) +#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) +#define GT_FIFO_SBDROPERR (1 << 6) +#define GT_FIFO_BLOBDROPERR (1 << 5) +#define GT_FIFO_SB_READ_ABORTERR (1 << 4) +#define GT_FIFO_DROPERR (1 << 3) +#define GT_FIFO_OVFERR (1 << 2) +#define GT_FIFO_IAWRERR (1 << 1) +#define GT_FIFO_IARDERR (1 << 0) + +#define GTFIFOCTL _MMIO(0x120008) +#define GT_FIFO_FREE_ENTRIES_MASK 0x7f +#define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) +#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) + +#define FORCEWAKE_MT_ACK _MMIO(0x130040) +#define FORCEWAKE_ACK_HSW _MMIO(0x130044) +#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044) +#define FORCEWAKE_KERNEL BIT(0) +#define FORCEWAKE_USER BIT(1) +#define FORCEWAKE_KERNEL_FALLBACK BIT(15) +#define FORCEWAKE_ACK _MMIO(0x130090) +#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090) +#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25) +#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24) +#define VLV_GTLC_ALLOWWAKEREQ (1 << 0) +#define VLV_GTLC_PW_STATUS _MMIO(0x130094) +#define VLV_GTLC_ALLOWWAKEACK (1 << 0) +#define VLV_GTLC_ALLOWWAKEERR (1 << 1) +#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) +#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) +#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098) +#define VLV_GFX_CLK_STATUS_BIT (1 << 3) +#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2) +#define FORCEWAKE_VLV _MMIO(0x1300b0) +#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4) +#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8) +#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc) + +#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c) +#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 + +#define GEN6_GT_CORE_STATUS _MMIO(0x138060) +#define GEN6_CORE_CPD_STATE_MASK (7 << 4) +#define GEN6_RCn_MASK 7 +#define GEN6_RC0 0 +#define GEN6_RC3 2 +#define GEN6_RC6 3 +#define GEN6_RC7 4 + +#define GEN8_GT_SLICE_INFO _MMIO(0x138064) +#define GEN8_LSLICESTAT_MASK 0x7 + +#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104) +#define VLV_COUNTER_CONTROL _MMIO(0x138104) +#define VLV_COUNT_RANGE_HIGH (1 << 15) +#define VLV_MEDIA_RC0_COUNT_EN (1 << 5) +#define VLV_RENDER_RC0_COUNT_EN (1 << 4) +#define VLV_MEDIA_RC6_COUNT_EN (1 << 1) +#define VLV_RENDER_RC6_COUNT_EN (1 << 0) +#define GEN6_GT_GFX_RC6 _MMIO(0x138108) +#define VLV_GT_RENDER_RC6 _MMIO(0x138108) +#define VLV_GT_MEDIA_RC6 _MMIO(0x13810c) + +#define GEN6_GT_GFX_RC6p _MMIO(0x13810c) +#define GEN6_GT_GFX_RC6pp _MMIO(0x138110) +#define VLV_RENDER_C0_COUNT _MMIO(0x138118) +#define VLV_MEDIA_C0_COUNT _MMIO(0x13811c) + +#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4)) +#define GEN11_CSME (31) +#define GEN11_GUNIT (28) +#define GEN11_GUC (25) +#define GEN11_WDPERF (20) +#define GEN11_KCR (19) +#define GEN11_GTPM (16) +#define GEN11_BCS (15) +#define GEN11_RCS0 (0) +#define GEN11_VECS(x) (31 - (x)) +#define GEN11_VCS(x) (x) + +#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030) +#define 
GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034) +#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038) +#define ENGINE1_MASK REG_GENMASK(31, 16) +#define ENGINE0_MASK REG_GENMASK(15, 0) +#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c) +#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040) +#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044) + +#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4)) +#define GEN11_INTR_DATA_VALID (1 << 31) +#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16) +#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20) +#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff) +/* irq instances for OTHER_CLASS */ +#define OTHER_GUC_INSTANCE 0 +#define OTHER_GTPM_INSTANCE 1 +#define OTHER_KCR_INSTANCE 4 + +#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) + +#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090) +#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0) +#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8) +#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac) +#define GEN12_VCS4_VCS5_INTR_MASK _MMIO(0x1900b0) +#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4) +#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0) +#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4) +#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8) +#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec) +#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0) +#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4) + +#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) + +enum { + INTEL_ADVANCED_CONTEXT = 0, + INTEL_LEGACY_32B_CONTEXT, + INTEL_ADVANCED_AD_CONTEXT, + INTEL_LEGACY_64B_CONTEXT +}; + +enum { + FAULT_AND_HANG = 0, + FAULT_AND_HALT, /* Debug only */ + FAULT_AND_STREAM, + FAULT_AND_CONTINUE /* Unsupported */ +}; + +#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12) +#define GEN8_CTX_VALID (1 << 0) +#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1) +#define GEN8_CTX_FORCE_RESTORE (1 << 2) +#define GEN8_CTX_L3LLC_COHERENT (1 << 5) +#define GEN8_CTX_PRIVILEGE (1 << 8) +#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 +#define GEN8_CTX_ID_SHIFT 32 +#define GEN8_CTX_ID_WIDTH 21 +#define GEN11_SW_CTX_ID_SHIFT 37 +#define GEN11_SW_CTX_ID_WIDTH 11 +#define GEN11_ENGINE_CLASS_SHIFT 61 +#define GEN11_ENGINE_CLASS_WIDTH 3 +#define GEN11_ENGINE_INSTANCE_SHIFT 48 +#define GEN11_ENGINE_INSTANCE_WIDTH 6 +#define XEHP_SW_CTX_ID_SHIFT 39 +#define XEHP_SW_CTX_ID_WIDTH 16 +#define XEHP_SW_COUNTER_SHIFT 58 +#define XEHP_SW_COUNTER_WIDTH 6 + +#endif /* __INTEL_GT_REGS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 46be4197b93f..49a8fb63e6e5 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -10,9 +10,11 @@ #include <drm/drm_cache.h> +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "i915_trace.h" #include "intel_gt.h" +#include "intel_gt_regs.h" #include "intel_gtt.h" struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c index 08d7d5ae263a..40e2e28ee6c7 100644 --- a/drivers/gpu/drm/i915/gt/intel_llc.c +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -7,8 +7,10 @@ #include <linux/cpufreq.h> #include "i915_drv.h" +#include "i915_reg.h" #include "intel_gt.h" #include "intel_llc.h" +#include "intel_mchbar_regs.h" #include "intel_pcode.h" struct ia_constants { @@ -140,11 +142,10 @@ static void gen6_update_ring_freq(struct intel_llc *llc) unsigned int ia_freq, ring_freq; 
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); - sandybridge_pcode_write(i915, - GEN6_PCODE_WRITE_MIN_FREQ_TABLE, - ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | - ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | - gpu_freq); + snb_pcode_write(i915, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, + ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | + ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | + gpu_freq); } } diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b4b7453554d5..004e1216e654 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -8,9 +8,13 @@ #include "gen8_engine_cs.h" #include "i915_drv.h" #include "i915_perf.h" +#include "i915_reg.h" +#include "intel_context.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" +#include "intel_gt_regs.h" #include "intel_lrc.h" #include "intel_lrc_reg.h" #include "intel_ring.h" @@ -1718,6 +1722,17 @@ static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) #endif } +static u32 lrc_get_runtime(const struct intel_context *ce) +{ + /* + * We can use either ppHWSP[16] which is recorded before the context + * switch (and so excludes the cost of context switches) or use the + * value from the context image itself, which is saved/restored earlier + * and so includes the cost of the save. + */ + return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]); +} + void lrc_update_runtime(struct intel_context *ce) { u32 old; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index 7f697845c4cf..0b76f096b559 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -8,12 +8,12 @@ #include <linux/types.h> -#include "intel_context.h" -#include "intel_lrc_reg.h" - struct drm_i915_gem_object; +struct i915_gem_ww_ctx; +struct intel_context; struct intel_engine_cs; struct intel_ring; +struct kref; /* At the start of the context image is its per-process HWS page */ #define LRC_PPHWSP_PN (0) @@ -68,15 +68,5 @@ void lrc_check_regs(const struct intel_context *ce, const char *when); void lrc_update_runtime(struct intel_context *ce); -static inline u32 lrc_get_runtime(const struct intel_context *ce) -{ - /* - * We can use either ppHWSP[16] which is recorded before the context - * switch (and so excludes the cost of context switches) or use the - * value from the context image itself, which is saved/restored earlier - * and so includes the cost of the save. 
- */ - return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]); -} #endif /* __INTEL_LRC_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index f785d0ed238f..304000c7e345 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -53,21 +53,6 @@ #define GEN8_EXECLISTS_STATUS_BUF 0x370 #define GEN11_EXECLISTS_STATUS_BUF2 0x3c0 -/* Execlists regs */ -#define RING_ELSP(base) _MMIO((base) + 0x230) -#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) -#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) -#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) -#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0) -#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1) -#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2) -#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3) -#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8) -#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) -#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) -#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) -#define EL_CTRL_LOAD REG_BIT(0) - /* * The docs specify that the write pointer wraps around after 5h, "After status * is written out to the last available status QW at offset 5h, this pointer diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 9c253ba593c6..c4c37585ae8c 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -7,7 +7,7 @@ #include "intel_engine.h" #include "intel_gt.h" -#include "intel_lrc_reg.h" +#include "intel_gt_regs.h" #include "intel_mocs.h" #include "intel_ring.h" diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index c3155ee58689..6df359c534fe 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -6,9 +6,12 @@ #include <linux/pm_runtime.h> #include "i915_drv.h" +#include "i915_reg.h" #include "i915_vgpu.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_gt_regs.h" #include "intel_pcode.h" #include "intel_rc6.h" @@ -267,8 +270,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_HW_ENABLE; rc6vids = 0; - ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, - &rc6vids, NULL); + ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); if (GRAPHICS_VER(i915) == 6 && ret) { drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n"); } else if (GRAPHICS_VER(i915) == 6 && @@ -278,7 +280,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; rc6vids |= GEN6_ENCODE_RC6_VID(450); - ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); + ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); if (ret) drm_err(&i915->drm, "Couldn't fix incorrect rc6 voltage\n"); @@ -449,10 +451,10 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) enable_rc6 = false; } - if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 && - (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 && - (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 && - (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) { + if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT(RENDER_RING_BASE)) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT(GEN6_BSD_RING_BASE)) & IDLE_TIME_MASK) > 1 && + 
(intel_uncore_read(uncore, PWRCTX_MAXCNT(BLT_RING_BASE)) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT(VEBOX_RING_BASE)) & IDLE_TIME_MASK) > 1)) { drm_dbg(&i915->drm, "Engine Idle wait time not set properly.\n"); enable_rc6 = false; diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h index e119ec4a0bcc..b6fea71afc22 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.h +++ b/drivers/gpu/drm/i915/gt/intel_rc6.h @@ -6,7 +6,7 @@ #ifndef INTEL_RC6_H #define INTEL_RC6_H -#include "i915_reg.h" +#include "i915_reg_defs.h" struct intel_engine_cs; struct intel_rc6; diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 21215a080088..a04e0cf4a94b 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -4,6 +4,7 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "intel_memory_region.h" #include "intel_region_lmem.h" #include "intel_region_ttm.h" @@ -11,6 +12,7 @@ #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_regs.h" static int init_fake_lmem_bar(struct intel_memory_region *mem) { diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index b575cd6e0b7a..5121e6dc2fa5 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -3,6 +3,8 @@ * Copyright © 2014 Intel Corporation */ +#include "gem/i915_gem_internal.h" + #include "i915_drv.h" #include "intel_renderstate.h" #include "intel_context.h" diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index cf1f8a382c45..82713264b96c 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -11,14 +11,20 @@ #include "gem/i915_gem_context.h" +#include "gt/intel_gt_regs.h" + #include "i915_drv.h" +#include "i915_file_private.h" #include "i915_gpu_error.h" #include "i915_irq.h" #include "intel_breadcrumbs.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" +#include "intel_mchbar_regs.h" +#include "intel_pci_config.h" #include "intel_reset.h" #include "uc/intel_guc.h" @@ -343,25 +349,25 @@ static void get_sfc_forced_lock_data(struct intel_engine_cs *engine, MISSING_CASE(engine->class); fallthrough; case VIDEO_DECODE_CLASS: - sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base); sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; - sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base); sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; - sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base); sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT; sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); break; case VIDEO_ENHANCEMENT_CLASS: - sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base); sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; - sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine); + sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base); sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; - sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine); + sfc_lock->usage_reg = 
GEN11_VECS_SFC_USAGE(engine->mmio_base); sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT; sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); @@ -408,7 +414,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, * forced lock on the VE engine that shares the same SFC. */ if (!(intel_uncore_read_fw(uncore, - GEN12_HCP_SFC_LOCK_STATUS(engine)) & + GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) & GEN12_HCP_SFC_USAGE_BIT)) return 0; diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 2fdd52b62092..40ffcb94e379 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -3,12 +3,14 @@ * Copyright © 2019 Intel Corporation */ +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_object.h" #include "i915_drv.h" #include "i915_vma.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_ring.h" #include "intel_timeline.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 3e6fac0340ef..6d7ec3bf1f32 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -3,6 +3,10 @@ * Copyright © 2008-2021 Intel Corporation */ +#include <drm/drm_cache.h> + +#include "gem/i915_gem_internal.h" + #include "gen2_engine_cs.h" #include "gen6_engine_cs.h" #include "gen6_ppgtt.h" @@ -11,8 +15,10 @@ #include "i915_mitigations.h" #include "intel_breadcrumbs.h" #include "intel_context.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_irq.h" +#include "intel_gt_regs.h" #include "intel_reset.h" #include "intel_ring.h" #include "shmem_utils.h" @@ -1002,15 +1008,15 @@ static void gen6_bsd_submit_request(struct i915_request *request) /* Disable notification that the ring is IDLE. The GT * will then assume that it is busy and bring it out of rc6. */ - intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE), + _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); /* Clear the context id. Here be magic! */ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0); /* Wait for the ring not to be idle, i.e. for it to wake up. */ if (__intel_wait_for_register_fw(uncore, - GEN6_BSD_SLEEP_PSMI_CONTROL, + RING_PSMI_CTL(GEN6_BSD_RING_BASE), GEN6_BSD_SLEEP_INDICATOR, 0, 1000, 0, NULL)) @@ -1023,8 +1029,8 @@ static void gen6_bsd_submit_request(struct i915_request *request) /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. 
*/ - intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE), + _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); } diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 54e7df788dbf..fd95449ed46d 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -6,11 +6,14 @@ #include <drm/i915_drm.h> #include "i915_drv.h" +#include "i915_irq.h" #include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" #include "intel_gt_irq.h" #include "intel_gt_pm_irq.h" +#include "intel_gt_regs.h" +#include "intel_mchbar_regs.h" #include "intel_pcode.h" #include "intel_rps.h" #include "vlv_sideband.h" @@ -1090,9 +1093,8 @@ static void gen6_rps_init(struct intel_rps *rps) IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { u32 ddcc_status = 0; - if (sandybridge_pcode_read(i915, - HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, - &ddcc_status, NULL) == 0) + if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + &ddcc_status, NULL) == 0) rps->efficient_freq = clamp_t(u8, (ddcc_status >> 8) & 0xff, @@ -1940,8 +1942,7 @@ void intel_rps_init(struct intel_rps *rps) if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { u32 params = 0; - sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, - &params, NULL); + snb_pcode_read(i915, GEN6_READ_OC_PARAMS, &params, NULL); if (params & BIT(31)) { /* OC supported */ drm_dbg(&i915->drm, "Overclocking supported, max: %dMHz, overclock: %dMHz\n", diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index bdf09051b8a0..29118c652811 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -4,7 +4,8 @@ */ #include "i915_drv.h" -#include "intel_lrc_reg.h" +#include "intel_engine_regs.h" +#include "intel_gt_regs.h" #include "intel_sseu.h" void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c index 8bb3a91dad82..903626f106ea 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "intel_gt_debugfs.h" +#include "intel_gt_regs.h" #include "intel_sseu_debugfs.h" static void sseu_copy_subslices(const struct sseu_dev_info *sseu, diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 438bbc7b8147..b9640212d659 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -3,9 +3,12 @@ * Copyright © 2016-2018 Intel Corporation */ -#include "i915_drv.h" +#include <drm/drm_cache.h> + +#include "gem/i915_gem_internal.h" #include "i915_active.h" +#include "i915_drv.h" #include "i915_syncmap.h" #include "intel_gt.h" #include "intel_ring.h" diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 77ac294acc9d..26038066e90b 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -6,8 +6,10 @@ #include "i915_drv.h" #include "intel_context.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" +#include "intel_gt_regs.h" #include "intel_ring.h" #include "intel_workarounds.h" @@ -235,7 
+237,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING); /* WaDisableAsyncFlipPerfMode:bdw,chv */ - wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE); + wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE); /* WaDisablePartialInstShootdown:bdw,chv */ wa_masked_en(wal, GEN8_ROW_CHICKEN, @@ -2219,7 +2221,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * For DG1 this only applies to A0. */ wa_masked_en(wal, - GEN6_RC_SLEEP_PSMI_CONTROL, + RING_PSMI_CTL(RENDER_RING_BASE), GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | GEN8_RC_SEMA_IDLE_MSG_DISABLE); } @@ -2434,7 +2436,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) if (GRAPHICS_VER(i915) == 7) { /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ wa_masked_en(wal, - GFX_MODE_GEN7, + RING_MODE_GEN7(RENDER_RING_BASE), GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE); /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */ @@ -2472,7 +2474,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ wa_masked_en(wal, - MI_MODE, + RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE); if (GRAPHICS_VER(i915) == 6) { @@ -2531,7 +2533,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) if (IS_GRAPHICS_VER(i915, 4, 6)) /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - wa_add(wal, MI_MODE, + wa_add(wal, RING_MI_MODE(RENDER_RING_BASE), 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH), /* XXX bit doesn't stick on Broadwater */ IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true); @@ -2547,7 +2549,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * they are already accustomed to from before contexts were * enabled. 
*/ - wa_add(wal, ECOSKPD, + wa_add(wal, ECOSKPD(RENDER_RING_BASE), 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE), 0 /* XXX bit doesn't stick on Broadwater */, true); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h index 1e873681795d..8a4b6de4e754 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h +++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h @@ -8,7 +8,7 @@ #include <linux/types.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" struct i915_wa { i915_reg_t reg; diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 8af261831470..0dcb3ed44a73 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -6,6 +6,7 @@ #include <linux/sort.h> #include "i915_selftest.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt_clock_utils.h" #include "selftest_engine.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index e10da897e07a..72d5faab8f9a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -5,6 +5,7 @@ #include <linux/prime_numbers.h> +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_reset.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 8bf62a5826cc..be94f863bdef 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -5,6 +5,7 @@ #include <linux/sort.h> +#include "intel_engine_regs.h" #include "intel_gt_clock_utils.h" #include "selftest_llc.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 9c21b55b927b..83ff4c2e57c5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -6,7 +6,9 @@ #include <linux/kthread.h> #include "gem/i915_gem_context.h" +#include "gem/i915_gem_internal.h" +#include "i915_gem_evict.h" #include "intel_gt.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index 459b775f163a..2cd184ab32b1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -31,9 +31,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); val = gpu_freq; - if (sandybridge_pcode_read(i915, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &val, NULL)) { + if (snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + &val, NULL)) { pr_err("Failed to read freq table[%d], range [%d, %d]\n", gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq); err = -ENXIO; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 618c905daa19..21c29d315cc0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -5,6 +5,8 @@ #include <linux/prime_numbers.h> +#include "gem/i915_gem_internal.h" + #include "i915_selftest.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c index fa4293d2944f..c9c4f391c5cc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_migrate.c +++ 
b/drivers/gpu/drm/i915/gt/selftest_migrate.c @@ -5,6 +5,8 @@ #include <linux/sort.h> +#include "gem/i915_gem_internal.h" + #include "selftests/i915_random.h" static const unsigned int sizes[] = { diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 7ee2513e15f9..6a69ac0184ad 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -6,8 +6,11 @@ #include <linux/pm_qos.h> #include <linux/sort.h> +#include "gem/i915_gem_internal.h" + #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt_clock_utils.h" #include "intel_gt_pm.h" @@ -518,9 +521,8 @@ static void show_pcu_config(struct intel_rps *rps) for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { int ia_freq = gpu_freq; - sandybridge_pcode_read(i915, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq, NULL); + snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + &ia_freq, NULL); pr_info("%5d %5d %5d\n", gpu_freq * 50, diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index e2eb686a9763..0410c402f2a3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -8,6 +8,7 @@ #include "intel_context.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_requests.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 0287c2573c51..67a9aab801dd 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -3,6 +3,7 @@ * Copyright © 2018 Intel Corporation */ +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h index 7a8d4bfc5f6a..62cb4254a77a 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h @@ -7,7 +7,6 @@ #define _GUC_ACTIONS_SLPC_ABI_H_ #include <linux/types.h> -#include "i915_reg.h" /** * DOC: SLPC SHARED DATA STRUCTURE diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index cbec51f4dec3..447a976c9f25 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -7,11 +7,13 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" +#include "gt/intel_gt_regs.h" #include "intel_guc.h" #include "intel_guc_slpc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" +#include "i915_irq.h" /** * DOC: GuC diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 542bada48674..7e41175618f5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -5,7 +5,9 @@ #include <linux/bsearch.h> +#include "gt/intel_engine_regs.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_lrc.h" #include "gt/shmem_utils.h" #include "intel_guc_ads.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 40f7d4779c9e..a0372735cddb 100644 --- 
a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -10,6 +10,7 @@ */ #include "gt/intel_gt.h" +#include "gt/intel_gt_regs.h" #include "intel_guc_fw.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index be35f0570396..b53f61f3101f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -7,6 +7,7 @@ #include "gt/intel_gt.h" #include "i915_drv.h" +#include "i915_irq.h" #include "i915_memcpy.h" #include "intel_guc_log.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h index cdb47c2291c8..66027a42cda9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -9,7 +9,7 @@ #include <linux/compiler.h> #include <linux/types.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" /* Definitions of GuC H/W registers, bits, etc */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 13b27b8ff74e..b3d28b003b73 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -3,9 +3,14 @@ * Copyright © 2021 Intel Corporation */ +#include <drm/drm_cache.h> + #include "i915_drv.h" +#include "i915_reg.h" #include "intel_guc_slpc.h" +#include "intel_mchbar_regs.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_regs.h" static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 9c0c2f433b9b..b3a429a92c0d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -9,13 +9,15 @@ #include "gt/gen8_engine_cs.h" #include "gt/intel_breadcrumbs.h" #include "gt/intel_context.h" -#include "gt/intel_engine_pm.h" #include "gt/intel_engine_heartbeat.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_gt_requests.h" #include "gt/intel_lrc.h" #include "gt/intel_lrc_reg.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index ae8c8a6c8cc8..73ec670800f2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -6,7 +6,7 @@ #ifndef _INTEL_HUC_H_ #define _INTEL_HUC_H_ -#include "i915_reg.h" +#include "i915_reg_defs.h" #include "intel_uc_fw.h" #include "intel_huc_fw.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 62db14d41042..c88113044494 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -5,12 +5,15 @@ #include <linux/bitfield.h> #include <linux/firmware.h> + +#include <drm/drm_cache.h> #include <drm/drm_print.h> #include "gem/i915_gem_lmem.h" #include "intel_uc_fw.h" #include "intel_uc_fw_abi.h" #include "i915_drv.h" +#include "i915_reg.h" static inline struct intel_gt * ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type) diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index c08098a167e9..557f3314291a 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ 
b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -35,6 +35,7 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "gt/intel_ggtt_fencing.h" #include "gvt.h" diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c4118b808268..2459213b6c87 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -37,7 +37,9 @@ #include <linux/slab.h> #include "i915_drv.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_lrc.h" #include "gt/intel_ring.h" #include "gt/intel_gt_requests.h" @@ -1144,7 +1146,7 @@ struct cmd_interrupt_event { int mi_user_interrupt; }; -static struct cmd_interrupt_event cmd_interrupt_events[] = { +static const struct cmd_interrupt_event cmd_interrupt_events[] = { [RCS0] = { .pipe_control_notify = RCS_PIPE_CONTROL, .mi_flush_dw = INTEL_GVT_EVENT_RESERVED, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 034c060f89d4..c7722c818b4d 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -33,6 +33,7 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "gvt.h" static int get_edp_pipe(struct intel_vgpu *vgpu) @@ -184,10 +185,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) for_each_pipe(dev_priv, pipe) { vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= - ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); - vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE); + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; - vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } @@ -245,7 +246,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * setup_virtual_dp_monitor. */ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE; + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE; /* * Golden M/N are calculated based on: @@ -253,7 +254,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. */ - vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = TU_SIZE(64); vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; @@ -387,7 +388,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. 
*/ - vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = TU_SIZE(64); vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; @@ -496,9 +497,9 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) /* Disable Primary/Sprite/Cursor plane */ for_each_pipe(dev_priv, pipe) { - vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; - vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 94c3eb1586b0..c95c25d2addb 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -31,7 +31,13 @@ #include <linux/dma-buf.h> #include <linux/vfio.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_plane.h> + +#include "gem/i915_gem_dmabuf.h" + #include "i915_drv.h" +#include "i915_reg.h" #include "gvt.h" #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) @@ -148,8 +154,7 @@ static void dmabuf_gem_object_free(struct kref *kref) if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) { list_for_each(pos, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, - struct intel_vgpu_dmabuf_obj, list); + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); intel_gvt_hypervisor_put_vfio_device(vgpu); @@ -357,10 +362,8 @@ pick_dmabuf_by_info(struct intel_vgpu *vgpu, struct intel_vgpu_dmabuf_obj *ret = NULL; list_for_each(pos, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, - list); - if ((dmabuf_obj == NULL) || - (dmabuf_obj->info == NULL)) + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); + if (!dmabuf_obj->info) continue; fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info; @@ -387,11 +390,7 @@ pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id) struct intel_vgpu_dmabuf_obj *ret = NULL; list_for_each(pos, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, - list); - if (!dmabuf_obj) - continue; - + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj->dmabuf_id == id) { ret = dmabuf_obj; break; @@ -600,8 +599,7 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) mutex_lock(&vgpu->dmabuf_lock); list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, - list); + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 22247805c345..a30ba2d7b7ba 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -33,6 +33,7 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "gvt.h" #define GMBUS1_TOTAL_BYTES_SHIFT 16 diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index c8dcda6d4f0d..66d354c4195b 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -163,7 +163,7 @@ static void emulate_csb_update(struct intel_vgpu_execlist 
*execlist, hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, status, 8); intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4, + hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, &write_pointer, 4); } diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 9ec064199364..835c3fde8a20 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -37,15 +37,16 @@ #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" +#include "i915_reg.h" #define PRIMARY_FORMAT_NUM 16 struct pixel_format { - int drm_format; /* Pixel format in DRM definition */ - int bpp; /* Bits per pixel, 0 indicates invalid */ - char *desc; /* The description */ + int drm_format; /* Pixel format in DRM definition */ + int bpp; /* Bits per pixel, 0 indicates invalid */ + const char *desc; /* The description */ }; -static struct pixel_format bdw_pixel_formats[] = { +static const struct pixel_format bdw_pixel_formats[] = { {DRM_FORMAT_C8, 8, "8-bit Indexed"}, {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, @@ -58,7 +59,7 @@ static struct pixel_format bdw_pixel_formats[] = { {0, 0, NULL}, }; -static struct pixel_format skl_pixel_formats[] = { +static const struct pixel_format skl_pixel_formats[] = { {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"}, {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"}, {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"}, @@ -83,22 +84,22 @@ static int bdw_format_to_drm(int format) int bdw_pixel_formats_index = 6; switch (format) { - case DISPPLANE_8BPP: + case DISP_FORMAT_8BPP: bdw_pixel_formats_index = 0; break; - case DISPPLANE_BGRX565: + case DISP_FORMAT_BGRX565: bdw_pixel_formats_index = 1; break; - case DISPPLANE_BGRX888: + case DISP_FORMAT_BGRX888: bdw_pixel_formats_index = 2; break; - case DISPPLANE_RGBX101010: + case DISP_FORMAT_RGBX101010: bdw_pixel_formats_index = 3; break; - case DISPPLANE_BGRX101010: + case DISP_FORMAT_BGRX101010: bdw_pixel_formats_index = 4; break; - case DISPPLANE_RGBX888: + case DISP_FORMAT_RGBX888: bdw_pixel_formats_index = 5; break; @@ -211,14 +212,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, return -ENODEV; val = vgpu_vreg_t(vgpu, DSPCNTR(pipe)); - plane->enabled = !!(val & DISPLAY_PLANE_ENABLE); + plane->enabled = !!(val & DISP_ENABLE); if (!plane->enabled) return -ENODEV; if (GRAPHICS_VER(dev_priv) >= 9) { plane->tiled = val & PLANE_CTL_TILED_MASK; fmt = skl_format_to_drm( - val & PLANE_CTL_FORMAT_MASK, + val & PLANE_CTL_FORMAT_MASK_SKL, val & PLANE_CTL_ORDER_RGBX, val & PLANE_CTL_ALPHA_MASK, val & PLANE_CTL_YUV422_ORDER_MASK); @@ -231,8 +232,8 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->bpp = skl_pixel_formats[fmt].bpp; plane->drm_format = skl_pixel_formats[fmt].drm_format; } else { - plane->tiled = val & DISPPLANE_TILED; - fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); + plane->tiled = val & DISP_TILED; + fmt = bdw_format_to_drm(val & DISP_FORMAT_MASK); plane->bpp = bdw_pixel_formats[fmt].bpp; plane->drm_format = bdw_pixel_formats[fmt].drm_format; } @@ -278,14 +279,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, #define CURSOR_FORMAT_NUM (1 << 6) struct cursor_mode_format { - int drm_format; /* Pixel format in DRM definition */ - u8 bpp; /* Bits per pixel; 0 indicates invalid */ - u32 width; /* In 
pixel */ - u32 height; /* In lines */ - char *desc; /* The description */ + int drm_format; /* Pixel format in DRM definition */ + u8 bpp; /* Bits per pixel; 0 indicates invalid */ + u32 width; /* In pixel */ + u32 height; /* In lines */ + const char *desc; /* The description */ }; -static struct cursor_mode_format cursor_pixel_formats[] = { +static const struct cursor_mode_format cursor_pixel_formats[] = { {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"}, {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"}, {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"}, @@ -342,7 +343,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, return -ENODEV; val = vgpu_vreg_t(vgpu, CURCNTR(pipe)); - mode = val & MCURSOR_MODE; + mode = val & MCURSOR_MODE_MASK; plane->enabled = (mode != MCURSOR_MODE_DISABLE); if (!plane->enabled) return -ENODEV; @@ -391,7 +392,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, #define SPRITE_FORMAT_NUM (1 << 3) -static struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = { +static const struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = { [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"}, [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"}, [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"}, @@ -430,7 +431,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, yuv_order = (val & SPRITE_YUV_ORDER_MASK) >> _SPRITE_YUV_ORDER_SHIFT; - fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT; + fmt = (val & SPRITE_FORMAT_MASK) >> _SPRITE_FMT_SHIFT; if (!sprite_pixel_formats[fmt].bpp) { gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt); return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 99d1781fa5f0..28a94c3dc991 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -38,6 +38,8 @@ #include "i915_pvinfo.h" #include "trace.h" +#include "gt/intel_gt_regs.h" + #if defined(VERBOSE_DEBUG) #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args) #else @@ -185,7 +187,7 @@ struct gtt_type_table_entry { .pse_entry_type = pse_type, \ } -static struct gtt_type_table_entry gtt_type_table[] = { +static const struct gtt_type_table_entry gtt_type_table[] = { GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, GTT_TYPE_INVALID, @@ -499,7 +501,7 @@ DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff)); -static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { +static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .get_entry = gtt_get_entry64, .set_entry = gtt_set_entry64, .clear_present = gtt_entry_clear_present, @@ -516,7 +518,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .set_pfn = gen8_gtt_set_pfn, }; -static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { +static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index, .gma_to_pte_index = gen8_gma_to_pte_index, .gma_to_pde_index = gen8_gma_to_pde_index, @@ -526,7 +528,7 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { }; /* Update entry type per pse and ips bit. 
*/ -static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops, +static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops, struct intel_gvt_gtt_entry *entry, bool ips) { switch (entry->type) { @@ -553,7 +555,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index, bool guest) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); @@ -580,7 +582,7 @@ static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index, bool guest) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps : mm->ppgtt_mm.shadow_pdps, @@ -596,7 +598,7 @@ static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm, static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -608,7 +610,7 @@ static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -619,7 +621,7 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -629,7 +631,7 @@ static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; unsigned long offset = index; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -655,7 +657,7 @@ static inline int ppgtt_spt_get_entry( bool guest) { struct intel_gvt *gvt = spt->vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; int ret; e->type = get_entry_type(type); @@ -684,7 +686,7 @@ static inline int ppgtt_spt_set_entry( bool guest) { struct intel_gvt *gvt = spt->vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) return -EINVAL; @@ -947,7 +949,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *e) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; enum intel_gvt_gtt_type cur_pt_type; @@ -984,7 +986,7 @@ static inline void 
ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *entry) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; int type; @@ -1072,7 +1074,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = NULL; bool ips = false; int ret; @@ -1136,7 +1138,7 @@ err: static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge) { - struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops; se->type = ge->type; se->val64 = ge->val64; @@ -1159,7 +1161,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) @@ -1176,7 +1178,7 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *se) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *sub_spt; struct intel_gvt_gtt_entry sub_se; unsigned long start_gfn; @@ -1223,7 +1225,7 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *se) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = *se; unsigned long start_gfn; dma_addr_t dma_addr; @@ -1254,7 +1256,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *ge) { - struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se = *ge; unsigned long gfn, page_size = PAGE_SIZE; dma_addr_t dma_addr; @@ -1308,7 +1310,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; struct intel_gvt_gtt_entry se, ge; unsigned long gfn, i; @@ -1351,7 +1353,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *se, unsigned long index) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int ret; trace_spt_guest_change(spt->vgpu->id, "remove", spt, @@ -1432,7 +1434,7 @@ static int sync_oos_page(struct intel_vgpu *vgpu, { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; struct 
intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; struct intel_gvt_gtt_entry old, new; int index; @@ -1603,7 +1605,7 @@ static int ppgtt_handle_guest_write_page_table( { struct intel_vgpu *vgpu = spt->vgpu; int type = spt->shadow_page.type; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry old_se; int new_present; int i, ret; @@ -1720,7 +1722,7 @@ static int ppgtt_handle_guest_write_page_table_bytes( u64 pa, void *p_data, int bytes) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; struct intel_gvt_gtt_entry we, se; unsigned long index; @@ -1785,7 +1787,7 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_gtt *gtt = &gvt->gtt; - struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; struct intel_gvt_gtt_entry se; int index; @@ -1815,7 +1817,7 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_gtt *gtt = &gvt->gtt; - struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; struct intel_vgpu_ppgtt_spt *spt; struct intel_gvt_gtt_entry ge, se; int index, ret; @@ -2067,7 +2069,7 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) { struct intel_vgpu *vgpu = mm->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); @@ -2096,8 +2098,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) { struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; - struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; unsigned long gpa = INTEL_GVT_INVALID_ADDR; unsigned long gma_index[4]; struct intel_gvt_gtt_entry e; @@ -2221,7 +2223,7 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { - struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; pfn = pte_ops->get_pfn(entry); @@ -2236,7 +2238,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; unsigned long gma, gfn; struct intel_gvt_gtt_entry e = {.val64 = 0, .type = 
GTT_TYPE_GGTT_PTE}; @@ -2391,7 +2393,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_gtt *gtt = &vgpu->gtt; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = I915_GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; void *scratch_pt; @@ -2822,7 +2824,7 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; struct intel_gvt_gtt_entry old_entry; u32 index; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 3bf45672ef98..a3b0f59ec8bd 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -91,8 +91,8 @@ struct intel_gvt_gtt_gma_ops { }; struct intel_gvt_gtt { - struct intel_gvt_gtt_pte_ops *pte_ops; - struct intel_gvt_gtt_gma_ops *gma_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops; + const struct intel_gvt_gtt_gma_ops *gma_ops; int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm); void (*mm_free_page_table)(struct intel_vgpu_mm *mm); struct list_head oos_page_use_list_head; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0c0615602343..0ebffc327528 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -272,7 +272,7 @@ struct intel_gvt_mmio { /* Value of command write of this reg needs to be patched */ #define F_CMD_WRITE_PATCH (1 << 8) - struct gvt_mmio_block *mmio_block; + const struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index cde0a477fb49..520a7e1942f3 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -37,9 +37,14 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "gvt.h" #include "i915_pvinfo.h" +#include "intel_mchbar_regs.h" #include "display/intel_display_types.h" +#include "display/intel_fbc.h" +#include "display/vlv_dsi_pll_regs.h" +#include "gt/intel_gt_regs.h" /* XXX FIXME i915 has changed PP_XXX definition */ #define PCH_PP_STATUS _MMIO(0xc7200) @@ -701,11 +706,11 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, data = vgpu_vreg(vgpu, offset); if (data & PIPECONF_ENABLE) { - vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; + vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE; vgpu_update_refresh_rate(vgpu); vgpu_update_vblank_emulation(vgpu, true); } else { - vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; + vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE; vgpu_update_vblank_emulation(vgpu, false); } return 0; @@ -2647,12 +2652,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL); MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL); - MMIO_D(ILK_DPFC_CB_BASE, D_ALL); - MMIO_D(ILK_DPFC_CONTROL, D_ALL); - MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL); - MMIO_D(ILK_DPFC_STATUS, D_ALL); - MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL); - MMIO_D(ILK_DPFC_CHICKEN, D_ALL); + MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL); + 
MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL); MMIO_D(ILK_FBC_RT_BASE, D_ALL); MMIO_D(IPS_CTL, D_ALL); @@ -2876,9 +2881,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x3c), D_ALL); MMIO_D(_MMIO(0x860), D_ALL); - MMIO_D(ECOSKPD, D_ALL); + MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL); MMIO_D(_MMIO(0x121d0), D_ALL); - MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL); + MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL); MMIO_D(_MMIO(0x41d0), D_ALL); MMIO_D(GAC_ECO_BITS, D_ALL); MMIO_D(_MMIO(0x6200), D_ALL); @@ -3436,6 +3441,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); + MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } @@ -3627,11 +3633,11 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) return 0; } -static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, - unsigned int offset) +static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, + unsigned int offset) { unsigned long device = intel_gvt_get_device_type(gvt); - struct gvt_mmio_block *block = gvt->mmio.mmio_block; + const struct gvt_mmio_block *block = gvt->mmio.mmio_block; int num = gvt->mmio.num_mmio_block; int i; @@ -3670,7 +3676,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) * accessible (should have no F_CMD_ACCESS flag). * otherwise, need to update cmd_reg_handler in cmd_parser.c */ -static struct gvt_mmio_block mmio_blocks[] = { +static const struct gvt_mmio_block mmio_blocks[] = { {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL}, {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, @@ -3753,7 +3759,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data) { - struct gvt_mmio_block *block = gvt->mmio.mmio_block; + const struct gvt_mmio_block *block = gvt->mmio.mmio_block; struct intel_gvt_mmio_info *e; int i, j, ret; @@ -3871,7 +3877,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio_info; - struct gvt_mmio_block *mmio_block; + const struct gvt_mmio_block *mmio_block; gvt_mmio_func func; int ret; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 614b951d919f..228f623d466d 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -30,6 +30,7 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "gvt.h" #include "trace.h" @@ -176,7 +177,7 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; u32 imr = *(u32 *)p_data; trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg), @@ -206,7 +207,7 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; u32 ier = *(u32 *)p_data; u32 virtual_ier = vgpu_vreg(vgpu, reg); 
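The gvt changes in this series repeatedly apply the same constification pattern: method and description tables (pte_ops, gma_ops, the irq and scheduler policy ops, the pixel-format and vGPU-type tables) become static const, and every pointer that stores them gains a matching const qualifier, so the tables can live in read-only data. A minimal self-contained sketch of that pattern, using hypothetical names rather than the i915 types:

struct demo_ops {
	int (*get)(int v);
};

static int gen8_demo_get(int v)
{
	return v + 1;			/* stand-in implementation */
}

/* The table never changes after build time, so let it live in .rodata. */
static const struct demo_ops gen8_demo_ops = {
	.get = gen8_demo_get,
};

struct demo {
	const struct demo_ops *ops;	/* was: struct demo_ops *ops */
};

static int demo_get(const struct demo *d, int v)
{
	return d->ops->get(v);		/* call sites stay unchanged */
}

int main(void)
{
	struct demo d = { .ops = &gen8_demo_ops };
	return demo_get(&d, 0);
}

Only the table definitions and the pointer declarations change; existing callers compile as before, which is why the conversion can be done table by table.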
@@ -246,7 +247,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; struct intel_gvt_irq_info *info; u32 ier = *(u32 *)p_data; @@ -604,7 +605,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU); } -static struct intel_gvt_irq_ops gen8_irq_ops = { +static const struct intel_gvt_irq_ops gen8_irq_ops = { .init_irq = gen8_init_irq, .check_pending_irq = gen8_check_pending_irq, }; @@ -626,7 +627,7 @@ void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_irq *irq = &gvt->irq; gvt_event_virt_handler_t handler; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; handler = get_event_virt_handler(irq, event); drm_WARN_ON(&i915->drm, !handler); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 6c47d3e33161..b62f04ab47cb 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -35,7 +35,7 @@ #include <linux/hrtimer.h> #include <linux/kernel.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" enum intel_gvt_event_type { RCS_MI_USER_INTERRUPT = 0, @@ -203,7 +203,7 @@ struct intel_gvt_irq_map { /* structure containing device specific IRQ state */ struct intel_gvt_irq { - struct intel_gvt_irq_ops *ops; + const struct intel_gvt_irq_ops *ops; struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX]; DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX); struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX]; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 20b82fb036f8..e8d6c76e9234 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -46,6 +46,8 @@ #include <linux/nospec.h> +#include <drm/drm_edid.h> + #include "i915_drv.h" #include "gvt.h" diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 24210b1eaec5..5db0ef83d522 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -34,8 +34,11 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "gvt.h" +#include "gt/intel_gt_regs.h" + /** * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index f776c470914d..c85bafe7539e 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -35,7 +35,9 @@ #include "i915_drv.h" #include "gt/intel_context.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_ring.h" #include "gvt.h" #include "trace.h" @@ -44,7 +46,7 @@ /* Raw offset is appened to each line for convenience. 
*/ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { - {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ @@ -76,7 +78,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { }; static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { - {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index b6b69777af49..9540813b88e5 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -38,9 +38,9 @@ #include <linux/types.h> +#include "gt/intel_engine_regs.h" #include "gt/intel_engine_types.h" #include "gt/intel_lrc_reg.h" -#include "i915_reg.h" struct i915_request; struct intel_context; diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 244cc7320b54..7d666d34f9ff 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -62,7 +62,6 @@ #define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe)) -#define PLANE_CTL_ASYNC_FLIP (1 << 9) #define REG50080_FLIP_TYPE_MASK 0x3 #define REG50080_FLIP_TYPE_ASYNC 0x1 diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 036b74fe9298..c077fb4674f0 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -368,7 +368,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) vgpu_data->active = false; } -static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { +static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = { .init = tbs_sched_init, .clean = tbs_sched_clean, .init_vgpu = tbs_sched_init_vgpu, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 42a0c9ae0a73..679476da0640 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -38,11 +38,13 @@ #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #include "gt/intel_execlists_submission.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_lrc.h" #include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_gem_gtt.h" +#include "i915_perf_oa_regs.h" #include "gvt.h" #define RING_CTX_OFF(x) \ diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 7c86984a842f..1f391b3da2cc 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -56,7 +56,7 @@ struct intel_gvt_workload_scheduler { wait_queue_head_t waitq[I915_NUM_ENGINES]; void *sched_data; - struct intel_gvt_sched_policy_ops *sched_ops; + const struct intel_gvt_sched_policy_ops *sched_ops; }; #define INDIRECT_CTX_ADDR_MASK 0xffffffc0 diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index fa6b92615799..8dddd0a940a1 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -77,7 +77,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) #define VGPU_WEIGHT(vgpu_num) \ (VGPU_MAX_WEIGHT / (vgpu_num)) -static struct { +static const struct { 
unsigned int low_mm; unsigned int high_mm; unsigned int fence; @@ -88,7 +88,7 @@ static struct { */ unsigned int weight; enum intel_vgpu_edid edid; - char *name; + const char *name; } vgpu_types[] = { /* Fixed vGPU type table */ { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" }, diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index e0403ce9ce69..5f6e41636655 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -25,11 +25,17 @@ * */ +#include <drm/drm_cache.h> + #include "gt/intel_engine.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" +#include "gt/intel_gt_regs.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" #include "i915_memcpy.h" +#include "i915_reg.h" /** * DOC: batch buffer command parser @@ -591,6 +597,10 @@ struct drm_i915_reg_descriptor { { .addr = _reg(idx) }, \ { .addr = _reg ## _UDW(idx) } +#define REG64_BASE_IDX(_reg, base, idx) \ + { .addr = _reg(base, idx) }, \ + { .addr = _reg ## _UDW(base, idx) } + static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(GPGPU_THREADS_DISPATCHED), REG64(HS_INVOCATION_COUNT), @@ -605,8 +615,8 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(PS_INVOCATION_COUNT), REG64(PS_DEPTH_COUNT), REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), - REG64(MI_PREDICATE_SRC0), - REG64(MI_PREDICATE_SRC1), + REG64_IDX(MI_PREDICATE_SRC0, RENDER_RING_BASE), + REG64_IDX(MI_PREDICATE_SRC1, RENDER_RING_BASE), REG32(GEN7_3DPRIM_END_OFFSET), REG32(GEN7_3DPRIM_START_VERTEX), REG32(GEN7_3DPRIM_VERTEX_COUNT), @@ -636,22 +646,22 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { }; static const struct drm_i915_reg_descriptor hsw_render_regs[] = { - REG64_IDX(HSW_CS_GPR, 0), - REG64_IDX(HSW_CS_GPR, 1), - REG64_IDX(HSW_CS_GPR, 2), - REG64_IDX(HSW_CS_GPR, 3), - REG64_IDX(HSW_CS_GPR, 4), - REG64_IDX(HSW_CS_GPR, 5), - REG64_IDX(HSW_CS_GPR, 6), - REG64_IDX(HSW_CS_GPR, 7), - REG64_IDX(HSW_CS_GPR, 8), - REG64_IDX(HSW_CS_GPR, 9), - REG64_IDX(HSW_CS_GPR, 10), - REG64_IDX(HSW_CS_GPR, 11), - REG64_IDX(HSW_CS_GPR, 12), - REG64_IDX(HSW_CS_GPR, 13), - REG64_IDX(HSW_CS_GPR, 14), - REG64_IDX(HSW_CS_GPR, 15), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 0), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 1), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 2), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 3), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 4), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 5), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 6), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 7), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 8), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 9), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 10), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 11), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 12), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 13), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 14), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 15), REG32(HSW_SCRATCH1, .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE, .value = 0), @@ -674,22 +684,22 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { REG32(BCS_SWCTRL), REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), - REG64_IDX(BCS_GPR, 0), - REG64_IDX(BCS_GPR, 1), - REG64_IDX(BCS_GPR, 2), - REG64_IDX(BCS_GPR, 3), - 
REG64_IDX(BCS_GPR, 4), - REG64_IDX(BCS_GPR, 5), - REG64_IDX(BCS_GPR, 6), - REG64_IDX(BCS_GPR, 7), - REG64_IDX(BCS_GPR, 8), - REG64_IDX(BCS_GPR, 9), - REG64_IDX(BCS_GPR, 10), - REG64_IDX(BCS_GPR, 11), - REG64_IDX(BCS_GPR, 12), - REG64_IDX(BCS_GPR, 13), - REG64_IDX(BCS_GPR, 14), - REG64_IDX(BCS_GPR, 15), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 0), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 1), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 2), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 3), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 4), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 5), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 6), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 7), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 8), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 9), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 10), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 11), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 12), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 13), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 14), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 15), }; #undef REG64 diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.h b/drivers/gpu/drm/i915/i915_cmd_parser.h new file mode 100644 index 000000000000..ba70ac6c97cd --- /dev/null +++ b/drivers/gpu/drm/i915/i915_cmd_parser.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_CMD_PARSER_H__ +#define __I915_CMD_PARSER_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_engine_cs; +struct i915_vma; + +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); +void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); +int intel_engine_cmd_parser(struct intel_engine_cs *engine, + struct i915_vma *batch, + unsigned long batch_offset, + unsigned long batch_length, + struct i915_vma *shadow, + bool trampoline); +#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 + +#endif /* __I915_CMD_PARSER_H__ */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f3141b58d912..946bbe57bfe5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -38,6 +38,7 @@ #include "gt/intel_gt_debugfs.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_pm_debugfs.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_gt_requests.h" #include "gt/intel_rc6.h" #include "gt/intel_reset.h" @@ -48,6 +49,7 @@ #include "i915_debugfs_params.h" #include "i915_irq.h" #include "i915_scheduler.h" +#include "intel_mchbar_regs.h" #include "intel_pm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) @@ -136,6 +138,17 @@ static const char *stringify_vma_type(const struct i915_vma *vma) return "ppgtt"; } +static const char *i915_cache_level_str(struct drm_i915_private *i915, int type) +{ + switch (type) { + case I915_CACHE_NONE: return " uncached"; + case I915_CACHE_LLC: return HAS_LLC(i915) ? 
" LLC" : " snooped"; + case I915_CACHE_L3_LLC: return " L3+LLC"; + case I915_CACHE_WT: return " WT"; + default: return ""; + } +} + void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 5f2343389b5e..1c67ff735f18 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -62,6 +62,8 @@ #include "display/intel_vga.h" #include "gem/i915_gem_context.h" +#include "gem/i915_gem_create.h" +#include "gem/i915_gem_dmabuf.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_mman.h" #include "gem/i915_gem_pm.h" @@ -71,10 +73,13 @@ #include "pxp/intel_pxp_pm.h" +#include "i915_file_private.h" #include "i915_debugfs.h" #include "i915_driver.h" #include "i915_drv.h" +#include "i915_getparam.h" #include "i915_ioc32.h" +#include "i915_ioctl.h" #include "i915_irq.h" #include "i915_memcpy.h" #include "i915_perf.h" @@ -86,6 +91,7 @@ #include "intel_dram.h" #include "intel_gvt.h" #include "intel_memory_region.h" +#include "intel_pci_config.h" #include "intel_pcode.h" #include "intel_pm.h" #include "intel_region_ttm.h" @@ -1820,6 +1826,21 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), }; +/* + * Interface history: + * + * 1.1: Original. + * 1.2: Add Power Management + * 1.3: Add vblank support + * 1.4: Fix cmdbuffer path, add heap destroy + * 1.5: Add vblank pipe configuration + * 1.6: - New ioctl for scheduling buffer swaps on vertical blank + * - Support vertical blank on secondary display pipe + */ +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 6 +#define DRIVER_PATCHLEVEL 0 + static const struct drm_driver i915_drm_driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. 
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h index 9ef8db4aa0a6..9d11de65daaf 100644 --- a/drivers/gpu/drm/i915/i915_driver.h +++ b/drivers/gpu/drm/i915/i915_driver.h @@ -12,6 +12,11 @@ struct pci_dev; struct pci_device_id; struct drm_i915_private; +#define DRIVER_NAME "i915" +#define DRIVER_DESC "Intel Graphics" +#define DRIVER_DATE "20201103" +#define DRIVER_TIMESTAMP 1604406085 + extern const struct dev_pm_ops i915_pm_ops; int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9b48dd9ccacd..f600d1cb01b3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -31,54 +31,34 @@ #define _I915_DRV_H_ #include <uapi/drm/i915_drm.h> -#include <uapi/drm/drm_fourcc.h> #include <asm/hypervisor.h> -#include <linux/io-mapping.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> -#include <linux/backlight.h> -#include <linux/hash.h> #include <linux/intel-iommu.h> -#include <linux/kref.h> -#include <linux/mm_types.h> -#include <linux/perf_event.h> #include <linux/pm_qos.h> -#include <linux/dma-resv.h> -#include <linux/shmem_fs.h> -#include <linux/stackdepot.h> -#include <linux/xarray.h> - -#include <drm/drm_gem.h> -#include <drm/drm_auth.h> -#include <drm/drm_cache.h> -#include <drm/drm_util.h> -#include <drm/drm_dsc.h> -#include <drm/drm_atomic.h> + #include <drm/drm_connector.h> -#include <drm/i915_mei_hdcp_interface.h> #include <drm/ttm/ttm_device.h> -#include "i915_params.h" -#include "i915_reg.h" -#include "i915_utils.h" - #include "display/intel_bios.h" +#include "display/intel_cdclk.h" #include "display/intel_display.h" #include "display/intel_display_power.h" #include "display/intel_dmc.h" #include "display/intel_dpll_mgr.h" #include "display/intel_dsb.h" +#include "display/intel_fbc.h" #include "display/intel_frontbuffer.h" #include "display/intel_global_state.h" #include "display/intel_gmbus.h" #include "display/intel_opregion.h" #include "gem/i915_gem_context_types.h" +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_shrinker.h" #include "gem/i915_gem_stolen.h" -#include "gem/i915_gem_lmem.h" #include "gt/intel_engine.h" #include "gt/intel_gt_types.h" @@ -86,6 +66,12 @@ #include "gt/intel_workarounds.h" #include "gt/uc/intel_uc.h" +#include "i915_gem.h" +#include "i915_gpu_error.h" +#include "i915_params.h" +#include "i915_perf_types.h" +#include "i915_scheduler.h" +#include "i915_utils.h" #include "intel_device_info.h" #include "intel_memory_region.h" #include "intel_pch.h" @@ -93,29 +79,32 @@ #include "intel_runtime_pm.h" #include "intel_step.h" #include "intel_uncore.h" -#include "intel_wakeref.h" #include "intel_wopcm.h" -#include "i915_gem.h" -#include "i915_gem_gtt.h" -#include "i915_gpu_error.h" -#include "i915_perf_types.h" -#include "i915_request.h" -#include "i915_scheduler.h" -#include "gt/intel_timeline.h" -#include "i915_vma.h" -#include "i915_irq.h" - - -/* General customization: - */ - -#define DRIVER_NAME "i915" -#define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20201103" -#define DRIVER_TIMESTAMP 1604406085 - +struct dpll; +struct drm_i915_clock_gating_funcs; struct drm_i915_gem_object; +struct drm_i915_private; +struct intel_atomic_state; +struct intel_audio_funcs; +struct intel_cdclk_config; +struct intel_cdclk_funcs; +struct intel_cdclk_state; +struct intel_cdclk_vals; +struct intel_color_funcs; +struct intel_connector; +struct intel_crtc; +struct intel_dp; +struct 
intel_dpll_funcs; +struct intel_encoder; +struct intel_fbdev; +struct intel_fdi_funcs; +struct intel_hotplug_funcs; +struct intel_initial_plane_config; +struct intel_limit; +struct intel_overlay; +struct intel_overlay_error_state; +struct vlv_s0ix_state; /* Threshold == 5 for long IRQs, 50 for short */ #define HPD_STORM_DEFAULT_THRESHOLD 50 @@ -166,117 +155,6 @@ struct i915_hotplug { I915_GEM_DOMAIN_INSTRUCTION | \ I915_GEM_DOMAIN_VERTEX) -struct drm_i915_private; - -struct drm_i915_file_private { - struct drm_i915_private *dev_priv; - - union { - struct drm_file *file; - struct rcu_head rcu; - }; - - /** @proto_context_lock: Guards all struct i915_gem_proto_context - * operations - * - * This not only guards @proto_context_xa, but is always held - * whenever we manipulate any struct i915_gem_proto_context, - * including finalizing it on first actual use of the GEM context. - * - * See i915_gem_proto_context. - */ - struct mutex proto_context_lock; - - /** @proto_context_xa: xarray of struct i915_gem_proto_context - * - * Historically, the context uAPI allowed for two methods of - * setting context parameters: SET_CONTEXT_PARAM and - * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called - * at any time while the later happens as part of - * GEM_CONTEXT_CREATE. Everything settable via one was settable - * via the other. While some params are fairly simple and setting - * them on a live context is harmless such as the context priority, - * others are far trickier such as the VM or the set of engines. - * In order to swap out the VM, for instance, we have to delay - * until all current in-flight work is complete, swap in the new - * VM, and then continue. This leads to a plethora of potential - * race conditions we'd really rather avoid. - * - * We have since disallowed setting these more complex parameters - * on active contexts. This works by delaying the creation of the - * actual context until after the client is done configuring it - * with SET_CONTEXT_PARAM. From the perspective of the client, it - * has the same u32 context ID the whole time. From the - * perspective of i915, however, it's a struct i915_gem_proto_context - * right up until the point where we attempt to do something which - * the proto-context can't handle. Then the struct i915_gem_context - * gets created. - * - * This is accomplished via a little xarray dance. When - * GEM_CONTEXT_CREATE is called, we create a struct - * i915_gem_proto_context, reserve a slot in @context_xa but leave - * it NULL, and place the proto-context in the corresponding slot - * in @proto_context_xa. Then, in i915_gem_context_lookup(), we - * first check @context_xa. If it's there, we return the struct - * i915_gem_context and we're done. If it's not, we look in - * @proto_context_xa and, if we find it there, we create the actual - * context and kill the proto-context. - * - * In order for this dance to work properly, everything which ever - * touches a struct i915_gem_proto_context is guarded by - * @proto_context_lock, including context creation. Yes, this - * means context creation now takes a giant global lock but it - * can't really be helped and that should never be on any driver's - * fast-path anyway. - */ - struct xarray proto_context_xa; - - /** @context_xa: xarray of fully created i915_gem_context - * - * Write access to this xarray is guarded by @proto_context_lock. - * Otherwise, writers may race with finalize_create_context_locked(). - * - * See @proto_context_xa. 
- */ - struct xarray context_xa; - struct xarray vm_xa; - - unsigned int bsd_engine; - -/* - * Every context ban increments per client ban score. Also - * hangs in short succession increments ban score. If ban threshold - * is reached, client is considered banned and submitting more work - * will fail. This is a stop gap measure to limit the badly behaving - * clients access to gpu. Note that unbannable contexts never increment - * the client ban score. - */ -#define I915_CLIENT_SCORE_HANG_FAST 1 -#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ) -#define I915_CLIENT_SCORE_CONTEXT_BAN 3 -#define I915_CLIENT_SCORE_BANNED 9 - /** ban_score: Accumulated score of all ctx bans and fast hangs. */ - atomic_t ban_score; - unsigned long hang_timestamp; -}; - -/* Interface history: - * - * 1.1: Original. - * 1.2: Add Power Management - * 1.3: Add vblank support - * 1.4: Fix cmdbuffer path, add heap destroy - * 1.5: Add vblank pipe configuration - * 1.6: - New ioctl for scheduling buffer swaps on vertical blank - * - Support vertical blank on secondary display pipe - */ -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 6 -#define DRIVER_PATCHLEVEL 0 - -struct intel_overlay; -struct intel_overlay_error_state; - struct sdvo_device_mapping { u8 initialized; u8 dvo_port; @@ -286,22 +164,6 @@ struct sdvo_device_mapping { u8 ddc_pin; }; -struct intel_connector; -struct intel_encoder; -struct intel_atomic_state; -struct intel_cdclk_config; -struct intel_cdclk_state; -struct intel_cdclk_vals; -struct intel_initial_plane_config; -struct intel_crtc; -struct intel_limit; -struct dpll; - -/* functions used internal in intel_pm.c */ -struct drm_i915_clock_gating_funcs { - void (*init_clock_gating)(struct drm_i915_private *dev_priv); -}; - /* functions used for watermark calcs for display. */ struct drm_i915_wm_disp_funcs { /* update_wm is for legacy wm management */ @@ -319,49 +181,6 @@ struct drm_i915_wm_disp_funcs { int (*compute_global_watermarks)(struct intel_atomic_state *state); }; -struct intel_color_funcs { - int (*color_check)(struct intel_crtc_state *crtc_state); - /* - * Program double buffered color management registers during - * vblank evasion. The registers should then latch during the - * next vblank start, alongside any other double buffered registers - * involved with the same commit. - */ - void (*color_commit)(const struct intel_crtc_state *crtc_state); - /* - * Load LUTs (and other single buffered color management - * registers). Will (hopefully) be called during the vblank - * following the latching of any double buffered registers - * involved with the same commit. 
- */ - void (*load_luts)(const struct intel_crtc_state *crtc_state); - void (*read_luts)(struct intel_crtc_state *crtc_state); -}; - -struct intel_cdclk_funcs { - void (*get_cdclk)(struct drm_i915_private *dev_priv, - struct intel_cdclk_config *cdclk_config); - void (*set_cdclk)(struct drm_i915_private *dev_priv, - const struct intel_cdclk_config *cdclk_config, - enum pipe pipe); - int (*bw_calc_min_cdclk)(struct intel_atomic_state *state); - int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); - u8 (*calc_voltage_level)(int cdclk); -}; - -struct intel_hotplug_funcs { - void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); -}; - -struct intel_fdi_funcs { - void (*fdi_link_train)(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state); -}; - -struct intel_dpll_funcs { - int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state); -}; - struct drm_i915_display_funcs { /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. */ @@ -395,7 +214,6 @@ enum drrs_support_type { SEAMLESS_DRRS_SUPPORT = 2 }; -struct intel_dp; struct i915_drrs { struct mutex mutex; struct delayed_work work; @@ -413,8 +231,6 @@ struct i915_drrs { #define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) -struct intel_fbdev; - struct intel_gmbus { struct i2c_adapter adapter; #define GMBUS_FORCE_BIT_RETRY (1U << 31) @@ -433,8 +249,6 @@ struct i915_suspend_saved_registers { u16 saveGCDGMBUS; }; -struct vlv_s0ix_state; - #define MAX_L3_SLICES 2 struct intel_l3_parity { u32 *remap_info[MAX_L3_SLICES]; @@ -523,13 +337,6 @@ i915_fence_timeout(const struct drm_i915_private *i915) /* Amount of PSF GV points, BSpec precisely defines this */ #define I915_NUM_PSF_GV_POINTS 3 -enum psr_lines_to_wait { - PSR_0_LINES_TO_WAIT = 0, - PSR_1_LINE_TO_WAIT, - PSR_4_LINES_TO_WAIT, - PSR_8_LINES_TO_WAIT -}; - struct intel_vbt_data { /* bdb version */ u16 version; @@ -550,6 +357,9 @@ struct intel_vbt_data { unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ enum drm_panel_orientation orientation; + bool override_afc_startup; + u8 override_afc_startup_val; + enum drrs_support_type drrs_type; struct { @@ -569,7 +379,6 @@ struct intel_vbt_data { bool full_link; bool require_aux_wakeup; int idle_frames; - enum psr_lines_to_wait lines_to_wait; int tp1_wakeup_time_us; int tp2_tp3_wakeup_time_us; int psr2_tp2_tp3_wakeup_time_us; @@ -625,18 +434,12 @@ struct i915_virtual_gpu { u32 caps; }; -struct intel_cdclk_config { - unsigned int cdclk, vco, ref, bypass; - u8 voltage_level; -}; - struct i915_selftest_stash { atomic_t counter; struct ida mock_region_instances; }; /* intel_audio.c private */ -struct intel_audio_funcs; struct intel_audio_private { /* Display internal audio functions */ const struct intel_audio_funcs *funcs; @@ -749,7 +552,7 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; - struct intel_fbc *fbc; + struct intel_fbc *fbc[I915_MAX_FBCS]; struct i915_drrs drrs; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -1288,6 +1091,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12) #define IS_ADLS_RPLS(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) +#define IS_ADLP_N(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 
0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -1486,8 +1291,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) -#define HAS_GMBUS_IRQ(dev_priv) (GRAPHICS_VER(dev_priv) >= 4) -#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 11 || \ +#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4) +#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \ IS_GEMINILAKE(dev_priv) || \ IS_KABYLAKE(dev_priv)) @@ -1499,9 +1304,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv) #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) -#define HAS_FW_BLC(dev_priv) (GRAPHICS_VER(dev_priv) > 2) -#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc) -#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && GRAPHICS_VER(dev_priv) >= 7) +#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2) +#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0) +#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) @@ -1514,7 +1319,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) -#define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12) +#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12) #define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) @@ -1525,7 +1330,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc) -#define HAS_MSO(i915) (GRAPHICS_VER(i915) >= 12) +#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12) #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) @@ -1558,7 +1363,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) -#define HAS_LSPCON(dev_priv) (IS_GRAPHICS_VER(dev_priv, 9, 10)) +#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10)) /* DPF == dynamic parity feature */ #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) @@ -1572,7 +1377,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) -#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11) +#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) @@ -1605,7 +1410,7 @@ i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p); static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) { - return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); + return DISPLAY_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); } static inline bool @@ -1620,13 +1425,7 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); } -/* i915_getparam.c */ -int i915_getparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - /* i915_gem.c */ -int i915_gem_init_userptr(struct drm_i915_private 
*dev_priv); -void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); void i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); @@ -1689,79 +1488,16 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); -int i915_gem_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args); - int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); -static inline u32 i915_reset_count(struct i915_gpu_error *error) -{ - return atomic_read(&error->reset_count); -} - -static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, - const struct intel_engine_cs *engine) -{ - return atomic_read(&error->reset_engine_count[engine->uabi_class]); -} - int __must_check i915_gem_init(struct drm_i915_private *dev_priv); void i915_gem_driver_register(struct drm_i915_private *i915); void i915_gem_driver_unregister(struct drm_i915_private *i915); void i915_gem_driver_remove(struct drm_i915_private *dev_priv); void i915_gem_driver_release(struct drm_i915_private *dev_priv); -void i915_gem_suspend(struct drm_i915_private *dev_priv); -void i915_gem_suspend_late(struct drm_i915_private *dev_priv); -void i915_gem_resume(struct drm_i915_private *dev_priv); int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); -int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level); - -struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf); - -struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags); - -static inline struct i915_address_space * -i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) -{ - struct i915_address_space *vm; - - xa_lock(&file_priv->vm_xa); - vm = xa_load(&file_priv->vm_xa, id); - if (vm) - kref_get(&vm->ref); - xa_unlock(&file_priv->vm_xa); - - return vm; -} - -/* i915_gem_evict.c */ -int __must_check i915_gem_evict_something(struct i915_address_space *vm, - struct i915_gem_ww_ctx *ww, - u64 min_size, u64 alignment, - unsigned long color, - u64 start, u64 end, - unsigned flags); -int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, - struct i915_gem_ww_ctx *ww, - struct drm_mm_node *node, - unsigned int flags); -int i915_gem_evict_vm(struct i915_address_space *vm, - struct i915_gem_ww_ctx *ww); - -/* i915_gem_internal.c */ -struct drm_i915_gem_object * -i915_gem_object_create_internal(struct drm_i915_private *dev_priv, - phys_addr_t size); -struct drm_i915_gem_object * -__i915_gem_object_create_internal(struct drm_i915_private *dev_priv, - const struct drm_i915_gem_object_ops *ops, - phys_addr_t size); - /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { @@ -1771,25 +1507,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec i915_gem_object_is_tiled(obj); } -u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, - unsigned int tiling, unsigned int stride); -u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, - unsigned int tiling, unsigned int stride); - -const char *i915_cache_level_str(struct drm_i915_private *i915, int type); - -/* i915_cmd_parser.c */ -int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); -int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); -void 
intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -int intel_engine_cmd_parser(struct intel_engine_cs *engine, - struct i915_vma *batch, - unsigned long batch_offset, - unsigned long batch_length, - struct i915_vma *shadow, - bool trampoline); -#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 - /* intel_device_info.c */ static inline struct intel_device_info * mkwrite_device_info(struct drm_i915_private *dev_priv) @@ -1797,17 +1514,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) return (struct intel_device_info *)INTEL_INFO(dev_priv); } -int i915_reg_read_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); - -static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) -{ - if (GRAPHICS_VER(i915) >= 11) - return ICL_HWS_CSB_WRITE_INDEX; - else - return I915_HWS_CSB_WRITE_INDEX; -} - static inline enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, bool always_coherent) diff --git a/drivers/gpu/drm/i915/i915_file_private.h b/drivers/gpu/drm/i915/i915_file_private.h new file mode 100644 index 000000000000..fb16cc431b2a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_file_private.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_FILE_PRIVATE_H__ +#define __I915_FILE_PRIVATE_H__ + +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/xarray.h> + +struct drm_i915_private; +struct drm_file; + +struct drm_i915_file_private { + struct drm_i915_private *dev_priv; + + union { + struct drm_file *file; + struct rcu_head rcu; + }; + + /** @proto_context_lock: Guards all struct i915_gem_proto_context + * operations + * + * This not only guards @proto_context_xa, but is always held + * whenever we manipulate any struct i915_gem_proto_context, + * including finalizing it on first actual use of the GEM context. + * + * See i915_gem_proto_context. + */ + struct mutex proto_context_lock; + + /** @proto_context_xa: xarray of struct i915_gem_proto_context + * + * Historically, the context uAPI allowed for two methods of + * setting context parameters: SET_CONTEXT_PARAM and + * CONTEXT_CREATE_EXT_SETPARAM. The former is allowed to be called + * at any time while the later happens as part of + * GEM_CONTEXT_CREATE. Everything settable via one was settable + * via the other. While some params are fairly simple and setting + * them on a live context is harmless such as the context priority, + * others are far trickier such as the VM or the set of engines. + * In order to swap out the VM, for instance, we have to delay + * until all current in-flight work is complete, swap in the new + * VM, and then continue. This leads to a plethora of potential + * race conditions we'd really rather avoid. + * + * We have since disallowed setting these more complex parameters + * on active contexts. This works by delaying the creation of the + * actual context until after the client is done configuring it + * with SET_CONTEXT_PARAM. From the perspective of the client, it + * has the same u32 context ID the whole time. From the + * perspective of i915, however, it's a struct i915_gem_proto_context + * right up until the point where we attempt to do something which + * the proto-context can't handle. Then the struct i915_gem_context + * gets created. + * + * This is accomplished via a little xarray dance. 
When + * GEM_CONTEXT_CREATE is called, we create a struct + * i915_gem_proto_context, reserve a slot in @context_xa but leave + * it NULL, and place the proto-context in the corresponding slot + * in @proto_context_xa. Then, in i915_gem_context_lookup(), we + * first check @context_xa. If it's there, we return the struct + * i915_gem_context and we're done. If it's not, we look in + * @proto_context_xa and, if we find it there, we create the actual + * context and kill the proto-context. + * + * In order for this dance to work properly, everything which ever + * touches a struct i915_gem_proto_context is guarded by + * @proto_context_lock, including context creation. Yes, this + * means context creation now takes a giant global lock but it + * can't really be helped and that should never be on any driver's + * fast-path anyway. + */ + struct xarray proto_context_xa; + + /** @context_xa: xarray of fully created i915_gem_context + * + * Write access to this xarray is guarded by @proto_context_lock. + * Otherwise, writers may race with finalize_create_context_locked(). + * + * See @proto_context_xa. + */ + struct xarray context_xa; + struct xarray vm_xa; + + unsigned int bsd_engine; + +/* + * Every context ban increments per client ban score. Also + * hangs in short succession increments ban score. If ban threshold + * is reached, client is considered banned and submitting more work + * will fail. This is a stop gap measure to limit the badly behaving + * clients access to gpu. Note that unbannable contexts never increment + * the client ban score. + */ +#define I915_CLIENT_SCORE_HANG_FAST 1 +#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ) +#define I915_CLIENT_SCORE_CONTEXT_BAN 3 +#define I915_CLIENT_SCORE_BANNED 9 + /** ban_score: Accumulated score of all ctx bans and fast hangs. 
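[Editor's note] The thresholds defined just above combine in a simple way: a context ban adds 3 to the client score, a hang following the previous one within 60 seconds (60 * HZ jiffies) adds 1, and once the score reaches 9 the client is treated as banned, so three context bans or nine fast hangs are enough. A hedged sketch, with an assumed helper name and call site:

static bool example_client_ban_update(struct drm_i915_file_private *file_priv,
				      bool banned_context, unsigned long now)
{
	if (banned_context)
		atomic_add(I915_CLIENT_SCORE_CONTEXT_BAN, &file_priv->ban_score);
	else if (time_before(now, file_priv->hang_timestamp +
				  I915_CLIENT_FAST_HANG_JIFFIES))
		atomic_add(I915_CLIENT_SCORE_HANG_FAST, &file_priv->ban_score);

	file_priv->hang_timestamp = now;

	/* 3 * I915_CLIENT_SCORE_CONTEXT_BAN == I915_CLIENT_SCORE_BANNED */
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}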
*/ + atomic_t ban_score; + unsigned long hang_timestamp; +}; + +#endif /* __I915_FILE_PRIVATE_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bb65563296b5..2e10187cd0a0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -25,7 +25,6 @@ * */ -#include <drm/drm_vma_manager.h> #include <linux/dma-fence-array.h> #include <linux/kthread.h> #include <linux/dma-resv.h> @@ -37,6 +36,9 @@ #include <linux/dma-buf.h> #include <linux/mman.h> +#include <drm/drm_cache.h> +#include <drm/drm_vma_manager.h> + #include "display/intel_display.h" #include "display/intel_frontbuffer.h" @@ -44,16 +46,18 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_mman.h" +#include "gem/i915_gem_pm.h" #include "gem/i915_gem_region.h" +#include "gem/i915_gem_userptr.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_workarounds.h" #include "i915_drv.h" +#include "i915_file_private.h" #include "i915_trace.h" #include "i915_vgpu.h" - #include "intel_pm.h" static int diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3483d4ee235b..f025ee4fa526 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -31,6 +31,7 @@ #include "gt/intel_gt_requests.h" #include "i915_drv.h" +#include "i915_gem_evict.h" #include "i915_trace.h" I915_SELFTEST_DECLARE(static struct igt_evict_ctl { diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h new file mode 100644 index 000000000000..e593c530f9bd --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_evict.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_GEM_EVICT_H__ +#define __I915_GEM_EVICT_H__ + +#include <linux/types.h> + +struct drm_mm_node; +struct i915_address_space; +struct i915_gem_ww_ctx; + +int __must_check i915_gem_evict_something(struct i915_address_space *vm, + struct i915_gem_ww_ctx *ww, + u64 min_size, u64 alignment, + unsigned long color, + u64 start, u64 end, + unsigned flags); +int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, + struct i915_gem_ww_ctx *ww, + struct drm_mm_node *node, + unsigned int flags); +int i915_gem_evict_vm(struct i915_address_space *vm, + struct i915_gem_ww_ctx *ww); + +#endif /* __I915_GEM_EVICT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 048ad921f952..329ff75b80b9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -20,6 +20,7 @@ #include "gt/intel_gt_requests.h" #include "i915_drv.h" +#include "i915_gem_evict.h" #include "i915_scatterlist.h" #include "i915_trace.h" #include "i915_vgpu.h" diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 5b8a2157d797..c12a0adefda5 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -5,7 +5,9 @@ #include "gem/i915_gem_mman.h" #include "gt/intel_engine_user.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" +#include "i915_getparam.h" #include "i915_perf.h" int i915_getparam_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_getparam.h b/drivers/gpu/drm/i915/i915_getparam.h new file mode 100644 index 000000000000..18e4752e8f70 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_getparam.h @@ -0,0 +1,15 @@ +/* 
SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_GETPARAM_H__ +#define __I915_GETPARAM_H__ + +struct drm_device; +struct drm_file; + +int i915_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#endif /* __I915_GETPARAM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9c3f53579fe9..1d042551619e 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -34,6 +34,7 @@ #include <linux/utsname.h> #include <linux/zlib.h> +#include <drm/drm_cache.h> #include <drm/drm_print.h> #include "display/intel_dmc.h" @@ -41,9 +42,12 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_regs.h" +#include "i915_driver.h" #include "i915_drv.h" #include "i915_gpu_error.h" #include "i915_memcpy.h" diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 5aedf5129814..903d838e2e63 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -210,6 +210,17 @@ struct drm_i915_error_state_buf { int err; }; +static inline u32 i915_reset_count(struct i915_gpu_error *error) +{ + return atomic_read(&error->reset_count); +} + +static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, + const struct intel_engine_cs *engine) +{ + return atomic_read(&error->reset_engine_count[engine->uabi_class]); +} + #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) __printf(2, 3) diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 55b97c3a3dde..33348960f623 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -31,6 +31,7 @@ #include <drm/drm_ioctl.h> #include "i915_drv.h" +#include "i915_getparam.h" #include "i915_ioc32.h" struct drm_i915_getparam32 { diff --git a/drivers/gpu/drm/i915/i915_ioctl.c b/drivers/gpu/drm/i915/i915_ioctl.c new file mode 100644 index 000000000000..06a10ccea80b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ioctl.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "gt/intel_engine_regs.h" + +#include "i915_drv.h" +#include "i915_gem.h" +#include "i915_ioctl.h" +#include "i915_reg.h" +#include "intel_runtime_pm.h" +#include "intel_uncore.h" + +/* + * This file is for small ioctl functions that are out of place everywhere else, + * and not big enough to warrant a file of their own. + * + * This is not the dumping ground for random ioctls. 
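[Editor's note] The main occupant of this new file is the register read ioctl carried over below, which only allows whitelisted offsets such as the render ring timestamp. From userspace the whitelisted read looks roughly like this sketch; the raw 0x2358 offset corresponds to RING_TIMESTAMP(RENDER_RING_BASE) and the include paths are assumptions:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int example_read_render_timestamp(int drm_fd, uint64_t *ts)
{
	struct drm_i915_reg_read reg = {
		/*
		 * The low bits of the offset carry flags; I915_REG_READ_8B_WA
		 * requests the 2x32 read workaround for the 64-bit timestamp.
		 */
		.offset = 0x2358 | I915_REG_READ_8B_WA,
	};
	int ret;

	ret = drmIoctl(drm_fd, DRM_IOCTL_I915_REG_READ, &reg);
	if (ret == 0)
		*ts = reg.val;

	return ret;
}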
+ */ + +struct reg_whitelist { + i915_reg_t offset_ldw; + i915_reg_t offset_udw; + u8 min_graphics_ver; + u8 max_graphics_ver; + u8 size; +}; + +static const struct reg_whitelist reg_read_whitelist[] = { + { + .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), + .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), + .min_graphics_ver = 4, + .max_graphics_ver = 12, + .size = 8 + } +}; + +int i915_reg_read_ioctl(struct drm_device *dev, + void *data, struct drm_file *unused) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct intel_uncore *uncore = &i915->uncore; + struct drm_i915_reg_read *reg = data; + struct reg_whitelist const *entry; + intel_wakeref_t wakeref; + unsigned int flags; + int remain; + int ret = 0; + + entry = reg_read_whitelist; + remain = ARRAY_SIZE(reg_read_whitelist); + while (remain) { + u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); + + GEM_BUG_ON(!is_power_of_2(entry->size)); + GEM_BUG_ON(entry->size > 8); + GEM_BUG_ON(entry_offset & (entry->size - 1)); + + if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) && + entry_offset == (reg->offset & -entry->size)) + break; + entry++; + remain--; + } + + if (!remain) + return -EINVAL; + + flags = reg->offset & (entry->size - 1); + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = intel_uncore_read64_2x32(uncore, + entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = intel_uncore_read64(uncore, + entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = intel_uncore_read(uncore, entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = intel_uncore_read16(uncore, + entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = intel_uncore_read8(uncore, + entry->offset_ldw); + else + ret = -EINVAL; + } + + return ret; +} diff --git a/drivers/gpu/drm/i915/i915_ioctl.h b/drivers/gpu/drm/i915/i915_ioctl.h new file mode 100644 index 000000000000..f16ae87b8b8a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ioctl.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_IOCTL_H__ +#define __I915_IOCTL_H__ + +struct drm_device; +struct drm_file; + +int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); + +#endif /* __I915_IOCTL_H__ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 21f75b069fa8..73cebc6aa650 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -34,6 +34,7 @@ #include <drm/drm_drv.h> +#include "display/icl_dsi_regs.h" #include "display/intel_de.h" #include "display/intel_display_trace.h" #include "display/intel_display_types.h" @@ -46,8 +47,10 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_rps.h" +#include "i915_driver.h" #include "i915_drv.h" #include "i915_irq.h" #include "intel_pm.h" @@ -177,6 +180,7 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D), + [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1), }; static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) @@ -836,10 +840,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; - if 
(DISPLAY_VER(dev_priv) == 2) - position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; - else - position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; /* * On HSW, the DSL reg (0x70000) appears to return 0 if we @@ -858,7 +859,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) for (i = 0; i < 100; i++) { udelay(1); - temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; if (temp != position) { position = temp; break; @@ -4349,6 +4350,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) return ret; } +struct intel_hotplug_funcs { + void (*hpd_irq_setup)(struct drm_i915_private *i915); +}; + #define HPD_FUNCS(platform) \ static const struct intel_hotplug_funcs platform##_hpd_funcs = { \ .hpd_irq_setup = platform##_hpd_irq_setup, \ @@ -4363,6 +4368,12 @@ HPD_FUNCS(spt); HPD_FUNCS(ilk); #undef HPD_FUNCS +void intel_hpd_irq_setup(struct drm_i915_private *i915) +{ + if (i915->display_irqs_enabled && i915->hotplug_funcs) + i915->hotplug_funcs->hpd_irq_setup(i915); +} + /** * intel_irq_init - initializes irq support * @dev_priv: i915 device instance @@ -4415,7 +4426,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (I915_HAS_HOTPLUG(dev_priv)) dev_priv->hotplug_funcs = &i915_hpd_funcs; } else { - if (HAS_PCH_DG1(dev_priv)) + if (HAS_PCH_DG2(dev_priv)) + dev_priv->hotplug_funcs = &icp_hpd_funcs; + else if (HAS_PCH_DG1(dev_priv)) dev_priv->hotplug_funcs = &dg1_hpd_funcs; else if (DISPLAY_VER(dev_priv) >= 11) dev_priv->hotplug_funcs = &gen11_hpd_funcs; diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 0eb90d271fa7..82639d9d7e82 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -37,6 +37,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); +void intel_hpd_irq_setup(struct drm_i915_private *i915); void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, u32 mask, u32 bits); diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c index 84f12598d145..def7302ef7fe 100644 --- a/drivers/gpu/drm/i915/i915_mitigations.c +++ b/drivers/gpu/drm/i915/i915_mitigations.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/string.h> +#include "i915_driver.h" #include "i915_drv.h" #include "i915_mitigations.h" diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h index 76f1d53bdf34..04c8974d822b 100644 --- a/drivers/gpu/drm/i915/i915_mm.h +++ b/drivers/gpu/drm/i915/i915_mm.h @@ -6,6 +6,7 @@ #ifndef __I915_MM_H__ #define __I915_MM_H__ +#include <linux/bug.h> #include <linux/types.h> struct vm_area_struct; @@ -22,8 +23,7 @@ int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, struct io_mapping *iomap) { - pr_err("Architecture has no %s() and shouldn't be calling this function\n", __func__); - WARN_ON_ONCE(1); + WARN_ONCE(1, "Architecture has no drm_cache.c support\n"); return 0; } #endif diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c index 4d324638aba5..65acd7bf75d0 100644 --- a/drivers/gpu/drm/i915/i915_module.c +++ b/drivers/gpu/drm/i915/i915_module.c @@ -9,6 +9,7 @@ #include 
"gem/i915_gem_context.h" #include "gem/i915_gem_object.h" #include "i915_active.h" +#include "i915_driver.h" #include "i915_params.h" #include "i915_pci.h" #include "i915_perf.h" diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 55333ccd1e6d..8246cbe9b01d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -22,12 +22,14 @@ * */ +#include <drm/drm_color_mgmt.h> #include <drm/drm_drv.h> #include <drm/i915_pciids.h> #include "i915_driver.h" #include "i915_drv.h" #include "i915_pci.h" +#include "i915_reg.h" #define PLATFORM(x) .platform = (x) #define GEN(x) \ @@ -214,13 +216,13 @@ static const struct intel_device_info i845g_info = { static const struct intel_device_info i85x_info = { I830_FEATURES, PLATFORM(INTEL_I85X), - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN3_FEATURES \ @@ -258,7 +260,7 @@ static const struct intel_device_info i915gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -283,7 +285,7 @@ static const struct intel_device_info i945gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -342,7 +344,7 @@ static const struct intel_device_info i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), .is_mobile = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .display.has_overlay = 1, .display.supports_tv = 1, .hws_needs_physical = 1, @@ -360,7 +362,7 @@ static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .display.supports_tv = 1, .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, @@ -393,7 +395,7 @@ static const struct intel_device_info ilk_m_info = { PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, .has_rps = true, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN6_FEATURES \ @@ -401,7 +403,7 @@ static const struct intel_device_info ilk_m_info = { .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -452,7 +454,7 @@ static const struct intel_device_info snb_m_gt2_info = { .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -693,7 +695,7 @@ static const struct intel_device_info skl_gt4_info = { .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .display.has_hdcp = 1, \ .display.has_psr = 1, \ .display.has_psr_hw_tracking = 1, \ @@ -948,7 
+950,7 @@ static const struct intel_device_info adl_s_info = { .display.has_dp_mst = 1, \ .display.has_dsb = 1, \ .display.has_dsc = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .display.has_fpga_dbg = 1, \ .display.has_hdcp = 1, \ .display.has_hotplug = 1, \ @@ -1132,6 +1134,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_RKL_IDS(&rkl_info), INTEL_ADLS_IDS(&adl_s_info), INTEL_ADLP_IDS(&adl_p_info), + INTEL_ADLN_IDS(&adl_p_info), INTEL_DG1_IDS(&dg1_info), INTEL_RPLS_IDS(&adl_s_info), {0, 0, 0} diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2e8028e826b5..00fb40029f43 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -196,17 +196,23 @@ #include <linux/uuid.h> #include "gem/i915_gem_context.h" +#include "gem/i915_gem_internal.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_engine_user.h" #include "gt/intel_execlists_submission.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_clock_utils.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_lrc.h" +#include "gt/intel_lrc_reg.h" #include "gt/intel_ring.h" #include "i915_drv.h" +#include "i915_file_private.h" #include "i915_perf.h" +#include "i915_perf_oa_regs.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -1682,7 +1688,7 @@ retry: stream, cs, true /* save */, CS_GPR(i), INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); cs = save_restore_register( - stream, cs, true /* save */, MI_PREDICATE_RESULT_1, + stream, cs, true /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE), INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); /* First timestamp snapshot location. */ @@ -1736,7 +1742,7 @@ retry: */ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); - *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1); + *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)); /* Restart from the beginning if we had timestamps roll over. */ *cs++ = (GRAPHICS_VER(i915) < 8 ? @@ -1773,7 +1779,7 @@ retry: */ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); - *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1); + *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)); /* Predicate the jump. */ *cs++ = (GRAPHICS_VER(i915) < 8 ? @@ -1789,7 +1795,7 @@ retry: stream, cs, false /* restore */, CS_GPR(i), INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); cs = save_restore_register( - stream, cs, false /* restore */, MI_PREDICATE_RESULT_1, + stream, cs, false /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE), INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); /* And return to the ring. 
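[Editor's note] Further down in this i915_perf.c hunk, the open-coded REG_IN_RANGE()/ADDR_IN_RANGE() checks in the OA address validators are replaced by zero-terminated struct i915_range tables walked with reg_in_range_table(). A small usage sketch, with table contents picked from ranges added in this patch purely for illustration:

static const struct i915_range example_oa_regs[] = {
	{ .start = 0x2710, .end = 0x272c }, /* a contiguous block of registers */
	{ .start = 0x9840, .end = 0x9840 }, /* a single register */
	{}                                  /* zero start and end stop the walk */
};

static bool example_is_valid_oa_addr(u32 addr)
{
	return reg_in_range_table(addr, example_oa_regs);
}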
*/ @@ -2418,7 +2424,7 @@ gen12_configure_all_contexts(struct i915_perf_stream *stream, { struct flex regs[] = { { - GEN8_R_PWR_CLK_STATE, + GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), CTX_R_PWR_CLK_STATE, }, }; @@ -2438,7 +2444,7 @@ lrc_configure_all_contexts(struct i915_perf_stream *stream, #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) struct flex regs[] = { { - GEN8_R_PWR_CLK_STATE, + GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), CTX_R_PWR_CLK_STATE, }, { @@ -3862,80 +3868,116 @@ static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) return false; } -#define ADDR_IN_RANGE(addr, start, end) \ - ((addr) >= (start) && \ - (addr) <= (end)) +static bool reg_in_range_table(u32 addr, const struct i915_range *table) +{ + while (table->start || table->end) { + if (addr >= table->start && addr <= table->end) + return true; -#define REG_IN_RANGE(addr, start, end) \ - ((addr) >= i915_mmio_reg_offset(start) && \ - (addr) <= i915_mmio_reg_offset(end)) + table++; + } + + return false; +} #define REG_EQUAL(addr, mmio) \ ((addr) == i915_mmio_reg_offset(mmio)) -static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) -{ - return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) || - REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) || - REG_IN_RANGE(addr, OACEC0_0, OACEC7_1); -} +static const struct i915_range gen7_oa_b_counters[] = { + { .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */ + { .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */ + { .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */ + {} +}; + +static const struct i915_range gen12_oa_b_counters[] = { + { .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */ + { .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */ + { .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */ + { .start = 0xd940, .end = 0xd97c }, /* GEN12_OAG_CEC[0-7][0-1] */ + { .start = 0xdc00, .end = 0xdc3c }, /* GEN12_OAG_SCEC[0-7][0-1] */ + { .start = 0xdc40, .end = 0xdc40 }, /* GEN12_OAG_SPCTR_CNF */ + { .start = 0xdc44, .end = 0xdc44 }, /* GEN12_OAA_DBG_REG */ + {} +}; + +static const struct i915_range gen7_oa_mux_regs[] = { + { .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */ + { .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */ + { .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */ + {} +}; -static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr) +static const struct i915_range hsw_oa_mux_regs[] = { + { .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */ + { .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */ + { .start = 0x25100, .end = 0x2ff90 }, + {} +}; + +static const struct i915_range chv_oa_mux_regs[] = { + { .start = 0x182300, .end = 0x1823a4 }, + {} +}; + +static const struct i915_range gen8_oa_mux_regs[] = { + { .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */ + { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */ + {} +}; + +static const struct i915_range gen11_oa_mux_regs[] = { + { .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */ + {} +}; + +static const struct i915_range gen12_oa_mux_regs[] = { + { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */ + { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */ + { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */ + { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */ + { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */ + {} +}; + +static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) { - 
return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) || - REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) || - REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) || - REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI); + return reg_in_range_table(addr, gen7_oa_b_counters); } static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return gen7_is_valid_mux_addr(perf, addr) || - REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || - REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8)); + return reg_in_range_table(addr, gen7_oa_mux_regs) || + reg_in_range_table(addr, gen8_oa_mux_regs); } static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return gen8_is_valid_mux_addr(perf, addr) || - REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || - REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI); + return reg_in_range_table(addr, gen7_oa_mux_regs) || + reg_in_range_table(addr, gen8_oa_mux_regs) || + reg_in_range_table(addr, gen11_oa_mux_regs); } static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return gen7_is_valid_mux_addr(perf, addr) || - ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) || - REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) || - REG_EQUAL(addr, HSW_MBVID2_MISR0); + return reg_in_range_table(addr, gen7_oa_mux_regs) || + reg_in_range_table(addr, hsw_oa_mux_regs); } static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return gen7_is_valid_mux_addr(perf, addr) || - ADDR_IN_RANGE(addr, 0x182300, 0x1823A4); + return reg_in_range_table(addr, gen7_oa_mux_regs) || + reg_in_range_table(addr, chv_oa_mux_regs); } static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) { - return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) || - REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) || - REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) || - REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) || - REG_EQUAL(addr, GEN12_OAA_DBG_REG) || - REG_EQUAL(addr, GEN12_OAG_OA_PESS) || - REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF); + return reg_in_range_table(addr, gen12_oa_b_counters); } static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return REG_EQUAL(addr, NOA_WRITE) || - REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || - REG_EQUAL(addr, GDT_CHICKEN_BITS) || - REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || - REG_EQUAL(addr, RPM_CONFIG0) || - REG_EQUAL(addr, RPM_CONFIG1) || - REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8)); + return reg_in_range_table(addr, gen12_oa_mux_regs); } static u32 mask_reg_value(u32 reg, u32 val) diff --git a/drivers/gpu/drm/i915/i915_perf_oa_regs.h b/drivers/gpu/drm/i915/i915_perf_oa_regs.h new file mode 100644 index 000000000000..f31c9f13a9fc --- /dev/null +++ b/drivers/gpu/drm/i915/i915_perf_oa_regs.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_PERF_OA_REGS__ +#define __INTEL_PERF_OA_REGS__ + +#include "i915_reg_defs.h" + +#define GEN7_OACONTROL _MMIO(0x2360) +#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000 +#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F +#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6 +#define GEN7_OACONTROL_TIMER_ENABLE (1 << 5) +#define GEN7_OACONTROL_FORMAT_A13 (0 << 2) +#define GEN7_OACONTROL_FORMAT_A29 (1 << 2) +#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2 << 2) +#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3 << 2) +#define GEN7_OACONTROL_FORMAT_B4_C8 (4 << 2) +#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5 << 2) +#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6 << 2) +#define GEN7_OACONTROL_FORMAT_C4_B8 (7 
<< 2) +#define GEN7_OACONTROL_FORMAT_SHIFT 2 +#define GEN7_OACONTROL_PER_CTX_ENABLE (1 << 1) +#define GEN7_OACONTROL_ENABLE (1 << 0) + +#define GEN8_OACTXID _MMIO(0x2364) + +#define GEN8_OA_DEBUG _MMIO(0x2B04) +#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5) +#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6) +#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2) +#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1) + +#define GEN8_OACONTROL _MMIO(0x2B00) +#define GEN8_OA_REPORT_FORMAT_A12 (0 << 2) +#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2 << 2) +#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5 << 2) +#define GEN8_OA_REPORT_FORMAT_C4_B8 (7 << 2) +#define GEN8_OA_REPORT_FORMAT_SHIFT 2 +#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1 << 1) +#define GEN8_OA_COUNTER_ENABLE (1 << 0) + +#define GEN8_OACTXCONTROL _MMIO(0x2360) +#define GEN8_OA_TIMER_PERIOD_MASK 0x3F +#define GEN8_OA_TIMER_PERIOD_SHIFT 2 +#define GEN8_OA_TIMER_ENABLE (1 << 1) +#define GEN8_OA_COUNTER_RESUME (1 << 0) + +#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */ +#define GEN7_OABUFFER_OVERRUN_DISABLE (1 << 3) +#define GEN7_OABUFFER_EDGE_TRIGGER (1 << 2) +#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1 << 1) +#define GEN7_OABUFFER_RESUME (1 << 0) + +#define GEN8_OABUFFER_UDW _MMIO(0x23b4) +#define GEN8_OABUFFER _MMIO(0x2b14) +#define GEN8_OABUFFER_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ + +#define GEN7_OASTATUS1 _MMIO(0x2364) +#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0 +#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1 << 2) +#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1 << 1) +#define GEN7_OASTATUS1_REPORT_LOST (1 << 0) + +#define GEN7_OASTATUS2 _MMIO(0x2368) +#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0 +#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ + +#define GEN8_OASTATUS _MMIO(0x2b08) +#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17) +#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16) +#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3) +#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2) +#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1) +#define GEN8_OASTATUS_REPORT_LOST (1 << 0) + +#define GEN8_OAHEADPTR _MMIO(0x2B0C) +#define GEN8_OAHEADPTR_MASK 0xffffffc0 +#define GEN8_OATAILPTR _MMIO(0x2B10) +#define GEN8_OATAILPTR_MASK 0xffffffc0 + +#define OABUFFER_SIZE_128K (0 << 3) +#define OABUFFER_SIZE_256K (1 << 3) +#define OABUFFER_SIZE_512K (2 << 3) +#define OABUFFER_SIZE_1M (3 << 3) +#define OABUFFER_SIZE_2M (4 << 3) +#define OABUFFER_SIZE_4M (5 << 3) +#define OABUFFER_SIZE_8M (6 << 3) +#define OABUFFER_SIZE_16M (7 << 3) + +#define GEN12_OA_TLB_INV_CR _MMIO(0xceec) + +/* Gen12 OAR unit */ +#define GEN12_OAR_OACONTROL _MMIO(0x2960) +#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 +#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0) + +#define GEN12_OACTXCONTROL _MMIO(0x2360) +#define GEN12_OAR_OASTATUS _MMIO(0x2968) + +/* Gen12 OAG unit */ +#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00) +#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0 +#define GEN12_OAG_OATAILPTR _MMIO(0xdb04) +#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0 + +#define GEN12_OAG_OABUFFER _MMIO(0xdb08) +#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7) +#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3) +#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */ + +#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28) +#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2 +#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1) +#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0) + +#define GEN12_OAG_OACONTROL _MMIO(0xdaf4) +#define 
GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2 +#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0) + +#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8) +#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6) +#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5) +#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2) +#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1) + +#define GEN12_OAG_OASTATUS _MMIO(0xdafc) +#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2) +#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1) +#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0) + +#define GDT_CHICKEN_BITS _MMIO(0x9840) +#define GT_NOA_ENABLE 0x00000080 + +#endif /* __INTEL_PERF_OA_REGS__ */ diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index aa14354a5120..473a3c0544bb 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -18,7 +18,7 @@ #include <uapi/drm/i915_drm.h> #include "gt/intel_sseu.h" -#include "i915_reg.h" +#include "i915_reg_defs.h" #include "intel_wakeref.h" struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index ea655161793e..cfc21042499d 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -8,8 +8,10 @@ #include "gt/intel_engine.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_rc6.h" #include "gt/intel_rps.h" diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f95bbb10b6f4..2b8a3086ed35 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -25,8 +25,7 @@ #ifndef _I915_REG_H_ #define _I915_REG_H_ -#include <linux/bitfield.h> -#include <linux/bits.h> +#include "i915_reg_defs.h" /** * DOC: The i915 register macro definition style guide @@ -116,95 +115,6 @@ * #define GEN8_BAR _MMIO(0xb888) */ -/** - * REG_BIT() - Prepare a u32 bit value - * @__n: 0-based bit number - * - * Local wrapper for BIT() to force u32, with compile time checks. - * - * @return: Value with bit @__n set. - */ -#define REG_BIT(__n) \ - ((u32)(BIT(__n) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ - ((__n) < 0 || (__n) > 31)))) - -/** - * REG_GENMASK() - Prepare a continuous u32 bitmask - * @__high: 0-based high bit - * @__low: 0-based low bit - * - * Local wrapper for GENMASK() to force u32, with compile time checks. - * - * @return: Continuous bitmask from @__high to @__low, inclusive. - */ -#define REG_GENMASK(__high, __low) \ - ((u32)(GENMASK(__high, __low) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ - __is_constexpr(__low) && \ - ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) - -/* - * Local integer constant expression version of is_power_of_2(). - */ -#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0)) - -/** - * REG_FIELD_PREP() - Prepare a u32 bitfield value - * @__mask: shifted mask defining the field's length and position - * @__val: value to put in the field - * - * Local copy of FIELD_PREP() to generate an integer constant expression, force - * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK(). - * - * @return: @__val masked and shifted into the field defined by @__mask. 
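[Editor's note] The register helpers whose definitions are removed from i915_reg.h below (REG_BIT(), REG_GENMASK(), REG_FIELD_PREP() and REG_FIELD_GET(), now provided by the i915_reg_defs.h include added above) are typically combined as in the following sketch; the register and field names are made up for illustration:

#define EXAMPLE_CTL			_MMIO(0x12340)	/* hypothetical register */
#define   EXAMPLE_ENABLE		REG_BIT(31)
#define   EXAMPLE_LEVEL_MASK		REG_GENMASK(7, 4)
#define   EXAMPLE_LEVEL(x)		REG_FIELD_PREP(EXAMPLE_LEVEL_MASK, (x))

static u32 example_pack_ctl(u32 old, u32 level)
{
	/* Clear the field, then insert the new value and set the enable bit. */
	old &= ~EXAMPLE_LEVEL_MASK;
	return old | EXAMPLE_LEVEL(level) | EXAMPLE_ENABLE;
}

static u32 example_unpack_level(u32 val)
{
	return REG_FIELD_GET(EXAMPLE_LEVEL_MASK, val);
}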
- */ -#define REG_FIELD_PREP(__mask, __val) \ - ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ - BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ - BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \ - BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ - BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) - -/** - * REG_FIELD_GET() - Extract a u32 bitfield value - * @__mask: shifted mask defining the field's length and position - * @__val: value to extract the bitfield value from - * - * Local wrapper for FIELD_GET() to force u32 and for consistency with - * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK(). - * - * @return: Masked and shifted value of the field defined by @__mask in @__val. - */ -#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val)) - -typedef struct { - u32 reg; -} i915_reg_t; - -#define _MMIO(r) ((const i915_reg_t){ .reg = (r) }) - -#define INVALID_MMIO_REG _MMIO(0) - -static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg) -{ - return reg.reg; -} - -static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b) -{ - return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b); -} - -static inline bool i915_mmio_reg_valid(i915_reg_t reg) -{ - return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); -} - -#define VLV_DISPLAY_BASE 0x180000 -#define VLV_MIPI_BASE VLV_DISPLAY_BASE -#define BXT_MIPI_BASE 0x60000 - #define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display_mmio_offset) /* @@ -275,251 +185,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); }) #define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0)) -/* PCI config space */ - -#define MCHBAR_I915 0x44 -#define MCHBAR_I965 0x48 -#define MCHBAR_SIZE (4 * 4096) - -#define DEVEN 0x54 -#define DEVEN_MCHBAR_EN (1 << 28) - -/* BSM in include/drm/i915_drm.h */ - -#define HPLLCC 0xc0 /* 85x only */ -#define GC_CLOCK_CONTROL_MASK (0x7 << 0) -#define GC_CLOCK_133_200 (0 << 0) -#define GC_CLOCK_100_200 (1 << 0) -#define GC_CLOCK_100_133 (2 << 0) -#define GC_CLOCK_133_266 (3 << 0) -#define GC_CLOCK_133_200_2 (4 << 0) -#define GC_CLOCK_133_266_2 (5 << 0) -#define GC_CLOCK_166_266 (6 << 0) -#define GC_CLOCK_166_250 (7 << 0) - -#define I915_GDRST 0xc0 /* PCI config register */ -#define GRDOM_FULL (0 << 2) -#define GRDOM_RENDER (1 << 2) -#define GRDOM_MEDIA (3 << 2) -#define GRDOM_MASK (3 << 2) -#define GRDOM_RESET_STATUS (1 << 1) -#define GRDOM_RESET_ENABLE (1 << 0) - -/* BSpec only has register offset, PCI device and bit found empirically */ -#define I830_CLOCK_GATE 0xc8 /* device 0 */ -#define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2) - -#define GCDGMBUS 0xcc - -#define GCFGC2 0xda -#define GCFGC 0xf0 /* 915+ only */ -#define GC_LOW_FREQUENCY_ENABLE (1 << 7) -#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) -#define GC_DISPLAY_CLOCK_333_320_MHZ (4 << 4) -#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) -#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) -#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) -#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) -#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) -#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) -#define GC_DISPLAY_CLOCK_MASK (7 << 4) -#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) -#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) -#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) -#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) -#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) -#define 
I965_GC_RENDER_CLOCK_MASK (0xf << 0) -#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) -#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) -#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) -#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) -#define I945_GC_RENDER_CLOCK_MASK (7 << 0) -#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) -#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) -#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) -#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) -#define I915_GC_RENDER_CLOCK_MASK (7 << 0) -#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) -#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) -#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) - -#define ASLE 0xe4 -#define ASLS 0xfc - -#define SWSCI 0xe8 -#define SWSCI_SCISEL (1 << 15) -#define SWSCI_GSSCIE (1 << 0) - -#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ - - -#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4) -#define ILK_GRDOM_FULL (0 << 1) -#define ILK_GRDOM_RENDER (1 << 1) -#define ILK_GRDOM_MEDIA (3 << 1) -#define ILK_GRDOM_MASK (3 << 1) -#define ILK_GRDOM_RESET_ENABLE (1 << 0) - -#define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */ -#define GEN6_MBC_SNPCR_SHIFT 21 -#define GEN6_MBC_SNPCR_MASK (3 << 21) -#define GEN6_MBC_SNPCR_MAX (0 << 21) -#define GEN6_MBC_SNPCR_MED (1 << 21) -#define GEN6_MBC_SNPCR_LOW (2 << 21) -#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */ - -#define VLV_G3DCTL _MMIO(0x9024) -#define VLV_GSCKGCTL _MMIO(0x9028) - -#define FBC_LLC_READ_CTRL _MMIO(0x9044) -#define FBC_LLC_FULLY_OPEN REG_BIT(30) - -#define GEN6_MBCTL _MMIO(0x0907c) -#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) -#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) -#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2) -#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1) -#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0) - -#define GEN6_GDRST _MMIO(0x941c) -#define GEN6_GRDOM_FULL (1 << 0) -#define GEN6_GRDOM_RENDER (1 << 1) -#define GEN6_GRDOM_MEDIA (1 << 2) -#define GEN6_GRDOM_BLT (1 << 3) -#define GEN6_GRDOM_VECS (1 << 4) -#define GEN9_GRDOM_GUC (1 << 5) -#define GEN8_GRDOM_MEDIA2 (1 << 7) -/* GEN11 changed all bit defs except for FULL & RENDER */ -#define GEN11_GRDOM_FULL GEN6_GRDOM_FULL -#define GEN11_GRDOM_RENDER GEN6_GRDOM_RENDER -#define GEN11_GRDOM_BLT (1 << 2) -#define GEN11_GRDOM_GUC (1 << 3) -#define GEN11_GRDOM_MEDIA (1 << 5) -#define GEN11_GRDOM_MEDIA2 (1 << 6) -#define GEN11_GRDOM_MEDIA3 (1 << 7) -#define GEN11_GRDOM_MEDIA4 (1 << 8) -#define GEN11_GRDOM_MEDIA5 (1 << 9) -#define GEN11_GRDOM_MEDIA6 (1 << 10) -#define GEN11_GRDOM_MEDIA7 (1 << 11) -#define GEN11_GRDOM_MEDIA8 (1 << 12) -#define GEN11_GRDOM_VECS (1 << 13) -#define GEN11_GRDOM_VECS2 (1 << 14) -#define GEN11_GRDOM_VECS3 (1 << 15) -#define GEN11_GRDOM_VECS4 (1 << 16) -#define GEN11_GRDOM_SFC0 (1 << 17) -#define GEN11_GRDOM_SFC1 (1 << 18) -#define GEN11_GRDOM_SFC2 (1 << 19) -#define GEN11_GRDOM_SFC3 (1 << 20) - -#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1)) -#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance)) - -#define GEN11_VCS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x88C) -#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0) -#define GEN11_VCS_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x890) -#define GEN11_VCS_SFC_USAGE_BIT (1 << 0) -#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1) - -#define GEN11_VECS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x201C) -#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0) -#define GEN11_VECS_SFC_LOCK_ACK(engine) 
_MMIO((engine)->mmio_base + 0x2018) -#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0) -#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014) -#define GEN11_VECS_SFC_USAGE_BIT (1 << 0) - -#define GEN12_HCP_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x2910) -#define GEN12_HCP_SFC_FORCED_LOCK_BIT REG_BIT(0) -#define GEN12_HCP_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x2914) -#define GEN12_HCP_SFC_LOCK_ACK_BIT REG_BIT(1) -#define GEN12_HCP_SFC_USAGE_BIT REG_BIT(0) - -#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) -#define GEN12_SFC_DONE_MAX 4 - -#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) -#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) -#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220) -#define PP_DIR_DCLV_2G 0xffffffff - -#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4) -#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8) - -#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8) -#define GEN8_RPCS_ENABLE (1 << 31) -#define GEN8_RPCS_S_CNT_ENABLE (1 << 18) -#define GEN8_RPCS_S_CNT_SHIFT 15 -#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT) -#define GEN11_RPCS_S_CNT_SHIFT 12 -#define GEN11_RPCS_S_CNT_MASK (0x3f << GEN11_RPCS_S_CNT_SHIFT) -#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11) -#define GEN8_RPCS_SS_CNT_SHIFT 8 -#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT) -#define GEN8_RPCS_EU_MAX_SHIFT 4 -#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT) -#define GEN8_RPCS_EU_MIN_SHIFT 0 -#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT) - -#define WAIT_FOR_RC6_EXIT _MMIO(0x20CC) -/* HSW only */ -#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2 -#define HSW_SELECTIVE_READ_ADDRESSING_MASK (0x3 << HSW_SLECTIVE_READ_ADDRESSING_SHIFT) -#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4 -#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT) -/* HSW+ */ -#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0) -#define HSW_RCS_CONTEXT_ENABLE (1 << 7) -#define HSW_RCS_INHIBIT (1 << 8) -/* Gen8 */ -#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4 -#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT) -#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4 -#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT) -#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6) -#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9 -#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT) -#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11 -#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT) -#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13) - -#define GAM_ECOCHK _MMIO(0x4090) -#define BDW_DISABLE_HDC_INVALIDATION (1 << 25) -#define ECOCHK_SNB_BIT (1 << 10) -#define ECOCHK_DIS_TLB (1 << 8) -#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6) -#define ECOCHK_PPGTT_CACHE64B (0x3 << 3) -#define ECOCHK_PPGTT_CACHE4B (0x0 << 3) -#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4) -#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3) -#define ECOCHK_PPGTT_UC_HSW (0x1 << 3) -#define ECOCHK_PPGTT_WT_HSW (0x2 << 3) -#define ECOCHK_PPGTT_WB_HSW (0x3 << 3) - -#define GEN8_RC6_CTX_INFO _MMIO(0x8504) - -#define GAC_ECO_BITS _MMIO(0x14090) -#define ECOBITS_SNB_BIT (1 << 13) -#define ECOBITS_PPGTT_CACHE64B (3 << 8) -#define ECOBITS_PPGTT_CACHE4B (0 << 8) - -#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54) -#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12) -#define GLOBAL_INVALIDATION_MODE 
REG_BIT(2) - -#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c) -#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12) -#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11) -#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7) - -#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28) -#define RENDER_MOD_CTRL _MMIO(0xcf2c) -#define COMP_MOD_CTRL _MMIO(0xcf30) -#define VDBX_MOD_CTRL _MMIO(0xcf34) -#define VEBX_MOD_CTRL _MMIO(0xcf38) -#define FORCE_MISS_FTLB REG_BIT(3) - -#define GAB_CTL _MMIO(0x24000) -#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8) - #define GU_CNTL _MMIO(0x101010) #define LMEM_INIT REG_BIT(7) @@ -542,671 +207,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN6_STOLEN_RESERVED_ENABLE (1 << 0) #define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20) -/* VGA stuff */ - -#define VGA_ST01_MDA 0x3ba -#define VGA_ST01_CGA 0x3da - #define _VGA_MSR_WRITE _MMIO(0x3c2) -#define VGA_MSR_WRITE 0x3c2 -#define VGA_MSR_READ 0x3cc -#define VGA_MSR_MEM_EN (1 << 1) -#define VGA_MSR_CGA_MODE (1 << 0) - -#define VGA_SR_INDEX 0x3c4 -#define SR01 1 -#define VGA_SR_DATA 0x3c5 - -#define VGA_AR_INDEX 0x3c0 -#define VGA_AR_VID_EN (1 << 5) -#define VGA_AR_DATA_WRITE 0x3c0 -#define VGA_AR_DATA_READ 0x3c1 - -#define VGA_GR_INDEX 0x3ce -#define VGA_GR_DATA 0x3cf -/* GR05 */ -#define VGA_GR_MEM_READ_MODE_SHIFT 3 -#define VGA_GR_MEM_READ_MODE_PLANE 1 -/* GR06 */ -#define VGA_GR_MEM_MODE_MASK 0xc -#define VGA_GR_MEM_MODE_SHIFT 2 -#define VGA_GR_MEM_A0000_AFFFF 0 -#define VGA_GR_MEM_A0000_BFFFF 1 -#define VGA_GR_MEM_B0000_B7FFF 2 -#define VGA_GR_MEM_B0000_BFFFF 3 - -#define VGA_DACMASK 0x3c6 -#define VGA_DACRX 0x3c7 -#define VGA_DACWX 0x3c8 -#define VGA_DACDATA 0x3c9 - -#define VGA_CR_INDEX_MDA 0x3b4 -#define VGA_CR_DATA_MDA 0x3b5 -#define VGA_CR_INDEX_CGA 0x3d4 -#define VGA_CR_DATA_CGA 0x3d5 - -#define MI_PREDICATE_SRC0 _MMIO(0x2400) -#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4) -#define MI_PREDICATE_SRC1 _MMIO(0x2408) -#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4) -#define MI_PREDICATE_DATA _MMIO(0x2410) -#define MI_PREDICATE_RESULT _MMIO(0x2418) -#define MI_PREDICATE_RESULT_1 _MMIO(0x241c) -#define MI_PREDICATE_RESULT_2 _MMIO(0x2214) -#define LOWER_SLICE_ENABLED (1 << 0) -#define LOWER_SLICE_DISABLED (0 << 0) - -/* - * Registers used only by the command parser - */ -#define BCS_SWCTRL _MMIO(0x22200) -#define BCS_SRC_Y REG_BIT(0) -#define BCS_DST_Y REG_BIT(1) - -/* There are 16 GPR registers */ -#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) -#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) - -#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) -#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) -#define HS_INVOCATION_COUNT _MMIO(0x2300) -#define HS_INVOCATION_COUNT_UDW _MMIO(0x2300 + 4) -#define DS_INVOCATION_COUNT _MMIO(0x2308) -#define DS_INVOCATION_COUNT_UDW _MMIO(0x2308 + 4) -#define IA_VERTICES_COUNT _MMIO(0x2310) -#define IA_VERTICES_COUNT_UDW _MMIO(0x2310 + 4) -#define IA_PRIMITIVES_COUNT _MMIO(0x2318) -#define IA_PRIMITIVES_COUNT_UDW _MMIO(0x2318 + 4) -#define VS_INVOCATION_COUNT _MMIO(0x2320) -#define VS_INVOCATION_COUNT_UDW _MMIO(0x2320 + 4) -#define GS_INVOCATION_COUNT _MMIO(0x2328) -#define GS_INVOCATION_COUNT_UDW _MMIO(0x2328 + 4) -#define GS_PRIMITIVES_COUNT _MMIO(0x2330) -#define GS_PRIMITIVES_COUNT_UDW _MMIO(0x2330 + 4) -#define CL_INVOCATION_COUNT _MMIO(0x2338) -#define CL_INVOCATION_COUNT_UDW _MMIO(0x2338 + 4) -#define CL_PRIMITIVES_COUNT _MMIO(0x2340) -#define CL_PRIMITIVES_COUNT_UDW _MMIO(0x2340 + 4) -#define PS_INVOCATION_COUNT _MMIO(0x2348) -#define PS_INVOCATION_COUNT_UDW _MMIO(0x2348 + 4) -#define 
PS_DEPTH_COUNT _MMIO(0x2350) -#define PS_DEPTH_COUNT_UDW _MMIO(0x2350 + 4) - -/* There are the 4 64-bit counter registers, one for each stream output */ -#define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8) -#define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4) - -#define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8) -#define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4) - -#define GEN7_3DPRIM_END_OFFSET _MMIO(0x2420) -#define GEN7_3DPRIM_START_VERTEX _MMIO(0x2430) -#define GEN7_3DPRIM_VERTEX_COUNT _MMIO(0x2434) -#define GEN7_3DPRIM_INSTANCE_COUNT _MMIO(0x2438) -#define GEN7_3DPRIM_START_INSTANCE _MMIO(0x243C) -#define GEN7_3DPRIM_BASE_VERTEX _MMIO(0x2440) - -#define GEN7_GPGPU_DISPATCHDIMX _MMIO(0x2500) -#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504) -#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508) - -/* There are the 16 64-bit CS General Purpose Registers */ -#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8) -#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) - -#define GEN7_OACONTROL _MMIO(0x2360) -#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000 -#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F -#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6 -#define GEN7_OACONTROL_TIMER_ENABLE (1 << 5) -#define GEN7_OACONTROL_FORMAT_A13 (0 << 2) -#define GEN7_OACONTROL_FORMAT_A29 (1 << 2) -#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2 << 2) -#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3 << 2) -#define GEN7_OACONTROL_FORMAT_B4_C8 (4 << 2) -#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5 << 2) -#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6 << 2) -#define GEN7_OACONTROL_FORMAT_C4_B8 (7 << 2) -#define GEN7_OACONTROL_FORMAT_SHIFT 2 -#define GEN7_OACONTROL_PER_CTX_ENABLE (1 << 1) -#define GEN7_OACONTROL_ENABLE (1 << 0) - -#define GEN8_OACTXID _MMIO(0x2364) - -#define GEN8_OA_DEBUG _MMIO(0x2B04) -#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5) -#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6) -#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2) -#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1) - -#define GEN8_OACONTROL _MMIO(0x2B00) -#define GEN8_OA_REPORT_FORMAT_A12 (0 << 2) -#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2 << 2) -#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5 << 2) -#define GEN8_OA_REPORT_FORMAT_C4_B8 (7 << 2) -#define GEN8_OA_REPORT_FORMAT_SHIFT 2 -#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1 << 1) -#define GEN8_OA_COUNTER_ENABLE (1 << 0) - -#define GEN8_OACTXCONTROL _MMIO(0x2360) -#define GEN8_OA_TIMER_PERIOD_MASK 0x3F -#define GEN8_OA_TIMER_PERIOD_SHIFT 2 -#define GEN8_OA_TIMER_ENABLE (1 << 1) -#define GEN8_OA_COUNTER_RESUME (1 << 0) - -#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */ -#define GEN7_OABUFFER_OVERRUN_DISABLE (1 << 3) -#define GEN7_OABUFFER_EDGE_TRIGGER (1 << 2) -#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1 << 1) -#define GEN7_OABUFFER_RESUME (1 << 0) - -#define GEN8_OABUFFER_UDW _MMIO(0x23b4) -#define GEN8_OABUFFER _MMIO(0x2b14) -#define GEN8_OABUFFER_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ - -#define GEN7_OASTATUS1 _MMIO(0x2364) -#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0 -#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1 << 2) -#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1 << 1) -#define GEN7_OASTATUS1_REPORT_LOST (1 << 0) - -#define GEN7_OASTATUS2 _MMIO(0x2368) -#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0 -#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ - -#define GEN8_OASTATUS _MMIO(0x2b08) -#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17) -#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16) -#define 
GEN8_OASTATUS_OVERRUN_STATUS (1 << 3) -#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2) -#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1) -#define GEN8_OASTATUS_REPORT_LOST (1 << 0) - -#define GEN8_OAHEADPTR _MMIO(0x2B0C) -#define GEN8_OAHEADPTR_MASK 0xffffffc0 -#define GEN8_OATAILPTR _MMIO(0x2B10) -#define GEN8_OATAILPTR_MASK 0xffffffc0 - -#define OABUFFER_SIZE_128K (0 << 3) -#define OABUFFER_SIZE_256K (1 << 3) -#define OABUFFER_SIZE_512K (2 << 3) -#define OABUFFER_SIZE_1M (3 << 3) -#define OABUFFER_SIZE_2M (4 << 3) -#define OABUFFER_SIZE_4M (5 << 3) -#define OABUFFER_SIZE_8M (6 << 3) -#define OABUFFER_SIZE_16M (7 << 3) - -#define GEN12_OA_TLB_INV_CR _MMIO(0xceec) - -#define GEN12_SQCM _MMIO(0x8724) -#define EN_32B_ACCESS REG_BIT(30) - -/* Gen12 OAR unit */ -#define GEN12_OAR_OACONTROL _MMIO(0x2960) -#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 -#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0) - -#define GEN12_OACTXCONTROL _MMIO(0x2360) -#define GEN12_OAR_OASTATUS _MMIO(0x2968) - -/* Gen12 OAG unit */ -#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00) -#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0 -#define GEN12_OAG_OATAILPTR _MMIO(0xdb04) -#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0 - -#define GEN12_OAG_OABUFFER _MMIO(0xdb08) -#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7) -#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3) -#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */ - -#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28) -#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2 -#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1) -#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0) - -#define GEN12_OAG_OACONTROL _MMIO(0xdaf4) -#define GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2 -#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0) - -#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8) -#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6) -#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5) -#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2) -#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1) - -#define GEN12_OAG_OASTATUS _MMIO(0xdafc) -#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2) -#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1) -#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0) - -/* - * Flexible, Aggregate EU Counter Registers. 
- * Note: these aren't contiguous - */ -#define EU_PERF_CNTL0 _MMIO(0xe458) -#define EU_PERF_CNTL1 _MMIO(0xe558) -#define EU_PERF_CNTL2 _MMIO(0xe658) -#define EU_PERF_CNTL3 _MMIO(0xe758) -#define EU_PERF_CNTL4 _MMIO(0xe45c) -#define EU_PERF_CNTL5 _MMIO(0xe55c) -#define EU_PERF_CNTL6 _MMIO(0xe65c) - -#define RT_CTRL _MMIO(0xe530) -#define DIS_NULL_QUERY REG_BIT(10) - -/* - * OA Boolean state - */ - -#define OASTARTTRIG1 _MMIO(0x2710) -#define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000 -#define OASTARTTRIG1_THRESHOLD_MASK 0xffff - -#define OASTARTTRIG2 _MMIO(0x2714) -#define OASTARTTRIG2_INVERT_A_0 (1 << 0) -#define OASTARTTRIG2_INVERT_A_1 (1 << 1) -#define OASTARTTRIG2_INVERT_A_2 (1 << 2) -#define OASTARTTRIG2_INVERT_A_3 (1 << 3) -#define OASTARTTRIG2_INVERT_A_4 (1 << 4) -#define OASTARTTRIG2_INVERT_A_5 (1 << 5) -#define OASTARTTRIG2_INVERT_A_6 (1 << 6) -#define OASTARTTRIG2_INVERT_A_7 (1 << 7) -#define OASTARTTRIG2_INVERT_A_8 (1 << 8) -#define OASTARTTRIG2_INVERT_A_9 (1 << 9) -#define OASTARTTRIG2_INVERT_A_10 (1 << 10) -#define OASTARTTRIG2_INVERT_A_11 (1 << 11) -#define OASTARTTRIG2_INVERT_A_12 (1 << 12) -#define OASTARTTRIG2_INVERT_A_13 (1 << 13) -#define OASTARTTRIG2_INVERT_A_14 (1 << 14) -#define OASTARTTRIG2_INVERT_A_15 (1 << 15) -#define OASTARTTRIG2_INVERT_B_0 (1 << 16) -#define OASTARTTRIG2_INVERT_B_1 (1 << 17) -#define OASTARTTRIG2_INVERT_B_2 (1 << 18) -#define OASTARTTRIG2_INVERT_B_3 (1 << 19) -#define OASTARTTRIG2_INVERT_C_0 (1 << 20) -#define OASTARTTRIG2_INVERT_C_1 (1 << 21) -#define OASTARTTRIG2_INVERT_D_0 (1 << 22) -#define OASTARTTRIG2_THRESHOLD_ENABLE (1 << 23) -#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1 << 24) -#define OASTARTTRIG2_EVENT_SELECT_0 (1 << 28) -#define OASTARTTRIG2_EVENT_SELECT_1 (1 << 29) -#define OASTARTTRIG2_EVENT_SELECT_2 (1 << 30) -#define OASTARTTRIG2_EVENT_SELECT_3 (1 << 31) - -#define OASTARTTRIG3 _MMIO(0x2718) -#define OASTARTTRIG3_NOA_SELECT_MASK 0xf -#define OASTARTTRIG3_NOA_SELECT_8_SHIFT 0 -#define OASTARTTRIG3_NOA_SELECT_9_SHIFT 4 -#define OASTARTTRIG3_NOA_SELECT_10_SHIFT 8 -#define OASTARTTRIG3_NOA_SELECT_11_SHIFT 12 -#define OASTARTTRIG3_NOA_SELECT_12_SHIFT 16 -#define OASTARTTRIG3_NOA_SELECT_13_SHIFT 20 -#define OASTARTTRIG3_NOA_SELECT_14_SHIFT 24 -#define OASTARTTRIG3_NOA_SELECT_15_SHIFT 28 - -#define OASTARTTRIG4 _MMIO(0x271c) -#define OASTARTTRIG4_NOA_SELECT_MASK 0xf -#define OASTARTTRIG4_NOA_SELECT_0_SHIFT 0 -#define OASTARTTRIG4_NOA_SELECT_1_SHIFT 4 -#define OASTARTTRIG4_NOA_SELECT_2_SHIFT 8 -#define OASTARTTRIG4_NOA_SELECT_3_SHIFT 12 -#define OASTARTTRIG4_NOA_SELECT_4_SHIFT 16 -#define OASTARTTRIG4_NOA_SELECT_5_SHIFT 20 -#define OASTARTTRIG4_NOA_SELECT_6_SHIFT 24 -#define OASTARTTRIG4_NOA_SELECT_7_SHIFT 28 - -#define OASTARTTRIG5 _MMIO(0x2720) -#define OASTARTTRIG5_THRESHOLD_COUNT_MASK_MBZ 0xffff0000 -#define OASTARTTRIG5_THRESHOLD_MASK 0xffff - -#define OASTARTTRIG6 _MMIO(0x2724) -#define OASTARTTRIG6_INVERT_A_0 (1 << 0) -#define OASTARTTRIG6_INVERT_A_1 (1 << 1) -#define OASTARTTRIG6_INVERT_A_2 (1 << 2) -#define OASTARTTRIG6_INVERT_A_3 (1 << 3) -#define OASTARTTRIG6_INVERT_A_4 (1 << 4) -#define OASTARTTRIG6_INVERT_A_5 (1 << 5) -#define OASTARTTRIG6_INVERT_A_6 (1 << 6) -#define OASTARTTRIG6_INVERT_A_7 (1 << 7) -#define OASTARTTRIG6_INVERT_A_8 (1 << 8) -#define OASTARTTRIG6_INVERT_A_9 (1 << 9) -#define OASTARTTRIG6_INVERT_A_10 (1 << 10) -#define OASTARTTRIG6_INVERT_A_11 (1 << 11) -#define OASTARTTRIG6_INVERT_A_12 (1 << 12) -#define OASTARTTRIG6_INVERT_A_13 (1 << 13) -#define OASTARTTRIG6_INVERT_A_14 (1 << 14) -#define 
OASTARTTRIG6_INVERT_A_15 (1 << 15) -#define OASTARTTRIG6_INVERT_B_0 (1 << 16) -#define OASTARTTRIG6_INVERT_B_1 (1 << 17) -#define OASTARTTRIG6_INVERT_B_2 (1 << 18) -#define OASTARTTRIG6_INVERT_B_3 (1 << 19) -#define OASTARTTRIG6_INVERT_C_0 (1 << 20) -#define OASTARTTRIG6_INVERT_C_1 (1 << 21) -#define OASTARTTRIG6_INVERT_D_0 (1 << 22) -#define OASTARTTRIG6_THRESHOLD_ENABLE (1 << 23) -#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1 << 24) -#define OASTARTTRIG6_EVENT_SELECT_4 (1 << 28) -#define OASTARTTRIG6_EVENT_SELECT_5 (1 << 29) -#define OASTARTTRIG6_EVENT_SELECT_6 (1 << 30) -#define OASTARTTRIG6_EVENT_SELECT_7 (1 << 31) - -#define OASTARTTRIG7 _MMIO(0x2728) -#define OASTARTTRIG7_NOA_SELECT_MASK 0xf -#define OASTARTTRIG7_NOA_SELECT_8_SHIFT 0 -#define OASTARTTRIG7_NOA_SELECT_9_SHIFT 4 -#define OASTARTTRIG7_NOA_SELECT_10_SHIFT 8 -#define OASTARTTRIG7_NOA_SELECT_11_SHIFT 12 -#define OASTARTTRIG7_NOA_SELECT_12_SHIFT 16 -#define OASTARTTRIG7_NOA_SELECT_13_SHIFT 20 -#define OASTARTTRIG7_NOA_SELECT_14_SHIFT 24 -#define OASTARTTRIG7_NOA_SELECT_15_SHIFT 28 - -#define OASTARTTRIG8 _MMIO(0x272c) -#define OASTARTTRIG8_NOA_SELECT_MASK 0xf -#define OASTARTTRIG8_NOA_SELECT_0_SHIFT 0 -#define OASTARTTRIG8_NOA_SELECT_1_SHIFT 4 -#define OASTARTTRIG8_NOA_SELECT_2_SHIFT 8 -#define OASTARTTRIG8_NOA_SELECT_3_SHIFT 12 -#define OASTARTTRIG8_NOA_SELECT_4_SHIFT 16 -#define OASTARTTRIG8_NOA_SELECT_5_SHIFT 20 -#define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24 -#define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28 - -#define OAREPORTTRIG1 _MMIO(0x2740) -#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff -#define OAREPORTTRIG1_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */ - -#define OAREPORTTRIG2 _MMIO(0x2744) -#define OAREPORTTRIG2_INVERT_A_0 (1 << 0) -#define OAREPORTTRIG2_INVERT_A_1 (1 << 1) -#define OAREPORTTRIG2_INVERT_A_2 (1 << 2) -#define OAREPORTTRIG2_INVERT_A_3 (1 << 3) -#define OAREPORTTRIG2_INVERT_A_4 (1 << 4) -#define OAREPORTTRIG2_INVERT_A_5 (1 << 5) -#define OAREPORTTRIG2_INVERT_A_6 (1 << 6) -#define OAREPORTTRIG2_INVERT_A_7 (1 << 7) -#define OAREPORTTRIG2_INVERT_A_8 (1 << 8) -#define OAREPORTTRIG2_INVERT_A_9 (1 << 9) -#define OAREPORTTRIG2_INVERT_A_10 (1 << 10) -#define OAREPORTTRIG2_INVERT_A_11 (1 << 11) -#define OAREPORTTRIG2_INVERT_A_12 (1 << 12) -#define OAREPORTTRIG2_INVERT_A_13 (1 << 13) -#define OAREPORTTRIG2_INVERT_A_14 (1 << 14) -#define OAREPORTTRIG2_INVERT_A_15 (1 << 15) -#define OAREPORTTRIG2_INVERT_B_0 (1 << 16) -#define OAREPORTTRIG2_INVERT_B_1 (1 << 17) -#define OAREPORTTRIG2_INVERT_B_2 (1 << 18) -#define OAREPORTTRIG2_INVERT_B_3 (1 << 19) -#define OAREPORTTRIG2_INVERT_C_0 (1 << 20) -#define OAREPORTTRIG2_INVERT_C_1 (1 << 21) -#define OAREPORTTRIG2_INVERT_D_0 (1 << 22) -#define OAREPORTTRIG2_THRESHOLD_ENABLE (1 << 23) -#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31) - -#define OAREPORTTRIG3 _MMIO(0x2748) -#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf -#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0 -#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4 -#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8 -#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12 -#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16 -#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20 -#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24 -#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28 - -#define OAREPORTTRIG4 _MMIO(0x274c) -#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf -#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0 -#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4 -#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8 -#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12 -#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16 
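(Illustration only, not part of the patch.) The OASTARTTRIG/OAREPORTTRIG definitions being moved here are plain shift/mask encodings: TRIG3/TRIG4 (and TRIG7/TRIG8) each pack eight 4-bit NOA select fields, while TRIG2/TRIG6 carry the per-signal invert bits plus the threshold and report enables. A minimal sketch of composing such a value from these macros follows; the helper name and the field values are invented for illustration, intel_uncore_write() is assumed as the usual i915 MMIO accessor, and real driver code derives these writes from OA configuration tables rather than hard-coding them.

/*
 * Sketch only: arm an OA report trigger using the OAREPORTTRIG*
 * fields defined in this file. The chosen values are arbitrary
 * examples, not a configuration taken from the driver.
 */
static void example_arm_oa_report_trigger(struct intel_uncore *uncore)
{
	u32 trig3 = 0;

	/* Route NOA signal 2 into boolean counter 8's 4-bit select field. */
	trig3 |= (2 & OAREPORTTRIG3_NOA_SELECT_MASK) <<
		 OAREPORTTRIG3_NOA_SELECT_8_SHIFT;
	intel_uncore_write(uncore, OAREPORTTRIG3, trig3);

	/*
	 * Enable the threshold comparison (the threshold itself lives in
	 * OAREPORTTRIG1) and let a firing trigger emit an OA report.
	 */
	intel_uncore_write(uncore, OAREPORTTRIG2,
			   OAREPORTTRIG2_THRESHOLD_ENABLE |
			   OAREPORTTRIG2_REPORT_TRIGGER_ENABLE);
}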
-#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20 -#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24 -#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28 - -#define OAREPORTTRIG5 _MMIO(0x2750) -#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff -#define OAREPORTTRIG5_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */ - -#define OAREPORTTRIG6 _MMIO(0x2754) -#define OAREPORTTRIG6_INVERT_A_0 (1 << 0) -#define OAREPORTTRIG6_INVERT_A_1 (1 << 1) -#define OAREPORTTRIG6_INVERT_A_2 (1 << 2) -#define OAREPORTTRIG6_INVERT_A_3 (1 << 3) -#define OAREPORTTRIG6_INVERT_A_4 (1 << 4) -#define OAREPORTTRIG6_INVERT_A_5 (1 << 5) -#define OAREPORTTRIG6_INVERT_A_6 (1 << 6) -#define OAREPORTTRIG6_INVERT_A_7 (1 << 7) -#define OAREPORTTRIG6_INVERT_A_8 (1 << 8) -#define OAREPORTTRIG6_INVERT_A_9 (1 << 9) -#define OAREPORTTRIG6_INVERT_A_10 (1 << 10) -#define OAREPORTTRIG6_INVERT_A_11 (1 << 11) -#define OAREPORTTRIG6_INVERT_A_12 (1 << 12) -#define OAREPORTTRIG6_INVERT_A_13 (1 << 13) -#define OAREPORTTRIG6_INVERT_A_14 (1 << 14) -#define OAREPORTTRIG6_INVERT_A_15 (1 << 15) -#define OAREPORTTRIG6_INVERT_B_0 (1 << 16) -#define OAREPORTTRIG6_INVERT_B_1 (1 << 17) -#define OAREPORTTRIG6_INVERT_B_2 (1 << 18) -#define OAREPORTTRIG6_INVERT_B_3 (1 << 19) -#define OAREPORTTRIG6_INVERT_C_0 (1 << 20) -#define OAREPORTTRIG6_INVERT_C_1 (1 << 21) -#define OAREPORTTRIG6_INVERT_D_0 (1 << 22) -#define OAREPORTTRIG6_THRESHOLD_ENABLE (1 << 23) -#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31) - -#define OAREPORTTRIG7 _MMIO(0x2758) -#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf -#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0 -#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4 -#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8 -#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12 -#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16 -#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20 -#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24 -#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28 - -#define OAREPORTTRIG8 _MMIO(0x275c) -#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf -#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0 -#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4 -#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8 -#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12 -#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16 -#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20 -#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24 -#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28 - -/* Same layout as OASTARTTRIGX */ -#define GEN12_OAG_OASTARTTRIG1 _MMIO(0xd900) -#define GEN12_OAG_OASTARTTRIG2 _MMIO(0xd904) -#define GEN12_OAG_OASTARTTRIG3 _MMIO(0xd908) -#define GEN12_OAG_OASTARTTRIG4 _MMIO(0xd90c) -#define GEN12_OAG_OASTARTTRIG5 _MMIO(0xd910) -#define GEN12_OAG_OASTARTTRIG6 _MMIO(0xd914) -#define GEN12_OAG_OASTARTTRIG7 _MMIO(0xd918) -#define GEN12_OAG_OASTARTTRIG8 _MMIO(0xd91c) - -/* Same layout as OAREPORTTRIGX */ -#define GEN12_OAG_OAREPORTTRIG1 _MMIO(0xd920) -#define GEN12_OAG_OAREPORTTRIG2 _MMIO(0xd924) -#define GEN12_OAG_OAREPORTTRIG3 _MMIO(0xd928) -#define GEN12_OAG_OAREPORTTRIG4 _MMIO(0xd92c) -#define GEN12_OAG_OAREPORTTRIG5 _MMIO(0xd930) -#define GEN12_OAG_OAREPORTTRIG6 _MMIO(0xd934) -#define GEN12_OAG_OAREPORTTRIG7 _MMIO(0xd938) -#define GEN12_OAG_OAREPORTTRIG8 _MMIO(0xd93c) - -/* CECX_0 */ -#define OACEC_COMPARE_LESS_OR_EQUAL 6 -#define OACEC_COMPARE_NOT_EQUAL 5 -#define OACEC_COMPARE_LESS_THAN 4 -#define OACEC_COMPARE_GREATER_OR_EQUAL 3 -#define OACEC_COMPARE_EQUAL 2 -#define OACEC_COMPARE_GREATER_THAN 1 -#define OACEC_COMPARE_ANY_EQUAL 0 - -#define OACEC_COMPARE_VALUE_MASK 0xffff -#define OACEC_COMPARE_VALUE_SHIFT 3 - -#define OACEC_SELECT_NOA (0 << 19) -#define 
OACEC_SELECT_PREV (1 << 19) -#define OACEC_SELECT_BOOLEAN (2 << 19) - -/* 11-bit array 0: pass-through, 1: negated */ -#define GEN12_OASCEC_NEGATE_MASK 0x7ff -#define GEN12_OASCEC_NEGATE_SHIFT 21 - -/* CECX_1 */ -#define OACEC_MASK_MASK 0xffff -#define OACEC_CONSIDERATIONS_MASK 0xffff -#define OACEC_CONSIDERATIONS_SHIFT 16 - -#define OACEC0_0 _MMIO(0x2770) -#define OACEC0_1 _MMIO(0x2774) -#define OACEC1_0 _MMIO(0x2778) -#define OACEC1_1 _MMIO(0x277c) -#define OACEC2_0 _MMIO(0x2780) -#define OACEC2_1 _MMIO(0x2784) -#define OACEC3_0 _MMIO(0x2788) -#define OACEC3_1 _MMIO(0x278c) -#define OACEC4_0 _MMIO(0x2790) -#define OACEC4_1 _MMIO(0x2794) -#define OACEC5_0 _MMIO(0x2798) -#define OACEC5_1 _MMIO(0x279c) -#define OACEC6_0 _MMIO(0x27a0) -#define OACEC6_1 _MMIO(0x27a4) -#define OACEC7_0 _MMIO(0x27a8) -#define OACEC7_1 _MMIO(0x27ac) - -/* Same layout as CECX_Y */ -#define GEN12_OAG_CEC0_0 _MMIO(0xd940) -#define GEN12_OAG_CEC0_1 _MMIO(0xd944) -#define GEN12_OAG_CEC1_0 _MMIO(0xd948) -#define GEN12_OAG_CEC1_1 _MMIO(0xd94c) -#define GEN12_OAG_CEC2_0 _MMIO(0xd950) -#define GEN12_OAG_CEC2_1 _MMIO(0xd954) -#define GEN12_OAG_CEC3_0 _MMIO(0xd958) -#define GEN12_OAG_CEC3_1 _MMIO(0xd95c) -#define GEN12_OAG_CEC4_0 _MMIO(0xd960) -#define GEN12_OAG_CEC4_1 _MMIO(0xd964) -#define GEN12_OAG_CEC5_0 _MMIO(0xd968) -#define GEN12_OAG_CEC5_1 _MMIO(0xd96c) -#define GEN12_OAG_CEC6_0 _MMIO(0xd970) -#define GEN12_OAG_CEC6_1 _MMIO(0xd974) -#define GEN12_OAG_CEC7_0 _MMIO(0xd978) -#define GEN12_OAG_CEC7_1 _MMIO(0xd97c) - -/* Same layout as CECX_Y + negate 11-bit array */ -#define GEN12_OAG_SCEC0_0 _MMIO(0xdc00) -#define GEN12_OAG_SCEC0_1 _MMIO(0xdc04) -#define GEN12_OAG_SCEC1_0 _MMIO(0xdc08) -#define GEN12_OAG_SCEC1_1 _MMIO(0xdc0c) -#define GEN12_OAG_SCEC2_0 _MMIO(0xdc10) -#define GEN12_OAG_SCEC2_1 _MMIO(0xdc14) -#define GEN12_OAG_SCEC3_0 _MMIO(0xdc18) -#define GEN12_OAG_SCEC3_1 _MMIO(0xdc1c) -#define GEN12_OAG_SCEC4_0 _MMIO(0xdc20) -#define GEN12_OAG_SCEC4_1 _MMIO(0xdc24) -#define GEN12_OAG_SCEC5_0 _MMIO(0xdc28) -#define GEN12_OAG_SCEC5_1 _MMIO(0xdc2c) -#define GEN12_OAG_SCEC6_0 _MMIO(0xdc30) -#define GEN12_OAG_SCEC6_1 _MMIO(0xdc34) -#define GEN12_OAG_SCEC7_0 _MMIO(0xdc38) -#define GEN12_OAG_SCEC7_1 _MMIO(0xdc3c) - -/* OA perf counters */ -#define OA_PERFCNT1_LO _MMIO(0x91B8) -#define OA_PERFCNT1_HI _MMIO(0x91BC) -#define OA_PERFCNT2_LO _MMIO(0x91C0) -#define OA_PERFCNT2_HI _MMIO(0x91C4) -#define OA_PERFCNT3_LO _MMIO(0x91C8) -#define OA_PERFCNT3_HI _MMIO(0x91CC) -#define OA_PERFCNT4_LO _MMIO(0x91D8) -#define OA_PERFCNT4_HI _MMIO(0x91DC) - -#define OA_PERFMATRIX_LO _MMIO(0x91C8) -#define OA_PERFMATRIX_HI _MMIO(0x91CC) - -/* RPM unit config (Gen8+) */ -#define RPM_CONFIG0 _MMIO(0x0D00) -#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3 -#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT) -#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0 -#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1 -#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3 -#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT) -#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0 -#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1 -#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2 -#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3 -#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1 -#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT) - -#define RPM_CONFIG1 _MMIO(0x0D04) -#define 
GEN10_GT_NOA_ENABLE (1 << 9) - -/* GPM unit config (Gen9+) */ -#define CTC_MODE _MMIO(0xA26C) -#define CTC_SOURCE_PARAMETER_MASK 1 -#define CTC_SOURCE_CRYSTAL_CLOCK 0 -#define CTC_SOURCE_DIVIDE_LOGIC 1 -#define CTC_SHIFT_PARAMETER_SHIFT 1 -#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT) - -/* RCP unit config (Gen8+) */ -#define RCP_CONFIG _MMIO(0x0D08) - -/* NOA (HSW) */ -#define HSW_MBVID2_NOA0 _MMIO(0x9E80) -#define HSW_MBVID2_NOA1 _MMIO(0x9E84) -#define HSW_MBVID2_NOA2 _MMIO(0x9E88) -#define HSW_MBVID2_NOA3 _MMIO(0x9E8C) -#define HSW_MBVID2_NOA4 _MMIO(0x9E90) -#define HSW_MBVID2_NOA5 _MMIO(0x9E94) -#define HSW_MBVID2_NOA6 _MMIO(0x9E98) -#define HSW_MBVID2_NOA7 _MMIO(0x9E9C) -#define HSW_MBVID2_NOA8 _MMIO(0x9EA0) -#define HSW_MBVID2_NOA9 _MMIO(0x9EA4) - -#define HSW_MBVID2_MISR0 _MMIO(0x9EC0) - -/* NOA (Gen8+) */ -#define NOA_CONFIG(i) _MMIO(0x0D0C + (i) * 4) - -#define MICRO_BP0_0 _MMIO(0x9800) -#define MICRO_BP0_2 _MMIO(0x9804) -#define MICRO_BP0_1 _MMIO(0x9808) - -#define MICRO_BP1_0 _MMIO(0x980C) -#define MICRO_BP1_2 _MMIO(0x9810) -#define MICRO_BP1_1 _MMIO(0x9814) - -#define MICRO_BP2_0 _MMIO(0x9818) -#define MICRO_BP2_2 _MMIO(0x981C) -#define MICRO_BP2_1 _MMIO(0x9820) - -#define MICRO_BP3_0 _MMIO(0x9824) -#define MICRO_BP3_2 _MMIO(0x9828) -#define MICRO_BP3_1 _MMIO(0x982C) - -#define MICRO_BP_TRIGGER _MMIO(0x9830) -#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834) -#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838) -#define MICRO_BP_FIRED_ARMED _MMIO(0x983C) - -#define GEN12_OAA_DBG_REG _MMIO(0xdc44) -#define GEN12_OAG_OA_PESS _MMIO(0x2b2c) -#define GEN12_OAG_SPCTR_CNF _MMIO(0xdc40) - -#define GDT_CHICKEN_BITS _MMIO(0x9840) -#define GT_NOA_ENABLE 0x00000080 - -#define NOA_DATA _MMIO(0x986C) -#define NOA_WRITE _MMIO(0x9888) -#define GEN10_NOA_WRITE_HIGH _MMIO(0x9884) #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 #define _GEN7_PIPEB_DE_LOAD_SL 0x71068 @@ -1248,177 +249,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104) #define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108) -/* See configdb bunit SB addr map */ -#define BUNIT_REG_BISOC 0x11 - -/* PUNIT_REG_*SSPM0 */ -#define _SSPM0_SSC(val) ((val) << 0) -#define SSPM0_SSC_MASK _SSPM0_SSC(0x3) -#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0) -#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1) -#define SSPM0_SSC_RESET _SSPM0_SSC(0x2) -#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3) -#define _SSPM0_SSS(val) ((val) << 24) -#define SSPM0_SSS_MASK _SSPM0_SSS(0x3) -#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0) -#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1) -#define SSPM0_SSS_RESET _SSPM0_SSS(0x2) -#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3) - -/* PUNIT_REG_*SSPM1 */ -#define SSPM1_FREQSTAT_SHIFT 24 -#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT) -#define SSPM1_FREQGUAR_SHIFT 8 -#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT) -#define SSPM1_FREQ_SHIFT 0 -#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT) - -#define PUNIT_REG_VEDSSPM0 0x32 -#define PUNIT_REG_VEDSSPM1 0x33 - -#define PUNIT_REG_DSPSSPM 0x36 -#define DSPFREQSTAT_SHIFT_CHV 24 -#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) -#define DSPFREQGUAR_SHIFT_CHV 8 -#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV) -#define DSPFREQSTAT_SHIFT 30 -#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) -#define DSPFREQGUAR_SHIFT 14 -#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) -#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */ -#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */ -#define 
DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */ -#define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) -#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) -#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) -#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe)) -#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe)) -#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe)) -#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16)) -#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe)) -#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe)) -#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe)) -#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe)) -#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe)) - -#define PUNIT_REG_ISPSSPM0 0x39 -#define PUNIT_REG_ISPSSPM1 0x3a - -#define PUNIT_REG_PWRGT_CTRL 0x60 -#define PUNIT_REG_PWRGT_STATUS 0x61 -#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_PWR_ON(pw_idx) (0 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_CLK_GATE(pw_idx) (1 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_RESET(pw_idx) (2 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_PWR_GATE(pw_idx) (3 << ((pw_idx) * 2)) - -#define PUNIT_PWGT_IDX_RENDER 0 -#define PUNIT_PWGT_IDX_MEDIA 1 -#define PUNIT_PWGT_IDX_DISP2D 3 -#define PUNIT_PWGT_IDX_DPIO_CMN_BC 5 -#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01 6 -#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23 7 -#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01 8 -#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23 9 -#define PUNIT_PWGT_IDX_DPIO_RX0 10 -#define PUNIT_PWGT_IDX_DPIO_RX1 11 -#define PUNIT_PWGT_IDX_DPIO_CMN_D 12 - -#define PUNIT_REG_GPU_LFM 0xd3 -#define PUNIT_REG_GPU_FREQ_REQ 0xd4 -#define PUNIT_REG_GPU_FREQ_STS 0xd8 -#define GPLLENABLE (1 << 4) -#define GENFREQSTATUS (1 << 0) -#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc -#define PUNIT_REG_CZ_TIMESTAMP 0xce - -#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ -#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ - -#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136 -#define FB_GFX_FREQ_FUSE_MASK 0xff -#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24 -#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16 -#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8 - -#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 -#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 - -#define PUNIT_REG_DDR_SETUP2 0x139 -#define FORCE_DDR_FREQ_REQ_ACK (1 << 8) -#define FORCE_DDR_LOW_FREQ (1 << 1) -#define FORCE_DDR_HIGH_FREQ (1 << 0) - -#define PUNIT_GPU_STATUS_REG 0xdb -#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 -#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff -#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8 -#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff - -#define PUNIT_GPU_DUTYCYCLE_REG 0xdf -#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8 -#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff - -#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c -#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 -#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 -#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11 -#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800 -#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34 -#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007 -#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30 -#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 -#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 - -#define VLV_TURBO_SOC_OVERRIDE 0x04 -#define VLV_OVERRIDE_EN 1 -#define VLV_SOC_TDP_EN (1 << 1) -#define VLV_BIAS_CPU_125_SOC_875 (6 << 2) -#define CHV_BIAS_CPU_50_SOC_50 (3 << 2) - -/* vlv2 north clock has */ -#define CCK_FUSE_REG 0x8 -#define CCK_FUSE_HPLL_FREQ_MASK 0x3 -#define CCK_REG_DSI_PLL_FUSE 0x44 -#define CCK_REG_DSI_PLL_CONTROL 0x48 -#define DSI_PLL_VCO_EN (1 << 31) -#define DSI_PLL_LDO_GATE (1 << 30) -#define 
DSI_PLL_P1_POST_DIV_SHIFT 17 -#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17) -#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13) -#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12) -#define DSI_PLL_MUX_MASK (3 << 9) -#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10) -#define DSI_PLL_MUX_DSI0_CCK (1 << 10) -#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9) -#define DSI_PLL_MUX_DSI1_CCK (1 << 9) -#define DSI_PLL_CLK_GATE_MASK (0xf << 5) -#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8) -#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7) -#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6) -#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5) -#define DSI_PLL_LOCK (1 << 0) -#define CCK_REG_DSI_PLL_DIVIDER 0x4c -#define DSI_PLL_LFSR (1 << 31) -#define DSI_PLL_FRACTION_EN (1 << 30) -#define DSI_PLL_FRAC_COUNTER_SHIFT 27 -#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27) -#define DSI_PLL_USYNC_CNT_SHIFT 18 -#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18) -#define DSI_PLL_N1_DIV_SHIFT 16 -#define DSI_PLL_N1_DIV_MASK (3 << 16) -#define DSI_PLL_M1_DIV_SHIFT 0 -#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) -#define CCK_CZ_CLOCK_CONTROL 0x62 -#define CCK_GPLL_CLOCK_CONTROL 0x67 -#define CCK_DISPLAY_CLOCK_CONTROL 0x6b -#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c -#define CCK_TRUNK_FORCE_ON (1 << 17) -#define CCK_TRUNK_FORCE_OFF (1 << 16) -#define CCK_FREQUENCY_STATUS (0x1f << 8) -#define CCK_FREQUENCY_STATUS_SHIFT 8 -#define CCK_FREQUENCY_VALUES (0x1f << 0) - /* DPIO registers */ #define DPIO_DEVFN 0 @@ -1909,402 +739,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OCL2_LDOFUSE_PWR_DIS (1 << 6) #define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) -/* - * ICL Port/COMBO-PHY Registers - */ -#define _ICL_COMBOPHY_A 0x162000 -#define _ICL_COMBOPHY_B 0x6C000 -#define _EHL_COMBOPHY_C 0x160000 -#define _RKL_COMBOPHY_D 0x161000 -#define _ADL_COMBOPHY_E 0x16B000 - -#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \ - _ICL_COMBOPHY_B, \ - _EHL_COMBOPHY_C, \ - _RKL_COMBOPHY_D, \ - _ADL_COMBOPHY_E) - -/* ICL Port CL_DW registers */ -#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ - 4 * (dw)) - -#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy)) -#define CL_POWER_DOWN_ENABLE (1 << 4) -#define SUS_CLOCK_CONFIG (3 << 0) - -#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy)) -#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) -#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 -#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) -#define PWR_UP_ALL_LANES (0x0 << 4) -#define PWR_DOWN_LN_3_2_1 (0xe << 4) -#define PWR_DOWN_LN_3_2 (0xc << 4) -#define PWR_DOWN_LN_3 (0x8 << 4) -#define PWR_DOWN_LN_2_1_0 (0x7 << 4) -#define PWR_DOWN_LN_1_0 (0x3 << 4) -#define PWR_DOWN_LN_3_1 (0xa << 4) -#define PWR_DOWN_LN_3_1_0 (0xb << 4) -#define PWR_DOWN_LN_MASK (0xf << 4) -#define PWR_DOWN_LN_SHIFT 4 -#define EDP4K2K_MODE_OVRD_EN (1 << 3) -#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2) - -#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) -#define ICL_LANE_ENABLE_AUX (1 << 0) - -/* ICL Port COMP_DW registers */ -#define _ICL_PORT_COMP 0x100 -#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_COMP + 4 * (dw)) - -#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy)) -#define COMP_INIT (1 << 31) - -#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy)) - -#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy)) -#define PROCESS_INFO_DOT_0 (0 << 26) -#define PROCESS_INFO_DOT_1 (1 << 26) -#define PROCESS_INFO_DOT_4 (2 << 26) -#define PROCESS_INFO_MASK (7 << 26) -#define PROCESS_INFO_SHIFT 26 -#define 
VOLTAGE_INFO_0_85V (0 << 24) -#define VOLTAGE_INFO_0_95V (1 << 24) -#define VOLTAGE_INFO_1_05V (2 << 24) -#define VOLTAGE_INFO_MASK (3 << 24) -#define VOLTAGE_INFO_SHIFT 24 - -#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy)) -#define IREFGEN (1 << 24) - -#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy)) - -#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy)) - -/* ICL Port PCS registers */ -#define _ICL_PORT_PCS_AUX 0x300 -#define _ICL_PORT_PCS_GRP 0x600 -#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) -#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_PCS_AUX + 4 * (dw)) -#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_PCS_GRP + 4 * (dw)) -#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_PCS_LN(ln) + 4 * (dw)) -#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) -#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) -#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) -#define DCC_MODE_SELECT_MASK (0x3 << 20) -#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) -#define COMMON_KEEPER_EN (1 << 26) -#define LATENCY_OPTIM_MASK (0x3 << 2) -#define LATENCY_OPTIM_VAL(x) ((x) << 2) - -/* ICL Port TX registers */ -#define _ICL_PORT_TX_AUX 0x380 -#define _ICL_PORT_TX_GRP 0x680 -#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) - -#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_TX_AUX + 4 * (dw)) -#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_TX_GRP + 4 * (dw)) -#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_TX_LN(ln) + 4 * (dw)) - -#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) -#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) -#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy)) -#define SWING_SEL_UPPER(x) (((x) >> 3) << 15) -#define SWING_SEL_UPPER_MASK (1 << 15) -#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) -#define SWING_SEL_LOWER_MASK (0x7 << 11) -#define FRC_LATENCY_OPTIM_MASK (0x7 << 8) -#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) -#define RCOMP_SCALAR(x) ((x) << 0) -#define RCOMP_SCALAR_MASK (0xFF << 0) - -#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) -#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) -#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) -#define LOADGEN_SELECT (1 << 31) -#define POST_CURSOR_1(x) ((x) << 12) -#define POST_CURSOR_1_MASK (0x3F << 12) -#define POST_CURSOR_2(x) ((x) << 6) -#define POST_CURSOR_2_MASK (0x3F << 6) -#define CURSOR_COEFF(x) ((x) << 0) -#define CURSOR_COEFF_MASK (0x3F << 0) - -#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) -#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) -#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy)) -#define TX_TRAINING_EN (1 << 31) -#define TAP2_DISABLE (1 << 30) -#define TAP3_DISABLE (1 << 29) -#define SCALING_MODE_SEL(x) ((x) << 18) -#define SCALING_MODE_SEL_MASK (0x7 << 18) -#define RTERM_SELECT(x) ((x) << 3) -#define RTERM_SELECT_MASK (0x7 << 3) - -#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) -#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) -#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) -#define N_SCALAR(x) ((x) << 24) -#define N_SCALAR_MASK (0x7F << 24) - -#define ICL_PORT_TX_DW8_AUX(phy) 
_MMIO(_ICL_PORT_TX_DW_AUX(8, phy)) -#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy)) -#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy)) -#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31) -#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29) -#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1) - -#define _ICL_DPHY_CHKN_REG 0x194 -#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) -#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) - -#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \ - _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) - -#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C -#define MG_TX1_LINK_PARAMS(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ - MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ - MG_TX_LINK_PARAMS_TX1LN1_PORT1) - -#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC -#define MG_TX2_LINK_PARAMS(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ - MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ - MG_TX_LINK_PARAMS_TX2LN1_PORT1) -#define CRI_USE_FS32 (1 << 5) - -#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C -#define MG_TX1_PISO_READLOAD(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ - MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ - MG_TX_PISO_READLOAD_TX1LN1_PORT1) - -#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC -#define MG_TX2_PISO_READLOAD(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ - MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ - MG_TX_PISO_READLOAD_TX2LN1_PORT1) -#define CRI_CALCINIT (1 << 1) - -#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 -#define MG_TX1_SWINGCTRL(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, 
MG_TX_SWINGCTRL_TX1LN0_PORT1, \ - MG_TX_SWINGCTRL_TX1LN0_PORT2, \ - MG_TX_SWINGCTRL_TX1LN1_PORT1) - -#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 -#define MG_TX2_SWINGCTRL(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ - MG_TX_SWINGCTRL_TX2LN0_PORT2, \ - MG_TX_SWINGCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) -#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) - -#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544 -#define MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544 -#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 -#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 -#define MG_TX1_DRVCTRL(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ - MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ - MG_TX_DRVCTRL_TX1LN1_TXPORT1) - -#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 -#define MG_TX2_DRVCTRL(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ - MG_TX_DRVCTRL_TX2LN0_PORT2, \ - MG_TX_DRVCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) -#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) -#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) -#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) -#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) -#define CRI_LOADGEN_SEL(x) ((x) << 12) -#define CRI_LOADGEN_SEL_MASK (0x3 << 12) - -#define MG_CLKHUB_LN0_PORT1 0x16839C -#define MG_CLKHUB_LN1_PORT1 0x16879C -#define MG_CLKHUB_LN0_PORT2 0x16939C -#define MG_CLKHUB_LN1_PORT2 0x16979C -#define MG_CLKHUB_LN0_PORT3 0x16A39C -#define MG_CLKHUB_LN1_PORT3 0x16A79C -#define MG_CLKHUB_LN0_PORT4 0x16B39C -#define MG_CLKHUB_LN1_PORT4 0x16B79C -#define MG_CLKHUB(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \ - MG_CLKHUB_LN0_PORT2, \ - MG_CLKHUB_LN1_PORT1) -#define CFG_LOW_RATE_LKREN_EN (1 << 11) - -#define MG_TX_DCC_TX1LN0_PORT1 0x168110 -#define MG_TX_DCC_TX1LN1_PORT1 0x168510 -#define MG_TX_DCC_TX1LN0_PORT2 0x169110 -#define MG_TX_DCC_TX1LN1_PORT2 0x169510 -#define MG_TX_DCC_TX1LN0_PORT3 0x16A110 -#define MG_TX_DCC_TX1LN1_PORT3 0x16A510 -#define MG_TX_DCC_TX1LN0_PORT4 0x16B110 -#define MG_TX_DCC_TX1LN1_PORT4 0x16B510 -#define MG_TX1_DCC(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \ - MG_TX_DCC_TX1LN0_PORT2, \ - MG_TX_DCC_TX1LN1_PORT1) -#define MG_TX_DCC_TX2LN0_PORT1 0x168090 -#define MG_TX_DCC_TX2LN1_PORT1 0x168490 -#define MG_TX_DCC_TX2LN0_PORT2 0x169090 -#define MG_TX_DCC_TX2LN1_PORT2 0x169490 -#define MG_TX_DCC_TX2LN0_PORT3 0x16A090 -#define MG_TX_DCC_TX2LN1_PORT3 0x16A490 -#define MG_TX_DCC_TX2LN0_PORT4 0x16B090 -#define MG_TX_DCC_TX2LN1_PORT4 0x16B490 -#define MG_TX2_DCC(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \ - MG_TX_DCC_TX2LN0_PORT2, 
\ - MG_TX_DCC_TX2LN1_PORT1) -#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) -#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) -#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) - -#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0 -#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0 -#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0 -#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0 -#define MG_DP_MODE_LN0_ACU_PORT3 0x16A3A0 -#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 -#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 -#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 -#define MG_DP_MODE(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \ - MG_DP_MODE_LN0_ACU_PORT2, \ - MG_DP_MODE_LN1_ACU_PORT1) -#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) -#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) - -/* - * DG2 SNPS PHY registers (TC1 = PHY_E) - */ -#define _SNPS_PHY_A_BASE 0x168000 -#define _SNPS_PHY_B_BASE 0x169000 -#define _SNPS_PHY(phy) _PHY(phy, \ - _SNPS_PHY_A_BASE, \ - _SNPS_PHY_B_BASE) -#define _SNPS2(phy, reg) (_SNPS_PHY(phy) - \ - _SNPS_PHY_A_BASE + (reg)) -#define _MMIO_SNPS(phy, reg) _MMIO(_SNPS2(phy, reg)) -#define _MMIO_SNPS_LN(ln, phy, reg) _MMIO(_SNPS2(phy, \ - (reg) + (ln) * 0x10)) - -#define SNPS_PHY_MPLLB_CP(phy) _MMIO_SNPS(phy, 0x168000) -#define SNPS_PHY_MPLLB_CP_INT REG_GENMASK(31, 25) -#define SNPS_PHY_MPLLB_CP_INT_GS REG_GENMASK(23, 17) -#define SNPS_PHY_MPLLB_CP_PROP REG_GENMASK(15, 9) -#define SNPS_PHY_MPLLB_CP_PROP_GS REG_GENMASK(7, 1) - -#define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004) -#define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31) -#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30) -#define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29) -#define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26) -#define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24) -#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16) -#define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10) -#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9) -#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8) -#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5) -#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0) - -#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008) -#define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31) -#define SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN REG_BIT(30) -#define SNPS_PHY_MPLLB_FRACN_DEN REG_GENMASK(15, 0) - -#define SNPS_PHY_MPLLB_FRACN2(phy) _MMIO_SNPS(phy, 0x16800C) -#define SNPS_PHY_MPLLB_FRACN_REM REG_GENMASK(31, 16) -#define SNPS_PHY_MPLLB_FRACN_QUOT REG_GENMASK(15, 0) - -#define SNPS_PHY_MPLLB_SSCEN(phy) _MMIO_SNPS(phy, 0x168014) -#define SNPS_PHY_MPLLB_SSC_EN REG_BIT(31) -#define SNPS_PHY_MPLLB_SSC_UP_SPREAD REG_BIT(30) -#define SNPS_PHY_MPLLB_SSC_PEAK REG_GENMASK(29, 10) - -#define SNPS_PHY_MPLLB_SSCSTEP(phy) _MMIO_SNPS(phy, 0x168018) -#define SNPS_PHY_MPLLB_SSC_STEPSIZE REG_GENMASK(31, 11) - -#define SNPS_PHY_MPLLB_DIV2(phy) _MMIO_SNPS(phy, 0x16801C) -#define SNPS_PHY_MPLLB_HDMI_PIXEL_CLK_DIV REG_GENMASK(19, 18) -#define SNPS_PHY_MPLLB_HDMI_DIV REG_GENMASK(17, 15) -#define SNPS_PHY_MPLLB_REF_CLK_DIV REG_GENMASK(14, 12) -#define SNPS_PHY_MPLLB_MULTIPLIER REG_GENMASK(11, 0) - -#define SNPS_PHY_REF_CONTROL(phy) _MMIO_SNPS(phy, 0x168188) -#define SNPS_PHY_REF_CONTROL_REF_RANGE REG_GENMASK(31, 27) - -#define SNPS_PHY_TX_REQ(phy) _MMIO_SNPS(phy, 0x168200) -#define SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR REG_GENMASK(31, 30) - -#define SNPS_PHY_TX_EQ(ln, phy) _MMIO_SNPS_LN(ln, phy, 0x168300) -#define SNPS_PHY_TX_EQ_MAIN REG_GENMASK(23, 18) -#define SNPS_PHY_TX_EQ_POST REG_GENMASK(15, 10) -#define SNPS_PHY_TX_EQ_PRE REG_GENMASK(7, 2) - /* The spec defines this only for BXT 
PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. */ @@ -2313,21 +747,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) -#define FIA1_BASE 0x163000 -#define FIA2_BASE 0x16E000 -#define FIA3_BASE 0x16F000 -#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) -#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) - -/* ICL PHY DFLEX registers */ -#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) -#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx))) - /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C #define _PORT_REF_DW3_BC 0x6C18C @@ -2553,64 +972,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define XEHP_VEBOX3_RING_BASE 0x1e8000 #define XEHP_VEBOX4_RING_BASE 0x1f8000 #define BLT_RING_BASE 0x22000 -#define RING_TAIL(base) _MMIO((base) + 0x30) -#define RING_HEAD(base) _MMIO((base) + 0x34) -#define RING_START(base) _MMIO((base) + 0x38) -#define RING_CTL(base) _MMIO((base) + 0x3c) -#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ -#define RING_SYNC_0(base) _MMIO((base) + 0x40) -#define RING_SYNC_1(base) _MMIO((base) + 0x44) -#define RING_SYNC_2(base) _MMIO((base) + 0x48) -#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) -#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) -#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE)) -#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) -#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) -#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE)) -#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) -#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) -#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE)) -#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE)) -#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) -#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) -#define GEN6_NOSYNC INVALID_MMIO_REG -#define RING_PSMI_CTL(base) _MMIO((base) + 0x50) -#define RING_MAX_IDLE(base) _MMIO((base) + 0x54) -#define RING_HWS_PGA(base) _MMIO((base) + 0x80) -#define RING_ID(base) _MMIO((base) + 0x8c) -#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080) - -#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4) -/* - * CMD_CCTL read/write fields take a MOCS value and _not_ a table index. - * The lsb of each can be considered a separate enabling bit for encryption. - * 6:0 == default MOCS value for reads => 6:1 == table index for reads. - * 13:7 == default MOCS value for writes => 13:8 == table index for writes. - * 15:14 == Reserved => 31:30 are set to 0. 
- */ -#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7) -#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0) -#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \ - CMD_CCTL_READ_OVERRIDE_MASK) -#define CMD_CCTL_MOCS_OVERRIDE(write, read) \ - (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \ - REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1)) - -#define BLIT_CCTL(base) _MMIO((base) + 0x204) -#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8) -#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0) -#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \ - BLIT_CCTL_SRC_MOCS_MASK) -#define BLIT_CCTL_MOCS(dst, src) \ - (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \ - REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1)) - -#define RING_RESET_CTL(base) _MMIO((base) + 0xd0) -#define RESET_CTL_CAT_ERROR REG_BIT(2) -#define RESET_CTL_READY_TO_RESET REG_BIT(1) -#define RESET_CTL_REQUEST_RESET REG_BIT(0) - -#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c) + + #define HSW_GTT_CACHE_EN _MMIO(0x4024) #define GTT_CACHE_EN_ALL 0xF0007FFF @@ -2627,190 +990,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070) #define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074) -#define GAMTARBMODE _MMIO(0x04a08) -#define ARB_MODE_BWGTLB_DISABLE (1 << 9) -#define ARB_MODE_SWIZZLE_BDW (1 << 1) -#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080) - -#define _RING_FAULT_REG_RCS 0x4094 -#define _RING_FAULT_REG_VCS 0x4194 -#define _RING_FAULT_REG_BCS 0x4294 -#define _RING_FAULT_REG_VECS 0x4394 -#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \ - _RING_FAULT_REG_RCS, \ - _RING_FAULT_REG_VCS, \ - _RING_FAULT_REG_VECS, \ - _RING_FAULT_REG_BCS)) -#define GEN8_RING_FAULT_REG _MMIO(0x4094) -#define GEN12_RING_FAULT_REG _MMIO(0xcec4) -#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7) -#define RING_FAULT_GTTSEL_MASK (1 << 11) -#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) -#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) -#define RING_FAULT_VALID (1 << 0) -#define DONE_REG _MMIO(0x40b0) -#define GEN12_GAM_DONE _MMIO(0xcf68) -#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) -#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) -#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) -#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4) -#define BSD_HWS_PGA_GEN7 _MMIO(0x04180) -#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208) -#define GEN12_VD0_AUX_NV _MMIO(0x4218) -#define GEN12_VD1_AUX_NV _MMIO(0x4228) -#define GEN12_VD2_AUX_NV _MMIO(0x4298) -#define GEN12_VD3_AUX_NV _MMIO(0x42A8) -#define GEN12_VE0_AUX_NV _MMIO(0x4238) -#define GEN12_VE1_AUX_NV _MMIO(0x42B8) -#define AUX_INV REG_BIT(0) -#define BLT_HWS_PGA_GEN7 _MMIO(0x04280) -#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380) -#define RING_ACTHD(base) _MMIO((base) + 0x74) -#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c) -#define RING_NOPID(base) _MMIO((base) + 0x94) -#define RING_IMR(base) _MMIO((base) + 0xa8) -#define RING_HWSTAM(base) _MMIO((base) + 0x98) -#define RING_TIMESTAMP(base) _MMIO((base) + 0x358) -#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4) -#define TAIL_ADDR 0x001FFFF8 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 -#define RING_WAIT_I8XX (1 
<< 0) /* gen2, PRBx_HEAD */ -#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ -#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ - -#define MISC_STATUS0 _MMIO(0xA500) -#define MISC_STATUS1 _MMIO(0xA504) - -/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ -#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) -#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) - -#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) -#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2) -#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ -#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) -#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) -#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28) -#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28) -#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */ -#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0) -#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0) -#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0) -#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0) -#define RING_FORCE_TO_NONPRIV_MASK_VALID \ - (RING_FORCE_TO_NONPRIV_RANGE_MASK \ - | RING_FORCE_TO_NONPRIV_ACCESS_MASK) -#define RING_MAX_NONPRIV_SLOTS 12 - -#define GEN7_TLB_RD_ADDR _MMIO(0x4700) - -#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0) -#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18) - -#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080) -#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF -#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7) - -#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) -#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31) -#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28) -#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24) - -#define GEN8_RTCR _MMIO(0x4260) -#define GEN8_M1TCR _MMIO(0x4264) -#define GEN8_M2TCR _MMIO(0x4268) -#define GEN8_BTCR _MMIO(0x426c) -#define GEN8_VTCR _MMIO(0x4270) - -#if 0 -#define PRB0_TAIL _MMIO(0x2030) -#define PRB0_HEAD _MMIO(0x2034) -#define PRB0_START _MMIO(0x2038) -#define PRB0_CTL _MMIO(0x203c) -#define PRB1_TAIL _MMIO(0x2040) /* 915+ only */ -#define PRB1_HEAD _MMIO(0x2044) /* 915+ only */ -#define PRB1_START _MMIO(0x2048) /* 915+ only */ -#define PRB1_CTL _MMIO(0x204c) /* 915+ only */ -#endif -#define IPEIR_I965 _MMIO(0x2064) -#define IPEHR_I965 _MMIO(0x2068) -#define GEN7_SC_INSTDONE _MMIO(0x7100) -#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104) -#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108) -#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160) -#define GEN7_ROW_INSTDONE _MMIO(0xe164) -#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c) -#define MCFG_MCR_SELECTOR _MMIO(0xfd0) -#define SF_MCR_SELECTOR _MMIO(0xfd8) -#define GEN8_MCR_SELECTOR _MMIO(0xfdc) -#define GEN8_MCR_SLICE(slice) (((slice) & 3) << 26) -#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3) -#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24) -#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3) -#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27) -#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf) -#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24) -#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7) -#define RING_IPEIR(base) _MMIO((base) + 0x64) -#define RING_IPEHR(base) _MMIO((base) + 0x68) -#define RING_EIR(base) _MMIO((base) + 0xb0) -#define RING_EMR(base) _MMIO((base) + 0xb4) -#define RING_ESR(base) _MMIO((base) + 0xb8) -/* - * On GEN4, only the render ring INSTDONE exists and has a different - * layout than the GEN7+ version. 
- * The GEN2 counterpart of this register is GEN2_INSTDONE. - */ -#define RING_INSTDONE(base) _MMIO((base) + 0x6c) -#define RING_INSTPS(base) _MMIO((base) + 0x70) -#define RING_DMA_FADD(base) _MMIO((base) + 0x78) -#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */ -#define RING_INSTPM(base) _MMIO((base) + 0xc0) -#define RING_MI_MODE(base) _MMIO((base) + 0x9c) -#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84) -#define INSTPS _MMIO(0x2070) /* 965+ only */ -#define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */ -#define ACTHD_I965 _MMIO(0x2074) -#define HWS_PGA _MMIO(0x2080) -#define HWS_ADDRESS_MASK 0xfffff000 -#define HWS_START_ADDRESS_SHIFT 4 -#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */ -#define PWRCTX_EN (1 << 0) -#define IPEIR(base) _MMIO((base) + 0x88) -#define IPEHR(base) _MMIO((base) + 0x8c) -#define GEN2_INSTDONE _MMIO(0x2090) -#define NOPID _MMIO(0x2094) -#define HWSTAM _MMIO(0x2098) -#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0) -#define RING_BBSTATE(base) _MMIO((base) + 0x110) -#define RING_BB_PPGTT (1 << 5) -#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */ -#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ -#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ -#define RING_BBADDR(base) _MMIO((base) + 0x140) -#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ -#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */ -#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */ -#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */ -#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */ - -#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10) -#define IECPUNIT_CLKGATE_DIS REG_BIT(22) - -#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18) -#define ALNUNIT_CLKGATE_DIS REG_BIT(13) - -#define ERROR_GEN6 _MMIO(0x40a0) #define GEN7_ERR_INT _MMIO(0x44040) #define ERR_INT_POISON (1 << 31) #define ERR_INT_MMIO_UNCLAIMED (1 << 13) @@ -2823,20 +1002,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ERR_INT_FIFO_UNDERRUN_A (1 << 0) #define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3)) -#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10) -#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14) -#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8) -#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc) -#define FAULT_VA_HIGH_BITS (0xf << 0) -#define FAULT_GTT_SEL (1 << 4) - -#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8) -#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc) -#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0) -#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4) - -#define GEN12_AUX_ERR_DBG _MMIO(0x43f4) - #define FPGA_DBG _MMIO(0x42300) #define FPGA_DBG_RM_NOCLAIM REG_BIT(31) @@ -2864,95 +1029,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define DERRMR_PIPEC_VBLANK (1 << 21) #define DERRMR_PIPEC_HBLANK (1 << 22) - -/* GM45+ chicken bits -- debug workaround bits that may be required - * for various sorts of correct behavior. The top 16 bits of each are - * the enables for writing to the corresponding low bit. - */ -#define _3D_CHICKEN _MMIO(0x2084) -#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) -#define _3D_CHICKEN2 _MMIO(0x208c) - -#define FF_SLICE_CHICKEN _MMIO(0x2088) -#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1) - -/* Disables pipelining of read flushes past the SF-WIZ interface. - * Required on all Ironlake steppings according to the B-Spec, but the - * particular danger of not doing so is not specified. 
- */ -# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) -#define _3D_CHICKEN3 _MMIO(0x2090) -#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12) -#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) -#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) -#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) -#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */ -#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ - -#define MI_MODE _MMIO(0x209c) -# define VS_TIMER_DISPATCH (1 << 6) -# define MI_FLUSH_ENABLE (1 << 12) -# define TGL_NESTED_BB_EN (1 << 12) -# define ASYNC_FLIP_PERF_DISABLE (1 << 14) -# define MODE_IDLE (1 << 9) -# define STOP_RING (1 << 8) - -#define GEN6_GT_MODE _MMIO(0x20d0) -#define GEN7_GT_MODE _MMIO(0x7008) -#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7)) -#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) -#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) -#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) -#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) -#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) -#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) -#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) - -/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ -#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4) -#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) -#define GEN11_ENABLE_32_PLANE_MODE (1 << 7) - -#define SCCGCTL94DC _MMIO(0x94dc) -#define CG3DDISURB REG_BIT(14) - -#define MLTICTXCTL _MMIO(0xb170) -#define TDONRENDER REG_BIT(2) - -#define L3SQCREG1_CCS0 _MMIO(0xb200) -#define FLUSHALLNONCOH REG_BIT(5) - -/* WaClearTdlStateAckDirtyBits */ -#define GEN8_STATE_ACK _MMIO(0x20F0) -#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) -#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100) -#define GEN9_STATE_ACK_TDL0 (1 << 12) -#define GEN9_STATE_ACK_TDL1 (1 << 13) -#define GEN9_STATE_ACK_TDL2 (1 << 14) -#define GEN9_STATE_ACK_TDL3 (1 << 15) -#define GEN9_SUBSLICE_TDL_ACK_BITS \ - (GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \ - GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0) - -#define GFX_MODE _MMIO(0x2520) -#define GFX_MODE_GEN7 _MMIO(0x229c) -#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c) -#define GFX_RUN_LIST_ENABLE (1 << 15) -#define GFX_INTERRUPT_STEERING (1 << 14) -#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13) -#define GFX_SURFACE_FAULT_ENABLE (1 << 12) -#define GFX_REPLAY_MODE (1 << 11) -#define GFX_PSMI_GRANULARITY (1 << 10) -#define GFX_PPGTT_ENABLE (1 << 9) -#define GEN8_GFX_PPGTT_48B (1 << 7) - -#define GFX_FORWARD_VBLANK_MASK (3 << 5) -#define GFX_FORWARD_VBLANK_NEVER (0 << 5) -#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5) -#define GFX_FORWARD_VBLANK_COND (2 << 5) - -#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3) - #define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030) #define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034) #define SCPD0 _MMIO(0x209c) /* 915+ only */ @@ -2992,7 +1068,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */ #define INSTPM_TLB_INVALIDATE (1 << 9) #define INSTPM_SYNC_FLUSH (1 << 5) -#define ACTHD(base) _MMIO((base) + 0xc8) #define MEM_MODE _MMIO(0x20cc) #define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */ #define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */ @@ -3119,132 +1194,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MI_AGPBUSY_INT_EN (1 << 1) /* 85x only */ #define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */ -#define CACHE_MODE_0 
_MMIO(0x2120) /* 915+ only */ -#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8) -#define CM0_IZ_OPT_DISABLE (1 << 6) -#define CM0_ZR_OPT_DISABLE (1 << 5) -#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5) -#define CM0_DEPTH_EVICT_DISABLE (1 << 4) -#define CM0_COLOR_EVICT_DISABLE (1 << 3) -#define CM0_DEPTH_WRITE_DISABLE (1 << 1) -#define CM0_RC_OP_FLUSH_DISABLE (1 << 0) -#define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */ -#define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008) -#define GFX_FLSH_CNTL_EN (1 << 0) -#define ECOSKPD _MMIO(0x21d0) -#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4) -#define ECO_GATING_CX_ONLY (1 << 3) -#define ECO_FLIP_DONE (1 << 0) - -#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ -#define RC_OP_FLUSH_ENABLE (1 << 0) -#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2) -#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */ -#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6) -#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6) -#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1) - -#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0) -#define GEN6_BLITTER_LOCK_SHIFT 16 -#define GEN6_BLITTER_FBC_NOTIFY (1 << 3) - -#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050) -#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) -#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) -#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) -#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10) - -#define GEN6_RCS_PWR_FSM _MMIO(0x22ac) -#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) - -#define GEN10_CACHE_MODE_SS _MMIO(0xe420) -#define ENABLE_PREFETCH_INTO_IC REG_BIT(3) -#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4) - -/* Fuse readout registers for GT */ -#define HSW_PAVP_FUSE1 _MMIO(0x911C) -#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24) -#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16) -#define HSW_F1_EU_DIS_10EUS 0 -#define HSW_F1_EU_DIS_8EUS 1 -#define HSW_F1_EU_DIS_6EUS 2 - -#define CHV_FUSE_GT _MMIO(VLV_DISPLAY_BASE + 0x2168) -#define CHV_FGT_DISABLE_SS0 (1 << 10) -#define CHV_FGT_DISABLE_SS1 (1 << 11) -#define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 -#define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT) -#define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20 -#define CHV_FGT_EU_DIS_SS0_R1_MASK (0xf << CHV_FGT_EU_DIS_SS0_R1_SHIFT) -#define CHV_FGT_EU_DIS_SS1_R0_SHIFT 24 -#define CHV_FGT_EU_DIS_SS1_R0_MASK (0xf << CHV_FGT_EU_DIS_SS1_R0_SHIFT) -#define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28 -#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) - -#define GEN8_FUSE2 _MMIO(0x9120) -#define GEN8_F2_SS_DIS_SHIFT 21 -#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT) -#define GEN8_F2_S_ENA_SHIFT 25 -#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT) - -#define GEN9_F2_SS_DIS_SHIFT 20 -#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) - -#define GEN10_F2_S_ENA_SHIFT 22 -#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT) -#define GEN10_F2_SS_DIS_SHIFT 18 -#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT) - -#define GEN10_MIRROR_FUSE3 _MMIO(0x9118) -#define GEN10_L3BANK_PAIR_COUNT 4 -#define GEN10_L3BANK_MASK 0x0F -/* on Xe_HP the same fuses indicates mslices instead of L3 banks */ -#define GEN12_MAX_MSLICES 4 -#define GEN12_MEML3_EN_MASK 0x0F - -#define GEN8_EU_DISABLE0 _MMIO(0x9134) -#define GEN8_EU_DIS0_S0_MASK 0xffffff -#define GEN8_EU_DIS0_S1_SHIFT 24 -#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT) - -#define GEN8_EU_DISABLE1 _MMIO(0x9138) -#define GEN8_EU_DIS1_S1_MASK 0xffff -#define GEN8_EU_DIS1_S2_SHIFT 16 -#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT) - 
-#define GEN8_EU_DISABLE2 _MMIO(0x913c) -#define GEN8_EU_DIS2_S2_MASK 0xff - -#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4) - -#define GEN10_EU_DISABLE3 _MMIO(0x9140) -#define GEN10_EU_DIS_SS_MASK 0xff - -#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) -#define GEN11_GT_VDBOX_DISABLE_MASK 0xff -#define GEN11_GT_VEBOX_DISABLE_SHIFT 16 -#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT) - -#define GEN11_EU_DISABLE _MMIO(0x9134) -#define GEN11_EU_DIS_MASK 0xFF - -#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138) -#define GEN11_GT_S_ENA_MASK 0xFF - -#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C) - -#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913C) -#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144) - -#define XEHP_EU_ENABLE _MMIO(0x9134) -#define XEHP_EU_ENA_MASK 0xFF - -#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) -#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) -#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) -#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) -#define GEN6_BSD_GO_INDICATOR (1 << 4) - /* On modern GEN architectures interrupt control consists of two sets * of registers. The first set pertains to the ring generating the * interrupt. The second control is for the functional block generating the @@ -3402,10 +1351,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FBC_LL_SIZE (1536) /* Framebuffer compression for GM45+ */ -#define DPFC_CB_BASE _MMIO(0x3200) -#define ILK_DPFC_CB_BASE _MMIO(0x43200) -#define DPFC_CONTROL _MMIO(0x3208) -#define ILK_DPFC_CONTROL _MMIO(0x43208) +#define DPFC_CB_BASE _MMIO(0x3200) +#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240) +#define DPFC_CONTROL _MMIO(0x3208) +#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248) #define DPFC_CTL_EN REG_BIT(31) #define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ #define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) @@ -3423,28 +1372,28 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) #define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) #define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) -#define DPFC_RECOMP_CTL _MMIO(0x320c) -#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) +#define DPFC_RECOMP_CTL _MMIO(0x320c) +#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c) #define DPFC_RECOMP_STALL_EN REG_BIT(27) #define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) #define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) -#define DPFC_STATUS _MMIO(0x3210) -#define ILK_DPFC_STATUS _MMIO(0x43210) +#define DPFC_STATUS _MMIO(0x3210) +#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250) #define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) #define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0) -#define DPFC_STATUS2 _MMIO(0x3214) -#define ILK_DPFC_STATUS2 _MMIO(0x43214) +#define DPFC_STATUS2 _MMIO(0x3214) +#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254) #define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0) -#define DPFC_FENCE_YOFF _MMIO(0x3218) -#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) -#define DPFC_CHICKEN _MMIO(0x3224) -#define ILK_DPFC_CHICKEN _MMIO(0x43224) +#define DPFC_FENCE_YOFF _MMIO(0x3218) +#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258) +#define DPFC_CHICKEN _MMIO(0x3224) +#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264) #define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ #define 
DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ #define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ #define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ -#define GLK_FBC_STRIDE _MMIO(0x43228) +#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268) #define FBC_STRIDE_OVERRIDE REG_BIT(15) #define FBC_STRIDE_MASK REG_GENMASK(14, 0) #define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) @@ -3487,9 +1436,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IPS_CTL _MMIO(0x43408) #define IPS_ENABLE (1 << 31) -#define MSG_FBC_REND_STATE _MMIO(0x50380) +#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384) #define FBC_REND_NUKE REG_BIT(2) -#define FBC_REND_CACHE_CLEAN REG_BIT(1) +#define FBC_REND_CACHE_CLEAN REG_BIT(1) /* * GPIO regs @@ -3878,413 +1827,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) _PALETTE_B, _CHV_PALETTE_C) + \ (i) * 4) -/* MCH MMIO space */ - -/* - * MCHBAR mirror. - * - * This mirrors the MCHBAR MMIO space whose location is determined by - * device 0 function 0's pci config register 0x44 or 0x48 and matches it in - * every way. It is not accessible from the CP register read instructions. - * - * Starting from Haswell, you can't write registers using the MCHBAR mirror, - * just read. - */ -#define MCHBAR_MIRROR_BASE 0x10000 - -#define MCHBAR_MIRROR_BASE_SNB 0x140000 - -#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34) -#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48) -#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16) -#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4) -#define G4X_STOLEN_RESERVED_ENABLE (1 << 0) - -/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ -#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04) - -/* 915-945 and GM965 MCH register controlling DRAM channel access */ -#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200) -#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) -#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) -#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) -#define DCC_ADDRESSING_MODE_MASK (3 << 0) -#define DCC_CHANNEL_XOR_DISABLE (1 << 10) -#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) -#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204) -#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20) - -/* Pineview MCH register contains DDR3 setting */ -#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8) -#define CSHRDDR3CTL_DDR3 (1 << 2) - -/* 965 MCH register controlling DRAM channel configuration */ -#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206) -#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606) - -/* snb MCH registers for reading the DRAM channel configuration */ -#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004) -#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008) -#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C) -#define MAD_DIMM_ECC_MASK (0x3 << 24) -#define MAD_DIMM_ECC_OFF (0x0 << 24) -#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) -#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) -#define MAD_DIMM_ECC_ON (0x3 << 24) -#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) -#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) -#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ -#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ -#define MAD_DIMM_B_DUAL_RANK (0x1 << 18) -#define MAD_DIMM_A_DUAL_RANK (0x1 << 17) -#define MAD_DIMM_A_SELECT (0x1 << 16) -/* DIMM sizes are in multiples of 256mb. 
*/ -#define MAD_DIMM_B_SIZE_SHIFT 8 -#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) -#define MAD_DIMM_A_SIZE_SHIFT 0 -#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) - -/* snb MCH registers for priority tuning */ -#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10) -#define MCH_SSKPD_WM0_MASK 0x3f -#define MCH_SSKPD_WM0_VAL 0xc - -/* Clocking configuration register */ -#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00) -#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */ -#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */ -#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ -#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ -#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ -#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ -#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */ -#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ -#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */ -#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */ -#define CLKCFG_FSB_MASK (7 << 0) -#define CLKCFG_MEM_533 (1 << 4) -#define CLKCFG_MEM_667 (2 << 4) -#define CLKCFG_MEM_800 (3 << 4) -#define CLKCFG_MEM_MASK (7 << 4) - -#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38) -#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f) - -#define TSC1 _MMIO(0x11001) -#define TSE (1 << 0) -#define TR1 _MMIO(0x11006) -#define TSFS _MMIO(0x11020) -#define TSFS_SLOPE_MASK 0x0000ff00 -#define TSFS_SLOPE_SHIFT 8 -#define TSFS_INTR_MASK 0x000000ff - -#define CRSTANDVID _MMIO(0x11100) -#define PXVFREQ(fstart) _MMIO(0x11110 + (fstart) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ -#define PXVFREQ_PX_MASK 0x7f000000 -#define PXVFREQ_PX_SHIFT 24 -#define VIDFREQ_BASE _MMIO(0x11110) -#define VIDFREQ1 _MMIO(0x11110) /* VIDFREQ1-4 (0x1111c) (Cantiga) */ -#define VIDFREQ2 _MMIO(0x11114) -#define VIDFREQ3 _MMIO(0x11118) -#define VIDFREQ4 _MMIO(0x1111c) -#define VIDFREQ_P0_MASK 0x1f000000 -#define VIDFREQ_P0_SHIFT 24 -#define VIDFREQ_P0_CSCLK_MASK 0x00f00000 -#define VIDFREQ_P0_CSCLK_SHIFT 20 -#define VIDFREQ_P0_CRCLK_MASK 0x000f0000 -#define VIDFREQ_P0_CRCLK_SHIFT 16 -#define VIDFREQ_P1_MASK 0x00001f00 -#define VIDFREQ_P1_SHIFT 8 -#define VIDFREQ_P1_CSCLK_MASK 0x000000f0 -#define VIDFREQ_P1_CSCLK_SHIFT 4 -#define VIDFREQ_P1_CRCLK_MASK 0x0000000f -#define INTTOEXT_BASE_ILK _MMIO(0x11300) -#define INTTOEXT_BASE _MMIO(0x11120) /* INTTOEXT1-8 (0x1113c) */ -#define INTTOEXT_MAP3_SHIFT 24 -#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) -#define INTTOEXT_MAP2_SHIFT 16 -#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) -#define INTTOEXT_MAP1_SHIFT 8 -#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) -#define INTTOEXT_MAP0_SHIFT 0 -#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) -#define MEMSWCTL _MMIO(0x11170) /* Ironlake only */ -#define MEMCTL_CMD_MASK 0xe000 -#define MEMCTL_CMD_SHIFT 13 -#define MEMCTL_CMD_RCLK_OFF 0 -#define MEMCTL_CMD_RCLK_ON 1 -#define MEMCTL_CMD_CHFREQ 2 -#define MEMCTL_CMD_CHVID 3 -#define MEMCTL_CMD_VMMOFF 4 -#define MEMCTL_CMD_VMMON 5 -#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears - when command complete */ -#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ -#define MEMCTL_FREQ_SHIFT 8 -#define MEMCTL_SFCAVM (1 << 7) -#define MEMCTL_TGT_VID_MASK 0x007f -#define MEMIHYST _MMIO(0x1117c) -#define MEMINTREN _MMIO(0x11180) /* 16 bits */ -#define MEMINT_RSEXIT_EN (1 << 8) -#define MEMINT_CX_SUPR_EN (1 << 7) -#define MEMINT_CONT_BUSY_EN (1 << 6) -#define MEMINT_AVG_BUSY_EN (1 << 5) -#define MEMINT_EVAL_CHG_EN (1 << 4) 
-#define MEMINT_MON_IDLE_EN (1 << 3) -#define MEMINT_UP_EVAL_EN (1 << 2) -#define MEMINT_DOWN_EVAL_EN (1 << 1) -#define MEMINT_SW_CMD_EN (1 << 0) -#define MEMINTRSTR _MMIO(0x11182) /* 16 bits */ -#define MEM_RSEXIT_MASK 0xc000 -#define MEM_RSEXIT_SHIFT 14 -#define MEM_CONT_BUSY_MASK 0x3000 -#define MEM_CONT_BUSY_SHIFT 12 -#define MEM_AVG_BUSY_MASK 0x0c00 -#define MEM_AVG_BUSY_SHIFT 10 -#define MEM_EVAL_CHG_MASK 0x0300 -#define MEM_EVAL_BUSY_SHIFT 8 -#define MEM_MON_IDLE_MASK 0x00c0 -#define MEM_MON_IDLE_SHIFT 6 -#define MEM_UP_EVAL_MASK 0x0030 -#define MEM_UP_EVAL_SHIFT 4 -#define MEM_DOWN_EVAL_MASK 0x000c -#define MEM_DOWN_EVAL_SHIFT 2 -#define MEM_SW_CMD_MASK 0x0003 -#define MEM_INT_STEER_GFX 0 -#define MEM_INT_STEER_CMR 1 -#define MEM_INT_STEER_SMI 2 -#define MEM_INT_STEER_SCI 3 -#define MEMINTRSTS _MMIO(0x11184) -#define MEMINT_RSEXIT (1 << 7) -#define MEMINT_CONT_BUSY (1 << 6) -#define MEMINT_AVG_BUSY (1 << 5) -#define MEMINT_EVAL_CHG (1 << 4) -#define MEMINT_MON_IDLE (1 << 3) -#define MEMINT_UP_EVAL (1 << 2) -#define MEMINT_DOWN_EVAL (1 << 1) -#define MEMINT_SW_CMD (1 << 0) -#define MEMMODECTL _MMIO(0x11190) -#define MEMMODE_BOOST_EN (1 << 31) -#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ -#define MEMMODE_BOOST_FREQ_SHIFT 24 -#define MEMMODE_IDLE_MODE_MASK 0x00030000 -#define MEMMODE_IDLE_MODE_SHIFT 16 -#define MEMMODE_IDLE_MODE_EVAL 0 -#define MEMMODE_IDLE_MODE_CONT 1 -#define MEMMODE_HWIDLE_EN (1 << 15) -#define MEMMODE_SWMODE_EN (1 << 14) -#define MEMMODE_RCLK_GATE (1 << 13) -#define MEMMODE_HW_UPDATE (1 << 12) -#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ -#define MEMMODE_FSTART_SHIFT 8 -#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ -#define MEMMODE_FMAX_SHIFT 4 -#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ -#define RCBMAXAVG _MMIO(0x1119c) -#define MEMSWCTL2 _MMIO(0x1119e) /* Cantiga only */ -#define SWMEMCMD_RENDER_OFF (0 << 13) -#define SWMEMCMD_RENDER_ON (1 << 13) -#define SWMEMCMD_SWFREQ (2 << 13) -#define SWMEMCMD_TARVID (3 << 13) -#define SWMEMCMD_VRM_OFF (4 << 13) -#define SWMEMCMD_VRM_ON (5 << 13) -#define CMDSTS (1 << 12) -#define SFCAVM (1 << 11) -#define SWFREQ_MASK 0x0380 /* P0-7 */ -#define SWFREQ_SHIFT 7 -#define TARVID_MASK 0x001f -#define MEMSTAT_CTG _MMIO(0x111a0) -#define RCBMINAVG _MMIO(0x111a0) -#define RCUPEI _MMIO(0x111b0) -#define RCDNEI _MMIO(0x111b4) -#define RSTDBYCTL _MMIO(0x111b8) -#define RS1EN (1 << 31) -#define RS2EN (1 << 30) -#define RS3EN (1 << 29) -#define D3RS3EN (1 << 28) /* Display D3 imlies RS3 */ -#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */ -#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */ -#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */ -#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */ -#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */ -#define RSX_STATUS_MASK (7 << 20) -#define RSX_STATUS_ON (0 << 20) -#define RSX_STATUS_RC1 (1 << 20) -#define RSX_STATUS_RC1E (2 << 20) -#define RSX_STATUS_RS1 (3 << 20) -#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */ -#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */ -#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */ -#define RSX_STATUS_RSVD2 (7 << 20) -#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */ -#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */ -#define JRSC (1 << 17) /* rsx coupled to cpu c-state */ -#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */ -#define RS1CONTSAV_MASK (3 
<< 14) -#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */ -#define RS1CONTSAV_RSVD (1 << 14) -#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */ -#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */ -#define NORMSLEXLAT_MASK (3 << 12) -#define SLOW_RS123 (0 << 12) -#define SLOW_RS23 (1 << 12) -#define SLOW_RS3 (2 << 12) -#define NORMAL_RS123 (3 << 12) -#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */ -#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ -#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */ -#define STATELOCK (1 << 7) /* locked to rs_cstate if 0 */ -#define RS_CSTATE_MASK (3 << 4) -#define RS_CSTATE_C367_RS1 (0 << 4) -#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4) -#define RS_CSTATE_RSVD (2 << 4) -#define RS_CSTATE_C367_RS2 (3 << 4) -#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */ -#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */ -#define VIDCTL _MMIO(0x111c0) -#define VIDSTS _MMIO(0x111c8) -#define VIDSTART _MMIO(0x111cc) /* 8 bits */ -#define MEMSTAT_ILK _MMIO(0x111f8) -#define MEMSTAT_VID_MASK 0x7f00 -#define MEMSTAT_VID_SHIFT 8 -#define MEMSTAT_PSTATE_MASK 0x00f8 -#define MEMSTAT_PSTATE_SHIFT 3 -#define MEMSTAT_MON_ACTV (1 << 2) -#define MEMSTAT_SRC_CTL_MASK 0x0003 -#define MEMSTAT_SRC_CTL_CORE 0 -#define MEMSTAT_SRC_CTL_TRB 1 -#define MEMSTAT_SRC_CTL_THM 2 -#define MEMSTAT_SRC_CTL_STDBY 3 -#define RCPREVBSYTUPAVG _MMIO(0x113b8) -#define RCPREVBSYTDNAVG _MMIO(0x113bc) -#define PMMISC _MMIO(0x11214) -#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */ -#define SDEW _MMIO(0x1124c) -#define CSIEW0 _MMIO(0x11250) -#define CSIEW1 _MMIO(0x11254) -#define CSIEW2 _MMIO(0x11258) -#define PEW(i) _MMIO(0x1125c + (i) * 4) /* 5 registers */ -#define DEW(i) _MMIO(0x11270 + (i) * 4) /* 3 registers */ -#define MCHAFE _MMIO(0x112c0) -#define CSIEC _MMIO(0x112e0) -#define DMIEC _MMIO(0x112e4) -#define DDREC _MMIO(0x112e8) -#define PEG0EC _MMIO(0x112ec) -#define PEG1EC _MMIO(0x112f0) -#define GFXEC _MMIO(0x112f4) -#define RPPREVBSYTUPAVG _MMIO(0x113b8) -#define RPPREVBSYTDNAVG _MMIO(0x113bc) -#define ECR _MMIO(0x11600) -#define ECR_GPFE (1 << 31) -#define ECR_IMONE (1 << 30) -#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ -#define OGW0 _MMIO(0x11608) -#define OGW1 _MMIO(0x1160c) -#define EG0 _MMIO(0x11610) -#define EG1 _MMIO(0x11614) -#define EG2 _MMIO(0x11618) -#define EG3 _MMIO(0x1161c) -#define EG4 _MMIO(0x11620) -#define EG5 _MMIO(0x11624) -#define EG6 _MMIO(0x11628) -#define EG7 _MMIO(0x1162c) -#define PXW(i) _MMIO(0x11664 + (i) * 4) /* 4 registers */ -#define PXWL(i) _MMIO(0x11680 + (i) * 8) /* 8 registers */ -#define LCFUSE02 _MMIO(0x116c0) -#define LCFUSE_HIV_MASK 0x000000ff -#define CSIPLL0 _MMIO(0x12c10) -#define DDRMPLL1 _MMIO(0X12c20) #define PEG_BAND_GAP_DATA _MMIO(0x14d68) -#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c) -#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 - -#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948) -#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070) -#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994) -#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) -#define RP0_CAP_MASK REG_GENMASK(7, 0) -#define RP1_CAP_MASK REG_GENMASK(15, 8) -#define RPN_CAP_MASK REG_GENMASK(23, 16) #define BXT_RP_STATE_CAP _MMIO(0x138170) #define GEN9_RP_STATE_LIMITS _MMIO(0x138148) #define XEHPSDV_RP_STATE_CAP 
_MMIO(0x250014) -/* - * Logical Context regs - */ -#define CCID(base) _MMIO((base) + 0x180) -#define CCID_EN BIT(0) -#define CCID_EXTENDED_STATE_RESTORE BIT(2) -#define CCID_EXTENDED_STATE_SAVE BIT(3) -/* - * Notes on SNB/IVB/VLV context size: - * - Power context is saved elsewhere (LLC or stolen) - * - Ring/execlist context is saved on SNB, not on IVB - * - Extended context size already includes render context size - * - We always need to follow the extended context size. - * SNB BSpec has comments indicating that we should use the - * render context size instead if execlists are disabled, but - * based on empirical testing that's just nonsense. - * - Pipelined/VF state is saved on SNB/IVB respectively - * - GT1 size just indicates how much of render context - * doesn't need saving on GT1 - */ -#define CXT_SIZE _MMIO(0x21a0) -#define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f) -#define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f) -#define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f) -#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f) -#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f) -#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ - GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ - GEN6_CXT_PIPELINE_SIZE(cxt_reg)) -#define GEN7_CXT_SIZE _MMIO(0x21a8) -#define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f) -#define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7) -#define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f) -#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f) -#define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7) -#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f) -#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ - GEN7_CXT_VFSTATE_SIZE(ctx_reg)) - -enum { - INTEL_ADVANCED_CONTEXT = 0, - INTEL_LEGACY_32B_CONTEXT, - INTEL_ADVANCED_AD_CONTEXT, - INTEL_LEGACY_64B_CONTEXT -}; - -enum { - FAULT_AND_HANG = 0, - FAULT_AND_HALT, /* Debug only */ - FAULT_AND_STREAM, - FAULT_AND_CONTINUE /* Unsupported */ -}; - -#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12) -#define GEN8_CTX_VALID (1 << 0) -#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1) -#define GEN8_CTX_FORCE_RESTORE (1 << 2) -#define GEN8_CTX_L3LLC_COHERENT (1 << 5) -#define GEN8_CTX_PRIVILEGE (1 << 8) -#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 - -#define GEN8_CTX_ID_SHIFT 32 -#define GEN8_CTX_ID_WIDTH 21 -#define GEN11_SW_CTX_ID_SHIFT 37 -#define GEN11_SW_CTX_ID_WIDTH 11 -#define GEN11_ENGINE_CLASS_SHIFT 61 -#define GEN11_ENGINE_CLASS_WIDTH 3 -#define GEN11_ENGINE_INSTANCE_SHIFT 48 -#define GEN11_ENGINE_INSTANCE_WIDTH 6 - -#define XEHP_SW_CTX_ID_SHIFT 39 -#define XEHP_SW_CTX_ID_WIDTH 16 -#define XEHP_SW_COUNTER_SHIFT 58 -#define XEHP_SW_COUNTER_WIDTH 6 - #define CHV_CLK_CTL1 _MMIO(0x101100) #define VLV_CLK_CTL2 _MMIO(0x101104) #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 @@ -4336,75 +1884,6 @@ enum { _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B) /* - * GEN10 clock gating regs - */ - -#define UNSLCGCTL9440 _MMIO(0x9440) -#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28) -#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27) -#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26) -#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24) -#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23) -#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22) -#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21) -#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17) -#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16) -#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15) -#define 
GAMTLBBLT_CLKGATE_DIS REG_BIT(14) -#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6) - -#define UNSLCGCTL9444 _MMIO(0x9444) -#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30) -#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29) -#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28) -#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27) -#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26) -#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25) -#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24) -#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23) -#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22) -#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21) -#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20) -#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19) -#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18) -#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17) -#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) -#define LTCDD_CLKGATE_DIS REG_BIT(10) - -#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) -#define SARBUNIT_CLKGATE_DIS (1 << 5) -#define RCCUNIT_CLKGATE_DIS (1 << 7) -#define MSCUNIT_CLKGATE_DIS (1 << 10) -#define NODEDSS_CLKGATE_DIS REG_BIT(12) -#define L3_CLKGATE_DIS REG_BIT(16) -#define L3_CR2X_CLKGATE_DIS REG_BIT(17) - -#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524) -#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28) -#define GWUNIT_CLKGATE_DIS REG_BIT(16) - -#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528) -#define CPSSUNIT_CLKGATE_DIS REG_BIT(9) - -#define SSMCGCTL9530 _MMIO(0x9530) -#define RTFUNIT_CLKGATE_DIS REG_BIT(18) - -#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) -#define VFUNIT_CLKGATE_DIS REG_BIT(20) -#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */ -#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */ -#define GAMEDIA_CLKGATE_DIS REG_BIT(11) -#define HSUNIT_CLKGATE_DIS REG_BIT(8) -#define VSUNIT_CLKGATE_DIS REG_BIT(3) - -#define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) -#define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) -#define PSDUNIT_CLKGATE_DIS REG_BIT(5) - -#define INF_UNIT_LEVEL_CLKGATE _MMIO(0x9560) -#define CGPSF_CLKGATE_DIS (1 << 3) - -/* * Display engine regs */ @@ -4498,6 +1977,10 @@ enum { #define _VSYNC_A 0x60014 #define _EXITLINE_A 0x60018 #define _PIPEASRC 0x6001c +#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16) +#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w)) +#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0) +#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h)) #define _BCLRPAT_A 0x60020 #define _VSYNCSHIFT_A 0x60028 #define _PIPE_MULT_A 0x6002c @@ -6148,16 +3631,14 @@ enum { #define _PIPEB_DATA_M_G4X 0x71050 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ -#define TU_SIZE(x) (((x) - 1) << 25) /* default size 64 */ -#define TU_SIZE_SHIFT 25 -#define TU_SIZE_MASK (0x3f << 25) +#define TU_SIZE_MASK REG_GENMASK(30, 25) +#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */ -#define DATA_LINK_M_N_MASK (0xffffff) +#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0) #define DATA_LINK_N_MAX (0x800000) #define _PIPEA_DATA_N_G4X 0x70054 #define _PIPEB_DATA_N_G4X 0x71054 -#define PIPE_GMCH_DATA_N_MASK (0xffffff) /* * Computing Link M and N values for the Display Port link @@ -6172,11 +3653,8 @@ enum { #define _PIPEA_LINK_M_G4X 0x70060 #define _PIPEB_LINK_M_G4X 0x71060 -#define PIPEA_DP_LINK_M_MASK (0xffffff) - #define _PIPEA_LINK_N_G4X 0x70064 #define _PIPEB_LINK_N_G4X 0x71064 -#define PIPEA_DP_LINK_N_MASK (0xffffff) #define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) #define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) @@ -6187,65 
+3665,61 @@ enum { /* Pipe A */ #define _PIPEADSL 0x70000 -#define DSL_LINEMASK_GEN2 0x00000fff -#define DSL_LINEMASK_GEN3 0x00001fff +#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ +#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) #define _PIPEACONF 0x70008 -#define PIPECONF_ENABLE (1 << 31) -#define PIPECONF_DISABLE 0 -#define PIPECONF_DOUBLE_WIDE (1 << 30) -#define I965_PIPECONF_ACTIVE (1 << 30) -#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */ -#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) /* pre-hsw */ -#define PIPECONF_FRAME_START_DELAY(x) ((x) << 27) /* pre-hsw: 0-3 */ -#define PIPECONF_SINGLE_WIDE 0 -#define PIPECONF_PIPE_UNLOCKED 0 -#define PIPECONF_PIPE_LOCKED (1 << 25) -#define PIPECONF_FORCE_BORDER (1 << 25) -#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */ -#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */ -#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */ -#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */ -#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */ -#define PIPECONF_GAMMA_MODE_SHIFT 24 -#define PIPECONF_INTERLACE_MASK (7 << 21) -#define PIPECONF_INTERLACE_MASK_HSW (3 << 21) -/* Note that pre-gen3 does not support interlaced display directly. Panel - * fitting must be disabled on pre-ilk for interlaced. */ -#define PIPECONF_PROGRESSIVE (0 << 21) -#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ -#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ -#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) -#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ -/* Ironlake and later have a complete new set of values for interlaced. PFIT - * means panel fitter required, PF means progressive fetch, DBL means power - * saving pixel doubling. 
*/ -#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) -#define PIPECONF_INTERLACED_ILK (3 << 21) -#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ -#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ -#define PIPECONF_INTERLACE_MODE_MASK (7 << 21) -#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20) -#define PIPECONF_CXSR_DOWNCLOCK (1 << 16) -#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14) -#define PIPECONF_COLOR_RANGE_SELECT (1 << 13) -#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */ -#define PIPECONF_BPC_MASK (0x7 << 5) -#define PIPECONF_8BPC (0 << 5) -#define PIPECONF_10BPC (1 << 5) -#define PIPECONF_6BPC (2 << 5) -#define PIPECONF_12BPC (3 << 5) -#define PIPECONF_DITHER_EN (1 << 4) -#define PIPECONF_DITHER_TYPE_MASK (0x0000000c) -#define PIPECONF_DITHER_TYPE_SP (0 << 2) -#define PIPECONF_DITHER_TYPE_ST1 (1 << 2) -#define PIPECONF_DITHER_TYPE_ST2 (2 << 2) -#define PIPECONF_DITHER_TYPE_TEMP (3 << 2) +#define PIPECONF_ENABLE REG_BIT(31) +#define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ +#define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */ +#define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ +#define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ +#define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ +#define PIPECONF_PIPE_LOCKED REG_BIT(25) +#define PIPECONF_FORCE_BORDER REG_BIT(25) +#define PIPECONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */ +#define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ +#define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0) +#define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1) +#define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ +#define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ +#define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ +#define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ +#define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0) +#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */ +#define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */ +#define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6) +#define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */ +/* + * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display, + * DBL=power saving pixel doubling, PF-ID* requires panel fitter + */ +#define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ +#define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ +#define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0) +#define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1) +#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3) +#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */ +#define PIPECONF_INTERLACE_PF_ID_DBL_ILK 
REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ +#define PIPECONF_EDP_RR_MODE_SWITCH REG_BIT(20) +#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16) +#define PIPECONF_EDP_RR_MODE_SWITCH_VLV REG_BIT(14) +#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13) +#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ +#define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ +#define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0) +#define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1) +#define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2) +#define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3) +#define PIPECONF_DITHER_EN REG_BIT(4) +#define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) +#define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0) +#define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1) +#define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2) +#define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3) #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) @@ -6330,38 +3804,41 @@ enum { #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 -#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */ -#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */ -#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */ -#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11) -#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ +#define PIPEMISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ +#define PIPEMISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ +#define PIPEMISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ +#define PIPEMISC_OUTPUT_COLORSPACE_YUV REG_BIT(11) +#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ /* * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with * valid values of: 6, 8, 10 BPC. * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of: * 6, 8, 10, 12 BPC. 
*/ -#define PIPEMISC_BPC_MASK (7 << 5) -#define PIPEMISC_8_BPC (0 << 5) -#define PIPEMISC_10_BPC (1 << 5) -#define PIPEMISC_6_BPC (2 << 5) -#define PIPEMISC_12_BPC_ADLP (4 << 5) /* adlp+ */ -#define PIPEMISC_DITHER_ENABLE (1 << 4) -#define PIPEMISC_DITHER_TYPE_MASK (3 << 2) -#define PIPEMISC_DITHER_TYPE_SP (0 << 2) +#define PIPEMISC_BPC_MASK REG_GENMASK(7, 5) +#define PIPEMISC_BPC_8 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 0) +#define PIPEMISC_BPC_10 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 1) +#define PIPEMISC_BPC_6 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 2) +#define PIPEMISC_BPC_12_ADLP REG_FIELD_PREP(PIPEMISC_BPC_MASK, 4) /* adlp+ */ +#define PIPEMISC_DITHER_ENABLE REG_BIT(4) +#define PIPEMISC_DITHER_TYPE_MASK REG_GENMASK(3, 2) +#define PIPEMISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 0) +#define PIPEMISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 1) +#define PIPEMISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 2) +#define PIPEMISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 3) #define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) #define _PIPE_MISC2_A 0x7002C #define _PIPE_MISC2_B 0x7102C -#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN (0x50 << 24) -#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS (0x14 << 24) -#define PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK (0xff << 24) +#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24) +#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80) +#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20) #define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A) /* Skylake+ pipe bottom (background) color */ #define _SKL_BOTTOM_COLOR_A 0x70034 -#define SKL_BOTTOM_COLOR_GAMMA_ENABLE (1 << 31) -#define SKL_BOTTOM_COLOR_CSC_ENABLE (1 << 30) +#define SKL_BOTTOM_COLOR_GAMMA_ENABLE REG_BIT(31) +#define SKL_BOTTOM_COLOR_CSC_ENABLE REG_BIT(30) #define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A) #define _ICL_PIPE_A_STATUS 0x70058 @@ -6702,49 +4179,32 @@ enum { #define _WM0_PIPEC_IVB 0x45200 #define WM0_PIPE_ILK(pipe) _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB) -#define WM0_PIPE_PLANE_MASK (0xffff << 16) -#define WM0_PIPE_PLANE_SHIFT 16 -#define WM0_PIPE_SPRITE_MASK (0xff << 8) -#define WM0_PIPE_SPRITE_SHIFT 8 -#define WM0_PIPE_CURSOR_MASK (0xff) +#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16) +#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8) +#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0) +#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x)) +#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x)) +#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x)) #define WM1_LP_ILK _MMIO(0x45108) -#define WM1_LP_SR_EN (1 << 31) -#define WM1_LP_LATENCY_SHIFT 24 -#define WM1_LP_LATENCY_MASK (0x7f << 24) -#define WM1_LP_FBC_MASK (0xf << 20) -#define WM1_LP_FBC_SHIFT 20 -#define WM1_LP_FBC_SHIFT_BDW 19 -#define WM1_LP_SR_MASK (0x7ff << 8) -#define WM1_LP_SR_SHIFT 8 -#define WM1_LP_CURSOR_MASK (0xff) #define WM2_LP_ILK _MMIO(0x4510c) -#define WM2_LP_EN (1 << 31) #define WM3_LP_ILK _MMIO(0x45110) -#define WM3_LP_EN (1 << 31) +#define WM_LP_ENABLE REG_BIT(31) +#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24) +#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19) +#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20) +#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8) +#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0) +#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x)) +#define 
WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x)) +#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x)) +#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x)) +#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x)) #define WM1S_LP_ILK _MMIO(0x45120) #define WM2S_LP_IVB _MMIO(0x45124) #define WM3S_LP_IVB _MMIO(0x45128) -#define WM1S_LP_EN (1 << 31) - -#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \ - (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \ - ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur)) - -/* Memory latency timer register */ -#define MLTR_ILK _MMIO(0x11222) -#define MLTR_WM1_SHIFT 0 -#define MLTR_WM2_SHIFT 8 -/* the unit of memory self-refresh latency time is 0.5us */ -#define ILK_SRLT_MASK 0x3f - - -/* the address where we get all kinds of latency value */ -#define SSKPD _MMIO(0x5d10) -#define SSKPD_WM_MASK 0x3f -#define SSKPD_WM0_SHIFT 0 -#define SSKPD_WM1_SHIFT 8 -#define SSKPD_WM2_SHIFT 16 -#define SSKPD_WM3_SHIFT 24 +#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */ +#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0) +#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x)) /* * The two pipe frame counter registers are not synchronized, so @@ -6778,44 +4238,50 @@ enum { /* Cursor A & B regs */ #define _CURACNTR 0x70080 /* Old style CUR*CNTR flags (desktop 8xx) */ -#define CURSOR_ENABLE 0x80000000 -#define CURSOR_GAMMA_ENABLE 0x40000000 -#define CURSOR_STRIDE_SHIFT 28 -#define CURSOR_STRIDE(x) ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */ -#define CURSOR_FORMAT_SHIFT 24 -#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) +#define CURSOR_ENABLE REG_BIT(31) +#define CURSOR_PIPE_GAMMA_ENABLE REG_BIT(30) +#define CURSOR_STRIDE_MASK REG_GENMASK(29, 28) +#define CURSOR_STRIDE(stride) REG_FIELD_PREP(CURSOR_STRIDE_MASK, ffs(stride) - 9) /* 256,512,1k,2k */ +#define CURSOR_FORMAT_MASK REG_GENMASK(26, 24) +#define CURSOR_FORMAT_2C REG_FIELD_PREP(CURSOR_FORMAT_MASK, 0) +#define CURSOR_FORMAT_3C REG_FIELD_PREP(CURSOR_FORMAT_MASK, 1) +#define CURSOR_FORMAT_4C REG_FIELD_PREP(CURSOR_FORMAT_MASK, 2) +#define CURSOR_FORMAT_ARGB REG_FIELD_PREP(CURSOR_FORMAT_MASK, 4) +#define CURSOR_FORMAT_XRGB REG_FIELD_PREP(CURSOR_FORMAT_MASK, 5) /* New style CUR*CNTR flags */ -#define MCURSOR_MODE 0x27 -#define MCURSOR_MODE_DISABLE 0x00 -#define MCURSOR_MODE_128_32B_AX 0x02 -#define MCURSOR_MODE_256_32B_AX 0x03 -#define MCURSOR_MODE_64_32B_AX 0x07 -#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX) -#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX) -#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX) #define MCURSOR_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */ #define MCURSOR_ARB_SLOTS(x) REG_FIELD_PREP(MCURSOR_ARB_SLOTS_MASK, (x)) /* icl+ */ -#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28) -#define MCURSOR_PIPE_SELECT_SHIFT 28 -#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28) -#define MCURSOR_GAMMA_ENABLE (1 << 26) -#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ -#define MCURSOR_ROTATE_180 (1 << 15) -#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14) +#define MCURSOR_PIPE_SEL_MASK REG_GENMASK(29, 28) +#define MCURSOR_PIPE_SEL(pipe) REG_FIELD_PREP(MCURSOR_PIPE_SEL_MASK, (pipe)) +#define 
MCURSOR_PIPE_GAMMA_ENABLE REG_BIT(26) +#define MCURSOR_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */ +#define MCURSOR_ROTATE_180 REG_BIT(15) +#define MCURSOR_TRICKLE_FEED_DISABLE REG_BIT(14) +#define MCURSOR_MODE_MASK 0x27 +#define MCURSOR_MODE_DISABLE 0x00 +#define MCURSOR_MODE_128_32B_AX 0x02 +#define MCURSOR_MODE_256_32B_AX 0x03 +#define MCURSOR_MODE_64_32B_AX 0x07 +#define MCURSOR_MODE_128_ARGB_AX (0x20 | MCURSOR_MODE_128_32B_AX) +#define MCURSOR_MODE_256_ARGB_AX (0x20 | MCURSOR_MODE_256_32B_AX) +#define MCURSOR_MODE_64_ARGB_AX (0x20 | MCURSOR_MODE_64_32B_AX) #define _CURABASE 0x70084 #define _CURAPOS 0x70088 -#define CURSOR_POS_MASK 0x007FF -#define CURSOR_POS_SIGN 0x8000 -#define CURSOR_X_SHIFT 0 -#define CURSOR_Y_SHIFT 16 -#define CURSIZE _MMIO(0x700a0) /* 845/865 */ +#define CURSOR_POS_Y_SIGN REG_BIT(31) +#define CURSOR_POS_Y_MASK REG_GENMASK(30, 16) +#define CURSOR_POS_Y(y) REG_FIELD_PREP(CURSOR_POS_Y_MASK, (y)) +#define CURSOR_POS_X_SIGN REG_BIT(15) +#define CURSOR_POS_X_MASK REG_GENMASK(14, 0) +#define CURSOR_POS_X(x) REG_FIELD_PREP(CURSOR_POS_X_MASK, (x)) +#define _CURASIZE 0x700a0 /* 845/865 */ +#define CURSOR_HEIGHT_MASK REG_GENMASK(21, 12) +#define CURSOR_HEIGHT(h) REG_FIELD_PREP(CURSOR_HEIGHT_MASK, (h)) +#define CURSOR_WIDTH_MASK REG_GENMASK(9, 0) +#define CURSOR_WIDTH(w) REG_FIELD_PREP(CURSOR_WIDTH_MASK, (w)) #define _CUR_FBC_CTL_A 0x700a0 /* ivb+ */ -#define CUR_FBC_CTL_EN (1 << 31) +#define CUR_FBC_EN REG_BIT(31) +#define CUR_FBC_HEIGHT_MASK REG_GENMASK(7, 0) +#define CUR_FBC_HEIGHT(h) REG_FIELD_PREP(CUR_FBC_HEIGHT_MASK, (h)) #define _CURASURFLIVE 0x700ac /* g4x+ */ #define _CURBCNTR 0x700c0 #define _CURBBASE 0x700c4 @@ -6828,6 +4294,7 @@ enum { #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR) #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE) #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS) +#define CURSIZE(pipe) _CURSOR2(pipe, _CURASIZE) #define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A) #define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE) @@ -6841,49 +4308,54 @@ enum { /* Display A control */ #define _DSPAADDR_VLV 0x7017C /* vlv/chv */ #define _DSPACNTR 0x70180 -#define DISPLAY_PLANE_ENABLE (1 << 31) -#define DISPLAY_PLANE_DISABLE 0 -#define DISPPLANE_GAMMA_ENABLE (1 << 30) -#define DISPPLANE_GAMMA_DISABLE 0 -#define DISPPLANE_PIXFORMAT_MASK (0xf << 26) -#define DISPPLANE_YUV422 (0x0 << 26) -#define DISPPLANE_8BPP (0x2 << 26) -#define DISPPLANE_BGRA555 (0x3 << 26) -#define DISPPLANE_BGRX555 (0x4 << 26) -#define DISPPLANE_BGRX565 (0x5 << 26) -#define DISPPLANE_BGRX888 (0x6 << 26) -#define DISPPLANE_BGRA888 (0x7 << 26) -#define DISPPLANE_RGBX101010 (0x8 << 26) -#define DISPPLANE_RGBA101010 (0x9 << 26) -#define DISPPLANE_BGRX101010 (0xa << 26) -#define DISPPLANE_BGRA101010 (0xb << 26) -#define DISPPLANE_RGBX161616 (0xc << 26) -#define DISPPLANE_RGBX888 (0xe << 26) -#define DISPPLANE_RGBA888 (0xf << 26) -#define DISPPLANE_STEREO_ENABLE (1 << 25) -#define DISPPLANE_STEREO_DISABLE 0 -#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ -#define DISPPLANE_SEL_PIPE_SHIFT 24 -#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT) -#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT) -#define DISPPLANE_SRC_KEY_ENABLE (1 << 22) -#define DISPPLANE_SRC_KEY_DISABLE 0 -#define DISPPLANE_LINE_DOUBLE (1 << 20) -#define DISPPLANE_NO_LINE_DOUBLE 0 -#define DISPPLANE_STEREO_POLARITY_FIRST 0 -#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18) -#define DISPPLANE_ALPHA_PREMULTIPLY (1 << 16) /* CHV pipe B */ -#define DISPPLANE_ROTATE_180 (1 << 15) -#define 
DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */ -#define DISPPLANE_TILED (1 << 10) -#define DISPPLANE_ASYNC_FLIP (1 << 9) /* g4x+ */ -#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */ +#define DISP_ENABLE REG_BIT(31) +#define DISP_PIPE_GAMMA_ENABLE REG_BIT(30) +#define DISP_FORMAT_MASK REG_GENMASK(29, 26) +#define DISP_FORMAT_8BPP REG_FIELD_PREP(DISP_FORMAT_MASK, 2) +#define DISP_FORMAT_BGRA555 REG_FIELD_PREP(DISP_FORMAT_MASK, 3) +#define DISP_FORMAT_BGRX555 REG_FIELD_PREP(DISP_FORMAT_MASK, 4) +#define DISP_FORMAT_BGRX565 REG_FIELD_PREP(DISP_FORMAT_MASK, 5) +#define DISP_FORMAT_BGRX888 REG_FIELD_PREP(DISP_FORMAT_MASK, 6) +#define DISP_FORMAT_BGRA888 REG_FIELD_PREP(DISP_FORMAT_MASK, 7) +#define DISP_FORMAT_RGBX101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 8) +#define DISP_FORMAT_RGBA101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 9) +#define DISP_FORMAT_BGRX101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 10) +#define DISP_FORMAT_BGRA101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 11) +#define DISP_FORMAT_RGBX161616 REG_FIELD_PREP(DISP_FORMAT_MASK, 12) +#define DISP_FORMAT_RGBX888 REG_FIELD_PREP(DISP_FORMAT_MASK, 14) +#define DISP_FORMAT_RGBA888 REG_FIELD_PREP(DISP_FORMAT_MASK, 15) +#define DISP_STEREO_ENABLE REG_BIT(25) +#define DISP_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */ +#define DISP_PIPE_SEL_MASK REG_GENMASK(25, 24) +#define DISP_PIPE_SEL(pipe) REG_FIELD_PREP(DISP_PIPE_SEL_MASK, (pipe)) +#define DISP_SRC_KEY_ENABLE REG_BIT(22) +#define DISP_LINE_DOUBLE REG_BIT(20) +#define DISP_STEREO_POLARITY_SECOND REG_BIT(18) +#define DISP_ALPHA_PREMULTIPLY REG_BIT(16) /* CHV pipe B */ +#define DISP_ROTATE_180 REG_BIT(15) +#define DISP_TRICKLE_FEED_DISABLE REG_BIT(14) /* g4x+ */ +#define DISP_TILED REG_BIT(10) +#define DISP_ASYNC_FLIP REG_BIT(9) /* g4x+ */ +#define DISP_MIRROR REG_BIT(8) /* CHV pipe B */ #define _DSPAADDR 0x70184 #define _DSPASTRIDE 0x70188 #define _DSPAPOS 0x7018C /* reserved */ +#define DISP_POS_Y_MASK REG_GENMASK(31, 0) +#define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y)) +#define DISP_POS_X_MASK REG_GENMASK(15, 0) +#define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x)) #define _DSPASIZE 0x70190 +#define DISP_HEIGHT_MASK REG_GENMASK(31, 0) +#define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h)) +#define DISP_WIDTH_MASK REG_GENMASK(15, 0) +#define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w)) #define _DSPASURF 0x7019C /* 965+ only */ +#define DISP_ADDR_MASK REG_GENMASK(31, 12) #define _DSPATILEOFF 0x701A4 /* 965+ only */ +#define DISP_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define DISP_OFFSET_Y(y) REG_FIELD_PREP(DISP_OFFSET_Y_MASK, (y)) +#define DISP_OFFSET_X_MASK REG_GENMASK(15, 0) +#define DISP_OFFSET_X(x) REG_FIELD_PREP(DISP_OFFSET_X_MASK, (x)) #define _DSPAOFFSET 0x701A4 /* HSW */ #define _DSPASURFLIVE 0x701AC #define _DSPAGAMC 0x701E0 @@ -6903,15 +4375,28 @@ enum { /* CHV pipe B blender and primary plane */ #define _CHV_BLEND_A 0x60a00 -#define CHV_BLEND_LEGACY (0 << 30) -#define CHV_BLEND_ANDROID (1 << 30) -#define CHV_BLEND_MPO (2 << 30) -#define CHV_BLEND_MASK (3 << 30) +#define CHV_BLEND_MASK REG_GENMASK(31, 30) +#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0) +#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1) +#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2) #define _CHV_CANVAS_A 0x60a04 +#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20) +#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10) +#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0) #define _PRIMPOS_A 0x60a08 +#define PRIM_POS_Y_MASK REG_GENMASK(31, 16) +#define PRIM_POS_Y(y) 
REG_FIELD_PREP(PRIM_POS_Y_MASK, (y)) +#define PRIM_POS_X_MASK REG_GENMASK(15, 0) +#define PRIM_POS_X(x) REG_FIELD_PREP(PRIM_POS_X_MASK, (x)) #define _PRIMSIZE_A 0x60a0c +#define PRIM_HEIGHT_MASK REG_GENMASK(31, 16) +#define PRIM_HEIGHT(h) REG_FIELD_PREP(PRIM_HEIGHT_MASK, (h)) +#define PRIM_WIDTH_MASK REG_GENMASK(15, 0) +#define PRIM_WIDTH(w) REG_FIELD_PREP(PRIM_WIDTH_MASK, (w)) #define _PRIMCNSTALPHA_A 0x60a10 -#define PRIM_CONST_ALPHA_ENABLE (1 << 31) +#define PRIM_CONST_ALPHA_ENABLE REG_BIT(31) +#define PRIM_CONST_ALPHA_MASK REG_GENMASK(7, 0) +#define PRIM_CONST_ALPHA(alpha) REG_FIELD_PREP(PRIM_CONST_ALPHA_MASK, (alpha)) #define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A) #define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A) @@ -6952,10 +4437,8 @@ enum { /* Display B control */ #define _DSPBCNTR (DISPLAY_MMIO_BASE(dev_priv) + 0x71180) -#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15) -#define DISPPLANE_ALPHA_TRANS_DISABLE 0 -#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 -#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) +#define DISP_ALPHA_TRANS_ENABLE REG_BIT(15) +#define DISP_SPRITE_ABOVE_OVERLAY REG_BIT(0) #define _DSPBADDR (DISPLAY_MMIO_BASE(dev_priv) + 0x71184) #define _DSPBSTRIDE (DISPLAY_MMIO_BASE(dev_priv) + 0x71188) #define _DSPBPOS (DISPLAY_MMIO_BASE(dev_priv) + 0x7118C) @@ -6971,46 +4454,63 @@ enum { /* Sprite A control */ #define _DVSACNTR 0x72180 -#define DVS_ENABLE (1 << 31) -#define DVS_GAMMA_ENABLE (1 << 30) -#define DVS_YUV_RANGE_CORRECTION_DISABLE (1 << 27) -#define DVS_PIXFORMAT_MASK (3 << 25) -#define DVS_FORMAT_YUV422 (0 << 25) -#define DVS_FORMAT_RGBX101010 (1 << 25) -#define DVS_FORMAT_RGBX888 (2 << 25) -#define DVS_FORMAT_RGBX161616 (3 << 25) -#define DVS_PIPE_CSC_ENABLE (1 << 24) -#define DVS_SOURCE_KEY (1 << 22) -#define DVS_RGB_ORDER_XBGR (1 << 20) -#define DVS_YUV_FORMAT_BT709 (1 << 18) -#define DVS_YUV_ORDER_MASK (3 << 16) -#define DVS_YUV_ORDER_YUYV (0 << 16) -#define DVS_YUV_ORDER_UYVY (1 << 16) -#define DVS_YUV_ORDER_YVYU (2 << 16) -#define DVS_YUV_ORDER_VYUY (3 << 16) -#define DVS_ROTATE_180 (1 << 15) -#define DVS_DEST_KEY (1 << 2) -#define DVS_TRICKLE_FEED_DISABLE (1 << 14) -#define DVS_TILED (1 << 10) +#define DVS_ENABLE REG_BIT(31) +#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30) +#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27) +#define DVS_FORMAT_MASK REG_GENMASK(26, 25) +#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0) +#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1) +#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2) +#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3) +#define DVS_PIPE_CSC_ENABLE REG_BIT(24) +#define DVS_SOURCE_KEY REG_BIT(22) +#define DVS_RGB_ORDER_XBGR REG_BIT(20) +#define DVS_YUV_FORMAT_BT709 REG_BIT(18) +#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0) +#define DVS_YUV_ORDER_UYVY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1) +#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2) +#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3) +#define DVS_ROTATE_180 REG_BIT(15) +#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14) +#define DVS_TILED REG_BIT(10) +#define DVS_DEST_KEY REG_BIT(2) #define _DVSALINOFF 0x72184 #define _DVSASTRIDE 0x72188 #define _DVSAPOS 0x7218c +#define DVS_POS_Y_MASK REG_GENMASK(31, 16) +#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y)) +#define DVS_POS_X_MASK REG_GENMASK(15, 0) +#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x)) #define _DVSASIZE 0x72190 +#define DVS_HEIGHT_MASK 
REG_GENMASK(31, 16) +#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h)) +#define DVS_WIDTH_MASK REG_GENMASK(15, 0) +#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w)) #define _DVSAKEYVAL 0x72194 #define _DVSAKEYMSK 0x72198 #define _DVSASURF 0x7219c +#define DVS_ADDR_MASK REG_GENMASK(31, 12) #define _DVSAKEYMAXVAL 0x721a0 #define _DVSATILEOFF 0x721a4 +#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y)) +#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0) +#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x)) #define _DVSASURFLIVE 0x721ac #define _DVSAGAMC_G4X 0x721e0 /* g4x */ #define _DVSASCALE 0x72204 -#define DVS_SCALE_ENABLE (1 << 31) -#define DVS_FILTER_MASK (3 << 29) -#define DVS_FILTER_MEDIUM (0 << 29) -#define DVS_FILTER_ENHANCING (1 << 29) -#define DVS_FILTER_SOFTENING (2 << 29) -#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ -#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27) +#define DVS_SCALE_ENABLE REG_BIT(31) +#define DVS_FILTER_MASK REG_GENMASK(30, 29) +#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0) +#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1) +#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2) +#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ +#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27) +#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16) +#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w)) +#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0) +#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h)) #define _DVSAGAMC_ILK 0x72300 /* ilk/snb */ #define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */ @@ -7047,50 +4547,67 @@ enum { #define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */ #define _SPRA_CTL 0x70280 -#define SPRITE_ENABLE (1 << 31) -#define SPRITE_GAMMA_ENABLE (1 << 30) -#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1 << 28) -#define SPRITE_PIXFORMAT_MASK (7 << 25) -#define SPRITE_FORMAT_YUV422 (0 << 25) -#define SPRITE_FORMAT_RGBX101010 (1 << 25) -#define SPRITE_FORMAT_RGBX888 (2 << 25) -#define SPRITE_FORMAT_RGBX161616 (3 << 25) -#define SPRITE_FORMAT_YUV444 (4 << 25) -#define SPRITE_FORMAT_XR_BGR101010 (5 << 25) /* Extended range */ -#define SPRITE_PIPE_CSC_ENABLE (1 << 24) -#define SPRITE_SOURCE_KEY (1 << 22) -#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */ -#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19) -#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */ -#define SPRITE_YUV_ORDER_MASK (3 << 16) -#define SPRITE_YUV_ORDER_YUYV (0 << 16) -#define SPRITE_YUV_ORDER_UYVY (1 << 16) -#define SPRITE_YUV_ORDER_YVYU (2 << 16) -#define SPRITE_YUV_ORDER_VYUY (3 << 16) -#define SPRITE_ROTATE_180 (1 << 15) -#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14) -#define SPRITE_INT_GAMMA_DISABLE (1 << 13) -#define SPRITE_TILED (1 << 10) -#define SPRITE_DEST_KEY (1 << 2) +#define SPRITE_ENABLE REG_BIT(31) +#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30) +#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) +#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25) +#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0) +#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1) +#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2) +#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3) +#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4) +#define 
SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */ +#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24) +#define SPRITE_SOURCE_KEY REG_BIT(22) +#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */ +#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19) +#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */ +#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0) +#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1) +#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2) +#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3) +#define SPRITE_ROTATE_180 REG_BIT(15) +#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14) +#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13) +#define SPRITE_TILED REG_BIT(10) +#define SPRITE_DEST_KEY REG_BIT(2) #define _SPRA_LINOFF 0x70284 #define _SPRA_STRIDE 0x70288 #define _SPRA_POS 0x7028c +#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16) +#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y)) +#define SPRITE_POS_X_MASK REG_GENMASK(15, 0) +#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x)) #define _SPRA_SIZE 0x70290 +#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16) +#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h)) +#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0) +#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w)) #define _SPRA_KEYVAL 0x70294 #define _SPRA_KEYMSK 0x70298 #define _SPRA_SURF 0x7029c +#define SPRITE_ADDR_MASK REG_GENMASK(31, 12) #define _SPRA_KEYMAX 0x702a0 #define _SPRA_TILEOFF 0x702a4 +#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y)) +#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0) +#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x)) #define _SPRA_OFFSET 0x702a4 #define _SPRA_SURFLIVE 0x702ac #define _SPRA_SCALE 0x70304 -#define SPRITE_SCALE_ENABLE (1 << 31) -#define SPRITE_FILTER_MASK (3 << 29) -#define SPRITE_FILTER_MEDIUM (0 << 29) -#define SPRITE_FILTER_ENHANCING (1 << 29) -#define SPRITE_FILTER_SOFTENING (2 << 29) -#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ -#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27) +#define SPRITE_SCALE_ENABLE REG_BIT(31) +#define SPRITE_FILTER_MASK REG_GENMASK(30, 29) +#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0) +#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1) +#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2) +#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ +#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27) +#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16) +#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w)) +#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0) +#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h)) #define _SPRA_GAMC 0x70400 #define _SPRA_GAMC16 0x70440 #define _SPRA_GAMC17 0x7044c @@ -7130,48 +4647,67 @@ enum { #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) -#define SP_ENABLE (1 << 31) -#define SP_GAMMA_ENABLE (1 << 30) -#define SP_PIXFORMAT_MASK (0xf << 26) -#define SP_FORMAT_YUV422 (0x0 << 26) -#define SP_FORMAT_8BPP (0x2 << 26) -#define SP_FORMAT_BGR565 (0x5 << 26) -#define SP_FORMAT_BGRX8888 (0x6 << 26) -#define SP_FORMAT_BGRA8888 (0x7 << 26) -#define SP_FORMAT_RGBX1010102 (0x8 
<< 26) -#define SP_FORMAT_RGBA1010102 (0x9 << 26) -#define SP_FORMAT_BGRX1010102 (0xa << 26) /* CHV pipe B */ -#define SP_FORMAT_BGRA1010102 (0xb << 26) /* CHV pipe B */ -#define SP_FORMAT_RGBX8888 (0xe << 26) -#define SP_FORMAT_RGBA8888 (0xf << 26) -#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */ -#define SP_SOURCE_KEY (1 << 22) -#define SP_YUV_FORMAT_BT709 (1 << 18) -#define SP_YUV_ORDER_MASK (3 << 16) -#define SP_YUV_ORDER_YUYV (0 << 16) -#define SP_YUV_ORDER_UYVY (1 << 16) -#define SP_YUV_ORDER_YVYU (2 << 16) -#define SP_YUV_ORDER_VYUY (3 << 16) -#define SP_ROTATE_180 (1 << 15) -#define SP_TILED (1 << 10) -#define SP_MIRROR (1 << 8) /* CHV pipe B */ +#define SP_ENABLE REG_BIT(31) +#define SP_PIPE_GAMMA_ENABLE REG_BIT(30) +#define SP_FORMAT_MASK REG_GENMASK(29, 26) +#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0) +#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2) +#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5) +#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6) +#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7) +#define SP_FORMAT_RGBX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 8) +#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9) +#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */ +#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */ +#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14) +#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15) +#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */ +#define SP_SOURCE_KEY REG_BIT(22) +#define SP_YUV_FORMAT_BT709 REG_BIT(18) +#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0) +#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1) +#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2) +#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3) +#define SP_ROTATE_180 REG_BIT(15) +#define SP_TILED REG_BIT(10) +#define SP_MIRROR REG_BIT(8) /* CHV pipe B */ #define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) #define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) #define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) +#define SP_POS_Y_MASK REG_GENMASK(31, 16) +#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y)) +#define SP_POS_X_MASK REG_GENMASK(15, 0) +#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x)) #define _SPASIZE (VLV_DISPLAY_BASE + 0x72190) +#define SP_HEIGHT_MASK REG_GENMASK(31, 16) +#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h)) +#define SP_WIDTH_MASK REG_GENMASK(15, 0) +#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w)) #define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194) #define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198) #define _SPASURF (VLV_DISPLAY_BASE + 0x7219c) +#define SP_ADDR_MASK REG_GENMASK(31, 12) #define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) +#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y)) +#define SP_OFFSET_X_MASK REG_GENMASK(15, 0) +#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x)) #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) -#define SP_CONST_ALPHA_ENABLE (1 << 31) +#define SP_CONST_ALPHA_ENABLE REG_BIT(31) +#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0) +#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha)) #define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) -#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */ -#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */ 
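/*
 * Illustration (editor's note, not part of this patch): a minimal userspace
 * sketch of why the REG_GENMASK()/REG_FIELD_PREP() style used throughout this
 * conversion is value-equivalent to the old open-coded shifts, e.g.
 * SP_CONTRAST(x) going from ((x) << 18) to
 * REG_FIELD_PREP(REG_GENMASK(26, 18), (x)).  SKETCH_GENMASK and
 * SKETCH_FIELD_PREP below are simplified stand-ins for the kernel helpers and
 * omit their compile-time range checking.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_GENMASK(h, l) \
	((~0u >> (31 - (h))) & (~0u << (l)))		/* bits h..l set */
#define SKETCH_FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t contrast = 0x1c5;	/* any value that fits the 9-bit u3.6 field */
	uint32_t old_style = contrast << 18;
	uint32_t new_style = SKETCH_FIELD_PREP(SKETCH_GENMASK(26, 18), contrast);

	assert(old_style == new_style);	/* same register bits either way */
	return 0;
}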
+#define SP_CONTRAST_MASK REG_GENMASK(26, 18) +#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */ +#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0) +#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */ #define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4) -#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */ -#define SP_SH_COS(x) (x) /* u3.7 */ +#define SP_SH_SIN_MASK REG_GENMASK(26, 16) +#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */ +#define SP_SH_COS_MASK REG_GENMASK(9, 0) +#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */ #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0) #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) @@ -7222,112 +4758,135 @@ enum { #define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900) #define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904) #define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908) -#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */ -#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */ +#define SPCSC_OOFF_MASK REG_GENMASK(26, 16) +#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, (x) & 0x7ff) /* s11 */ +#define SPCSC_IOFF_MASK REG_GENMASK(10, 0) +#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */ #define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c) #define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910) #define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914) #define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918) #define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c) -#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */ -#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */ +#define SPCSC_C1_MASK REG_GENMASK(30, 16) +#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */ +#define SPCSC_C0_MASK REG_GENMASK(14, 0) +#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */ #define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920) #define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924) #define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928) -#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */ -#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */ +#define SPCSC_IMAX_MASK REG_GENMASK(26, 16) +#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */ +#define SPCSC_IMIN_MASK REG_GENMASK(10, 0) +#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */ #define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c) #define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930) #define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934) -#define SPCSC_OMAX(x) ((x) << 16) /* u10 */ -#define SPCSC_OMIN(x) ((x) << 0) /* u10 */ +#define SPCSC_OMAX_MASK REG_GENMASK(25, 16) +#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */ +#define SPCSC_OMIN_MASK REG_GENMASK(9, 0) +#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */ /* Skylake plane registers */ #define _PLANE_CTL_1_A 0x70180 #define _PLANE_CTL_2_A 0x70280 #define _PLANE_CTL_3_A 0x70380 -#define PLANE_CTL_ENABLE (1 << 31) +#define PLANE_CTL_ENABLE REG_BIT(31) #define PLANE_CTL_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */ #define PLANE_CTL_ARB_SLOTS(x) REG_FIELD_PREP(PLANE_CTL_ARB_SLOTS_MASK, (x)) /* icl+ */ -#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */ -#define PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE (1 << 28) +#define PLANE_CTL_PIPE_GAMMA_ENABLE REG_BIT(30) /* Pre-GLK */ +#define 
PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) /* * ICL+ uses the same PLANE_CTL_FORMAT bits, but the field definition * expanded to include bit 23 as well. However, the shift-24 based values * correctly map to the same formats in ICL, as long as bit 23 is set to 0 */ -#define PLANE_CTL_FORMAT_MASK (0xf << 24) -#define PLANE_CTL_FORMAT_YUV422 (0 << 24) -#define PLANE_CTL_FORMAT_NV12 (1 << 24) -#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24) -#define PLANE_CTL_FORMAT_P010 (3 << 24) -#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24) -#define PLANE_CTL_FORMAT_P012 (5 << 24) -#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24) -#define PLANE_CTL_FORMAT_P016 (7 << 24) -#define PLANE_CTL_FORMAT_XYUV (8 << 24) -#define PLANE_CTL_FORMAT_INDEXED (12 << 24) -#define PLANE_CTL_FORMAT_RGB_565 (14 << 24) -#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23) -#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */ -#define PLANE_CTL_FORMAT_Y210 (1 << 23) -#define PLANE_CTL_FORMAT_Y212 (3 << 23) -#define PLANE_CTL_FORMAT_Y216 (5 << 23) -#define PLANE_CTL_FORMAT_Y410 (7 << 23) -#define PLANE_CTL_FORMAT_Y412 (9 << 23) -#define PLANE_CTL_FORMAT_Y416 (0xb << 23) -#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) -#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21) -#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) -#define PLANE_CTL_ORDER_BGRX (0 << 20) -#define PLANE_CTL_ORDER_RGBX (1 << 20) -#define PLANE_CTL_YUV420_Y_PLANE (1 << 19) -#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) -#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) -#define PLANE_CTL_YUV422_ORDER_YUYV (0 << 16) -#define PLANE_CTL_YUV422_ORDER_UYVY (1 << 16) -#define PLANE_CTL_YUV422_ORDER_YVYU (2 << 16) -#define PLANE_CTL_YUV422_ORDER_VYUY (3 << 16) -#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) -#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) -#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */ -#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */ -#define PLANE_CTL_TILED_MASK (0x7 << 10) -#define PLANE_CTL_TILED_LINEAR (0 << 10) -#define PLANE_CTL_TILED_X (1 << 10) -#define PLANE_CTL_TILED_Y (4 << 10) -#define PLANE_CTL_TILED_YF (5 << 10) -#define PLANE_CTL_ASYNC_FLIP (1 << 9) -#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8) -#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */ -#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */ -#define PLANE_CTL_ALPHA_DISABLE (0 << 4) -#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4) -#define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4) -#define PLANE_CTL_ROTATE_MASK 0x3 -#define PLANE_CTL_ROTATE_0 0x0 -#define PLANE_CTL_ROTATE_90 0x1 -#define PLANE_CTL_ROTATE_180 0x2 -#define PLANE_CTL_ROTATE_270 0x3 +#define PLANE_CTL_FORMAT_MASK_SKL REG_GENMASK(27, 24) /* pre-icl */ +#define PLANE_CTL_FORMAT_MASK_ICL REG_GENMASK(27, 23) /* icl+ */ +#define PLANE_CTL_FORMAT_YUV422 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 0) +#define PLANE_CTL_FORMAT_NV12 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 1) +#define PLANE_CTL_FORMAT_XRGB_2101010 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 2) +#define PLANE_CTL_FORMAT_P010 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 3) +#define PLANE_CTL_FORMAT_XRGB_8888 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 4) +#define PLANE_CTL_FORMAT_P012 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 5) +#define PLANE_CTL_FORMAT_XRGB_16161616F REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 6) +#define PLANE_CTL_FORMAT_P016 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 7) +#define PLANE_CTL_FORMAT_XYUV REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 8) +#define PLANE_CTL_FORMAT_INDEXED 
REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 12) +#define PLANE_CTL_FORMAT_RGB_565 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 14) +#define PLANE_CTL_FORMAT_Y210 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 1) +#define PLANE_CTL_FORMAT_Y212 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 3) +#define PLANE_CTL_FORMAT_Y216 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 5) +#define PLANE_CTL_FORMAT_Y410 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 7) +#define PLANE_CTL_FORMAT_Y412 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 9) +#define PLANE_CTL_FORMAT_Y416 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 11) +#define PLANE_CTL_PIPE_CSC_ENABLE REG_BIT(23) /* Pre-GLK */ +#define PLANE_CTL_KEY_ENABLE_MASK REG_GENMASK(22, 21) +#define PLANE_CTL_KEY_ENABLE_SOURCE REG_FIELD_PREP(PLANE_CTL_KEY_ENABLE_MASK, 1) +#define PLANE_CTL_KEY_ENABLE_DESTINATION REG_FIELD_PREP(PLANE_CTL_KEY_ENABLE_MASK, 2) +#define PLANE_CTL_ORDER_RGBX REG_BIT(20) +#define PLANE_CTL_YUV420_Y_PLANE REG_BIT(19) +#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) +#define PLANE_CTL_YUV422_ORDER_MASK REG_GENMASK(17, 16) +#define PLANE_CTL_YUV422_ORDER_YUYV REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 0) +#define PLANE_CTL_YUV422_ORDER_UYVY REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 1) +#define PLANE_CTL_YUV422_ORDER_YVYU REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 2) +#define PLANE_CTL_YUV422_ORDER_VYUY REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 3) +#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE REG_BIT(15) +#define PLANE_CTL_TRICKLE_FEED_DISABLE REG_BIT(14) +#define PLANE_CTL_CLEAR_COLOR_DISABLE REG_BIT(13) /* TGL+ */ +#define PLANE_CTL_PLANE_GAMMA_DISABLE REG_BIT(13) /* Pre-GLK */ +#define PLANE_CTL_TILED_MASK REG_GENMASK(12, 10) +#define PLANE_CTL_TILED_LINEAR REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 0) +#define PLANE_CTL_TILED_X REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 1) +#define PLANE_CTL_TILED_Y REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 4) +#define PLANE_CTL_TILED_YF REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 5) +#define PLANE_CTL_ASYNC_FLIP REG_BIT(9) +#define PLANE_CTL_FLIP_HORIZONTAL REG_BIT(8) +#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE REG_BIT(4) /* TGL+ */ +#define PLANE_CTL_ALPHA_MASK REG_GENMASK(5, 4) /* Pre-GLK */ +#define PLANE_CTL_ALPHA_DISABLE REG_FIELD_PREP(PLANE_CTL_ALPHA_MASK, 0) +#define PLANE_CTL_ALPHA_SW_PREMULTIPLY REG_FIELD_PREP(PLANE_CTL_ALPHA_MASK, 2) +#define PLANE_CTL_ALPHA_HW_PREMULTIPLY REG_FIELD_PREP(PLANE_CTL_ALPHA_MASK, 3) +#define PLANE_CTL_ROTATE_MASK REG_GENMASK(1, 0) +#define PLANE_CTL_ROTATE_0 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 0) +#define PLANE_CTL_ROTATE_90 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 1) +#define PLANE_CTL_ROTATE_180 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 2) +#define PLANE_CTL_ROTATE_270 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 3) #define _PLANE_STRIDE_1_A 0x70188 #define _PLANE_STRIDE_2_A 0x70288 #define _PLANE_STRIDE_3_A 0x70388 +#define PLANE_STRIDE__MASK REG_GENMASK(11, 0) +#define PLANE_STRIDE_(stride) REG_FIELD_PREP(PLANE_STRIDE__MASK, (stride)) #define _PLANE_POS_1_A 0x7018c #define _PLANE_POS_2_A 0x7028c #define _PLANE_POS_3_A 0x7038c +#define PLANE_POS_Y_MASK REG_GENMASK(31, 16) +#define PLANE_POS_Y(y) REG_FIELD_PREP(PLANE_POS_Y_MASK, (y)) +#define PLANE_POS_X_MASK REG_GENMASK(15, 0) +#define PLANE_POS_X(x) REG_FIELD_PREP(PLANE_POS_X_MASK, (x)) #define _PLANE_SIZE_1_A 0x70190 #define _PLANE_SIZE_2_A 0x70290 #define _PLANE_SIZE_3_A 0x70390 +#define PLANE_HEIGHT_MASK REG_GENMASK(31, 16) +#define PLANE_HEIGHT(h) REG_FIELD_PREP(PLANE_HEIGHT_MASK, (h)) +#define PLANE_WIDTH_MASK REG_GENMASK(15, 0) +#define 
PLANE_WIDTH(w) REG_FIELD_PREP(PLANE_WIDTH_MASK, (w)) #define _PLANE_SURF_1_A 0x7019c #define _PLANE_SURF_2_A 0x7029c #define _PLANE_SURF_3_A 0x7039c +#define PLANE_SURF_ADDR_MASK REG_GENMASK(31, 12) +#define PLANE_SURF_DECRYPT REG_BIT(2) #define _PLANE_OFFSET_1_A 0x701a4 #define _PLANE_OFFSET_2_A 0x702a4 #define _PLANE_OFFSET_3_A 0x703a4 +#define PLANE_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define PLANE_OFFSET_Y(y) REG_FIELD_PREP(PLANE_OFFSET_Y_MASK, (y)) +#define PLANE_OFFSET_X_MASK REG_GENMASK(15, 0) +#define PLANE_OFFSET_X(x) REG_FIELD_PREP(PLANE_OFFSET_X_MASK, (x)) #define _PLANE_KEYVAL_1_A 0x70194 #define _PLANE_KEYVAL_2_A 0x70294 #define _PLANE_KEYMSK_1_A 0x70198 @@ -7339,42 +4898,49 @@ enum { #define _PLANE_CC_VAL_1_A 0x701b4 #define _PLANE_CC_VAL_2_A 0x702b4 #define _PLANE_AUX_DIST_1_A 0x701c0 +#define PLANE_AUX_DISTANCE_MASK REG_GENMASK(31, 12) +#define PLANE_AUX_STRIDE_MASK REG_GENMASK(11, 0) +#define PLANE_AUX_STRIDE(stride) REG_FIELD_PREP(PLANE_AUX_STRIDE_MASK, (stride)) #define _PLANE_AUX_DIST_2_A 0x702c0 #define _PLANE_AUX_OFFSET_1_A 0x701c4 #define _PLANE_AUX_OFFSET_2_A 0x702c4 #define _PLANE_CUS_CTL_1_A 0x701c8 #define _PLANE_CUS_CTL_2_A 0x702c8 -#define PLANE_CUS_ENABLE (1 << 31) -#define PLANE_CUS_Y_PLANE_4_RKL (0 << 30) -#define PLANE_CUS_Y_PLANE_5_RKL (1 << 30) -#define PLANE_CUS_Y_PLANE_6_ICL (0 << 30) -#define PLANE_CUS_Y_PLANE_7_ICL (1 << 30) -#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19) -#define PLANE_CUS_HPHASE_0 (0 << 16) -#define PLANE_CUS_HPHASE_0_25 (1 << 16) -#define PLANE_CUS_HPHASE_0_5 (2 << 16) -#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15) -#define PLANE_CUS_VPHASE_0 (0 << 12) -#define PLANE_CUS_VPHASE_0_25 (1 << 12) -#define PLANE_CUS_VPHASE_0_5 (2 << 12) +#define PLANE_CUS_ENABLE REG_BIT(31) +#define PLANE_CUS_Y_PLANE_MASK REG_BIT(30) +#define PLANE_CUS_Y_PLANE_4_RKL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 0) +#define PLANE_CUS_Y_PLANE_5_RKL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 1) +#define PLANE_CUS_Y_PLANE_6_ICL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 0) +#define PLANE_CUS_Y_PLANE_7_ICL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 1) +#define PLANE_CUS_HPHASE_SIGN_NEGATIVE REG_BIT(19) +#define PLANE_CUS_HPHASE_MASK REG_GENMASK(17, 16) +#define PLANE_CUS_HPHASE_0 REG_FIELD_PREP(PLANE_CUS_HPHASE_MASK, 0) +#define PLANE_CUS_HPHASE_0_25 REG_FIELD_PREP(PLANE_CUS_HPHASE_MASK, 1) +#define PLANE_CUS_HPHASE_0_5 REG_FIELD_PREP(PLANE_CUS_HPHASE_MASK, 2) +#define PLANE_CUS_VPHASE_SIGN_NEGATIVE REG_BIT(15) +#define PLANE_CUS_VPHASE_MASK REG_GENMASK(13, 12) +#define PLANE_CUS_VPHASE_0 REG_FIELD_PREP(PLANE_CUS_VPHASE_MASK, 0) +#define PLANE_CUS_VPHASE_0_25 REG_FIELD_PREP(PLANE_CUS_VPHASE_MASK, 1) +#define PLANE_CUS_VPHASE_0_5 REG_FIELD_PREP(PLANE_CUS_VPHASE_MASK, 2) #define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */ #define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */ #define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ -#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ -#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) +#define PLANE_COLOR_PIPE_GAMMA_ENABLE REG_BIT(30) /* Pre-ICL */ +#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) +#define PLANE_COLOR_PIPE_CSC_ENABLE REG_BIT(23) /* Pre-ICL */ #define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */ -#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */ -#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ -#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) -#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 (1 << 17) -#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 (2 << 17) -#define 
PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020 (3 << 17) -#define PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020 (4 << 17) -#define PLANE_COLOR_PLANE_GAMMA_DISABLE (1 << 13) -#define PLANE_COLOR_ALPHA_MASK (0x3 << 4) -#define PLANE_COLOR_ALPHA_DISABLE (0 << 4) -#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY (2 << 4) -#define PLANE_COLOR_ALPHA_HW_PREMULTIPLY (3 << 4) +#define PLANE_COLOR_INPUT_CSC_ENABLE REG_BIT(20) /* ICL+ */ +#define PLANE_COLOR_CSC_MODE_MASK REG_GENMASK(19, 17) +#define PLANE_COLOR_CSC_MODE_BYPASS REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 0) +#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 1) +#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 2) +#define PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 3) +#define PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 4) +#define PLANE_COLOR_PLANE_GAMMA_DISABLE REG_BIT(13) +#define PLANE_COLOR_ALPHA_MASK REG_GENMASK(5, 4) +#define PLANE_COLOR_ALPHA_DISABLE REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 0) +#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 2) +#define PLANE_COLOR_ALPHA_HW_PREMULTIPLY REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 3) #define _PLANE_BUF_CFG_1_A 0x7027c #define _PLANE_BUF_CFG_2_A 0x7037c #define _PLANE_NV12_BUF_CFG_1_A 0x70278 @@ -7457,8 +5023,6 @@ enum { _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B) #define PLANE_STRIDE(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) -#define PLANE_STRIDE_MASK REG_GENMASK(10, 0) -#define PLANE_STRIDE_MASK_XELPD REG_GENMASK(11, 0) #define _PLANE_POS_1_B 0x7118c #define _PLANE_POS_2_B 0x7128c @@ -7486,7 +5050,6 @@ enum { #define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B) #define PLANE_SURF(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe)) -#define PLANE_SURF_DECRYPT REG_BIT(2) #define _PLANE_OFFSET_1_B 0x711a4 #define _PLANE_OFFSET_2_B 0x712a4 @@ -7518,8 +5081,11 @@ enum { #define _PLANE_BUF_CFG_1_B 0x7127c #define _PLANE_BUF_CFG_2_B 0x7137c -#define DDB_ENTRY_MASK 0xFFF /* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ -#define DDB_ENTRY_END_SHIFT 16 +/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ +#define PLANE_BUF_END_MASK REG_GENMASK(27, 16) +#define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end)) +#define PLANE_BUF_START_MASK REG_GENMASK(11, 0) +#define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start)) #define _PLANE_BUF_CFG_1(pipe) \ _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B) #define _PLANE_BUF_CFG_2(pipe) \ @@ -7674,24 +5240,13 @@ enum { #define _PIPEA_DATA_M1 0x60030 -#define PIPE_DATA_M1_OFFSET 0 #define _PIPEA_DATA_N1 0x60034 -#define PIPE_DATA_N1_OFFSET 0 - #define _PIPEA_DATA_M2 0x60038 -#define PIPE_DATA_M2_OFFSET 0 #define _PIPEA_DATA_N2 0x6003c -#define PIPE_DATA_N2_OFFSET 0 - #define _PIPEA_LINK_M1 0x60040 -#define PIPE_LINK_M1_OFFSET 0 #define _PIPEA_LINK_N1 0x60044 -#define PIPE_LINK_N1_OFFSET 0 - #define _PIPEA_LINK_M2 0x60048 -#define PIPE_LINK_M2_OFFSET 0 #define _PIPEA_LINK_N2 0x6004c -#define PIPE_LINK_N2_OFFSET 0 /* PIPEB timing regs are same start from 0x61000 */ @@ -7948,7 +5503,8 @@ enum { #define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088) #define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154) -#define DMC_DEBUG3 _MMIO(0x101090) +#define TGL_DMC_DEBUG3 _MMIO(0x101090) +#define DG1_DMC_DEBUG3 _MMIO(0x13415c) /* Display Internal Timeout Register */ #define RM_TIMEOUT _MMIO(0x42060) @@ 
-8203,63 +5759,6 @@ enum { #define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4)) #define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4)) -#define GEN11_GT_INTR_DW0 _MMIO(0x190018) -#define GEN11_CSME (31) -#define GEN11_GUNIT (28) -#define GEN11_GUC (25) -#define GEN11_WDPERF (20) -#define GEN11_KCR (19) -#define GEN11_GTPM (16) -#define GEN11_BCS (15) -#define GEN11_RCS0 (0) - -#define GEN11_GT_INTR_DW1 _MMIO(0x19001c) -#define GEN11_VECS(x) (31 - (x)) -#define GEN11_VCS(x) (x) - -#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4)) - -#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060) -#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064) -#define GEN11_INTR_DATA_VALID (1 << 31) -#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16) -#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20) -#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff) -/* irq instances for OTHER_CLASS */ -#define OTHER_GUC_INSTANCE 0 -#define OTHER_GTPM_INSTANCE 1 -#define OTHER_KCR_INSTANCE 4 - -#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4)) - -#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070) -#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074) - -#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) - -#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030) -#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034) -#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038) -#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c) -#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040) -#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044) - -#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090) -#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0) -#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8) -#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac) -#define GEN12_VCS4_VCS5_INTR_MASK _MMIO(0x1900b0) -#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4) -#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0) -#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4) -#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8) -#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec) -#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0) -#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4) - -#define ENGINE1_MASK REG_GENMASK(31, 16) -#define ENGINE0_MASK REG_GENMASK(15, 0) - #define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) /* Required on all Ironlake and Sandybridge according to the B-Spec. 
*/ #define ILK_ELPIN_409_SELECT (1 << 25) @@ -8413,11 +5912,14 @@ enum { #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) #define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) -#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) -#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) -#define ICL_DELAY_PMRSP REG_BIT(22) -#define DISABLE_FLR_SRC REG_BIT(15) -#define MASK_WAKEMEM REG_BIT(13) +#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) +#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) +#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25) +#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24) +#define LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23) +#define ICL_DELAY_PMRSP REG_BIT(22) +#define DISABLE_FLR_SRC REG_BIT(15) +#define MASK_WAKEMEM REG_BIT(13) #define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434) #define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27) @@ -8446,142 +5948,6 @@ enum { #define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29) #define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29) -#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) -#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14) - -#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) -#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8) -#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10) - -#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) -#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1) -#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) -#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11) - -#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC) -#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0) - -#define GEN8_CS_CHICKEN1 _MMIO(0x2580) -#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0) -#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) -#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0) -#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1) -#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0) -#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1) - -/* GEN7 chicken */ -#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) - #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10) - #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14) - -#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) - #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13) - #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12) - #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8) - #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0) - -#define GEN8_L3CNTLREG _MMIO(0x7034) - #define GEN8_ERRDETBCTRL (1 << 9) - -#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) -#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) -#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12) -#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) -#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) - -#define HIZ_CHICKEN _MMIO(0x7018) -# define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15) -# define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14) -# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3) - -#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) -#define DISABLE_PIXEL_MASK_CAMMING (1 << 14) - -#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) -#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) - -#define GEN7_SARCHKMD _MMIO(0xB000) -#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31) -#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30) - -#define GEN7_L3SQCREG1 _MMIO(0xB010) -#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 - -#define GEN8_L3SQCREG1 _MMIO(0xB100) -/* - * Note that on CHV the following has an off-by-one error wrt. to BSpec. 
- * Using the formula in BSpec leads to a hang, while the formula here works - * fine and matches the formulas for all other platforms. A BSpec change - * request has been filed to clarify this. - */ -#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) -#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) -#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14)) - -#define GEN7_L3CNTLREG1 _MMIO(0xB01C) -#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C -#define GEN7_L3AGDIS (1 << 19) -#define GEN7_L3CNTLREG2 _MMIO(0xB020) -#define GEN7_L3CNTLREG3 _MMIO(0xB024) - -#define GEN7_L3_CHICKEN_MODE_REGISTER _MMIO(0xB030) -#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 -#define GEN10_L3_CHICKEN_MODE_REGISTER _MMIO(0xB114) -#define GEN11_I2M_WRITE_DISABLE (1 << 28) - -#define GEN7_L3SQCREG4 _MMIO(0xb034) -#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27) - -#define GEN11_SCRATCH2 _MMIO(0xb140) -#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19) - -#define GEN8_L3SQCREG4 _MMIO(0xb118) -#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6) -#define GEN8_LQSC_RO_PERF_DIS (1 << 27) -#define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21) -#define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22) - -#define GEN11_L3SQCREG5 _MMIO(0xb158) -#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) - -#define XEHP_L3SCQREG7 _MMIO(0xb188) -#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3) - -/* GEN8 chicken */ -#define HDC_CHICKEN0 _MMIO(0x7300) -#define ICL_HDC_MODE _MMIO(0xE5F4) -#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15) -#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14) -#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11) -#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5) -#define HDC_FORCE_NON_COHERENT (1 << 4) -#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10) - -#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0) -#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11) - -#define SARB_CHICKEN1 _MMIO(0xe90c) -#define COMP_CKN_IN REG_GENMASK(30, 29) - -#define GEN8_HDC_CHICKEN1 _MMIO(0x7304) - -/* GEN9 chicken */ -#define SLICE_ECO_CHICKEN0 _MMIO(0x7308) -#define PIXEL_MASK_CAMMING_DISABLE (1 << 14) - -#define GEN9_WM_CHICKEN3 _MMIO(0x5588) -#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9) - -/* WaCatErrorRejectionIssue */ -#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) -#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11) - -#define HSW_SCRATCH1 _MMIO(0xb038) -#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27) - -#define BDW_SCRATCH1 _MMIO(0xb11c) -#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) - /*GEN11 chicken */ #define _PIPEA_CHICKEN 0x70038 #define _PIPEB_CHICKEN 0x71038 @@ -8594,16 +5960,6 @@ enum { #define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12) #define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7) -#define VFLSKPD _MMIO(0x62a8) -#define DIS_OVER_FETCH_CACHE REG_BIT(1) -#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0) - -#define FF_MODE2 _MMIO(0x6604) -#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24) -#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224) -#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16) -#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4) - /* PCH */ #define PCH_DISPLAY_BASE 0xc0000u @@ -8700,6 +6056,7 @@ enum { /* south display engine interrupt: ICP/TGP */ #define SDE_GMBUS_ICP (1 << 23) #define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin)) +#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */ #define SDE_DDI_HOTPLUG_ICP(hpd_pin) REG_BIT(16 + _HPD_PIN_DDI(hpd_pin)) #define 
SDE_DDI_HOTPLUG_MASK_ICP (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \ SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \ @@ -9018,22 +6375,19 @@ enum { #define _PCH_TRANSBCONF 0xf1008 #define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF) #define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */ -#define TRANS_DISABLE (0 << 31) -#define TRANS_ENABLE (1 << 31) -#define TRANS_STATE_MASK (1 << 30) -#define TRANS_STATE_DISABLE (0 << 30) -#define TRANS_STATE_ENABLE (1 << 30) -#define TRANS_FRAME_START_DELAY_MASK (3 << 27) /* ibx */ -#define TRANS_FRAME_START_DELAY(x) ((x) << 27) /* ibx: 0-3 */ -#define TRANS_INTERLACE_MASK (7 << 21) -#define TRANS_PROGRESSIVE (0 << 21) -#define TRANS_INTERLACED (3 << 21) -#define TRANS_LEGACY_INTERLACED_ILK (2 << 21) -#define TRANS_8BPC (0 << 5) -#define TRANS_10BPC (1 << 5) -#define TRANS_6BPC (2 << 5) -#define TRANS_12BPC (3 << 5) - +#define TRANS_ENABLE REG_BIT(31) +#define TRANS_STATE_ENABLE REG_BIT(30) +#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */ +#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */ +#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21) +#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0) +#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */ +#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3) +#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */ +#define TRANS_BPC_8 REG_FIELD_PREP(TRANS_BPC_MASK, 0) +#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1) +#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2) +#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3) #define _TRANSA_CHICKEN1 0xf0060 #define _TRANSB_CHICKEN1 0xf1060 #define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) @@ -9243,22 +6597,19 @@ enum { #define _TRANS_DP_CTL_B 0xe1300 #define _TRANS_DP_CTL_C 0xe2300 #define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B) -#define TRANS_DP_OUTPUT_ENABLE (1 << 31) -#define TRANS_DP_PORT_SEL_MASK (3 << 29) -#define TRANS_DP_PORT_SEL_NONE (3 << 29) -#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29) -#define TRANS_DP_AUDIO_ONLY (1 << 26) -#define TRANS_DP_ENH_FRAMING (1 << 18) -#define TRANS_DP_8BPC (0 << 9) -#define TRANS_DP_10BPC (1 << 9) -#define TRANS_DP_6BPC (2 << 9) -#define TRANS_DP_12BPC (3 << 9) -#define TRANS_DP_BPC_MASK (3 << 9) -#define TRANS_DP_VSYNC_ACTIVE_HIGH (1 << 4) -#define TRANS_DP_VSYNC_ACTIVE_LOW 0 -#define TRANS_DP_HSYNC_ACTIVE_HIGH (1 << 3) -#define TRANS_DP_HSYNC_ACTIVE_LOW 0 -#define TRANS_DP_SYNC_MASK (3 << 3) +#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31) +#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29) +#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3) +#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B) +#define TRANS_DP_AUDIO_ONLY REG_BIT(26) +#define TRANS_DP_ENH_FRAMING REG_BIT(18) +#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9) +#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0) +#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1) +#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2) +#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3) +#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4) +#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3) #define _TRANS_DP2_CTL_A 0x600a0 #define _TRANS_DP2_CTL_B 0x610a0 @@ -9317,261 +6668,16 @@ enum { #define VLV_PMWGICZ _MMIO(0x1300a4) -#define RC6_LOCATION 
_MMIO(0xD40) -#define RC6_CTX_IN_DRAM (1 << 0) -#define RC6_CTX_BASE _MMIO(0xD48) -#define RC6_CTX_BASE_MASK 0xFFFFFFF0 -#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054) -#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054) -#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054) -#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054) -#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054) -#define IDLE_TIME_MASK 0xFFFFF -#define FORCEWAKE _MMIO(0xA18C) -#define FORCEWAKE_VLV _MMIO(0x1300b0) -#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4) -#define FORCEWAKE_MEDIA_VLV _MMIO(0x1300b8) -#define FORCEWAKE_ACK_MEDIA_VLV _MMIO(0x1300bc) -#define FORCEWAKE_ACK_HSW _MMIO(0x130044) -#define FORCEWAKE_ACK _MMIO(0x130090) -#define VLV_GTLC_WAKE_CTRL _MMIO(0x130090) -#define VLV_GTLC_RENDER_CTX_EXISTS (1 << 25) -#define VLV_GTLC_MEDIA_CTX_EXISTS (1 << 24) -#define VLV_GTLC_ALLOWWAKEREQ (1 << 0) - -#define VLV_GTLC_PW_STATUS _MMIO(0x130094) -#define VLV_GTLC_ALLOWWAKEACK (1 << 0) -#define VLV_GTLC_ALLOWWAKEERR (1 << 1) -#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) -#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) -#define FORCEWAKE_MT _MMIO(0xa188) /* multi-threaded */ -#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270) -#define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4) -#define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4) -#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278) -#define FORCEWAKE_GT_GEN9 _MMIO(0xa188) -#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88) -#define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4) -#define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4) -#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84) -#define FORCEWAKE_ACK_GT_GEN9 _MMIO(0x130044) -#define FORCEWAKE_KERNEL BIT(0) -#define FORCEWAKE_USER BIT(1) -#define FORCEWAKE_KERNEL_FALLBACK BIT(15) -#define FORCEWAKE_MT_ACK _MMIO(0x130040) -#define ECOBUS _MMIO(0xa180) -#define FORCEWAKE_MT_ENABLE (1 << 5) -#define VLV_SPAREG2H _MMIO(0xA194) -#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0) -#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) -#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) - -#define GTFIFODBG _MMIO(0x120000) -#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) -#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) -#define GT_FIFO_SBDROPERR (1 << 6) -#define GT_FIFO_BLOBDROPERR (1 << 5) -#define GT_FIFO_SB_READ_ABORTERR (1 << 4) -#define GT_FIFO_DROPERR (1 << 3) -#define GT_FIFO_OVFERR (1 << 2) -#define GT_FIFO_IAWRERR (1 << 1) -#define GT_FIFO_IARDERR (1 << 0) - -#define GTFIFOCTL _MMIO(0x120008) -#define GT_FIFO_FREE_ENTRIES_MASK 0x7f -#define GT_FIFO_NUM_RESERVED_ENTRIES 20 -#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) -#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) - -#define HSW_IDICR _MMIO(0x9008) -#define IDIHASHMSK(x) (((x) & 0x3f) << 16) #define HSW_EDRAM_CAP _MMIO(0x120010) #define EDRAM_ENABLED 0x1 #define EDRAM_NUM_BANKS(cap) (((cap) >> 1) & 0xf) #define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7) #define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3) -#define GEN6_UCGCTL1 _MMIO(0x9400) -# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22) -# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) -# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) -# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) - -#define GEN6_UCGCTL2 _MMIO(0x9404) -# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31) -# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30) -# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22) -# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) -# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) -# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 
<< 11) - -#define GEN6_UCGCTL3 _MMIO(0x9408) -# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20) - -#define GEN7_UCGCTL4 _MMIO(0x940c) -#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25) -#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14) - -#define GEN6_RCGCTL1 _MMIO(0x9410) -#define GEN6_RCGCTL2 _MMIO(0x9414) -#define GEN6_RSTCTL _MMIO(0x9420) - -#define GEN8_UCGCTL6 _MMIO(0x9430) -#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24) -#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14) -#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28) - -#define UNSLCGCTL9430 _MMIO(0x9430) -#define MSQDUNIT_CLKGATE_DIS REG_BIT(3) - -#define GEN6_GFXPAUSE _MMIO(0xA000) -#define GEN6_RPNSWREQ _MMIO(0xA008) -#define GEN6_TURBO_DISABLE (1 << 31) -#define GEN6_FREQUENCY(x) ((x) << 25) -#define HSW_FREQUENCY(x) ((x) << 24) -#define GEN9_FREQUENCY(x) ((x) << 23) -#define GEN6_OFFSET(x) ((x) << 19) -#define GEN6_AGGRESSIVE_TURBO (0 << 15) -#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23 -#define GEN9_IGNORE_SLICE_RATIO (0 << 0) - -#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C) -#define GEN6_RC_CONTROL _MMIO(0xA090) -#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16) -#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17) -#define GEN6_RC_CTL_RC6_ENABLE (1 << 18) -#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20) -#define GEN6_RC_CTL_RC7_ENABLE (1 << 22) -#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24) -#define GEN7_RC_CTL_TO_MODE (1 << 28) -#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27) -#define GEN6_RC_CTL_HW_ENABLE (1 << 31) -#define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010) -#define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014) -#define GEN6_RPSTAT1 _MMIO(0xA01C) -#define GEN6_CAGF_SHIFT 8 -#define HSW_CAGF_SHIFT 7 -#define GEN9_CAGF_SHIFT 23 -#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) -#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) -#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) -#define GEN6_RP_CONTROL _MMIO(0xA024) -#define GEN6_RP_MEDIA_TURBO (1 << 11) -#define GEN6_RP_MEDIA_MODE_MASK (3 << 9) -#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9) -#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9) -#define GEN6_RP_MEDIA_HW_MODE (1 << 9) -#define GEN6_RP_MEDIA_SW_MODE (0 << 9) -#define GEN6_RP_MEDIA_IS_GFX (1 << 8) -#define GEN6_RP_ENABLE (1 << 7) -#define GEN6_RP_UP_IDLE_MIN (0x1 << 3) -#define GEN6_RP_UP_BUSY_AVG (0x2 << 3) -#define GEN6_RP_UP_BUSY_CONT (0x4 << 3) -#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0) -#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0) -#define GEN6_RPSWCTL_SHIFT 9 -#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT) -#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT) -#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C) -#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030) -#define GEN6_RP_CUR_UP_EI _MMIO(0xA050) -#define GEN6_RP_EI_MASK 0xffffff -#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK -#define GEN6_RP_CUR_UP _MMIO(0xA054) -#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK -#define GEN6_RP_PREV_UP _MMIO(0xA058) -#define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C) -#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK -#define GEN6_RP_CUR_DOWN _MMIO(0xA060) -#define GEN6_RP_PREV_DOWN _MMIO(0xA064) -#define GEN6_RP_UP_EI _MMIO(0xA068) -#define GEN6_RP_DOWN_EI _MMIO(0xA06C) -#define GEN6_RP_IDLE_HYSTERSIS _MMIO(0xA070) -#define GEN6_RPDEUHWTC _MMIO(0xA080) -#define GEN6_RPDEUC _MMIO(0xA084) -#define GEN6_RPDEUCSW _MMIO(0xA088) -#define GEN6_RC_STATE _MMIO(0xA094) -#define RC_SW_TARGET_STATE_SHIFT 16 -#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT) -#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098) -#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C) -#define 
GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0) -#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xA0A0) -#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8) -#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC) -#define GEN6_RC_SLEEP _MMIO(0xA0B0) -#define GEN6_RCUBMABDTMR _MMIO(0xA0B0) -#define GEN6_RC1e_THRESHOLD _MMIO(0xA0B4) -#define GEN6_RC6_THRESHOLD _MMIO(0xA0B8) -#define GEN6_RC6p_THRESHOLD _MMIO(0xA0BC) -#define VLV_RCEDATA _MMIO(0xA0BC) -#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) -#define GEN6_PMINTRMSK _MMIO(0xA168) -#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31) -#define ARAT_EXPIRED_INTRMSK (1 << 9) -#define GEN8_MISC_CTRL0 _MMIO(0xA180) -#define VLV_PWRDWNUPCTL _MMIO(0xA294) -#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) -#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) -#define GEN9_PG_ENABLE _MMIO(0xA210) -#define GEN9_RENDER_PG_ENABLE REG_BIT(0) -#define GEN9_MEDIA_PG_ENABLE REG_BIT(1) -#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2) -#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n)) -#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n)) -#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248) -#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250) -#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C) - #define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C) #define PIXEL_OVERLAP_CNT_MASK (3 << 30) #define PIXEL_OVERLAP_CNT_SHIFT 30 -#define GEN6_PMISR _MMIO(0x44020) -#define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */ -#define GEN6_PMIIR _MMIO(0x44028) -#define GEN6_PMIER _MMIO(0x4402C) -#define GEN6_PM_MBOX_EVENT (1 << 25) -#define GEN6_PM_THERMAL_EVENT (1 << 24) - -/* - * For Gen11 these are in the upper word of the GPM_WGBOXPERF - * registers. Shifting is handled on accessing the imr and ier. - */ -#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6) -#define GEN6_PM_RP_UP_THRESHOLD (1 << 5) -#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4) -#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2) -#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1) -#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \ - GEN6_PM_RP_UP_THRESHOLD | \ - GEN6_PM_RP_DOWN_EI_EXPIRED | \ - GEN6_PM_RP_DOWN_THRESHOLD | \ - GEN6_PM_RP_DOWN_TIMEOUT) - -#define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4) -#define GEN7_GT_SCRATCH_REG_NUM 8 - -#define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098) -#define VLV_GFX_CLK_STATUS_BIT (1 << 3) -#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2) - -#define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104) -#define VLV_COUNTER_CONTROL _MMIO(0x138104) -#define VLV_COUNT_RANGE_HIGH (1 << 15) -#define VLV_MEDIA_RC0_COUNT_EN (1 << 5) -#define VLV_RENDER_RC0_COUNT_EN (1 << 4) -#define VLV_MEDIA_RC6_COUNT_EN (1 << 1) -#define VLV_RENDER_RC6_COUNT_EN (1 << 0) -#define GEN6_GT_GFX_RC6 _MMIO(0x138108) -#define VLV_GT_RENDER_RC6 _MMIO(0x138108) -#define VLV_GT_MEDIA_RC6 _MMIO(0x13810C) - -#define GEN6_GT_GFX_RC6p _MMIO(0x13810C) -#define GEN6_GT_GFX_RC6pp _MMIO(0x138110) -#define VLV_RENDER_C0_COUNT _MMIO(0x138118) -#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C) - #define GEN6_PCODE_MAILBOX _MMIO(0x138124) #define GEN6_PCODE_READY (1 << 31) #define GEN6_PCODE_ERROR_MASK 0xFF @@ -9638,82 +6744,6 @@ enum { #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 #define GEN6_PCODE_DATA1 _MMIO(0x13812C) -#define GEN6_GT_CORE_STATUS _MMIO(0x138060) -#define GEN6_CORE_CPD_STATE_MASK (7 << 4) -#define GEN6_RCn_MASK 7 -#define GEN6_RC0 0 -#define GEN6_RC3 2 -#define GEN6_RC6 3 -#define GEN6_RC7 4 - -#define GEN8_GT_SLICE_INFO _MMIO(0x138064) -#define GEN8_LSLICESTAT_MASK 0x7 - -#define CHV_POWER_SS0_SIG1 _MMIO(0xa720) -#define CHV_POWER_SS1_SIG1 _MMIO(0xa728) -#define CHV_SS_PG_ENABLE 
(1 << 1) -#define CHV_EU08_PG_ENABLE (1 << 9) -#define CHV_EU19_PG_ENABLE (1 << 17) -#define CHV_EU210_PG_ENABLE (1 << 25) - -#define CHV_POWER_SS0_SIG2 _MMIO(0xa724) -#define CHV_POWER_SS1_SIG2 _MMIO(0xa72c) -#define CHV_EU311_PG_ENABLE (1 << 1) - -#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4) -#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \ - ((slice) % 3) * 0x4) -#define GEN9_PGCTL_SLICE_ACK (1 << 0) -#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2)) -#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F) - -#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8) -#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \ - ((slice) % 3) * 0x8) -#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8) -#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \ - ((slice) % 3) * 0x8) -#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0) -#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2) -#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4) -#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6) -#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8) -#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10) -#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12) -#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) - -#define GEN7_MISCCPCTL _MMIO(0x9424) -#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0) -#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2) -#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4) -#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6) - -#define GEN8_GARBCNTL _MMIO(0xB004) -#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7) -#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22) -#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0) -#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0) - -#define GEN11_GLBLINVL _MMIO(0xB404) -#define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5) -#define GEN11_BANK_HASH_ADDR_EXCL_BIT0 (1 << 5) - -#define GEN10_DFR_RATIO_EN_AND_CHICKEN _MMIO(0x9550) -#define DFR_DISABLE (1 << 9) - -#define GEN11_GACB_PERF_CTRL _MMIO(0x4B80) -#define GEN11_HASH_CTRL_MASK (0x3 << 12 | 0xf << 0) -#define GEN11_HASH_CTRL_BIT0 (1 << 0) -#define GEN11_HASH_CTRL_BIT4 (1 << 12) - -#define GEN11_LSN_UNSLCVC _MMIO(0xB43C) -#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9) -#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) - -#define GEN10_SAMPLER_MODE _MMIO(0xE18C) -#define ENABLE_SMALLPL REG_BIT(15) -#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5) - /* IVYBRIDGE DPF */ #define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ #define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14) @@ -9728,74 +6758,6 @@ enum { (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) #define GEN7_L3CDERRST1_ENABLE (1 << 7) -#define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4) -#define GEN7_L3LOG_SIZE 0x80 - -#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */ -#define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100) -#define GEN7_MAX_PS_THREAD_DEP (8 << 12) -#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10) -#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4) -#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3) - -#define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188) -#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5) -#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) - -#define GEN8_ROW_CHICKEN _MMIO(0xe4f0) -#define FLOW_CONTROL_ENABLE REG_BIT(15) -#define UGM_BACKUP_MODE REG_BIT(13) -#define MDQ_ARBITRATION_MODE REG_BIT(12) -#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8) -#define 
STALL_DOP_GATING_DISABLE REG_BIT(5) -#define THROTTLE_12_5 REG_GENMASK(4, 2) -#define DISABLE_EARLY_EOT REG_BIT(1) - -#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) -#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15) -#define GEN12_DISABLE_EARLY_READ REG_BIT(14) -#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12) -#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8) - -#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8) -#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15) -#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4) -#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32) -#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32) -#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32) -#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32) -#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32) - -#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) -#define DOP_CLOCK_GATING_DISABLE (1 << 0) -#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8) -#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1) - -#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) -#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13) -#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11) -#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) -#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) -#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4) - -#define HSW_ROW_CHICKEN3 _MMIO(0xe49c) -#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) - -#define HALF_SLICE_CHICKEN2 _MMIO(0xe180) -#define GEN8_ST_PO_DISABLE (1 << 13) - -#define HALF_SLICE_CHICKEN3 _MMIO(0xe184) -#define HSW_SAMPLE_C_PERFORMANCE (1 << 9) -#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8) -#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5) -#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1) - -#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) -#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15) -#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8) -#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4) -#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2) - /* Audio */ #define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020) #define INTEL_AUDIO_DEVCL 0x808629FB @@ -10844,149 +7806,6 @@ enum skl_power_gate { PORTTC1_PLL_ENABLE, \ PORTTC2_PLL_ENABLE) -#define _MG_REFCLKIN_CTL_PORT1 0x16892C -#define _MG_REFCLKIN_CTL_PORT2 0x16992C -#define _MG_REFCLKIN_CTL_PORT3 0x16A92C -#define _MG_REFCLKIN_CTL_PORT4 0x16B92C -#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) -#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) -#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \ - _MG_REFCLKIN_CTL_PORT1, \ - _MG_REFCLKIN_CTL_PORT2) - -#define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 -#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) -#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) -#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) -#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) -#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \ - _MG_CLKTOP2_CORECLKCTL1_PORT1, \ - _MG_CLKTOP2_CORECLKCTL1_PORT2) - -#define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 -#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) -#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) -#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) -#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) 
-#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12) -#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) -#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8 -#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) -#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \ - _MG_CLKTOP2_HSCLKCTL_PORT1, \ - _MG_CLKTOP2_HSCLKCTL_PORT2) - -#define _MG_PLL_DIV0_PORT1 0x168A00 -#define _MG_PLL_DIV0_PORT2 0x169A00 -#define _MG_PLL_DIV0_PORT3 0x16AA00 -#define _MG_PLL_DIV0_PORT4 0x16BA00 -#define MG_PLL_DIV0_FRACNEN_H (1 << 30) -#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8) -#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8 -#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8) -#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0) -#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0) -#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \ - _MG_PLL_DIV0_PORT2) - -#define _MG_PLL_DIV1_PORT1 0x168A04 -#define _MG_PLL_DIV1_PORT2 0x169A04 -#define _MG_PLL_DIV1_PORT3 0x16AA04 -#define _MG_PLL_DIV1_PORT4 0x16BA04 -#define MG_PLL_DIV1_IREF_NDIVRATIO(x) ((x) << 16) -#define MG_PLL_DIV1_DITHER_DIV_1 (0 << 12) -#define MG_PLL_DIV1_DITHER_DIV_2 (1 << 12) -#define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12) -#define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12) -#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4) -#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0) -#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0) -#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \ - _MG_PLL_DIV1_PORT2) - -#define _MG_PLL_LF_PORT1 0x168A08 -#define _MG_PLL_LF_PORT2 0x169A08 -#define _MG_PLL_LF_PORT3 0x16AA08 -#define _MG_PLL_LF_PORT4 0x16BA08 -#define MG_PLL_LF_TDCTARGETCNT(x) ((x) << 24) -#define MG_PLL_LF_AFCCNTSEL_256 (0 << 20) -#define MG_PLL_LF_AFCCNTSEL_512 (1 << 20) -#define MG_PLL_LF_GAINCTRL(x) ((x) << 16) -#define MG_PLL_LF_INT_COEFF(x) ((x) << 8) -#define MG_PLL_LF_PROP_COEFF(x) ((x) << 0) -#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \ - _MG_PLL_LF_PORT2) - -#define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C -#define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C -#define _MG_PLL_FRAC_LOCK_PORT3 0x16AA0C -#define _MG_PLL_FRAC_LOCK_PORT4 0x16BA0C -#define MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 (1 << 18) -#define MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 (1 << 16) -#define MG_PLL_FRAC_LOCK_LOCKTHRESH(x) ((x) << 11) -#define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10) -#define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8) -#define MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0) -#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \ - _MG_PLL_FRAC_LOCK_PORT1, \ - _MG_PLL_FRAC_LOCK_PORT2) - -#define _MG_PLL_SSC_PORT1 0x168A10 -#define _MG_PLL_SSC_PORT2 0x169A10 -#define _MG_PLL_SSC_PORT3 0x16AA10 -#define _MG_PLL_SSC_PORT4 0x16BA10 -#define MG_PLL_SSC_EN (1 << 28) -#define MG_PLL_SSC_TYPE(x) ((x) << 26) -#define MG_PLL_SSC_STEPLENGTH(x) ((x) << 16) -#define MG_PLL_SSC_STEPNUM(x) ((x) << 10) -#define MG_PLL_SSC_FLLEN (1 << 9) -#define MG_PLL_SSC_STEPSIZE(x) ((x) << 0) -#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \ - _MG_PLL_SSC_PORT2) - -#define _MG_PLL_BIAS_PORT1 0x168A14 -#define _MG_PLL_BIAS_PORT2 0x169A14 -#define _MG_PLL_BIAS_PORT3 0x16AA14 -#define _MG_PLL_BIAS_PORT4 0x16BA14 -#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) -#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) -#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) -#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24) -#define 
MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) -#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16) -#define MG_PLL_BIAS_BIASCAL_EN (1 << 15) -#define MG_PLL_BIAS_CTRIM(x) ((x) << 8) -#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8) -#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) -#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) -#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) -#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) -#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \ - _MG_PLL_BIAS_PORT2) - -#define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT3 0x16AA18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT4 0x16BA18 -#define MG_PLL_TDC_COLDST_IREFINT_EN (1 << 27) -#define MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x) ((x) << 17) -#define MG_PLL_TDC_COLDST_COLDSTART (1 << 16) -#define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2) -#define MG_PLL_TDC_TDCSEL(x) ((x) << 0) -#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \ - _MG_PLL_TDC_COLDST_BIAS_PORT1, \ - _MG_PLL_TDC_COLDST_BIAS_PORT2) - #define _ICL_DPLL0_CFGCR0 0x164000 #define _ICL_DPLL1_CFGCR0 0x164080 #define ICL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \ @@ -11043,6 +7862,12 @@ enum skl_power_gate { #define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \ _TGL_DPLL1_CFGCR0) +#define _TGL_DPLL0_DIV0 0x164B00 +#define _TGL_DPLL1_DIV0 0x164C00 +#define TGL_DPLL0_DIV0(pll) _MMIO_PLL(pll, _TGL_DPLL0_DIV0, _TGL_DPLL1_DIV0) +#define TGL_DPLL0_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25) +#define TGL_DPLL0_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(TGL_DPLL0_DIV0_AFC_STARTUP_MASK, (val)) + #define _TGL_DPLL0_CFGCR1 0x164288 #define _TGL_DPLL1_CFGCR1 0x164290 #define _TGL_TBTPLL_CFGCR1 0x1642A0 @@ -11089,7 +7914,15 @@ enum skl_power_gate { #define _DKL_PHY6_BASE 0x16D000 /* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */ +#define _DKL_PCS_DW5 0x14 +#define DKL_PCS_DW5(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PCS_DW5) +#define DKL_PCS_DW5_CORE_SOFTRESET REG_BIT(11) + #define _DKL_PLL_DIV0 0x200 +#define DKL_PLL_DIV0_AFC_STARTUP_MASK REG_GENMASK(27, 25) +#define DKL_PLL_DIV0_AFC_STARTUP(val) REG_FIELD_PREP(DKL_PLL_DIV0_AFC_STARTUP_MASK, (val)) #define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16) #define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16) #define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12) @@ -11099,6 +7932,10 @@ enum skl_power_gate { #define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT) #define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0) #define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0) +#define DKL_PLL_DIV0_MASK (DKL_PLL_DIV0_INTEG_COEFF_MASK | \ + DKL_PLL_DIV0_PROP_COEFF_MASK | \ + DKL_PLL_DIV0_FBPREDIV_MASK | \ + DKL_PLL_DIV0_FBDIV_INT_MASK) #define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ _DKL_PHY2_BASE) + \ _DKL_PLL_DIV0) @@ -11272,93 +8109,7 @@ enum skl_power_gate { #define DC_STATE_DEBUG_MASK_CORES (1 << 0) #define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1) -#define BXT_D_CR_DRP0_DUNIT8 0x1000 -#define BXT_D_CR_DRP0_DUNIT9 0x1200 -#define BXT_D_CR_DRP0_DUNIT_START 8 -#define BXT_D_CR_DRP0_DUNIT_END 11 -#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \ - _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\ - BXT_D_CR_DRP0_DUNIT9)) -#define BXT_DRAM_RANK_MASK 0x3 -#define BXT_DRAM_RANK_SINGLE 0x1 -#define BXT_DRAM_RANK_DUAL 0x3 -#define BXT_DRAM_WIDTH_MASK (0x3 << 4) -#define BXT_DRAM_WIDTH_SHIFT 4 -#define BXT_DRAM_WIDTH_X8 (0x0 << 4) -#define BXT_DRAM_WIDTH_X16 (0x1 << 4) -#define 
BXT_DRAM_WIDTH_X32 (0x2 << 4) -#define BXT_DRAM_WIDTH_X64 (0x3 << 4) -#define BXT_DRAM_SIZE_MASK (0x7 << 6) -#define BXT_DRAM_SIZE_SHIFT 6 -#define BXT_DRAM_SIZE_4GBIT (0x0 << 6) -#define BXT_DRAM_SIZE_6GBIT (0x1 << 6) -#define BXT_DRAM_SIZE_8GBIT (0x2 << 6) -#define BXT_DRAM_SIZE_12GBIT (0x3 << 6) -#define BXT_DRAM_SIZE_16GBIT (0x4 << 6) -#define BXT_DRAM_TYPE_MASK (0x7 << 22) -#define BXT_DRAM_TYPE_SHIFT 22 -#define BXT_DRAM_TYPE_DDR3 (0x0 << 22) -#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22) -#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22) -#define BXT_DRAM_TYPE_DDR4 (0x4 << 22) - -#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04) -#define DG1_GEAR_TYPE REG_BIT(16) - -#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000) -#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0) -#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0) -#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0) -#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0) -#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0) - -#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C) -#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010) -#define SKL_DRAM_S_SHIFT 16 -#define SKL_DRAM_SIZE_MASK 0x3F -#define SKL_DRAM_WIDTH_MASK (0x3 << 8) -#define SKL_DRAM_WIDTH_SHIFT 8 -#define SKL_DRAM_WIDTH_X8 (0x0 << 8) -#define SKL_DRAM_WIDTH_X16 (0x1 << 8) -#define SKL_DRAM_WIDTH_X32 (0x2 << 8) -#define SKL_DRAM_RANK_MASK (0x1 << 10) -#define SKL_DRAM_RANK_SHIFT 10 -#define SKL_DRAM_RANK_1 (0x0 << 10) -#define SKL_DRAM_RANK_2 (0x1 << 10) -#define SKL_DRAM_RANK_MASK (0x1 << 10) -#define ICL_DRAM_SIZE_MASK 0x7F -#define ICL_DRAM_WIDTH_MASK (0x3 << 7) -#define ICL_DRAM_WIDTH_SHIFT 7 -#define ICL_DRAM_WIDTH_X8 (0x0 << 7) -#define ICL_DRAM_WIDTH_X16 (0x1 << 7) -#define ICL_DRAM_WIDTH_X32 (0x2 << 7) -#define ICL_DRAM_RANK_MASK (0x3 << 9) -#define ICL_DRAM_RANK_SHIFT 9 -#define ICL_DRAM_RANK_1 (0x0 << 9) -#define ICL_DRAM_RANK_2 (0x1 << 9) -#define ICL_DRAM_RANK_3 (0x2 << 9) -#define ICL_DRAM_RANK_4 (0x3 << 9) - -#define SA_PERF_STATUS_0_0_0_MCHBAR_PC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918) -#define DG1_QCLK_RATIO_MASK REG_GENMASK(9, 2) -#define DG1_QCLK_REFERENCE REG_BIT(10) - -#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000) -#define DG1_DRAM_T_RDPRE_MASK REG_GENMASK(16, 11) -#define DG1_DRAM_T_RP_MASK REG_GENMASK(6, 0) -#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004) -#define DG1_DRAM_T_RCD_MASK REG_GENMASK(15, 9) -#define DG1_DRAM_T_RAS_MASK REG_GENMASK(8, 1) - -/* - * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, - * since on HSW we can't write to it using intel_uncore_write. - */ -#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C) #define D_COMP_BDW _MMIO(0x138144) -#define D_COMP_RCOMP_IN_PROGRESS (1 << 9) -#define D_COMP_COMP_FORCE (1 << 8) -#define D_COMP_COMP_DISABLE (1 << 0) /* Pipe WM_LINETIME - watermark line time */ #define _WM_LINETIME_A 0x45270 @@ -11648,93 +8399,6 @@ enum skl_power_gate { #define CGM_PIPE_GAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_GAMMA, _CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4) #define CGM_PIPE_MODE(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_MODE, _CGM_PIPE_B_MODE) -/* MIPI DSI registers */ - -#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? 
a : c) /* ports A and C only */ -#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) - -/* Gen11 DSI */ -#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \ - dsi0, dsi1) - -#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) -#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF -#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) -#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF - -#define _ICL_DSI_ESC_CLK_DIV0 0x6b090 -#define _ICL_DSI_ESC_CLK_DIV1 0x6b890 -#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \ - _ICL_DSI_ESC_CLK_DIV0, \ - _ICL_DSI_ESC_CLK_DIV1) -#define _ICL_DPHY_ESC_CLK_DIV0 0x162190 -#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190 -#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \ - _ICL_DPHY_ESC_CLK_DIV0, \ - _ICL_DPHY_ESC_CLK_DIV1) -#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16) -#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16 -#define ICL_ESC_CLK_DIV_MASK 0x1ff -#define ICL_ESC_CLK_DIV_SHIFT 0 -#define DSI_MAX_ESC_CLK 20000 /* in KHz */ - -#define _ADL_MIPIO_REG 0x180 -#define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw)) -#define TX_ESC_CLK_DIV_PHY_SEL REGBIT(16) -#define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16) -#define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f) - -#define _DSI_CMD_FRMCTL_0 0x6b034 -#define _DSI_CMD_FRMCTL_1 0x6b834 -#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \ - _DSI_CMD_FRMCTL_0,\ - _DSI_CMD_FRMCTL_1) -#define DSI_FRAME_UPDATE_REQUEST (1 << 31) -#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29) -#define DSI_NULL_PACKET_ENABLE (1 << 28) -#define DSI_FRAME_IN_PROGRESS (1 << 0) - -#define _DSI_INTR_MASK_REG_0 0x6b070 -#define _DSI_INTR_MASK_REG_1 0x6b870 -#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \ - _DSI_INTR_MASK_REG_0,\ - _DSI_INTR_MASK_REG_1) - -#define _DSI_INTR_IDENT_REG_0 0x6b074 -#define _DSI_INTR_IDENT_REG_1 0x6b874 -#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \ - _DSI_INTR_IDENT_REG_0,\ - _DSI_INTR_IDENT_REG_1) -#define DSI_TE_EVENT (1 << 31) -#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30) -#define DSI_TX_DATA (1 << 29) -#define DSI_ULPS_ENTRY_DONE (1 << 28) -#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27) -#define DSI_HOST_CHKSUM_ERROR (1 << 26) -#define DSI_HOST_MULTI_ECC_ERROR (1 << 25) -#define DSI_HOST_SINGL_ECC_ERROR (1 << 24) -#define DSI_HOST_CONTENTION_DETECTED (1 << 23) -#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22) -#define DSI_HOST_TIMEOUT_ERROR (1 << 21) -#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20) -#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19) -#define DSI_FRAME_UPDATE_DONE (1 << 16) -#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15) -#define DSI_INVALID_TX_LENGTH (1 << 13) -#define DSI_INVALID_VC (1 << 12) -#define DSI_INVALID_DATA_TYPE (1 << 11) -#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10) -#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9) -#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8) -#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7) -#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6) -#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5) -#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4) -#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3) -#define DSI_EOT_SYNC_ERROR (1 << 2) -#define DSI_SOT_SYNC_ERROR (1 << 1) -#define DSI_SOT_ERROR (1 << 0) - /* Gen4+ Timestamp and Pipe Frame time stamp registers */ #define GEN4_TIMESTAMP _MMIO(0x2358) #define ILK_TIMESTAMP_HI _MMIO(0x70070) @@ -11750,143 +8414,6 @@ enum skl_power_gate { #define PIPE_FRMTMSTMP(pipe) \ _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A) -/* BXT MIPI clock controls */ -#define 
BXT_MAX_VAR_OUTPUT_KHZ 39500 - -#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090) -#define BXT_MIPI1_DIV_SHIFT 26 -#define BXT_MIPI2_DIV_SHIFT 10 -#define BXT_MIPI_DIV_SHIFT(port) \ - _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \ - BXT_MIPI2_DIV_SHIFT) - -/* TX control divider to select actual TX clock output from (8x/var) */ -#define BXT_MIPI1_TX_ESCLK_SHIFT 26 -#define BXT_MIPI2_TX_ESCLK_SHIFT 10 -#define BXT_MIPI_TX_ESCLK_SHIFT(port) \ - _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \ - BXT_MIPI2_TX_ESCLK_SHIFT) -#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26) -#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10) -#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \ - _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \ - BXT_MIPI2_TX_ESCLK_FIXDIV_MASK) -#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \ - (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port)) -/* RX upper control divider to select actual RX clock output from 8x */ -#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21 -#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5 -#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \ - _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \ - BXT_MIPI2_RX_ESCLK_UPPER_SHIFT) -#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21) -#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5) -#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \ - _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \ - BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK) -#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \ - (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port)) -/* 8/3X divider to select the actual 8/3X clock output from 8x */ -#define BXT_MIPI1_8X_BY3_SHIFT 19 -#define BXT_MIPI2_8X_BY3_SHIFT 3 -#define BXT_MIPI_8X_BY3_SHIFT(port) \ - _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \ - BXT_MIPI2_8X_BY3_SHIFT) -#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19) -#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3) -#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \ - _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \ - BXT_MIPI2_8X_BY3_DIVIDER_MASK) -#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \ - (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port)) -/* RX lower control divider to select actual RX clock output from 8x */ -#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16 -#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0 -#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \ - _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \ - BXT_MIPI2_RX_ESCLK_LOWER_SHIFT) -#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16) -#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0) -#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \ - _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \ - BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK) -#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \ - (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port)) - -#define RX_DIVIDER_BIT_1_2 0x3 -#define RX_DIVIDER_BIT_3_4 0xC - -/* BXT MIPI mode configure */ -#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 -#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8 -#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \ - _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE) - -#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC -#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC -#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \ - _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE) - -#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100 -#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900 -#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \ - _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL) - -#define BXT_DSI_PLL_CTL _MMIO(0x161000) -#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16 -#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << 
BXT_DSI_PLL_PVD_RATIO_SHIFT) -#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT) -#define BXT_DSIC_16X_BY1 (0 << 10) -#define BXT_DSIC_16X_BY2 (1 << 10) -#define BXT_DSIC_16X_BY3 (2 << 10) -#define BXT_DSIC_16X_BY4 (3 << 10) -#define BXT_DSIC_16X_MASK (3 << 10) -#define BXT_DSIA_16X_BY1 (0 << 8) -#define BXT_DSIA_16X_BY2 (1 << 8) -#define BXT_DSIA_16X_BY3 (2 << 8) -#define BXT_DSIA_16X_BY4 (3 << 8) -#define BXT_DSIA_16X_MASK (3 << 8) -#define BXT_DSI_FREQ_SEL_SHIFT 8 -#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT) - -#define BXT_DSI_PLL_RATIO_MAX 0x7D -#define BXT_DSI_PLL_RATIO_MIN 0x22 -#define GLK_DSI_PLL_RATIO_MAX 0x6F -#define GLK_DSI_PLL_RATIO_MIN 0x22 -#define BXT_DSI_PLL_RATIO_MASK 0xFF -#define BXT_REF_CLOCK_KHZ 19200 - -#define BXT_DSI_PLL_ENABLE _MMIO(0x46080) -#define BXT_DSI_PLL_DO_ENABLE (1 << 31) -#define BXT_DSI_PLL_LOCKED (1 << 30) - -#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) -#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) -#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) - - /* BXT port control */ -#define _BXT_MIPIA_PORT_CTRL 0x6B0C0 -#define _BXT_MIPIC_PORT_CTRL 0x6B8C0 -#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) - -/* ICL DSI MODE control */ -#define _ICL_DSI_IO_MODECTL_0 0x6B094 -#define _ICL_DSI_IO_MODECTL_1 0x6B894 -#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \ - _ICL_DSI_IO_MODECTL_0, \ - _ICL_DSI_IO_MODECTL_1) -#define COMBO_PHY_MODE_DSI (1 << 0) - -/* TGL DSI Chicken register */ -#define _TGL_DSI_CHKN_REG_0 0x6B0C0 -#define _TGL_DSI_CHKN_REG_1 0x6B8C0 -#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ - _TGL_DSI_CHKN_REG_0, \ - _TGL_DSI_CHKN_REG_1) -#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12) -#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \ - (byte_clocks)) - /* Display Stream Splitter Control */ #define DSS_CTL1 _MMIO(0x67400) #define SPLITTER_ENABLE (1 << 31) @@ -11925,718 +8452,6 @@ enum skl_power_gate { _ICL_PIPE_DSS_CTL2_PB, \ _ICL_PIPE_DSS_CTL2_PC) -#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) -#define STAP_SELECT (1 << 0) - -#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054) -#define HS_IO_CTRL_SELECT (1 << 0) - -#define DPI_ENABLE (1 << 31) /* A + C */ -#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 -#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) -#define DUAL_LINK_MODE_SHIFT 26 -#define DUAL_LINK_MODE_MASK (1 << 26) -#define DUAL_LINK_MODE_FRONT_BACK (0 << 26) -#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26) -#define DITHERING_ENABLE (1 << 25) /* A + C */ -#define FLOPPED_HSTX (1 << 23) -#define DE_INVERT (1 << 19) /* XXX */ -#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18 -#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18) -#define AFE_LATCHOUT (1 << 17) -#define LP_OUTPUT_HOLD (1 << 16) -#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15 -#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15) -#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11 -#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11) -#define CSB_SHIFT 9 -#define CSB_MASK (3 << 9) -#define CSB_20MHZ (0 << 9) -#define CSB_10MHZ (1 << 9) -#define CSB_40MHZ (2 << 9) -#define BANDGAP_MASK (1 << 8) -#define BANDGAP_PNW_CIRCUIT (0 << 8) -#define BANDGAP_LNC_CIRCUIT (1 << 8) -#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5 -#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5) -#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */ -#define TEARING_EFFECT_SHIFT 2 /* A + C */ -#define TEARING_EFFECT_MASK (3 << 2) -#define 
TEARING_EFFECT_OFF (0 << 2) -#define TEARING_EFFECT_DSI (1 << 2) -#define TEARING_EFFECT_GPIO (2 << 2) -#define LANE_CONFIGURATION_SHIFT 0 -#define LANE_CONFIGURATION_MASK (3 << 0) -#define LANE_CONFIGURATION_4LANE (0 << 0) -#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0) -#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0) - -#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) -#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) -#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) -#define TEARING_EFFECT_DELAY_SHIFT 0 -#define TEARING_EFFECT_DELAY_MASK (0xffff << 0) - -/* XXX: all bits reserved */ -#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) - -/* MIPI DSI Controller and D-PHY registers */ - -#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) -#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) -#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) -#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ -#define ULPS_STATE_MASK (3 << 1) -#define ULPS_STATE_ENTER (2 << 1) -#define ULPS_STATE_EXIT (1 << 1) -#define ULPS_STATE_NORMAL_OPERATION (0 << 1) -#define DEVICE_READY (1 << 0) - -#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) -#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) -#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) -#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) -#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) -#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) -#define TEARING_EFFECT (1 << 31) -#define SPL_PKT_SENT_INTERRUPT (1 << 30) -#define GEN_READ_DATA_AVAIL (1 << 29) -#define LP_GENERIC_WR_FIFO_FULL (1 << 28) -#define HS_GENERIC_WR_FIFO_FULL (1 << 27) -#define RX_PROT_VIOLATION (1 << 26) -#define RX_INVALID_TX_LENGTH (1 << 25) -#define ACK_WITH_NO_ERROR (1 << 24) -#define TURN_AROUND_ACK_TIMEOUT (1 << 23) -#define LP_RX_TIMEOUT (1 << 22) -#define HS_TX_TIMEOUT (1 << 21) -#define DPI_FIFO_UNDERRUN (1 << 20) -#define LOW_CONTENTION (1 << 19) -#define HIGH_CONTENTION (1 << 18) -#define TXDSI_VC_ID_INVALID (1 << 17) -#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16) -#define TXCHECKSUM_ERROR (1 << 15) -#define TXECC_MULTIBIT_ERROR (1 << 14) -#define TXECC_SINGLE_BIT_ERROR (1 << 13) -#define TXFALSE_CONTROL_ERROR (1 << 12) -#define RXDSI_VC_ID_INVALID (1 << 11) -#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10) -#define RXCHECKSUM_ERROR (1 << 9) -#define RXECC_MULTIBIT_ERROR (1 << 8) -#define RXECC_SINGLE_BIT_ERROR (1 << 7) -#define RXFALSE_CONTROL_ERROR (1 << 6) -#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5) -#define RX_LP_TX_SYNC_ERROR (1 << 4) -#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3) -#define RXEOT_SYNC_ERROR (1 << 2) -#define RXSOT_SYNC_ERROR (1 << 1) -#define RXSOT_ERROR (1 << 0) - -#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) -#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) -#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) -#define CMD_MODE_DATA_WIDTH_MASK (7 << 13) -#define CMD_MODE_NOT_SUPPORTED (0 << 13) -#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) -#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13) -#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13) -#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13) -#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13) -#define VID_MODE_FORMAT_MASK (0xf << 7) -#define VID_MODE_NOT_SUPPORTED (0 << 7) -#define VID_MODE_FORMAT_RGB565 (1 << 7) -#define 
VID_MODE_FORMAT_RGB666_PACKED (2 << 7) -#define VID_MODE_FORMAT_RGB666 (3 << 7) -#define VID_MODE_FORMAT_RGB888 (4 << 7) -#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5 -#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5) -#define VID_MODE_CHANNEL_NUMBER_SHIFT 3 -#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3) -#define DATA_LANES_PRG_REG_SHIFT 0 -#define DATA_LANES_PRG_REG_MASK (7 << 0) - -#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) -#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) -#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) -#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff - -#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) -#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) -#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) -#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff - -#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) -#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) -#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) -#define TURN_AROUND_TIMEOUT_MASK 0x3f - -#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) -#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) -#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) -#define DEVICE_RESET_TIMER_MASK 0xffff - -#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) -#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) -#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) -#define VERTICAL_ADDRESS_SHIFT 16 -#define VERTICAL_ADDRESS_MASK (0xffff << 16) -#define HORIZONTAL_ADDRESS_SHIFT 0 -#define HORIZONTAL_ADDRESS_MASK 0xffff - -#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) -#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) -#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) -#define DBI_FIFO_EMPTY_HALF (0 << 0) -#define DBI_FIFO_EMPTY_QUARTER (1 << 0) -#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) - -/* regs below are bits 15:0 */ -#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) -#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) -#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) - -#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) -#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) -#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) - -#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) -#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) -#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) - -#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) -#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) -#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) - -#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) -#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) -#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) - -#define _MIPIA_VBP_COUNT 
(dev_priv->mipi_mmio_base + 0xb03c) -#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) -#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) - -#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) -#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) -#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) - -#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) -#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) -#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) - -/* regs above are bits 15:0 */ - -#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) -#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) -#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL) -#define DPI_LP_MODE (1 << 6) -#define BACKLIGHT_OFF (1 << 5) -#define BACKLIGHT_ON (1 << 4) -#define COLOR_MODE_OFF (1 << 3) -#define COLOR_MODE_ON (1 << 2) -#define TURN_ON (1 << 1) -#define SHUTDOWN (1 << 0) - -#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) -#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) -#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) -#define COMMAND_BYTE_SHIFT 0 -#define COMMAND_BYTE_MASK (0x3f << 0) - -#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) -#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) -#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) -#define MASTER_INIT_TIMER_SHIFT 0 -#define MASTER_INIT_TIMER_MASK (0xffff << 0) - -#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) -#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) -#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \ - _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) -#define MAX_RETURN_PKT_SIZE_SHIFT 0 -#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) - -#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) -#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) -#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) -#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) -#define DISABLE_VIDEO_BTA (1 << 3) -#define IP_TG_CONFIG (1 << 2) -#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0) -#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) -#define VIDEO_MODE_BURST (3 << 0) - -#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) -#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) -#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) -#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) -#define BXT_DPHY_DEFEATURE_EN (1 << 8) -#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) -#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) -#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) -#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4) -#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3) -#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2) -#define CLOCKSTOP (1 << 1) -#define EOT_DISABLE (1 << 0) - -#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) -#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) -#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) -#define LP_BYTECLK_SHIFT 0 -#define LP_BYTECLK_MASK (0xffff << 0) - -#define 
_MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4) -#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4) -#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT) - -#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098) -#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898) -#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING) - -/* bits 31:0 */ -#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) -#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) -#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) - -/* bits 31:0 */ -#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) -#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) -#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) - -#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) -#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) -#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) -#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) -#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) -#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) -#define LONG_PACKET_WORD_COUNT_SHIFT 8 -#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) -#define SHORT_PACKET_PARAM_SHIFT 8 -#define SHORT_PACKET_PARAM_MASK (0xffff << 8) -#define VIRTUAL_CHANNEL_SHIFT 6 -#define VIRTUAL_CHANNEL_MASK (3 << 6) -#define DATA_TYPE_SHIFT 0 -#define DATA_TYPE_MASK (0x3f << 0) -/* data type values, see include/video/mipi_display.h */ - -#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) -#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) -#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) -#define DPI_FIFO_EMPTY (1 << 28) -#define DBI_FIFO_EMPTY (1 << 27) -#define LP_CTRL_FIFO_EMPTY (1 << 26) -#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25) -#define LP_CTRL_FIFO_FULL (1 << 24) -#define HS_CTRL_FIFO_EMPTY (1 << 18) -#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17) -#define HS_CTRL_FIFO_FULL (1 << 16) -#define LP_DATA_FIFO_EMPTY (1 << 10) -#define LP_DATA_FIFO_HALF_EMPTY (1 << 9) -#define LP_DATA_FIFO_FULL (1 << 8) -#define HS_DATA_FIFO_EMPTY (1 << 2) -#define HS_DATA_FIFO_HALF_EMPTY (1 << 1) -#define HS_DATA_FIFO_FULL (1 << 0) - -#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) -#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) -#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) -#define DBI_HS_LP_MODE_MASK (1 << 0) -#define DBI_LP_MODE (1 << 0) -#define DBI_HS_MODE (0 << 0) - -#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) -#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) -#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) -#define EXIT_ZERO_COUNT_SHIFT 24 -#define EXIT_ZERO_COUNT_MASK (0x3f << 24) -#define TRAIL_COUNT_SHIFT 16 -#define TRAIL_COUNT_MASK (0x1f << 16) -#define CLK_ZERO_COUNT_SHIFT 8 -#define CLK_ZERO_COUNT_MASK (0xff << 8) -#define PREPARE_COUNT_SHIFT 0 -#define PREPARE_COUNT_MASK (0x3f << 0) - -#define _ICL_DSI_T_INIT_MASTER_0 0x6b088 -#define _ICL_DSI_T_INIT_MASTER_1 0x6b888 -#define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \ - _ICL_DSI_T_INIT_MASTER_0,\ - _ICL_DSI_T_INIT_MASTER_1) - 
-#define _DPHY_CLK_TIMING_PARAM_0 0x162180 -#define _DPHY_CLK_TIMING_PARAM_1 0x6c180 -#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ - _DPHY_CLK_TIMING_PARAM_0,\ - _DPHY_CLK_TIMING_PARAM_1) -#define _DSI_CLK_TIMING_PARAM_0 0x6b080 -#define _DSI_CLK_TIMING_PARAM_1 0x6b880 -#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ - _DSI_CLK_TIMING_PARAM_0,\ - _DSI_CLK_TIMING_PARAM_1) -#define CLK_PREPARE_OVERRIDE (1 << 31) -#define CLK_PREPARE(x) ((x) << 28) -#define CLK_PREPARE_MASK (0x7 << 28) -#define CLK_PREPARE_SHIFT 28 -#define CLK_ZERO_OVERRIDE (1 << 27) -#define CLK_ZERO(x) ((x) << 20) -#define CLK_ZERO_MASK (0xf << 20) -#define CLK_ZERO_SHIFT 20 -#define CLK_PRE_OVERRIDE (1 << 19) -#define CLK_PRE(x) ((x) << 16) -#define CLK_PRE_MASK (0x3 << 16) -#define CLK_PRE_SHIFT 16 -#define CLK_POST_OVERRIDE (1 << 15) -#define CLK_POST(x) ((x) << 8) -#define CLK_POST_MASK (0x7 << 8) -#define CLK_POST_SHIFT 8 -#define CLK_TRAIL_OVERRIDE (1 << 7) -#define CLK_TRAIL(x) ((x) << 0) -#define CLK_TRAIL_MASK (0xf << 0) -#define CLK_TRAIL_SHIFT 0 - -#define _DPHY_DATA_TIMING_PARAM_0 0x162184 -#define _DPHY_DATA_TIMING_PARAM_1 0x6c184 -#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ - _DPHY_DATA_TIMING_PARAM_0,\ - _DPHY_DATA_TIMING_PARAM_1) -#define _DSI_DATA_TIMING_PARAM_0 0x6B084 -#define _DSI_DATA_TIMING_PARAM_1 0x6B884 -#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ - _DSI_DATA_TIMING_PARAM_0,\ - _DSI_DATA_TIMING_PARAM_1) -#define HS_PREPARE_OVERRIDE (1 << 31) -#define HS_PREPARE(x) ((x) << 24) -#define HS_PREPARE_MASK (0x7 << 24) -#define HS_PREPARE_SHIFT 24 -#define HS_ZERO_OVERRIDE (1 << 23) -#define HS_ZERO(x) ((x) << 16) -#define HS_ZERO_MASK (0xf << 16) -#define HS_ZERO_SHIFT 16 -#define HS_TRAIL_OVERRIDE (1 << 15) -#define HS_TRAIL(x) ((x) << 8) -#define HS_TRAIL_MASK (0x7 << 8) -#define HS_TRAIL_SHIFT 8 -#define HS_EXIT_OVERRIDE (1 << 7) -#define HS_EXIT(x) ((x) << 0) -#define HS_EXIT_MASK (0x7 << 0) -#define HS_EXIT_SHIFT 0 - -#define _DPHY_TA_TIMING_PARAM_0 0x162188 -#define _DPHY_TA_TIMING_PARAM_1 0x6c188 -#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ - _DPHY_TA_TIMING_PARAM_0,\ - _DPHY_TA_TIMING_PARAM_1) -#define _DSI_TA_TIMING_PARAM_0 0x6b098 -#define _DSI_TA_TIMING_PARAM_1 0x6b898 -#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ - _DSI_TA_TIMING_PARAM_0,\ - _DSI_TA_TIMING_PARAM_1) -#define TA_SURE_OVERRIDE (1 << 31) -#define TA_SURE(x) ((x) << 16) -#define TA_SURE_MASK (0x1f << 16) -#define TA_SURE_SHIFT 16 -#define TA_GO_OVERRIDE (1 << 15) -#define TA_GO(x) ((x) << 8) -#define TA_GO_MASK (0xf << 8) -#define TA_GO_SHIFT 8 -#define TA_GET_OVERRIDE (1 << 7) -#define TA_GET(x) ((x) << 0) -#define TA_GET_MASK (0xf << 0) -#define TA_GET_SHIFT 0 - -/* DSI transcoder configuration */ -#define _DSI_TRANS_FUNC_CONF_0 0x6b030 -#define _DSI_TRANS_FUNC_CONF_1 0x6b830 -#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \ - _DSI_TRANS_FUNC_CONF_0,\ - _DSI_TRANS_FUNC_CONF_1) -#define OP_MODE_MASK (0x3 << 28) -#define OP_MODE_SHIFT 28 -#define CMD_MODE_NO_GATE (0x0 << 28) -#define CMD_MODE_TE_GATE (0x1 << 28) -#define VIDEO_MODE_SYNC_EVENT (0x2 << 28) -#define VIDEO_MODE_SYNC_PULSE (0x3 << 28) -#define TE_SOURCE_GPIO (1 << 27) -#define LINK_READY (1 << 20) -#define PIX_FMT_MASK (0x3 << 16) -#define PIX_FMT_SHIFT 16 -#define PIX_FMT_RGB565 (0x0 << 16) -#define PIX_FMT_RGB666_PACKED (0x1 << 16) -#define PIX_FMT_RGB666_LOOSE (0x2 << 16) -#define PIX_FMT_RGB888 (0x3 << 16) -#define PIX_FMT_RGB101010 (0x4 << 16) -#define PIX_FMT_RGB121212 (0x5 << 16) -#define PIX_FMT_COMPRESSED 
(0x6 << 16) -#define BGR_TRANSMISSION (1 << 15) -#define PIX_VIRT_CHAN(x) ((x) << 12) -#define PIX_VIRT_CHAN_MASK (0x3 << 12) -#define PIX_VIRT_CHAN_SHIFT 12 -#define PIX_BUF_THRESHOLD_MASK (0x3 << 10) -#define PIX_BUF_THRESHOLD_SHIFT 10 -#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10) -#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10) -#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10) -#define PIX_BUF_THRESHOLD_FULL (0x3 << 10) -#define CONTINUOUS_CLK_MASK (0x3 << 8) -#define CONTINUOUS_CLK_SHIFT 8 -#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8) -#define CLK_HS_OR_LP (0x2 << 8) -#define CLK_HS_CONTINUOUS (0x3 << 8) -#define LINK_CALIBRATION_MASK (0x3 << 4) -#define LINK_CALIBRATION_SHIFT 4 -#define CALIBRATION_DISABLED (0x0 << 4) -#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4) -#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4) -#define BLANKING_PACKET_ENABLE (1 << 2) -#define S3D_ORIENTATION_LANDSCAPE (1 << 1) -#define EOTP_DISABLED (1 << 0) - -#define _DSI_CMD_RXCTL_0 0x6b0d4 -#define _DSI_CMD_RXCTL_1 0x6b8d4 -#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \ - _DSI_CMD_RXCTL_0,\ - _DSI_CMD_RXCTL_1) -#define READ_UNLOADS_DW (1 << 16) -#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15) -#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14) -#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13) -#define RECEIVED_RESET_TRIGGER (1 << 12) -#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11) -#define RECEIVED_CRC_WAS_LOST (1 << 10) -#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0) -#define NUMBER_RX_PLOAD_DW_SHIFT 0 - -#define _DSI_CMD_TXCTL_0 0x6b0d0 -#define _DSI_CMD_TXCTL_1 0x6b8d0 -#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \ - _DSI_CMD_TXCTL_0,\ - _DSI_CMD_TXCTL_1) -#define KEEP_LINK_IN_HS (1 << 24) -#define FREE_HEADER_CREDIT_MASK (0x1f << 8) -#define FREE_HEADER_CREDIT_SHIFT 0x8 -#define FREE_PLOAD_CREDIT_MASK (0xff << 0) -#define FREE_PLOAD_CREDIT_SHIFT 0 -#define MAX_HEADER_CREDIT 0x10 -#define MAX_PLOAD_CREDIT 0x40 - -#define _DSI_CMD_TXHDR_0 0x6b100 -#define _DSI_CMD_TXHDR_1 0x6b900 -#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \ - _DSI_CMD_TXHDR_0,\ - _DSI_CMD_TXHDR_1) -#define PAYLOAD_PRESENT (1 << 31) -#define LP_DATA_TRANSFER (1 << 30) -#define VBLANK_FENCE (1 << 29) -#define PARAM_WC_MASK (0xffff << 8) -#define PARAM_WC_LOWER_SHIFT 8 -#define PARAM_WC_UPPER_SHIFT 16 -#define VC_MASK (0x3 << 6) -#define VC_SHIFT 6 -#define DT_MASK (0x3f << 0) -#define DT_SHIFT 0 - -#define _DSI_CMD_TXPYLD_0 0x6b104 -#define _DSI_CMD_TXPYLD_1 0x6b904 -#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \ - _DSI_CMD_TXPYLD_0,\ - _DSI_CMD_TXPYLD_1) - -#define _DSI_LP_MSG_0 0x6b0d8 -#define _DSI_LP_MSG_1 0x6b8d8 -#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \ - _DSI_LP_MSG_0,\ - _DSI_LP_MSG_1) -#define LPTX_IN_PROGRESS (1 << 17) -#define LINK_IN_ULPS (1 << 16) -#define LINK_ULPS_TYPE_LP11 (1 << 8) -#define LINK_ENTER_ULPS (1 << 0) - -/* DSI timeout registers */ -#define _DSI_HSTX_TO_0 0x6b044 -#define _DSI_HSTX_TO_1 0x6b844 -#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \ - _DSI_HSTX_TO_0,\ - _DSI_HSTX_TO_1) -#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16) -#define HSTX_TIMEOUT_VALUE_SHIFT 16 -#define HSTX_TIMEOUT_VALUE(x) ((x) << 16) -#define HSTX_TIMED_OUT (1 << 0) - -#define _DSI_LPRX_HOST_TO_0 0x6b048 -#define _DSI_LPRX_HOST_TO_1 0x6b848 -#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \ - _DSI_LPRX_HOST_TO_0,\ - _DSI_LPRX_HOST_TO_1) -#define LPRX_TIMED_OUT (1 << 16) -#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0) -#define LPRX_TIMEOUT_VALUE_SHIFT 0 -#define LPRX_TIMEOUT_VALUE(x) ((x) << 0) - -#define _DSI_PWAIT_TO_0 0x6b040 -#define _DSI_PWAIT_TO_1 0x6b840 -#define DSI_PWAIT_TO(tc) 
_MMIO_DSI(tc, \ - _DSI_PWAIT_TO_0,\ - _DSI_PWAIT_TO_1) -#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16) -#define PRESET_TIMEOUT_VALUE_SHIFT 16 -#define PRESET_TIMEOUT_VALUE(x) ((x) << 16) -#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0) -#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0 -#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0) - -#define _DSI_TA_TO_0 0x6b04c -#define _DSI_TA_TO_1 0x6b84c -#define DSI_TA_TO(tc) _MMIO_DSI(tc, \ - _DSI_TA_TO_0,\ - _DSI_TA_TO_1) -#define TA_TIMED_OUT (1 << 16) -#define TA_TIMEOUT_VALUE_MASK (0xffff << 0) -#define TA_TIMEOUT_VALUE_SHIFT 0 -#define TA_TIMEOUT_VALUE(x) ((x) << 0) - -/* bits 31:0 */ -#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) -#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) -#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) - -#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088) -#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888) -#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) -#define LP_HS_SSW_CNT_SHIFT 16 -#define LP_HS_SSW_CNT_MASK (0xffff << 16) -#define HS_LP_PWR_SW_CNT_SHIFT 0 -#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) - -#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) -#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) -#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) -#define STOP_STATE_STALL_COUNTER_SHIFT 0 -#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) - -#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) -#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) -#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) -#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) -#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) -#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) -#define RX_CONTENTION_DETECTED (1 << 0) - -/* XXX: only pipe A ?!? 
*/ -#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100) -#define DBI_TYPEC_ENABLE (1 << 31) -#define DBI_TYPEC_WIP (1 << 30) -#define DBI_TYPEC_OPTION_SHIFT 28 -#define DBI_TYPEC_OPTION_MASK (3 << 28) -#define DBI_TYPEC_FREQ_SHIFT 24 -#define DBI_TYPEC_FREQ_MASK (0xf << 24) -#define DBI_TYPEC_OVERRIDE (1 << 8) -#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0 -#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0) - - -/* MIPI adapter registers */ - -#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) -#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) -#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL) -#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ -#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) -#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) -#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5) -#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5) -#define READ_REQUEST_PRIORITY_SHIFT 3 -#define READ_REQUEST_PRIORITY_MASK (3 << 3) -#define READ_REQUEST_PRIORITY_LOW (0 << 3) -#define READ_REQUEST_PRIORITY_HIGH (3 << 3) -#define RGB_FLIP_TO_BGR (1 << 2) - -#define BXT_PIPE_SELECT_SHIFT 7 -#define BXT_PIPE_SELECT_MASK (7 << 7) -#define BXT_PIPE_SELECT(pipe) ((pipe) << 7) -#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */ -#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */ -#define GLK_MIPIIO_RESET_RELEASED (1 << 28) -#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */ -#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */ -#define GLK_LP_WAKE (1 << 22) -#define GLK_LP11_LOW_PWR_MODE (1 << 21) -#define GLK_LP00_LOW_PWR_MODE (1 << 20) -#define GLK_FIREWALL_ENABLE (1 << 16) -#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10) -#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10 -#define BXT_DSC_ENABLE (1 << 3) -#define BXT_RGB_FLIP (1 << 2) -#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */ -#define GLK_MIPIIO_ENABLE (1 << 0) - -#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) -#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) -#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) -#define DATA_MEM_ADDRESS_SHIFT 5 -#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) -#define DATA_VALID (1 << 0) - -#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) -#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) -#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) -#define DATA_LENGTH_SHIFT 0 -#define DATA_LENGTH_MASK (0xfffff << 0) - -#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) -#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) -#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) -#define COMMAND_MEM_ADDRESS_SHIFT 5 -#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) -#define AUTO_PWG_ENABLE (1 << 2) -#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) -#define COMMAND_VALID (1 << 0) - -#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) -#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) -#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) -#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ -#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) - -#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) -#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) -#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ - -#define 
_MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) -#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) -#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) -#define READ_DATA_VALID(n) (1 << (n)) - -/* MOCS (Memory Object Control State) registers */ -#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ -#define GEN9_LNCFCMOCS_REG_COUNT 32 - -#define __GEN9_RCS0_MOCS0 0xc800 -#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4) -#define __GEN9_VCS0_MOCS0 0xc900 -#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4) -#define __GEN9_VCS1_MOCS0 0xca00 -#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4) -#define __GEN9_VECS0_MOCS0 0xcb00 -#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4) -#define __GEN9_BCS0_MOCS0 0xcc00 -#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4) -#define __GEN11_VCS2_MOCS0 0x10000 -#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4) - -#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008) -#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0) - -#define GEN9_SCRATCH1 _MMIO(0xb11c) -#define EVICTION_PERF_FIX_ENABLE REG_BIT(8) - -#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0) -#define PMFLUSHDONE_LNICRSDROP (1 << 20) -#define PMFLUSH_GAPL3UNBLOCK (1 << 21) -#define PMFLUSHDONE_LNEBLK (1 << 22) - -#define XEHP_L3NODEARBCFG _MMIO(0xb0b4) -#define XEHP_LNESPARE REG_BIT(19) - -#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */ - #define GEN12_GSMBASE _MMIO(0x108100) #define GEN12_DSMBASE _MMIO(0x1080C0) @@ -12645,21 +8460,12 @@ enum skl_power_gate { #define SGGI_DIS REG_BIT(15) #define SGR_DIS REG_BIT(13) -/* gamt regs */ -#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) -#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ -#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */ -#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ -#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ - -#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */ -#define MMCD_PCLA (1 << 31) -#define MMCD_HOTSPOT_EN (1 << 27) - #define _ICL_PHY_MISC_A 0x64C00 #define _ICL_PHY_MISC_B 0x64C04 -#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \ - _ICL_PHY_MISC_B) +#define _DG2_PHY_MISC_TC1 0x64C14 /* TC1="PHY E" but offset as if "PHY F" */ +#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, _ICL_PHY_MISC_B) +#define DG2_PHY_MISC(port) ((port) == PHY_E ? _MMIO(_DG2_PHY_MISC_TC1) : \ + ICL_PHY_MISC(port)) #define ICL_PHY_MISC_MUX_DDID (1 << 28) #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) #define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20) @@ -12999,6 +8805,14 @@ enum skl_power_gate { #define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1) #define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0) +#define PRIMARY_SPI_TRIGGER _MMIO(0x102040) +#define PRIMARY_SPI_ADDRESS _MMIO(0x102080) +#define PRIMARY_SPI_REGIONID _MMIO(0x102084) +#define SPI_STATIC_REGIONS _MMIO(0x102090) +#define OPTIONROM_SPI_REGIONID_MASK REG_GENMASK(7, 0) +#define OROM_OFFSET _MMIO(0x1020c0) +#define OROM_OFFSET_MASK REG_GENMASK(20, 16) + /* This register controls the Display State Buffer (DSB) engines. 
*/ #define _DSBSL_INSTANCE_BASE 0x70B00 #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ @@ -13021,9 +8835,6 @@ enum skl_power_gate { #define CLKGATE_DIS_MISC _MMIO(0x46534) #define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21) -#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C) -#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14) - #define GEN12_CULLBIT1 _MMIO(0x6100) #define GEN12_CULLBIT2 _MMIO(0x7030) #define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC) diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h new file mode 100644 index 000000000000..d78d78fce431 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_REG_DEFS__ +#define __I915_REG_DEFS__ + +#include <linux/bitfield.h> +#include <linux/bits.h> + +/** + * REG_BIT() - Prepare a u32 bit value + * @__n: 0-based bit number + * + * Local wrapper for BIT() to force u32, with compile time checks. + * + * @return: Value with bit @__n set. + */ +#define REG_BIT(__n) \ + ((u32)(BIT(__n) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ + ((__n) < 0 || (__n) > 31)))) + +/** + * REG_GENMASK() - Prepare a continuous u32 bitmask + * @__high: 0-based high bit + * @__low: 0-based low bit + * + * Local wrapper for GENMASK() to force u32, with compile time checks. + * + * @return: Continuous bitmask from @__high to @__low, inclusive. + */ +#define REG_GENMASK(__high, __low) \ + ((u32)(GENMASK(__high, __low) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ + __is_constexpr(__low) && \ + ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) + +/** + * REG_GENMASK64() - Prepare a continuous u64 bitmask + * @__high: 0-based high bit + * @__low: 0-based low bit + * + * Local wrapper for GENMASK_ULL() to force u64, with compile time checks. + * + * @return: Continuous bitmask from @__high to @__low, inclusive. + */ +#define REG_GENMASK64(__high, __low) \ + ((u64)(GENMASK_ULL(__high, __low) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ + __is_constexpr(__low) && \ + ((__low) < 0 || (__high) > 63 || (__low) > (__high))))) + +/* + * Local integer constant expression version of is_power_of_2(). + */ +#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0)) + +/** + * REG_FIELD_PREP() - Prepare a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to put in the field + * + * Local copy of FIELD_PREP() to generate an integer constant expression, force + * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK(). + * + * @return: @__val masked and shifted into the field defined by @__mask. + */ +#define REG_FIELD_PREP(__mask, __val) \ + ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ + BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ + BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \ + BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ + BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) + +/** + * REG_FIELD_GET() - Extract a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to extract the bitfield value from + * + * Local wrapper for FIELD_GET() to force u32 and for consistency with + * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK(). + * + * @return: Masked and shifted value of the field defined by @__mask in @__val. 
+ */ +#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val)) + +/** + * REG_FIELD_GET64() - Extract a u64 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to extract the bitfield value from + * + * Local wrapper for FIELD_GET() to force u64 and for consistency with + * REG_GENMASK64(). + * + * @return: Masked and shifted value of the field defined by @__mask in @__val. + */ +#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val)) + +typedef struct { + u32 reg; +} i915_reg_t; + +#define _MMIO(r) ((const i915_reg_t){ .reg = (r) }) + +#define INVALID_MMIO_REG _MMIO(0) + +static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg) +{ + return reg.reg; +} + +static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b) +{ + return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b); +} + +static inline bool i915_mmio_reg_valid(i915_reg_t reg) +{ + return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); +} + +#define VLV_DISPLAY_BASE 0x180000 + +#define GEN12_SFC_DONE_MAX 4 + +#endif /* __I915_REG_DEFS__ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index ba3a70b2cc57..582770360ad1 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -36,6 +36,7 @@ #include "gt/intel_context.h" #include "gt/intel_engine.h" #include "gt/intel_engine_heartbeat.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_reset.h" #include "gt/intel_ring.h" @@ -43,6 +44,7 @@ #include "i915_active.h" #include "i915_deps.h" +#include "i915_driver.h" #include "i915_drv.h" #include "i915_trace.h" #include "intel_pm.h" diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f7b55f34dba8..889f5b7dc78e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -32,6 +32,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "i915_suspend.h" +#include "intel_pci_config.h" static void intel_save_swf(struct drm_i915_private *dev_priv) { diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index fae4d1f4f275..a4d1759375b9 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -30,6 +30,7 @@ #include <linux/stat.h> #include <linux/sysfs.h> +#include "gt/intel_gt_regs.h" #include "gt/intel_rc6.h" #include "gt/intel_rps.h" #include "gt/sysfs_engines.h" diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 7a5925072466..bfafd0afd117 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -37,21 +37,6 @@ struct timer_list; #define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" -#undef WARN_ON -/* Many gcc seem to no see through this and fall over :( */ -#if 0 -#define WARN_ON(x) ({ \ - bool __i915_warn_cond = (x); \ - if (__builtin_constant_p(__i915_warn_cond)) \ - BUILD_BUG_ON(__i915_warn_cond); \ - WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) -#else -#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") -#endif - -#undef WARN_ON_ONCE -#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") - #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ __stringify(x), (long)(x)) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 30307e34d2dc..845cd88f8313 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ 
b/drivers/gpu/drm/i915/i915_vma.c @@ -26,14 +26,15 @@ #include <drm/drm_gem.h> #include "display/intel_frontbuffer.h" - #include "gem/i915_gem_lmem.h" +#include "gem/i915_gem_tiling.h" #include "gt/intel_engine.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" #include "i915_drv.h" +#include "i915_gem_evict.h" #include "i915_sw_fence_work.h" #include "i915_trace.h" #include "i915_vma.h" @@ -967,30 +968,39 @@ err_st_alloc: } static struct scatterlist * -remap_pages(struct drm_i915_gem_object *obj, - unsigned int offset, unsigned int alignment_pad, - unsigned int width, unsigned int height, - unsigned int src_stride, unsigned int dst_stride, - struct sg_table *st, struct scatterlist *sg) +add_padding_pages(unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + + return sg; +} + +static struct scatterlist * +remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int offset, unsigned int alignment_pad, + unsigned int width, unsigned int height, + unsigned int src_stride, unsigned int dst_stride, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) { unsigned int row; if (!width || !height) return sg; - if (alignment_pad) { - st->nents++; - - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a convenience to indicate how many padding PTEs - * to insert at this spot. - */ - sg_set_page(sg, NULL, alignment_pad * 4096, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = alignment_pad * 4096; - sg = sg_next(sg); - } + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); for (row = 0; row < height; row++) { unsigned int left = width * I915_GTT_PAGE_SIZE; @@ -1027,18 +1037,98 @@ remap_pages(struct drm_i915_gem_object *obj, if (!left) continue; + sg = add_padding_pages(left >> PAGE_SHIFT, st, sg); + } + + *gtt_offset += alignment_pad + dst_stride * height; + + return sg; +} + +static struct scatterlist * +remap_contiguous_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, + unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + struct scatterlist *iter; + unsigned int offset; + + iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset); + GEM_BUG_ON(!iter); + + do { + unsigned int len; + + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) + return sg; - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a conenience to indicate how many padding PTEs - * to insert at this spot. 
- */ - sg_set_page(sg, NULL, left, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = left; - sg = sg_next(sg); - } + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); +} + +static struct scatterlist * +remap_linear_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, unsigned int alignment_pad, + unsigned int size, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + if (!size) + return sg; + + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); + + sg = remap_contiguous_pages(obj, obj_offset, size, st, sg); + sg = sg_next(sg); + + *gtt_offset += alignment_pad + size; + + return sg; +} + +static struct scatterlist * +remap_color_plane_pages(const struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj, + int color_plane, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + unsigned int alignment_pad = 0; + + if (rem_info->plane_alignment) + alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; + + if (rem_info->plane[color_plane].linear) + sg = remap_linear_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].size, + st, sg, + gtt_offset); + + else + sg = remap_tiled_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].width, + rem_info->plane[color_plane].height, + rem_info->plane[color_plane].src_stride, + rem_info->plane[color_plane].dst_stride, + st, sg, + gtt_offset); return sg; } @@ -1067,21 +1157,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info, st->nents = 0; sg = st->sgl; - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { - unsigned int alignment_pad = 0; - - if (rem_info->plane_alignment) - alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset; - - sg = remap_pages(obj, - rem_info->plane[i].offset, alignment_pad, - rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, - st, sg); - - gtt_offset += alignment_pad + - rem_info->plane[i].dst_stride * rem_info->plane[i].height; - } + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) + sg = remap_color_plane_pages(rem_info, obj, i, st, sg, >t_offset); i915_sg_trim(st); @@ -1103,9 +1180,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, struct drm_i915_gem_object *obj) { struct sg_table *st; - struct scatterlist *sg, *iter; + struct scatterlist *sg; unsigned int count = view->partial.size; - unsigned int offset; int ret = -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -1116,34 +1192,14 @@ intel_partial_pages(const struct i915_ggtt_view *view, if (ret) goto err_sg_alloc; - iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); - GEM_BUG_ON(!iter); - - sg = st->sgl; st->nents = 0; - do { - unsigned int len; - len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; - - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) { - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. */ + sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); - return st; - } + sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries. 
*/ - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); + return st; err_sg_alloc: kfree(st); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 04fd266d70e2..94da5aa37391 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -114,7 +114,7 @@ void intel_device_info_print_static(const struct intel_device_info *info, DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG -#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name)); +#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name)) DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG } @@ -170,6 +170,10 @@ static const u16 subplatform_portf_ids[] = { INTEL_ICL_PORT_F_IDS(0), }; +static const u16 subplatform_n_ids[] = { + INTEL_ADLN_IDS(0), +}; + static const u16 subplatform_rpls_ids[] = { INTEL_RPLS_IDS(0), }; @@ -210,6 +214,9 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { mask = BIT(INTEL_SUBPLATFORM_PORTF); + } else if (find_devid(devid, subplatform_n_ids, + ARRAY_SIZE(subplatform_n_ids))) { + mask = BIT(INTEL_SUBPLATFORM_N); } else if (find_devid(devid, subplatform_rpls_ids, ARRAY_SIZE(subplatform_rpls_ids))) { mask = BIT(INTEL_SUBPLATFORM_RPL_S); @@ -328,6 +335,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) "Display fused off, disabling\n"); info->display.pipe_mask = 0; info->display.cpu_transcoder_mask = 0; + info->display.fbc_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); info->display.pipe_mask &= ~BIT(PIPE_C); @@ -339,6 +347,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { info->display.pipe_mask &= ~BIT(PIPE_A); info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + info->display.fbc_mask &= ~BIT(INTEL_FBC_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { info->display.pipe_mask &= ~BIT(PIPE_B); @@ -359,7 +368,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->display.has_hdcp = 0; if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - info->display.has_fbc = 0; + info->display.fbc_mask = 0; if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) info->display.has_dmc = 0; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 09ce63c8f78f..27dcfe6f2429 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -114,6 +114,9 @@ enum intel_platform { /* ADL-S */ #define INTEL_SUBPLATFORM_RPL_S 0 +/* ADL-P */ +#define INTEL_SUBPLATFORM_N 0 + enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, @@ -158,7 +161,6 @@ enum intel_ppgtt_type { func(has_dp_mst); \ func(has_dsb); \ func(has_dsc); \ - func(has_fbc); \ func(has_fpga_dbg); \ func(has_gmch); \ func(has_hdcp); \ @@ -208,6 +210,7 @@ struct intel_device_info { u8 pipe_mask; u8 cpu_transcoder_mask; + u8 fbc_mask; u8 abox_mask; #define DEFINE_FLAG(name) u8 name:1 diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 84bb212bae4b..174c95c3e10f 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -4,7 +4,9 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "intel_dram.h" +#include 
"intel_mchbar_regs.h" #include "intel_pcode.h" struct dram_dimm_info { @@ -389,10 +391,8 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv) u32 val = 0; int ret; - ret = sandybridge_pcode_read(dev_priv, - ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, - &val, NULL); + ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h new file mode 100644 index 000000000000..2aad2f0cc8db --- /dev/null +++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_MCHBAR_REGS__ +#define __INTEL_MCHBAR_REGS__ + +#include "i915_reg_defs.h" + +/* + * MCHBAR mirror. + * + * This mirrors the MCHBAR MMIO space whose location is determined by + * device 0 function 0's pci config register 0x44 or 0x48 and matches it in + * every way. It is not accessible from the CP register read instructions. + * + * Starting from Haswell, you can't write registers using the MCHBAR mirror, + * just read. + */ + +#define MCHBAR_MIRROR_BASE 0x10000 +#define MCHBAR_MIRROR_BASE_SNB 0x140000 + +#define CTG_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x34) +#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48) +#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16) +#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4) +#define G4X_STOLEN_RESERVED_ENABLE (1 << 0) + +/* Pineview MCH register contains DDR3 setting */ +#define CSHRDDR3CTL _MMIO(MCHBAR_MIRROR_BASE + 0x1a8) +#define CSHRDDR3CTL_DDR3 (1 << 2) + +/* 915-945 and GM965 MCH register controlling DRAM channel access */ +#define DCC _MMIO(MCHBAR_MIRROR_BASE + 0x200) +#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) +#define DCC_ADDRESSING_MODE_MASK (3 << 0) +#define DCC_CHANNEL_XOR_DISABLE (1 << 10) +#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) +#define DCC2 _MMIO(MCHBAR_MIRROR_BASE + 0x204) +#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20) + +/* 965 MCH register controlling DRAM channel configuration */ +#define C0DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x206) +#define C1DRB3_BW _MMIO(MCHBAR_MIRROR_BASE + 0x606) + +/* Clocking configuration register */ +#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00) +#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */ +#define CLKCFG_FSB_400_ALT (5 << 0) /* hrawclk 100 */ +#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ +#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ +#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ +#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ +#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */ +#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ +#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */ +#define CLKCFG_FSB_1600_ALT (6 << 0) /* hrawclk 400 */ +#define CLKCFG_FSB_MASK (7 << 0) +#define CLKCFG_MEM_533 (1 << 4) +#define CLKCFG_MEM_667 (2 << 4) +#define CLKCFG_MEM_800 (3 << 4) +#define CLKCFG_MEM_MASK (7 << 4) + +#define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f) +#define HPLLVCO _MMIO(MCHBAR_MIRROR_BASE + 0xc38) + +#define TSC1 _MMIO(MCHBAR_MIRROR_BASE + 0x1001) +#define TSE (1 << 0) +#define TR1 _MMIO(MCHBAR_MIRROR_BASE + 0x1006) +#define TSFS _MMIO(MCHBAR_MIRROR_BASE + 0x1020) +#define TSFS_SLOPE_MASK 0x0000ff00 +#define TSFS_SLOPE_SHIFT 8 
+#define TSFS_INTR_MASK 0x000000ff + +/* Memory latency timer register */ +#define MLTR_ILK _MMIO(MCHBAR_MIRROR_BASE + 0x1222) +/* the unit of memory self-refresh latency time is 0.5us */ +#define MLTR_WM2_MASK REG_GENMASK(13, 8) +#define MLTR_WM1_MASK REG_GENMASK(5, 0) + +#define CSIPLL0 _MMIO(MCHBAR_MIRROR_BASE + 0x2c10) +#define DDRMPLL1 _MMIO(MCHBAR_MIRROR_BASE + 0x2c20) + +#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4) +#define ILK_GRDOM_FULL (0 << 1) +#define ILK_GRDOM_RENDER (1 << 1) +#define ILK_GRDOM_MEDIA (3 << 1) +#define ILK_GRDOM_MASK (3 << 1) +#define ILK_GRDOM_RESET_ENABLE (1 << 0) + +#define BXT_D_CR_DRP0_DUNIT8 0x1000 +#define BXT_D_CR_DRP0_DUNIT9 0x1200 +#define BXT_D_CR_DRP0_DUNIT_START 8 +#define BXT_D_CR_DRP0_DUNIT_END 11 +#define BXT_D_CR_DRP0_DUNIT(x) _MMIO(MCHBAR_MIRROR_BASE_SNB + \ + _PICK_EVEN((x) - 8, BXT_D_CR_DRP0_DUNIT8,\ + BXT_D_CR_DRP0_DUNIT9)) +#define BXT_DRAM_RANK_MASK 0x3 +#define BXT_DRAM_RANK_SINGLE 0x1 +#define BXT_DRAM_RANK_DUAL 0x3 +#define BXT_DRAM_WIDTH_MASK (0x3 << 4) +#define BXT_DRAM_WIDTH_SHIFT 4 +#define BXT_DRAM_WIDTH_X8 (0x0 << 4) +#define BXT_DRAM_WIDTH_X16 (0x1 << 4) +#define BXT_DRAM_WIDTH_X32 (0x2 << 4) +#define BXT_DRAM_WIDTH_X64 (0x3 << 4) +#define BXT_DRAM_SIZE_MASK (0x7 << 6) +#define BXT_DRAM_SIZE_SHIFT 6 +#define BXT_DRAM_SIZE_4GBIT (0x0 << 6) +#define BXT_DRAM_SIZE_6GBIT (0x1 << 6) +#define BXT_DRAM_SIZE_8GBIT (0x2 << 6) +#define BXT_DRAM_SIZE_12GBIT (0x3 << 6) +#define BXT_DRAM_SIZE_16GBIT (0x4 << 6) +#define BXT_DRAM_TYPE_MASK (0x7 << 22) +#define BXT_DRAM_TYPE_SHIFT 22 +#define BXT_DRAM_TYPE_DDR3 (0x0 << 22) +#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22) +#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22) +#define BXT_DRAM_TYPE_DDR4 (0x4 << 22) + +#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000) +#define DG1_DRAM_T_RDPRE_MASK REG_GENMASK(16, 11) +#define DG1_DRAM_T_RP_MASK REG_GENMASK(6, 0) +#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004) +#define DG1_DRAM_T_RCD_MASK REG_GENMASK(15, 9) +#define DG1_DRAM_T_RAS_MASK REG_GENMASK(8, 1) + +#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000) +#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0) +#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0) +#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0) +#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0) +#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0) + +/* snb MCH registers for reading the DRAM channel configuration */ +#define MAD_DIMM_C0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5004) +#define MAD_DIMM_C1 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5008) +#define MAD_DIMM_C2 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C) +#define MAD_DIMM_ECC_MASK (0x3 << 24) +#define MAD_DIMM_ECC_OFF (0x0 << 24) +#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24) +#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24) +#define MAD_DIMM_ECC_ON (0x3 << 24) +#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22) +#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21) +#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */ +#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */ +#define MAD_DIMM_B_DUAL_RANK (0x1 << 18) +#define MAD_DIMM_A_DUAL_RANK (0x1 << 17) +#define MAD_DIMM_A_SELECT (0x1 << 16) +/* DIMM sizes are in multiples of 256mb. 
*/ +#define MAD_DIMM_B_SIZE_SHIFT 8 +#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT) +#define MAD_DIMM_A_SIZE_SHIFT 0 +#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) + +#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C) +#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010) +#define SKL_DRAM_S_SHIFT 16 +#define SKL_DRAM_SIZE_MASK 0x3F +#define SKL_DRAM_WIDTH_MASK (0x3 << 8) +#define SKL_DRAM_WIDTH_SHIFT 8 +#define SKL_DRAM_WIDTH_X8 (0x0 << 8) +#define SKL_DRAM_WIDTH_X16 (0x1 << 8) +#define SKL_DRAM_WIDTH_X32 (0x2 << 8) +#define SKL_DRAM_RANK_MASK (0x1 << 10) +#define SKL_DRAM_RANK_SHIFT 10 +#define SKL_DRAM_RANK_1 (0x0 << 10) +#define SKL_DRAM_RANK_2 (0x1 << 10) +#define SKL_DRAM_RANK_MASK (0x1 << 10) +#define ICL_DRAM_SIZE_MASK 0x7F +#define ICL_DRAM_WIDTH_MASK (0x3 << 7) +#define ICL_DRAM_WIDTH_SHIFT 7 +#define ICL_DRAM_WIDTH_X8 (0x0 << 7) +#define ICL_DRAM_WIDTH_X16 (0x1 << 7) +#define ICL_DRAM_WIDTH_X32 (0x2 << 7) +#define ICL_DRAM_RANK_MASK (0x3 << 9) +#define ICL_DRAM_RANK_SHIFT 9 +#define ICL_DRAM_RANK_1 (0x0 << 9) +#define ICL_DRAM_RANK_2 (0x1 << 9) +#define ICL_DRAM_RANK_3 (0x2 << 9) +#define ICL_DRAM_RANK_4 (0x3 << 9) + +#define SA_PERF_STATUS_0_0_0_MCHBAR_PC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918) +#define DG1_QCLK_RATIO_MASK REG_GENMASK(9, 2) +#define DG1_QCLK_REFERENCE REG_BIT(10) + +#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948) +#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994) +#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) +#define RP0_CAP_MASK REG_GENMASK(7, 0) +#define RP1_CAP_MASK REG_GENMASK(15, 8) +#define RPN_CAP_MASK REG_GENMASK(23, 16) + +/* snb MCH registers for priority tuning */ +#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10) +#define SSKPD_NEW_WM0_MASK_HSW REG_GENMASK64(63, 56) +#define SSKPD_WM4_MASK_HSW REG_GENMASK64(40, 32) +#define SSKPD_WM3_MASK_HSW REG_GENMASK64(28, 20) +#define SSKPD_WM2_MASK_HSW REG_GENMASK64(19, 12) +#define SSKPD_WM1_MASK_HSW REG_GENMASK64(11, 4) +#define SSKPD_OLD_WM0_MASK_HSW REG_GENMASK64(3, 0) +#define SSKPD_WM3_MASK_SNB REG_GENMASK(29, 24) +#define SSKPD_WM2_MASK_SNB REG_GENMASK(21, 16) +#define SSKPD_WM1_MASK_SNB REG_GENMASK(13, 8) +#define SSKPD_WM0_MASK_SNB REG_GENMASK(5, 0) + +/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ +#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04) +#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04) +#define DG1_GEAR_TYPE REG_BIT(16) + +/* + * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, + * since on HSW we can't write to it using intel_uncore_write. 
+ */ +#define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5f0c) +#define D_COMP_RCOMP_IN_PROGRESS (1 << 9) +#define D_COMP_COMP_FORCE (1 << 8) +#define D_COMP_COMP_DISABLE (1 << 0) + +#define BXT_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070) + +#endif /* __INTEL_MCHBAR_REGS */ diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index da8f82c2342f..4f7a61d5502e 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -130,6 +130,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) case INTEL_PCH_ADP_DEVICE_ID_TYPE: case INTEL_PCH_ADP2_DEVICE_ID_TYPE: case INTEL_PCH_ADP3_DEVICE_ID_TYPE: + case INTEL_PCH_ADP4_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 6bff77521094..6fd20408f7bf 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -58,6 +58,7 @@ enum intel_pch { #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 +#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ diff --git a/drivers/gpu/drm/i915/intel_pci_config.h b/drivers/gpu/drm/i915/intel_pci_config.h new file mode 100644 index 000000000000..12cd9d4f23de --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pci_config.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_PCI_CONFIG_H__ +#define __INTEL_PCI_CONFIG_H__ + +/* BSM in include/drm/i915_drm.h */ + +#define MCHBAR_I915 0x44 +#define MCHBAR_I965 0x48 +#define MCHBAR_SIZE (4 * 4096) + +#define DEVEN 0x54 +#define DEVEN_MCHBAR_EN (1 << 28) + +#define HPLLCC 0xc0 /* 85x only */ +#define GC_CLOCK_CONTROL_MASK (0x7 << 0) +#define GC_CLOCK_133_200 (0 << 0) +#define GC_CLOCK_100_200 (1 << 0) +#define GC_CLOCK_100_133 (2 << 0) +#define GC_CLOCK_133_266 (3 << 0) +#define GC_CLOCK_133_200_2 (4 << 0) +#define GC_CLOCK_133_266_2 (5 << 0) +#define GC_CLOCK_166_266 (6 << 0) +#define GC_CLOCK_166_250 (7 << 0) + +#define I915_GDRST 0xc0 +#define GRDOM_FULL (0 << 2) +#define GRDOM_RENDER (1 << 2) +#define GRDOM_MEDIA (3 << 2) +#define GRDOM_MASK (3 << 2) +#define GRDOM_RESET_STATUS (1 << 1) +#define GRDOM_RESET_ENABLE (1 << 0) + +/* BSpec only has register offset, PCI device and bit found empirically */ +#define I830_CLOCK_GATE 0xc8 /* device 0 */ +#define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2) + +#define GCDGMBUS 0xcc + +#define GCFGC2 0xda +#define GCFGC 0xf0 /* 915+ only */ +#define GC_LOW_FREQUENCY_ENABLE (1 << 7) +#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) +#define GC_DISPLAY_CLOCK_333_320_MHZ (4 << 4) +#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) +#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) +#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) +#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) +#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) +#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) +#define GC_DISPLAY_CLOCK_MASK (7 << 4) +#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) +#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) +#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) +#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) +#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) +#define 
I965_GC_RENDER_CLOCK_MASK (0xf << 0) +#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) +#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) +#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) +#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) +#define I945_GC_RENDER_CLOCK_MASK (7 << 0) +#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) +#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) +#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) +#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) +#define I915_GC_RENDER_CLOCK_MASK (7 << 0) +#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) +#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) +#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) + +#define ASLE 0xe4 +#define ASLS 0xfc + +#define SWSCI 0xe8 +#define SWSCI_SCISEL (1 << 15) +#define SWSCI_GSSCIE (1 << 0) + +/* legacy/combination backlight modes, also called LBB */ +#define LBPC 0xf4 + +#endif /* __INTEL_PCI_CONFIG_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c index e8c886e4e78d..391a37492ce5 100644 --- a/drivers/gpu/drm/i915/intel_pcode.c +++ b/drivers/gpu/drm/i915/intel_pcode.c @@ -4,6 +4,7 @@ */ #include "i915_drv.h" +#include "i915_reg.h" #include "intel_pcode.h" static int gen6_check_mailbox_status(u32 mbox) @@ -51,11 +52,10 @@ static int gen7_check_mailbox_status(u32 mbox) } } -static int __sandybridge_pcode_rw(struct drm_i915_private *i915, - u32 mbox, u32 *val, u32 *val1, - int fast_timeout_us, - int slow_timeout_ms, - bool is_read) +static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox, + u32 *val, u32 *val1, + int fast_timeout_us, int slow_timeout_ms, + bool is_read) { struct intel_uncore *uncore = &i915->uncore; @@ -94,15 +94,12 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915, return gen6_check_mailbox_status(mbox); } -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, - u32 *val, u32 *val1) +int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1) { int err; mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, val, val1, - 500, 20, - true); + err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true); mutex_unlock(&i915->sb_lock); if (err) { @@ -114,17 +111,14 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, return err; } -int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, - u32 mbox, u32 val, - int fast_timeout_us, - int slow_timeout_ms) +int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, + int fast_timeout_us, int slow_timeout_ms) { int err; mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, &val, NULL, - fast_timeout_us, slow_timeout_ms, - false); + err = __snb_pcode_rw(i915, mbox, &val, NULL, + fast_timeout_us, slow_timeout_ms, false); mutex_unlock(&i915->sb_lock); if (err) { @@ -140,9 +134,7 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, u32 request, u32 reply_mask, u32 reply, u32 *status) { - *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL, - 500, 0, - true); + *status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true); return *status || ((request & reply_mask) == reply); } diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h index 50806649d4b6..0962a17fac48 100644 --- a/drivers/gpu/drm/i915/intel_pcode.h +++ b/drivers/gpu/drm/i915/intel_pcode.h @@ -10,13 +10,11 @@ struct drm_i915_private; -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, - u32 *val, u32 *val1); -int 
sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, - u32 val, int fast_timeout_us, - int slow_timeout_ms); -#define sandybridge_pcode_write(i915, mbox, val) \ - sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0) +int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1); +int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, + int fast_timeout_us, int slow_timeout_ms); +#define snb_pcode_write(i915, mbox, val) \ + snb_pcode_write_timeout(i915, mbox, val, 500, 0) int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, u32 reply_mask, u32 reply, int timeout_base_ms); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 434b1f8b7fe3..5af16ca4dabd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -43,16 +43,23 @@ #include "display/intel_sprite.h" #include "display/skl_universal_plane.h" +#include "gt/intel_engine_regs.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_llc.h" #include "i915_drv.h" #include "i915_fixed.h" #include "i915_irq.h" +#include "intel_mchbar_regs.h" #include "intel_pcode.h" #include "intel_pm.h" #include "vlv_sideband.h" #include "../../../platform/x86/intel_ips.h" +struct drm_i915_clock_gating_funcs { + void (*init_clock_gating)(struct drm_i915_private *i915); +}; + /* Stores plane specific WM parameters */ struct skl_wm_params { bool x_tiled, y_tiled; @@ -78,8 +85,6 @@ struct intel_wm_config { static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { - enum pipe pipe; - if (HAS_LLC(dev_priv)) { /* * WaCompressedResourceDisplayNewHashMode:skl,kbl @@ -93,16 +98,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) SKL_DE_COMPRESSED_HASH_MODE); } - for_each_pipe(dev_priv, pipe) { - /* - * "Plane N strech max must be programmed to 11b (x1) - * when Async flips are enabled on that plane." - */ - if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active(dev_priv)) - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), - SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1); - } - /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); @@ -160,8 +155,9 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcHighMemBwCorruptionAvoidance:bxt * Display WA #0883: bxt */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_DISABLE_DUMMY0); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_DISABLE_DUMMY0); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -876,7 +872,7 @@ static bool intel_crtc_active(struct intel_crtc *crtc) * crtc->state->active once we have proper CRTC states wired up * for atomic. 
*/ - return crtc->active && crtc->base.primary->state->fb && + return crtc && crtc->active && crtc->base.primary->state->fb && crtc->config->hw.adjusted_mode.crtc_clock; } @@ -915,15 +911,13 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) crtc = single_enabled_crtc(dev_priv); if (crtc) { - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; int cpp = fb->format->cpp[0]; - int clock = pipe_mode->crtc_clock; /* Display SR */ - wm = intel_calculate_wm(clock, &pnv_display_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, pnv_display_wm.fifo_size, cpp, latency->display_sr); reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); @@ -933,7 +927,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ - wm = intel_calculate_wm(clock, &pnv_cursor_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, pnv_display_wm.fifo_size, 4, latency->cursor_sr); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); @@ -942,7 +936,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); /* Display HPLL off SR */ - wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); @@ -951,7 +945,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); /* cursor HPLL off SR */ - wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); @@ -1154,7 +1148,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; unsigned int latency = dev_priv->wm.pri_latency[level] * 10; - unsigned int clock, htotal, cpp, width, wm; + unsigned int pixel_rate, htotal, cpp, width, wm; if (latency == 0) return USHRT_MAX; @@ -1175,21 +1169,20 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, level != G4X_WM_LEVEL_NORMAL) cpp = max(cpp, 4u); - clock = pipe_mode->crtc_clock; + pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; - - width = drm_rect_width(&plane_state->uapi.dst); + width = drm_rect_width(&plane_state->uapi.src) >> 16; if (plane->id == PLANE_CURSOR) { - wm = intel_wm_method2(clock, htotal, width, cpp, latency); + wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); } else if (plane->id == PLANE_PRIMARY && level == G4X_WM_LEVEL_NORMAL) { - wm = intel_wm_method1(clock, cpp, latency); + wm = intel_wm_method1(pixel_rate, cpp, latency); } else { unsigned int small, large; - small = intel_wm_method1(clock, cpp, latency); - large = intel_wm_method2(clock, htotal, width, cpp, latency); + small = intel_wm_method1(pixel_rate, cpp, latency); + large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); wm = min(small, large); } @@ -1674,7 +1667,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - unsigned int clock, htotal, cpp, 
width, wm; + unsigned int pixel_rate, htotal, cpp, width, wm; if (dev_priv->wm.pri_latency[level] == 0) return USHRT_MAX; @@ -1683,9 +1676,9 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, return 0; cpp = plane_state->hw.fb->format->cpp[0]; - clock = pipe_mode->crtc_clock; + pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; - width = crtc_state->pipe_src_w; + width = drm_rect_width(&plane_state->uapi.src) >> 16; if (plane->id == PLANE_CURSOR) { /* @@ -1696,7 +1689,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, */ wm = 63; } else { - wm = vlv_wm_method2(clock, htotal, width, cpp, + wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, dev_priv->wm.pri_latency[level] * 10); } @@ -2277,14 +2270,14 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; - int clock = pipe_mode->crtc_clock; + int pixel_rate = crtc->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; - int hdisplay = crtc->config->pipe_src_w; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; int cpp = fb->format->cpp[0]; int entries; - entries = intel_wm_method2(clock, htotal, - hdisplay, cpp, sr_latency_ns / 100); + entries = intel_wm_method2(pixel_rate, htotal, + width, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); srwm = I965_FIFO_SIZE - entries; if (srwm < 0) @@ -2294,7 +2287,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) "self-refresh entries: %d, wm: %d\n", entries, srwm); - entries = intel_wm_method2(clock, htotal, + entries = intel_wm_method2(pixel_rate, htotal, crtc->base.cursor->state->crtc_w, 4, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, @@ -2358,7 +2351,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) int cwm, srwm = 1; int fifo_size; int planea_wm, planeb_wm; - struct intel_crtc *crtc, *enabled = NULL; + struct intel_crtc *crtc; if (IS_I945GM(dev_priv)) wm_info = &i945_wm_info; @@ -2373,8 +2366,6 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); crtc = intel_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2384,10 +2375,9 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - planea_wm = intel_calculate_wm(pipe_mode->crtc_clock, + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); - enabled = crtc; } else { planea_wm = fifo_size - wm_info->guard_size; if (planea_wm > (long)wm_info->max_wm) @@ -2403,8 +2393,6 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); crtc = intel_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2414,13 +2402,9 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - planeb_wm = intel_calculate_wm(pipe_mode->crtc_clock, + planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); - if (enabled == NULL) - enabled = crtc; - else - enabled = NULL; } else { 
planeb_wm = fifo_size - wm_info->guard_size; if (planeb_wm > (long)wm_info->max_wm) @@ -2430,14 +2414,15 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); - if (IS_I915GM(dev_priv) && enabled) { + crtc = single_enabled_crtc(dev_priv); + if (IS_I915GM(dev_priv) && crtc) { struct drm_i915_gem_object *obj; - obj = intel_fb_obj(enabled->base.primary->state->fb); + obj = intel_fb_obj(crtc->base.primary->state->fb); /* self-refresh seems busted with untiled */ if (!i915_gem_object_is_tiled(obj)) - enabled = NULL; + crtc = NULL; } /* @@ -2449,16 +2434,16 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) intel_set_memory_cxsr(dev_priv, false); /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev_priv) && enabled) { + if (HAS_FW_BLC(dev_priv) && crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 6000; const struct drm_display_mode *pipe_mode = - &enabled->config->hw.pipe_mode; + &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = - enabled->base.primary->state->fb; - int clock = pipe_mode->crtc_clock; + crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; - int hdisplay = enabled->config->pipe_src_w; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; int cpp; int entries; @@ -2467,7 +2452,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - entries = intel_wm_method2(clock, htotal, hdisplay, cpp, + entries = intel_wm_method2(pixel_rate, htotal, width, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); drm_dbg_kms(&dev_priv->drm, @@ -2497,14 +2482,13 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); - if (enabled) + if (crtc) intel_set_memory_cxsr(dev_priv, true); } static void i845_update_wm(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; - const struct drm_display_mode *pipe_mode; u32 fwater_lo; int planea_wm; @@ -2512,8 +2496,7 @@ static void i845_update_wm(struct drm_i915_private *dev_priv) if (crtc == NULL) return; - pipe_mode = &crtc->config->hw.pipe_mode; - planea_wm = intel_calculate_wm(pipe_mode->crtc_clock, + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, &i845_wm_info, i845_get_fifo_size(dev_priv, PLANE_A), 4, pessimal_latency_ns); @@ -2604,7 +2587,7 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, method2 = ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.dst), + drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); return min(method1, method2); @@ -2632,7 +2615,7 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); method2 = ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.dst), + drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); return min(method1, method2); } @@ -2657,7 +2640,7 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, return ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.dst), + drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); } @@ 
-2673,7 +2656,7 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, cpp = plane_state->hw.fb->format->cpp[0]; - return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst), + return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, cpp); } @@ -2888,9 +2871,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ - ret = sandybridge_pcode_read(dev_priv, - GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); + ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, + &val, NULL); if (ret) { drm_err(&dev_priv->drm, @@ -2908,9 +2890,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ - ret = sandybridge_pcode_read(dev_priv, - GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); + ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, + &val, NULL); if (ret) { drm_err(&dev_priv->drm, "SKL Mailbox read error = %d\n", ret); @@ -2966,27 +2947,27 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD); - wm[0] = (sskpd >> 56) & 0xFF; + wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); if (wm[0] == 0) - wm[0] = sskpd & 0xF; - wm[1] = (sskpd >> 4) & 0xFF; - wm[2] = (sskpd >> 12) & 0xFF; - wm[3] = (sskpd >> 20) & 0x1FF; - wm[4] = (sskpd >> 32) & 0x1FF; + wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); + wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); + wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); + wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); + wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); } else if (DISPLAY_VER(dev_priv) >= 6) { u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD); - wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; - wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; - wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; - wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; + wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); + wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); + wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); + wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); } else if (DISPLAY_VER(dev_priv) >= 5) { u32 mltr = intel_uncore_read(uncore, MLTR_ILK); /* ILK primary LP0 latency is 700 ns */ wm[0] = 7; - wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; - wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; + wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); + wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); } else { MISSING_CASE(INTEL_DEVID(dev_priv)); } @@ -3199,12 +3180,8 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state, } pipe_wm->pipe_enabled = crtc_state->hw.active; - if (sprstate) { - pipe_wm->sprites_enabled = sprstate->uapi.visible; - pipe_wm->sprites_scaled = sprstate->uapi.visible && - (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 || - drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16); - } + pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0); + pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0); usable_level = max_level; @@ -3433,29 +3410,28 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, * disabled. Doing otherwise could cause underruns. 
*/ results->wm_lp[wm_lp - 1] = - (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) | - (r->pri_val << WM1_LP_SR_SHIFT) | - r->cur_val; + WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) | + WM_LP_PRIMARY(r->pri_val) | + WM_LP_CURSOR(r->cur_val); if (r->enable) - results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; + results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE; if (DISPLAY_VER(dev_priv) >= 8) - results->wm_lp[wm_lp - 1] |= - r->fbc_val << WM1_LP_FBC_SHIFT_BDW; + results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val); else - results->wm_lp[wm_lp - 1] |= - r->fbc_val << WM1_LP_FBC_SHIFT; + results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val); + + results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val); /* - * Always set WM1S_LP_EN when spr_val != 0, even if the + * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the * level is disabled. Doing otherwise could cause underruns. */ if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { drm_WARN_ON(&dev_priv->drm, wm_lp != 1); - results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; - } else - results->wm_lp_spr[wm_lp - 1] = r->spr_val; + results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; + } } /* LP0 register values */ @@ -3468,9 +3444,9 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, continue; results->wm_pipe[pipe] = - (r->pri_val << WM0_PIPE_PLANE_SHIFT) | - (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | - r->cur_val; + WM0_PIPE_PRIMARY(r->pri_val) | + WM0_PIPE_SPRITE(r->spr_val) | + WM0_PIPE_CURSOR(r->cur_val); } } @@ -3562,24 +3538,24 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, struct ilk_wm_values *previous = &dev_priv->wm.hw; bool changed = false; - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { - previous->wm_lp[2] &= ~WM1_LP_SR_EN; + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { + previous->wm_lp[2] &= ~WM_LP_ENABLE; intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); changed = true; } - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { - previous->wm_lp[1] &= ~WM1_LP_SR_EN; + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) { + previous->wm_lp[1] &= ~WM_LP_ENABLE; intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); changed = true; } - if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { - previous->wm_lp[0] &= ~WM1_LP_SR_EN; + if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) { + previous->wm_lp[0] &= ~WM_LP_ENABLE; intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); changed = true; } /* - * Don't touch WM1S_LP_EN here. + * Don't touch WM_LP_SPRITE_ENABLE here. * Doing so could cause underruns. 
*/ @@ -3700,9 +3676,9 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) u32 val = 0; int ret; - ret = sandybridge_pcode_read(dev_priv, - GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, - &val, NULL); + ret = snb_pcode_read(dev_priv, + GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, + &val, NULL); if (!ret) { dev_priv->sagv_block_time_us = val; return; @@ -3749,8 +3725,8 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) return 0; drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n"); - ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_ENABLE); + ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_ENABLE); /* We don't need to wait for SAGV when enabling */ @@ -3805,48 +3781,55 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) return 0; } -void intel_sagv_pre_plane_update(struct intel_atomic_state *state) +static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_bw_state *new_bw_state; - const struct intel_bw_state *old_bw_state; - u32 new_mask = 0; + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); - /* - * Just return if we can't control SAGV or don't have it. - * This is different from situation when we have SAGV but just can't - * afford it due to DBuf limitation - in case if SAGV is completely - * disabled in a BIOS, we are not even allowed to send a PCode request, - * as it will throw an error. So have to check it here. - */ - if (!intel_has_sagv(dev_priv)) + if (!new_bw_state) return; - new_bw_state = intel_atomic_get_new_bw_state(state); + if (!intel_can_enable_sagv(i915, new_bw_state)) + intel_disable_sagv(i915); +} + +static void skl_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + if (!new_bw_state) return; - if (DISPLAY_VER(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) { - intel_disable_sagv(dev_priv); - return; - } + if (intel_can_enable_sagv(i915, new_bw_state)) + intel_enable_sagv(i915); +} - old_bw_state = intel_atomic_get_old_bw_state(state); - /* - * Nothing to mask - */ - if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask) +static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) return; + old_mask = old_bw_state->qgv_points_mask; new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; - /* - * If new mask is zero - means there is nothing to mask, - * we can only unmask, which should be done in unmask. - */ - if (!new_mask) + if (old_mask == new_mask) return; + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + /* * Restrict required qgv points before updating the configuration. 
* According to BSpec we can't mask and unmask qgv points at the same @@ -3856,12 +3839,41 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state) icl_pcode_restrict_qgv_points(dev_priv, new_mask); } -void intel_sagv_post_plane_update(struct intel_atomic_state *state) +static void icl_sagv_post_plane_update(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_bw_state *new_bw_state; - const struct intel_bw_state *old_bw_state; - u32 new_mask = 0; + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) + return; + + old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; + new_mask = new_bw_state->qgv_points_mask; + + if (old_mask == new_mask) + return; + + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + + /* + * Allow required qgv points after updating the configuration. + * According to BSpec we can't mask and unmask qgv points at the same + * time. Also masking should be done before updating the configuration + * and unmasking afterwards. + */ + icl_pcode_restrict_qgv_points(dev_priv, new_mask); +} + +void intel_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); /* * Just return if we can't control SAGV or don't have it. @@ -3870,34 +3882,33 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state) * disabled in a BIOS, we are not even allowed to send a PCode request, * as it will throw an error. So have to check it here. */ - if (!intel_has_sagv(dev_priv)) + if (!intel_has_sagv(i915)) return; - new_bw_state = intel_atomic_get_new_bw_state(state); - if (!new_bw_state) - return; + if (DISPLAY_VER(i915) >= 11) + icl_sagv_pre_plane_update(state); + else + skl_sagv_pre_plane_update(state); +} - if (DISPLAY_VER(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) { - intel_enable_sagv(dev_priv); - return; - } +void intel_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); - old_bw_state = intel_atomic_get_old_bw_state(state); /* - * Nothing to unmask + * Just return if we can't control SAGV or don't have it. + * This is different from situation when we have SAGV but just can't + * afford it due to DBuf limitation - in case if SAGV is completely + * disabled in a BIOS, we are not even allowed to send a PCode request, + * as it will throw an error. So have to check it here. */ - if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask) + if (!intel_has_sagv(i915)) return; - new_mask = new_bw_state->qgv_points_mask; - - /* - * Allow required qgv points after updating the configuration. - * According to BSpec we can't mask and unmask qgv points at the same - * time. Also masking should be done before updating the configuration - * and unmasking afterwards. 
- */ - icl_pcode_restrict_qgv_points(dev_priv, new_mask); + if (DISPLAY_VER(i915) >= 11) + icl_sagv_post_plane_update(state); + else + skl_sagv_post_plane_update(state); } static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) @@ -4029,6 +4040,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4044,20 +4066,18 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } +static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, + u16 start, u16 end) +{ + entry->start = start; + entry->end = end; + + return end; +} + static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv) { return INTEL_INFO(dev_priv)->dbuf.size / @@ -4196,8 +4216,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) int ret; if (new_dbuf_state->weight[pipe] == 0) { - new_dbuf_state->ddb[pipe].start = 0; - new_dbuf_state->ddb[pipe].end = 0; + skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0); goto out; } @@ -4213,8 +4232,10 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) start = ddb_range_size * weight_start / weight_total; end = ddb_range_size * weight_end / weight_total; - new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start; - new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end; + skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], + ddb_slices.start - mbus_offset + start, + ddb_slices.start - mbus_offset + end); + out: if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] && skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe], @@ -4252,7 +4273,9 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane); + static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, int level, unsigned int latency, const struct skl_wm_params *wp, @@ -4263,6 +4286,7 @@ static unsigned int skl_cursor_allocation(const struct intel_crtc_state *crtc_state, int num_active) { + struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level wm = {}; @@ -4279,7 +4303,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, for (level = 0; level <= max_level; level++) { unsigned int latency = dev_priv->wm.skl_latency[level]; - skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm); + 
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); if (wm.min_ddb_alloc == U16_MAX) break; @@ -4289,14 +4313,13 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, return max(num_active == 1 ? 32 : 8, min_ddb_alloc); } -static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv, - struct skl_ddb_entry *entry, u32 reg) +static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) { - entry->start = reg & DDB_ENTRY_MASK; - entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK; - + skl_ddb_entry_init(entry, + REG_FIELD_GET(PLANE_BUF_START_MASK, reg), + REG_FIELD_GET(PLANE_BUF_END_MASK, reg)); if (entry->end) - entry->end += 1; + entry->end++; } static void @@ -4312,7 +4335,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, /* Cursor doesn't support NV12/planar, so no extra calculation needed */ if (plane_id == PLANE_CURSOR) { val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe)); - skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); + skl_ddb_entry_init_from_hw(ddb_y, val); return; } @@ -4320,13 +4343,13 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, /* No DDB allocated for disabled planes */ if (val & PLANE_CTL_ENABLE) - fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK, + fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK_SKL, val & PLANE_CTL_ORDER_RGBX, val & PLANE_CTL_ALPHA_MASK); if (DISPLAY_VER(dev_priv) >= 11) { val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); - skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); + skl_ddb_entry_init_from_hw(ddb_y, val); } else { val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)); @@ -4335,8 +4358,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc))) swap(val, val2); - skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); - skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2); + skl_ddb_entry_init_from_hw(ddb_y, val); + skl_ddb_entry_init_from_hw(ddb_uv, val2); } } @@ -4364,55 +4387,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, intel_display_power_put(dev_priv, power_domain, wakeref); } -/* - * Determines the downscale amount of a plane for the purposes of watermark calculations. - * The bspec defines downscale amount as: - * - * """ - * Horizontal down scale amount = maximum[1, Horizontal source size / - * Horizontal destination size] - * Vertical down scale amount = maximum[1, Vertical source size / - * Vertical destination size] - * Total down scale amount = Horizontal down scale amount * - * Vertical down scale amount - * """ - * - * Return value is provided in 16.16 fixed point form to retain fractional part. - * Caller should take care of dividing & rounding off the value. - */ -static uint_fixed_16_16_t -skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u32 src_w, src_h, dst_w, dst_h; - uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; - uint_fixed_16_16_t downscale_h, downscale_w; - - if (drm_WARN_ON(&dev_priv->drm, - !intel_wm_plane_visible(crtc_state, plane_state))) - return u32_to_fixed16(0); - - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. 
- * - * n.b., src is 16.16 fixed point, dst is whole integer. - */ - src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - dst_w = drm_rect_width(&plane_state->uapi.dst); - dst_h = drm_rect_height(&plane_state->uapi.dst); - - fp_w_ratio = div_fixed16(src_w, dst_w); - fp_h_ratio = div_fixed16(src_h, dst_h); - downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1)); - downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1)); - - return mul_fixed16(downscale_w, downscale_h); -} - struct dbuf_slice_conf_entry { u8 active_pipes; u8 dbuf_mask[I915_MAX_PIPES]; @@ -4717,6 +4691,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { }; static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { + /* + * Keep the join_mbus cases first so check_mbus_joined() + * will prefer them over the !join_mbus cases. + */ { .active_pipes = BIT(PIPE_A), .dbuf_mask = { @@ -4732,6 +4710,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { .join_mbus = true, }, { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + .join_mbus = false, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = false, + }, + { .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), @@ -4835,7 +4827,7 @@ static bool check_mbus_joined(u8 active_pipes, { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { if (dbuf_slices[i].active_pipes == active_pipes) return dbuf_slices[i].join_mbus; } @@ -4847,13 +4839,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes) return check_mbus_joined(active_pipes, adlp_allowed_dbufs); } -static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, +static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, const struct dbuf_slice_conf_entry *dbuf_slices) { int i; - for (i = 0; i < dbuf_slices[i].active_pipes; i++) { - if (dbuf_slices[i].active_pipes == active_pipes) + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { + if (dbuf_slices[i].active_pipes == active_pipes && + dbuf_slices[i].join_mbus == join_mbus) return dbuf_slices[i].dbuf_mask[pipe]; } return 0; @@ -4864,7 +4857,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, * returns correspondent DBuf slice mask as stated in BSpec for particular * platform. */ -static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { /* * FIXME: For ICL this is still a bit unclear as prev BSpec revision @@ -4878,37 +4871,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) * still here - we will need it once those additional constraints * pop up. 
*/ - return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + icl_allowed_dbufs); } -static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + tgl_allowed_dbufs); } -static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes) +static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + adlp_allowed_dbufs); } -static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes) +static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) { - return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs); + return compute_dbuf_slices(pipe, active_pipes, join_mbus, + dg2_allowed_dbufs); } -static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes) +static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; if (IS_DG2(dev_priv)) - return dg2_compute_dbuf_slices(pipe, active_pipes); + return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (IS_ALDERLAKE_P(dev_priv)) - return adlp_compute_dbuf_slices(pipe, active_pipes); + return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (DISPLAY_VER(dev_priv) == 12) - return tgl_compute_dbuf_slices(pipe, active_pipes); + return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); else if (DISPLAY_VER(dev_priv) == 11) - return icl_compute_dbuf_slices(pipe, active_pipes); + return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); /* * For anything else just return one slice yet. * Should be extended for other platforms. @@ -4916,6 +4913,28 @@ static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes) return active_pipes & BIT(pipe) ? 
BIT(DBUF_S1) : 0; } +static bool +use_min_ddb(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + return DISPLAY_VER(i915) >= 13 && + crtc_state->uapi.async_flip && + plane->async_flip; +} + +static bool +use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + return DISPLAY_VER(i915) >= 13 && + crtc_state->uapi.async_flip && + plane->async_flip; +} + static u64 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, @@ -4923,10 +4942,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); const struct drm_framebuffer *fb = plane_state->hw.fb; - u32 data_rate; - u32 width = 0, height = 0; - uint_fixed_16_16_t down_scale_amount; - u64 rate; + int width, height; if (!plane_state->uapi.visible) return 0; @@ -4934,6 +4950,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, if (plane->id == PLANE_CURSOR) return 0; + /* + * We calculate extra ddb based on ratio plane rate/total data rate + * in case, in some cases we should not allocate extra ddb for the plane, + * so do not count its data rate, if this is the case. + */ + if (use_min_ddb(crtc_state, plane)) + return 0; + if (color_plane == 1 && !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) return 0; @@ -4952,14 +4976,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, height /= 2; } - data_rate = width * height; - - down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state); - - rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); - - rate *= fb->format->cpp[color_plane]; - return rate; + return width * height * fb->format->cpp[color_plane]; } static u64 @@ -5116,9 +5133,31 @@ static bool icl_need_wm1_wa(struct drm_i915_private *i915, (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); } +struct skl_plane_ddb_iter { + u64 data_rate; + u16 total[I915_MAX_PLANES]; + u16 uv_total[I915_MAX_PLANES]; + u16 start, size; +}; + +static u16 +skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter, + const struct skl_wm_level *wm, + u64 data_rate) +{ + u16 extra; + + extra = min_t(u16, iter->size, + DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate)); + iter->size -= extra; + iter->data_rate -= data_rate; + + return wm->min_ddb_alloc + extra; +} + static int -skl_allocate_plane_ddb(struct intel_atomic_state *state, - struct intel_crtc *crtc) +skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = @@ -5127,10 +5166,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, intel_atomic_get_new_dbuf_state(state); const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe]; int num_active = hweight8(dbuf_state->active_pipes); - u16 alloc_size, start = 0; - u16 total[I915_MAX_PLANES] = {}; - u16 uv_total[I915_MAX_PLANES] = {}; - u64 total_data_rate; + struct skl_plane_ddb_iter iter = {}; enum plane_id plane_id; u32 blocks; int level; @@ -5143,24 +5179,21 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, return 0; if (DISPLAY_VER(dev_priv) >= 11) - total_data_rate = - icl_get_total_relative_data_rate(state, crtc); + iter.data_rate = 
icl_get_total_relative_data_rate(state, crtc); else - total_data_rate = - skl_get_total_relative_data_rate(state, crtc); + iter.data_rate = skl_get_total_relative_data_rate(state, crtc); - alloc_size = skl_ddb_entry_size(alloc); - if (alloc_size == 0) + iter.size = skl_ddb_entry_size(alloc); + if (iter.size == 0) return 0; /* Allocate fixed number of blocks for cursor. */ - total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active); - alloc_size -= total[PLANE_CURSOR]; - crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start = - alloc->end - total[PLANE_CURSOR]; - crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; + iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active); + iter.size -= iter.total[PLANE_CURSOR]; + skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR], + alloc->end - iter.total[PLANE_CURSOR], alloc->end); - if (total_data_rate == 0) + if (iter.data_rate == 0) return 0; /* @@ -5174,7 +5207,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) { - if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) { + if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) { drm_WARN_ON(&dev_priv->drm, wm->wm[level].min_ddb_alloc != U16_MAX); blocks = U32_MAX; @@ -5187,8 +5220,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, blocks += wm->uv_wm[level].min_ddb_alloc; } - if (blocks <= alloc_size) { - alloc_size -= blocks; + if (blocks <= iter.size) { + iter.size -= blocks; break; } } @@ -5197,7 +5230,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, drm_dbg_kms(&dev_priv->drm, "Requested display configuration exceeds system DDB limitations"); drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n", - blocks, alloc_size); + blocks, iter.size); return -EINVAL; } @@ -5209,8 +5242,6 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - u64 rate; - u16 extra; if (plane_id == PLANE_CURSOR) continue; @@ -5219,32 +5250,24 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, * We've accounted for all active planes; remaining planes are * all disabled. 
*/ - if (total_data_rate == 0) + if (iter.data_rate == 0) break; - rate = crtc_state->plane_data_rate[plane_id]; - extra = min_t(u16, alloc_size, - DIV64_U64_ROUND_UP(alloc_size * rate, - total_data_rate)); - total[plane_id] = wm->wm[level].min_ddb_alloc + extra; - alloc_size -= extra; - total_data_rate -= rate; + iter.total[plane_id] = + skl_allocate_plane_ddb(&iter, &wm->wm[level], + crtc_state->plane_data_rate[plane_id]); - if (total_data_rate == 0) + if (iter.data_rate == 0) break; - rate = crtc_state->uv_plane_data_rate[plane_id]; - extra = min_t(u16, alloc_size, - DIV64_U64_ROUND_UP(alloc_size * rate, - total_data_rate)); - uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra; - alloc_size -= extra; - total_data_rate -= rate; + iter.uv_total[plane_id] = + skl_allocate_plane_ddb(&iter, &wm->uv_wm[level], + crtc_state->uv_plane_data_rate[plane_id]); } - drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0); + drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0); /* Set the actual DDB start/end points for each plane */ - start = alloc->start; + iter.start = alloc->start; for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_ddb_entry *plane_alloc = &crtc_state->wm.skl.plane_ddb_y[plane_id]; @@ -5256,20 +5279,16 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, /* Gen11+ uses a separate plane for UV watermarks */ drm_WARN_ON(&dev_priv->drm, - DISPLAY_VER(dev_priv) >= 11 && uv_total[plane_id]); + DISPLAY_VER(dev_priv) >= 11 && iter.uv_total[plane_id]); /* Leave disabled planes at (0,0) */ - if (total[plane_id]) { - plane_alloc->start = start; - start += total[plane_id]; - plane_alloc->end = start; - } + if (iter.total[plane_id]) + iter.start = skl_ddb_entry_init(plane_alloc, iter.start, + iter.start + iter.total[plane_id]); - if (uv_total[plane_id]) { - uv_plane_alloc->start = start; - start += uv_total[plane_id]; - uv_plane_alloc->end = start; - } + if (iter.uv_total[plane_id]) + iter.start = skl_ddb_entry_init(uv_plane_alloc, iter.start, + iter.start + iter.uv_total[plane_id]); } /* @@ -5284,7 +5303,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, &crtc_state->wm.skl.optimal.planes[plane_id]; skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], - total[plane_id], uv_total[plane_id]); + iter.total[plane_id], + iter.uv_total[plane_id]); if (icl_need_wm1_wa(dev_priv, plane_id) && level == 1 && wm->wm[0].enable) { @@ -5303,9 +5323,9 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; - skl_check_wm_level(&wm->trans_wm, total[plane_id]); - skl_check_wm_level(&wm->sagv.wm0, total[plane_id]); - skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]); + skl_check_wm_level(&wm->trans_wm, iter.total[plane_id]); + skl_check_wm_level(&wm->sagv.wm0, iter.total[plane_id]); + skl_check_wm_level(&wm->sagv.trans_wm, iter.total[plane_id]); } return 0; @@ -5508,6 +5528,7 @@ static int skl_wm_max_lines(struct drm_i915_private *dev_priv) } static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, int level, unsigned int latency, const struct skl_wm_params *wp, @@ -5519,7 +5540,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, uint_fixed_16_16_t selected_result; u32 blocks, lines, min_ddb_alloc = 0; - if (latency == 0) { + if (latency == 0 || + (use_minimal_wm0_only(crtc_state, plane) && level > 0)) { /* reject it */ result->min_ddb_alloc = U16_MAX; return; @@ -5635,6 +5657,7 @@ static 
void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, static void skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { @@ -5646,7 +5669,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, struct skl_wm_level *result = &levels[level]; unsigned int latency = dev_priv->wm.skl_latency[level]; - skl_compute_plane_wm(crtc_state, level, latency, + skl_compute_plane_wm(crtc_state, plane, level, latency, wm_params, result_prev, result); result_prev = result; @@ -5654,6 +5677,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, } static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, const struct skl_wm_params *wm_params, struct skl_plane_wm *plane_wm) { @@ -5662,7 +5686,7 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, struct skl_wm_level *levels = plane_wm->wm; unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us; - skl_compute_plane_wm(crtc_state, 0, latency, + skl_compute_plane_wm(crtc_state, plane, 0, latency, wm_params, &levels[0], sagv_wm); } @@ -5732,11 +5756,11 @@ static void skl_compute_transition_wm(struct drm_i915_private *dev_priv, static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, - enum plane_id plane_id, int color_plane) + struct intel_plane *plane, int color_plane) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; struct skl_wm_params wm_params; int ret; @@ -5745,13 +5769,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, if (ret) return ret; - skl_compute_wm_levels(crtc_state, &wm_params, wm->wm); + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); skl_compute_transition_wm(dev_priv, &wm->trans_wm, &wm->wm[0], &wm_params); if (DISPLAY_VER(dev_priv) >= 12) { - tgl_compute_sagv_wm(crtc_state, &wm_params, wm); + tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm, &wm->sagv.wm0, &wm_params); @@ -5762,9 +5786,9 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, - enum plane_id plane_id) + struct intel_plane *plane) { - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; struct skl_wm_params wm_params; int ret; @@ -5776,7 +5800,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, if (ret) return ret; - skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm); + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm); return 0; } @@ -5796,13 +5820,13 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_id, 0); + plane, 0); if (ret) return ret; if (fb->format->is_yuv && fb->format->num_planes > 1) { ret = skl_build_plane_wm_uv(crtc_state, plane_state, - plane_id); + plane); if (ret) return ret; } @@ -5827,7 +5851,6 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, if 
(plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; - enum plane_id y_plane_id = plane_state->planar_linked_plane->id; drm_WARN_ON(&dev_priv->drm, !intel_wm_plane_visible(crtc_state, plane_state)); @@ -5835,17 +5858,17 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, fb->format->num_planes == 1); ret = skl_build_plane_wm_single(crtc_state, plane_state, - y_plane_id, 0); + plane_state->planar_linked_plane, 0); if (ret) return ret; ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_id, 1); + plane, 1); if (ret) return ret; } else if (intel_wm_plane_visible(crtc_state, plane_state)) { ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_id, 0); + plane, 0); if (ret) return ret; } @@ -5891,7 +5914,8 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, { if (entry->end) intel_de_write_fw(dev_priv, reg, - (entry->end - 1) << 16 | entry->start); + PLANE_BUF_END(entry->end - 1) | + PLANE_BUF_START(entry->start)); else intel_de_write_fw(dev_priv, reg, 0); } @@ -6127,11 +6151,16 @@ skl_compute_ddb(struct intel_atomic_state *state) return ret; } + if (IS_ALDERLAKE_P(dev_priv)) + new_dbuf_state->joined_mbus = + adlp_check_mbus_joined(new_dbuf_state->active_pipes); + for_each_intel_crtc(&dev_priv->drm, crtc) { enum pipe pipe = crtc->pipe; new_dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes); + skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, + new_dbuf_state->joined_mbus); if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) continue; @@ -6143,9 +6172,6 @@ skl_compute_ddb(struct intel_atomic_state *state) new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); - if (IS_ALDERLAKE_P(dev_priv)) - new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes); - if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); @@ -6189,7 +6215,7 @@ skl_compute_ddb(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - ret = skl_allocate_plane_ddb(state, crtc); + ret = skl_crtc_allocate_plane_ddb(state, crtc); if (ret) return ret; @@ -6626,6 +6652,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) enum pipe pipe = crtc->pipe; unsigned int mbus_offset; enum plane_id plane_id; + u8 slices; skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; @@ -6645,19 +6672,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv); } - dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes); - dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); /* * Used for checking overlaps, so we need absolute * offsets instead of MBUS relative offsets. 
*/ - mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]); + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + mbus_offset = mbus_ddb_offset(dev_priv, slices); crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; + /* The slices actually used by the planes on the pipe */ + dbuf_state->slices[pipe] = + skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb); + drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", crtc->base.base.id, crtc->base.name, @@ -6669,6 +6699,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices; } +static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +{ + const struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->dbuf.obj.state); + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + entries[crtc->pipe] = crtc_state->wm.skl.ddb; + } + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + u8 slices; + + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + if (dbuf_state->slices[crtc->pipe] & ~slices) + return true; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, + I915_MAX_PIPES, crtc->pipe)) + return true; + } + + return false; +} + +void skl_wm_sanitize(struct drm_i915_private *i915) +{ + struct intel_crtc *crtc; + + /* + * On TGL/RKL (at least) the BIOS likes to assign the planes + * to the wrong DBUF slices. This will cause an infinite loop + * in skl_commit_modeset_enables() as it can't find a way to + * transition between the old bogus DBUF layout to the new + * proper DBUF layout without DBUF allocation overlaps between + * the planes (which cannot be allowed or else the hardware + * may hang). If we detect a bogus DBUF layout just turn off + * all the planes so that skl_commit_modeset_enables() can + * simply ignore them. + */ + if (!skl_dbuf_is_misconfigured(i915)) + return; + + drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); + + drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + + memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); + } +} + static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -6694,9 +6792,9 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) * multiple pipes are active. 
*/ active->wm[0].enable = true; - active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; - active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; - active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; + active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp); + active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp); + active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp); } else { int level, max_level = ilk_wm_max_level(dev_priv); @@ -7120,12 +7218,12 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) */ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) { - intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN); - intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN); - intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN); + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM_LP_ENABLE); + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM_LP_ENABLE); + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM_LP_ENABLE); /* - * Don't touch WM1S_LP_EN here. + * Don't touch WM_LP_SPRITE_ENABLE here. * Doing so could cause underruns. */ } @@ -7220,7 +7318,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) { intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe), intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); + DISP_TRICKLE_FEED_DISABLE); intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe))); intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe)); @@ -7328,7 +7426,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) u32 tmp; tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD); - if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) + if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12) drm_dbg_kms(&dev_priv->drm, "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", tmp); @@ -7451,8 +7549,8 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, static void icl_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_1409120013:icl,ehl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - DPFC_CHICKEN_COMP_DUMMY_PIXEL); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /*Wa_14010594013:icl, ehl */ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, @@ -7464,7 +7562,7 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_1409825376:tgl (pre-prod)*/ @@ -7549,8 +7647,9 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcNukeOnHostModify:cfl * Display WA #0873: cfl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_NUKE_ON_ANY_MODIFICATION); + 
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_NUKE_ON_ANY_MODIFICATION); } static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7582,8 +7681,9 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcNukeOnHostModify:kbl * Display WA #0873: kbl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_NUKE_ON_ANY_MODIFICATION); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_NUKE_ON_ANY_MODIFICATION); } static void skl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7609,15 +7709,17 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcNukeOnHostModify:skl * Display WA #0873: skl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_NUKE_ON_ANY_MODIFICATION); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_NUKE_ON_ANY_MODIFICATION); /* * WaFbcHighMemBwCorruptionAvoidance:skl * Display WA #0883: skl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_DISABLE_DUMMY0); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_DISABLE_DUMMY0); } static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7649,7 +7751,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) & ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); - intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, + intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE), _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); /* WaDisableSDEUnitClockGating:bdw */ @@ -7790,7 +7892,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv) ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); /* WaDisableSemaphoreAndSyncFlipWait:chv */ - intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, + intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE), _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); /* WaDisableCSUnitClockGating:chv */ @@ -7863,10 +7965,12 @@ static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, D_STATE, dstate); if (IS_PINEVIEW(dev_priv)) - intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); + intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE), + _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); /* IIR "flip pending" means done if this bit is set */ - intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); + intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE), + _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); /* interrupts should cause a wake up from C3 */ intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 990cdcaf85ce..51705151b842 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -12,7 +12,6 @@ #include 
"display/intel_global_state.h" #include "i915_drv.h" -#include "i915_reg.h" struct drm_device; struct drm_i915_private; @@ -47,6 +46,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); +void skl_wm_sanitize(struct drm_i915_private *dev_priv); bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, const struct intel_bw_state *bw_state); void intel_sagv_pre_plane_update(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 53f1ccb78849..6ed5786bcd29 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void) static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { spin_lock_init(&rpm->debug.lock); - - if (rpm->available) - stack_depot_init(); + stack_depot_init(); } static noinline depot_stack_handle_t @@ -79,7 +77,7 @@ track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) depot_stack_handle_t stack, *stacks; unsigned long flags; - if (!rpm->available) + if (rpm->no_wakeref_tracking) return -1; stack = __save_depot_stack(); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 47a85fab4130..d9160e3ff4af 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -51,6 +51,7 @@ struct intel_runtime_pm { bool available; bool suspended; bool irqs_enabled; + bool no_wakeref_tracking; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) /* diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c index 5ba8490a31e6..5c6e517c73f4 100644 --- a/drivers/gpu/drm/i915/intel_sbi.c +++ b/drivers/gpu/drm/i915/intel_sbi.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "intel_sbi.h" +#include "i915_reg.h" /* SBI access */ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 778da3179b3c..dd8fdd5863de 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -23,7 +23,8 @@ #include <linux/pm_runtime.h> -#include "gt/intel_lrc_reg.h" /* for shadow reg list */ +#include "gt/intel_engine_regs.h" +#include "gt/intel_gt_regs.h" #include "i915_drv.h" #include "i915_iosf_mbi.h" @@ -1495,7 +1496,7 @@ ilk_dummy_write(struct intel_uncore *uncore) /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up * the chip from rc6 before touching it for real. MI_MODE is masked, * hence harmless to write 0 into. 
*/ - __raw_uncore_write32(uncore, MI_MODE, 0); + __raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0); } static void @@ -2273,76 +2274,6 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore) } } -static const struct reg_whitelist { - i915_reg_t offset_ldw; - i915_reg_t offset_udw; - u8 min_graphics_ver; - u8 max_graphics_ver; - u8 size; -} reg_read_whitelist[] = { { - .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), - .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), - .min_graphics_ver = 4, - .max_graphics_ver = 12, - .size = 8 -} }; - -int i915_reg_read_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *i915 = to_i915(dev); - struct intel_uncore *uncore = &i915->uncore; - struct drm_i915_reg_read *reg = data; - struct reg_whitelist const *entry; - intel_wakeref_t wakeref; - unsigned int flags; - int remain; - int ret = 0; - - entry = reg_read_whitelist; - remain = ARRAY_SIZE(reg_read_whitelist); - while (remain) { - u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); - - GEM_BUG_ON(!is_power_of_2(entry->size)); - GEM_BUG_ON(entry->size > 8); - GEM_BUG_ON(entry_offset & (entry->size - 1)); - - if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) && - entry_offset == (reg->offset & -entry->size)) - break; - entry++; - remain--; - } - - if (!remain) - return -EINVAL; - - flags = reg->offset & (entry->size - 1); - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - if (entry->size == 8 && flags == I915_REG_READ_8B_WA) - reg->val = intel_uncore_read64_2x32(uncore, - entry->offset_ldw, - entry->offset_udw); - else if (entry->size == 8 && flags == 0) - reg->val = intel_uncore_read64(uncore, - entry->offset_ldw); - else if (entry->size == 4 && flags == 0) - reg->val = intel_uncore_read(uncore, entry->offset_ldw); - else if (entry->size == 2 && flags == 0) - reg->val = intel_uncore_read16(uncore, - entry->offset_ldw); - else if (entry->size == 1 && flags == 0) - reg->val = intel_uncore_read8(uncore, - entry->offset_ldw); - else - ret = -EINVAL; - } - - return ret; -} - /** * __intel_wait_for_register_fw - wait until register matches expected state * @uncore: the struct intel_uncore diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 2a15b2b2e2fc..6ff56d673e2b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -30,7 +30,7 @@ #include <linux/hrtimer.h> #include <linux/io-64-nonatomic-lo-hi.h> -#include "i915_reg.h" +#include "i915_reg_defs.h" struct drm_i915_private; struct intel_runtime_pm; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c index 8d5553772ded..04745f914407 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c @@ -7,6 +7,7 @@ #include "intel_pxp_irq.h" #include "intel_pxp_session.h" #include "gt/intel_gt_irq.h" +#include "gt/intel_gt_regs.h" #include "gt/intel_gt_types.h" #include "i915_irq.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h index 16990a3f2f85..586be769104f 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h @@ -6,7 +6,7 @@ #ifndef __INTEL_PXP_PM_H__ #define __INTEL_PXP_PM_H__ -#include "intel_pxp_types.h" +struct intel_pxp; #ifdef CONFIG_DRM_I915_PXP void intel_pxp_suspend_prepare(struct intel_pxp *pxp); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c 
b/drivers/gpu/drm/i915/selftests/i915_gem.c index 1628b81d0a35..e5dd82e7e480 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -6,9 +6,10 @@ #include <linux/random.h> +#include "gem/i915_gem_internal.h" +#include "gem/i915_gem_pm.h" #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" -#include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 1d57b355e9a7..8c6517d29b8e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -22,6 +22,7 @@ * */ +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index fba1c8be1649..e7e6c4b2c81d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -26,6 +26,7 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_context.h" +#include "gem/i915_gem_internal.h" #include "gem/selftests/mock_context.h" #include "gt/intel_context.h" #include "gt/intel_gpu_commands.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 2a99dd7c2fe8..c56a0c2cd2f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -26,6 +26,7 @@ #include <linux/pm_qos.h> #include <linux/sort.h> +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 2d6d7bd13c3c..c4e932368b37 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -24,6 +24,7 @@ #include <linux/random.h> #include "gt/intel_gt_pm.h" +#include "i915_driver.h" #include "i915_drv.h" #include "i915_selftest.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 0280605a2673..6921ba128015 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -25,6 +25,7 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_context.h" +#include "gem/i915_gem_internal.h" #include "gem/selftests/mock_context.h" #include "i915_scatterlist.h" diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 24d87d0fc747..0c22594ae274 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -6,6 +6,7 @@ #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" +#include "gem/i915_gem_internal.h" #include "gem/selftests/igt_gem_utils.h" #include "igt_spinner.h" diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 28a0f054009a..573d9b2e1a4a 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -161,6 +161,8 @@ struct drm_i915_private *mock_gem_device(void) i915_params_copy(&i915->params, &i915_modparams); intel_runtime_pm_init_early(&i915->runtime_pm); + /* wakeref tracking has significant overhead */ + i915->runtime_pm.no_wakeref_tracking = 
true; /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c index ed2ac5752ac4..c26001300ebd 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.c +++ b/drivers/gpu/drm/i915/vlv_sideband.c @@ -5,6 +5,7 @@ #include "i915_drv.h" #include "i915_iosf_mbi.h" +#include "i915_reg.h" #include "vlv_sideband.h" /* diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h index d7732f612e7f..9ce283d96b80 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.h +++ b/drivers/gpu/drm/i915/vlv_sideband.h @@ -9,6 +9,8 @@ #include <linux/bitops.h> #include <linux/types.h> +#include "vlv_sideband_reg.h" + enum pipe; struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/vlv_sideband_reg.h b/drivers/gpu/drm/i915/vlv_sideband_reg.h new file mode 100644 index 000000000000..b7fbff3d0409 --- /dev/null +++ b/drivers/gpu/drm/i915/vlv_sideband_reg.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _VLV_SIDEBAND_REG_H_ +#define _VLV_SIDEBAND_REG_H_ + +/* See configdb bunit SB addr map */ +#define BUNIT_REG_BISOC 0x11 + +/* PUNIT_REG_*SSPM0 */ +#define _SSPM0_SSC(val) ((val) << 0) +#define SSPM0_SSC_MASK _SSPM0_SSC(0x3) +#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0) +#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1) +#define SSPM0_SSC_RESET _SSPM0_SSC(0x2) +#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3) +#define _SSPM0_SSS(val) ((val) << 24) +#define SSPM0_SSS_MASK _SSPM0_SSS(0x3) +#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0) +#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1) +#define SSPM0_SSS_RESET _SSPM0_SSS(0x2) +#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3) + +/* PUNIT_REG_*SSPM1 */ +#define SSPM1_FREQSTAT_SHIFT 24 +#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT) +#define SSPM1_FREQGUAR_SHIFT 8 +#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT) +#define SSPM1_FREQ_SHIFT 0 +#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT) + +#define PUNIT_REG_VEDSSPM0 0x32 +#define PUNIT_REG_VEDSSPM1 0x33 + +#define PUNIT_REG_DSPSSPM 0x36 +#define DSPFREQSTAT_SHIFT_CHV 24 +#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) +#define DSPFREQGUAR_SHIFT_CHV 8 +#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV) +#define DSPFREQSTAT_SHIFT 30 +#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) +#define DSPFREQGUAR_SHIFT 14 +#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) +#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */ +#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */ +#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */ +#define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) +#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) +#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) +#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe)) +#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe)) +#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe)) +#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16)) +#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe)) +#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe)) +#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe)) +#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe)) +#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe)) + +#define PUNIT_REG_ISPSSPM0 0x39 +#define PUNIT_REG_ISPSSPM1 0x3a + +#define PUNIT_REG_PWRGT_CTRL 0x60 +#define PUNIT_REG_PWRGT_STATUS 0x61 +#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_ON(pw_idx) (0 << ((pw_idx) * 2)) +#define 
PUNIT_PWRGT_CLK_GATE(pw_idx) (1 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_RESET(pw_idx) (2 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_GATE(pw_idx) (3 << ((pw_idx) * 2)) + +#define PUNIT_PWGT_IDX_RENDER 0 +#define PUNIT_PWGT_IDX_MEDIA 1 +#define PUNIT_PWGT_IDX_DISP2D 3 +#define PUNIT_PWGT_IDX_DPIO_CMN_BC 5 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01 6 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23 7 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01 8 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23 9 +#define PUNIT_PWGT_IDX_DPIO_RX0 10 +#define PUNIT_PWGT_IDX_DPIO_RX1 11 +#define PUNIT_PWGT_IDX_DPIO_CMN_D 12 + +#define PUNIT_REG_GPU_LFM 0xd3 +#define PUNIT_REG_GPU_FREQ_REQ 0xd4 +#define PUNIT_REG_GPU_FREQ_STS 0xd8 +#define GPLLENABLE (1 << 4) +#define GENFREQSTATUS (1 << 0) +#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc +#define PUNIT_REG_CZ_TIMESTAMP 0xce + +#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ +#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ + +#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136 +#define FB_GFX_FREQ_FUSE_MASK 0xff +#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24 +#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16 +#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8 + +#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 +#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 + +#define PUNIT_REG_DDR_SETUP2 0x139 +#define FORCE_DDR_FREQ_REQ_ACK (1 << 8) +#define FORCE_DDR_LOW_FREQ (1 << 1) +#define FORCE_DDR_HIGH_FREQ (1 << 0) + +#define PUNIT_GPU_STATUS_REG 0xdb +#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 +#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff +#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8 +#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff + +#define PUNIT_GPU_DUTYCYCLE_REG 0xdf +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8 +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff + +#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c +#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 +#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 +#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11 +#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800 +#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34 +#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007 +#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30 +#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 +#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 + +#define VLV_TURBO_SOC_OVERRIDE 0x04 +#define VLV_OVERRIDE_EN 1 +#define VLV_SOC_TDP_EN (1 << 1) +#define VLV_BIAS_CPU_125_SOC_875 (6 << 2) +#define CHV_BIAS_CPU_50_SOC_50 (3 << 2) + +/* vlv2 north clock has */ +#define CCK_FUSE_REG 0x8 +#define CCK_FUSE_HPLL_FREQ_MASK 0x3 +#define CCK_REG_DSI_PLL_FUSE 0x44 +#define CCK_REG_DSI_PLL_CONTROL 0x48 +#define DSI_PLL_VCO_EN (1 << 31) +#define DSI_PLL_LDO_GATE (1 << 30) +#define DSI_PLL_P1_POST_DIV_SHIFT 17 +#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17) +#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13) +#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12) +#define DSI_PLL_MUX_MASK (3 << 9) +#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10) +#define DSI_PLL_MUX_DSI0_CCK (1 << 10) +#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9) +#define DSI_PLL_MUX_DSI1_CCK (1 << 9) +#define DSI_PLL_CLK_GATE_MASK (0xf << 5) +#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8) +#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7) +#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6) +#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5) +#define DSI_PLL_LOCK (1 << 0) +#define CCK_REG_DSI_PLL_DIVIDER 0x4c +#define DSI_PLL_LFSR (1 << 31) +#define DSI_PLL_FRACTION_EN (1 << 30) +#define DSI_PLL_FRAC_COUNTER_SHIFT 27 +#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27) +#define DSI_PLL_USYNC_CNT_SHIFT 18 +#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18) +#define 
DSI_PLL_N1_DIV_SHIFT 16 +#define DSI_PLL_N1_DIV_MASK (3 << 16) +#define DSI_PLL_M1_DIV_SHIFT 0 +#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) +#define CCK_CZ_CLOCK_CONTROL 0x62 +#define CCK_GPLL_CLOCK_CONTROL 0x67 +#define CCK_DISPLAY_CLOCK_CONTROL 0x6b +#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c +#define CCK_TRUNK_FORCE_ON (1 << 17) +#define CCK_TRUNK_FORCE_OFF (1 << 16) +#define CCK_FREQUENCY_STATUS (0x1f << 8) +#define CCK_FREQUENCY_STATUS_SHIFT 8 +#define CCK_FREQUENCY_VALUES (0x1f << 0) + +#endif /* _VLV_SIDEBAND_REG_H_ */ diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c index 23adb64d640a..1d9da32195c2 100644 --- a/drivers/gpu/drm/i915/vlv_suspend.c +++ b/drivers/gpu/drm/i915/vlv_suspend.c @@ -14,6 +14,8 @@ #include "intel_pm.h" #include "vlv_suspend.h" +#include "gt/intel_gt_regs.h" + struct vlv_s0ix_state { /* GAM */ u32 wr_watermark; diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index f9a9198ef198..d0c2b1422b3b 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -2,6 +2,7 @@ /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ #include <linux/mm.h> +#include <linux/iosys-map.h> #include <linux/sync_file.h> #include <linux/pagemap.h> #include <linux/shmem_fs.h> @@ -182,7 +183,7 @@ static int lima_gem_pin(struct drm_gem_object *obj) return drm_gem_shmem_pin(&bo->base); } -static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct lima_bo *bo = to_lima_bo(obj); diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 5612d73f238f..390c969f74ad 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -284,7 +284,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) struct lima_dump_chunk_buffer *buffer_chunk; u32 size, task_size, mem_size; int i; - struct dma_buf_map map; + struct iosys_map map; int ret; mutex_lock(&dev->error_task_list_lock); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index d0544962cfc1..139d7724c6d0 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -220,7 +220,7 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, return &mtk_gem->base; } -int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); struct sg_table *sgt = NULL; @@ -247,12 +247,13 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) out: kfree(sgt); - dma_buf_map_set_vaddr(map, mtk_gem->kvaddr); + iosys_map_set_vaddr(map, mtk_gem->kvaddr); return 0; } -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); void *vaddr = map->vaddr; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 9a359a06cb73..78f23b07a02e 100644 --- 
a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -42,7 +42,8 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map); #endif diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index cd9ba13ad5fc..6e18d3bbd720 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -9,7 +9,7 @@ */ #include <linux/delay.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> @@ -848,7 +848,7 @@ mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe, static void mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, - struct drm_rect *clip, const struct dma_buf_map *map) + struct drm_rect *clip, const struct iosys_map *map) { void __iomem *dst = mdev->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d7574e6bd4e4..ae52412d529a 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -309,8 +309,8 @@ void msm_gem_shrinker_init(struct drm_device *dev); void msm_gem_shrinker_cleanup(struct drm_device *dev); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); -int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index fc94e061d6a7..e8f1b7a2ca9c 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -22,19 +22,19 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages); } -int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { void *vaddr; vaddr = msm_gem_get_vaddr(obj); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } -void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { msm_gem_put_vaddr(obj); } diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 1d36df5af98d..bc0df93f7f21 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -1,16 
+1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Collabora Ltd */ -#include <drm/drm_file.h> -#include <drm/drm_gem_shmem_helper.h> -#include <drm/panfrost_drm.h> #include <linux/completion.h> -#include <linux/dma-buf-map.h> #include <linux/iopoll.h> +#include <linux/iosys-map.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <drm/drm_file.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/panfrost_drm.h> + #include "panfrost_device.h" #include "panfrost_features.h" #include "panfrost_gem.h" @@ -73,7 +74,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; - struct dma_buf_map map; + struct iosys_map map; struct drm_gem_shmem_object *bo; u32 cfg, as; int ret; @@ -181,7 +182,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(perfcnt->buf); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(perfcnt->buf); if (user != perfcnt->user) return -EINVAL; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 9e0a1e836011..9a9c29b1d3e1 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -25,7 +25,7 @@ #include <linux/crc32.h> #include <linux/delay.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_drv.h> #include <drm/drm_atomic.h> @@ -566,8 +566,8 @@ static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev, { static const u32 size = 64 * 64 * 4; struct qxl_bo *cursor_bo; - struct dma_buf_map cursor_map; - struct dma_buf_map user_map; + struct iosys_map cursor_map; + struct iosys_map user_map; struct qxl_cursor cursor; int ret; @@ -1183,7 +1183,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev) { int ret; struct drm_gem_object *gobj; - struct dma_buf_map map; + struct iosys_map map; int monitors_config_size = sizeof(struct qxl_monitors_config) + qxl_num_crtc * sizeof(struct qxl_head); diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 7d27891e87fa..a93de9e1977a 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -20,7 +20,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_fourcc.h> @@ -44,7 +44,7 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, unsigned int num_clips, struct qxl_bo *clips_bo) { - struct dma_buf_map map; + struct iosys_map map; struct qxl_clip_rects *dev_clips; int ret; @@ -146,7 +146,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, int stride = fb->pitches[0]; /* depth is not actually interesting, we don't mask with it */ int depth = fb->format->cpp[0] * 8; - struct dma_buf_map surface_map; + struct iosys_map surface_map; uint8_t *surface_base; struct qxl_release *release; struct qxl_bo *clips_bo; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 359266d9e860..9796099ff18f 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -30,7 +30,7 @@ * Definitions taken from spice-protocol, plus kernel driver specific bits. 
*/ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/dma-fence.h> #include <linux/firmware.h> #include <linux/platform_device.h> @@ -50,7 +50,7 @@ #include "qxl_dev.h" -struct dma_buf_map; +struct iosys_map; #define DRIVER_AUTHOR "Dave Airlie" @@ -81,7 +81,7 @@ struct qxl_bo { /* Protected by tbo.reserved */ struct ttm_place placements[3]; struct ttm_placement placement; - struct dma_buf_map map; + struct iosys_map map; void *kptr; unsigned int map_count; int type; @@ -431,9 +431,9 @@ struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *qxl_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); void qxl_gem_prime_vunmap(struct drm_gem_object *obj, - struct dma_buf_map *map); + struct iosys_map *map); /* qxl_irq.c */ int qxl_irq_init(struct qxl_device *qdev); diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index fbb36e3e8564..b42a657e4c2f 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -23,7 +23,7 @@ * Alon Levy */ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/io-mapping.h> #include "qxl_drv.h" @@ -158,7 +158,7 @@ int qxl_bo_create(struct qxl_device *qdev, unsigned long size, return 0; } -int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map) +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map) { int r; @@ -184,7 +184,7 @@ out: return 0; } -int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map) +int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map) { int r; @@ -210,7 +210,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, void *rptr; int ret; struct io_mapping *map; - struct dma_buf_map bo_map; + struct iosys_map bo_map; if (bo->tbo.resource->mem_type == TTM_PL_VRAM) map = qdev->vram_mapping; diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index cee4b52b75dd..53392cb90eec 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -59,8 +59,8 @@ extern int qxl_bo_create(struct qxl_device *qdev, u32 priority, struct qxl_surface *surf, struct qxl_bo **bo_ptr); -int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map); -int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map); +int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map); +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map); int qxl_bo_vunmap(struct qxl_bo *bo); void qxl_bo_vunmap_locked(struct qxl_bo *bo); void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 4a10cb0a413b..142d01415acb 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -54,7 +54,7 @@ struct drm_gem_object *qxl_gem_prime_import_sg_table( return ERR_PTR(-ENOSYS); } -int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); int ret; @@ -67,7 +67,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) } void qxl_gem_prime_vunmap(struct drm_gem_object *obj, - struct dma_buf_map *map) + struct iosys_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); 
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index a36a4f2c76b0..f563284a7fac 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -26,6 +26,7 @@ * Jerome Glisse */ +#include <linux/iosys-map.h> #include <linux/pci.h> #include <drm/drm_device.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 63eb73b624aa..985584147da1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -510,7 +510,7 @@ err_free_rk_obj: return ERR_PTR(ret); } -int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); @@ -519,18 +519,19 @@ int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) pgprot_writecombine(PAGE_KERNEL)); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) return -ENOMEM; - dma_buf_map_set_vaddr(map, rk_obj->kvaddr); + iosys_map_set_vaddr(map, rk_obj->kvaddr); return 0; } -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 47c1861eece0..72f59ac6d258 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -31,8 +31,9 @@ struct drm_gem_object * rockchip_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map); struct rockchip_gem_object * rockchip_gem_create_object(struct drm_device *drm, unsigned int size, diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index fce0e52973c2..0063403ab5e1 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -174,7 +174,7 @@ static void tegra_bo_unpin(struct host1x_bo_mapping *map) static void *tegra_bo_mmap(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); - struct dma_buf_map map; + struct iosys_map map; int ret; if (obj->vaddr) { @@ -191,7 +191,7 @@ static void *tegra_bo_mmap(struct host1x_bo *bo) static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr); if (obj->vaddr) return; @@ -699,17 +699,17 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) return __tegra_gem_mmap(gem, vma); } -static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map) +static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map) { struct drm_gem_object *gem = buf->priv; struct tegra_bo *bo = to_tegra_bo(gem); - dma_buf_map_set_vaddr(map, bo->vaddr); + iosys_map_set_vaddr(map, bo->vaddr); return 
0; } -static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map) +static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map) { } diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index ecf2475d0f16..c8e791840862 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -16,7 +16,7 @@ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com> */ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -313,7 +313,8 @@ static int cirrus_mode_set(struct cirrus_device *cirrus, return 0; } -static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_map *map, +static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, + const struct iosys_map *map, struct drm_rect *rect) { struct cirrus_device *cirrus = to_cirrus(fb->dev); @@ -345,7 +346,8 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_ return 0; } -static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map) +static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb, + const struct iosys_map *map) { struct drm_rect fullscreen = { .x1 = 0, diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 6bc0c298739c..648e585d40a8 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -95,7 +95,7 @@ struct gm12u320_device { struct drm_rect rect; int frame; int draw_status_timeout; - struct dma_buf_map src_map; + struct iosys_map src_map; } fb_update; }; @@ -395,7 +395,8 @@ err: GM12U320_ERR("Frame update error: %d\n", ret); } -static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, const struct dma_buf_map *map, +static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, + const struct iosys_map *map, struct drm_rect *dirty) { struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev); @@ -438,7 +439,7 @@ static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320) mutex_lock(&gm12u320->fb_update.lock); old_fb = gm12u320->fb_update.fb; gm12u320->fb_update.fb = NULL; - dma_buf_map_clear(&gm12u320->fb_update.src_map); + iosys_map_clear(&gm12u320->fb_update.src_map); mutex_unlock(&gm12u320->fb_update.lock); drm_framebuffer_put(old_fb); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 544a84fa6589..2b8caa1efaa3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -33,7 +33,7 @@ #include <drm/ttm/ttm_placement.h> #include <drm/drm_cache.h> #include <drm/drm_vma_manager.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/io.h> #include <linux/highmem.h> #include <linux/wait.h> @@ -93,7 +93,7 @@ void ttm_move_memcpy(bool clear, { const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops; const struct ttm_kmap_iter_ops *src_ops = src_iter->ops; - struct dma_buf_map src_map, dst_map; + struct iosys_map src_map, dst_map; pgoff_t i; /* Single TTM move. 
NOP */ @@ -390,7 +390,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) } EXPORT_SYMBOL(ttm_bo_kunmap); -int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map) { struct ttm_resource *mem = bo->resource; int ret; @@ -418,7 +418,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) if (!vaddr_iomem) return -ENOMEM; - dma_buf_map_set_vaddr_iomem(map, vaddr_iomem); + iosys_map_set_vaddr_iomem(map, vaddr_iomem); } else { struct ttm_operation_ctx ctx = { @@ -442,25 +442,25 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); } return 0; } EXPORT_SYMBOL(ttm_bo_vmap); -void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map) { struct ttm_resource *mem = bo->resource; - if (dma_buf_map_is_null(map)) + if (iosys_map_is_null(map)) return; if (!map->is_iomem) vunmap(map->vaddr); else if (!mem->bus.addr) iounmap(map->vaddr_iomem); - dma_buf_map_clear(map); + iosys_map_clear(map); ttm_mem_io_free(bo->bdev, bo->resource); } diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 68344c90549b..29ee2eda7b7d 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -22,7 +22,7 @@ * Authors: Christian König */ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/io-mapping.h> #include <linux/scatterlist.h> @@ -244,7 +244,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man, EXPORT_SYMBOL(ttm_resource_manager_debug); static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *dmap, + struct iosys_map *dmap, pgoff_t i) { struct ttm_kmap_iter_iomap *iter_io = @@ -271,11 +271,11 @@ retry: addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs + (((resource_size_t)i - iter_io->cache.i) << PAGE_SHIFT)); - dma_buf_map_set_vaddr_iomem(dmap, addr); + iosys_map_set_vaddr_iomem(dmap, addr); } static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *map) + struct iosys_map *map) { io_mapping_unmap_local(map->vaddr_iomem); } @@ -326,14 +326,14 @@ EXPORT_SYMBOL(ttm_kmap_iter_iomap_init); */ static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *dmap, + struct iosys_map *dmap, pgoff_t i) { struct ttm_kmap_iter_linear_io *iter_io = container_of(iter, typeof(*iter_io), base); *dmap = iter_io->dmap; - dma_buf_map_incr(dmap, i * PAGE_SIZE); + iosys_map_incr(dmap, i * PAGE_SIZE); } static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = { @@ -369,7 +369,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, } if (mem->bus.addr) { - dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr); + iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr); iter_io->needs_unmap = false; } else { size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; @@ -377,23 +377,23 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, iter_io->needs_unmap = true; memset(&iter_io->dmap, 0, sizeof(iter_io->dmap)); if (mem->bus.caching == ttm_write_combined) - dma_buf_map_set_vaddr_iomem(&iter_io->dmap, - ioremap_wc(mem->bus.offset, - bus_size)); + iosys_map_set_vaddr_iomem(&iter_io->dmap, + ioremap_wc(mem->bus.offset, + bus_size)); else if (mem->bus.caching == 
ttm_cached) - dma_buf_map_set_vaddr(&iter_io->dmap, - memremap(mem->bus.offset, bus_size, - MEMREMAP_WB | - MEMREMAP_WT | - MEMREMAP_WC)); + iosys_map_set_vaddr(&iter_io->dmap, + memremap(mem->bus.offset, bus_size, + MEMREMAP_WB | + MEMREMAP_WT | + MEMREMAP_WC)); /* If uncached requested or if mapping cached or wc failed */ - if (dma_buf_map_is_null(&iter_io->dmap)) - dma_buf_map_set_vaddr_iomem(&iter_io->dmap, - ioremap(mem->bus.offset, - bus_size)); + if (iosys_map_is_null(&iter_io->dmap)) + iosys_map_set_vaddr_iomem(&iter_io->dmap, + ioremap(mem->bus.offset, + bus_size)); - if (dma_buf_map_is_null(&iter_io->dmap)) { + if (iosys_map_is_null(&iter_io->dmap)) { ret = -ENOMEM; goto out_io_free; } @@ -422,7 +422,7 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io, struct ttm_device *bdev, struct ttm_resource *mem) { - if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) { + if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) { if (iter_io->dmap.is_iomem) iounmap(iter_io->dmap.vaddr_iomem); else diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 79c870a3bef8..d234aab800a0 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -406,18 +406,18 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages) } static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *dmap, + struct iosys_map *dmap, pgoff_t i) { struct ttm_kmap_iter_tt *iter_tt = container_of(iter, typeof(*iter_tt), base); - dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i], - iter_tt->prot)); + iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i], + iter_tt->prot)); } static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter, - struct dma_buf_map *map) + struct iosys_map *map) { kunmap_local(map->vaddr); } diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 32232228dae9..e67c40a48fb4 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -264,7 +264,8 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y, return 0; } -static int udl_handle_damage(struct drm_framebuffer *fb, const struct dma_buf_map *map, +static int udl_handle_damage(struct drm_framebuffer *fb, + const struct iosys_map *map, int x, int y, int width, int height) { struct drm_device *dev = fb->dev; diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index 4227a915b06a..4017b0a621fc 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -10,7 +10,7 @@ * Hans de Goede <hdegoede@redhat.com> */ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/export.h> #include <drm/drm_atomic.h> @@ -398,7 +398,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, u32 height = new_state->crtc_h; struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state); - struct dma_buf_map map = shadow_plane_state->data[0]; + struct iosys_map map = shadow_plane_state->data[0]; u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */ size_t data_size, mask_size; u32 flags; diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index 9e8204be9a14..c6a1036bf2ea 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -157,7 +157,7 @@ static void compose_plane(struct vkms_composer 
*primary_composer, void *vaddr; void (*pixel_blend)(const u8 *p_src, u8 *p_dst); - if (WARN_ON(dma_buf_map_is_null(&primary_composer->map[0]))) + if (WARN_ON(iosys_map_is_null(&primary_composer->map[0]))) return; vaddr = plane_composer->map[0].vaddr; @@ -187,7 +187,7 @@ static int compose_active_planes(void **vaddr_out, } } - if (WARN_ON(dma_buf_map_is_null(&primary_composer->map[0]))) + if (WARN_ON(iosys_map_is_null(&primary_composer->map[0]))) return -EINVAL; vaddr = primary_composer->map[0].vaddr; diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 9496fdc900b8..91e63b12f60f 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -23,14 +23,14 @@ #define NUM_OVERLAY_PLANES 8 struct vkms_writeback_job { - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; }; struct vkms_composer { struct drm_framebuffer fb; struct drm_rect src, dst; - struct dma_buf_map map[4]; + struct iosys_map map[4]; unsigned int offset; unsigned int pitch; unsigned int cpp; diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 32409e15244b..d8eb674b49a6 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index 8694227f555f..af1604dfbbaf 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_atomic.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index dd358ba2bf8e..5a5bf4e5b717 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -280,7 +280,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, return &xen_obj->base; } -int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_map *map) +int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, + struct iosys_map *map) { struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj); void *vaddr; @@ -293,13 +294,13 @@ int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_ VM_MAP, PAGE_KERNEL); if (!vaddr) return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, - struct dma_buf_map *map) + struct iosys_map *map) { vunmap(map->vaddr); } diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h index eaea470f7001..a718a1f382a3 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.h +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h @@ -12,7 +12,7 @@ #define __XEN_DRM_FRONT_GEM_H struct dma_buf_attachment; -struct dma_buf_map; +struct iosys_map; struct drm_device; struct drm_gem_object; struct sg_table; @@ -32,9 +32,9 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj); void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj); int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, - struct 
dma_buf_map *map); + struct iosys_map *map); void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, - struct dma_buf_map *map); + struct iosys_map *map); #endif /* __XEN_DRM_FRONT_GEM_H */ diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index 7c4096e62173..ecf065cd4a67 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -99,7 +99,7 @@ static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv) return buf->vaddr; if (buf->db_attach) { - struct dma_buf_map map; + struct iosys_map map; if (!dma_buf_vmap(buf->db_attach->dmabuf, &map)) buf->vaddr = map.vaddr; @@ -446,7 +446,7 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, return 0; } -static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) +static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map) { struct vb2_dc_buf *buf; void *vaddr; @@ -456,7 +456,7 @@ static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) if (!vaddr) return -EINVAL; - dma_buf_map_set_vaddr(map, vaddr); + iosys_map_set_vaddr(map, vaddr); return 0; } @@ -737,7 +737,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv) { struct vb2_dc_buf *buf = mem_priv; struct sg_table *sgt = buf->dma_sgt; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); if (WARN_ON(!buf->db_attach)) { pr_err("trying to unpin a not attached buffer\n"); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 90acafd9a290..f8a21c560ad2 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -303,7 +303,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv) { struct vb2_dma_sg_buf *buf = buf_priv; - struct dma_buf_map map; + struct iosys_map map; int ret; BUG_ON(!buf); @@ -492,11 +492,12 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, return 0; } -static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) +static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, + struct iosys_map *map) { struct vb2_dma_sg_buf *buf = dbuf->priv; - dma_buf_map_set_vaddr(map, buf->vaddr); + iosys_map_set_vaddr(map, buf->vaddr); return 0; } @@ -581,7 +582,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv) { struct vb2_dma_sg_buf *buf = mem_priv; struct sg_table *sgt = buf->dma_sgt; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); if (WARN_ON(!buf->db_attach)) { pr_err("trying to unpin a not attached buffer\n"); diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c index 0bbfea66554f..948152f1596b 100644 --- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c +++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c @@ -312,11 +312,12 @@ static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf) vb2_vmalloc_put(dbuf->priv); } -static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) +static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, + struct iosys_map *map) { struct vb2_vmalloc_buf *buf = dbuf->priv; - dma_buf_map_set_vaddr(map, buf->vaddr); + iosys_map_set_vaddr(map, buf->vaddr); return 
0; } @@ -372,7 +373,7 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb, static int vb2_vmalloc_map_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; - struct dma_buf_map map; + struct iosys_map map; int ret; ret = dma_buf_vmap(buf->dbuf, &map); @@ -386,7 +387,7 @@ static int vb2_vmalloc_map_dmabuf(void *mem_priv) static void vb2_vmalloc_unmap_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); dma_buf_vunmap(buf->dbuf, &map); buf->vaddr = NULL; @@ -395,7 +396,7 @@ static void vb2_vmalloc_unmap_dmabuf(void *mem_priv) static void vb2_vmalloc_detach_dmabuf(void *mem_priv) { struct vb2_vmalloc_buf *buf = mem_priv; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); if (buf->vaddr) dma_buf_vunmap(buf->dbuf, &map); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 4ccbf43e6bfa..5c0503655212 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -587,11 +587,11 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf, kfree(a); } -static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) +static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct fastrpc_buf *buf = dmabuf->priv; - dma_buf_map_set_vaddr(map, buf->virt); + iosys_map_set_vaddr(map, buf->virt); return 0; } diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h index 98d020835b49..69487bd8ed56 100644 --- a/include/drm/dp/drm_dp_helper.h +++ b/include/drm/dp/drm_dp_helper.h @@ -560,6 +560,7 @@ struct drm_panel; # define DP_TRAINING_PATTERN_DISABLE 0 # define DP_TRAINING_PATTERN_1 1 # define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */ # define DP_TRAINING_PATTERN_3 3 /* 1.2 */ # define DP_TRAINING_PATTERN_4 7 /* 1.4 */ # define DP_TRAINING_PATTERN_MASK 0x3 @@ -738,11 +739,13 @@ struct drm_panel; DP_LANE_CHANNEL_EQ_DONE | \ DP_LANE_SYMBOL_LOCKED) -#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 - -#define DP_INTERLANE_ALIGN_DONE (1 << 0) -#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) -#define DP_LINK_STATUS_UPDATED (1 << 7) +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */ +#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */ +#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */ +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) #define DP_SINK_STATUS 0x205 # define DP_RECEIVE_PORT_0_STATUS (1 << 0) @@ -1112,6 +1115,7 @@ struct drm_panel; # define DP_UHBR13_5 (1 << 2) #define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */ +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7) # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00 # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01 @@ -1347,6 +1351,7 @@ struct drm_panel; # define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0) /* See DP_128B132B_SUPPORTED_LINK_RATES for values */ #define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */ +#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */ enum drm_dp_phy { DP_PHY_DPRX, @@ -1549,6 +1554,15 @@ void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, 
const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux); +bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]); + u8 drm_dp_link_rate_to_bw_code(int link_rate); int drm_dp_bw_code_to_link_rate(u8 link_bw); diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index cc9de1632dd3..22deb216b59c 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -35,7 +35,7 @@ #include <linux/scatterlist.h> -struct dma_buf_map; +struct iosys_map; void drm_clflush_pages(struct page *pages[], unsigned long num_pages); void drm_clflush_sg(struct sg_table *st); @@ -74,7 +74,7 @@ static inline bool drm_arch_can_wc_memory(void) void drm_memcpy_init_early(void); -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len); #endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index f07f2fb02e75..4fc8018eddda 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -3,7 +3,7 @@ #ifndef _DRM_CLIENT_H_ #define _DRM_CLIENT_H_ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/types.h> @@ -144,7 +144,7 @@ struct drm_client_buffer { /** * @map: Virtual address for the buffer */ - struct dma_buf_map map; + struct iosys_map map; /** * @fb: DRM framebuffer @@ -156,7 +156,8 @@ struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); -int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map); +int drm_client_buffer_vmap(struct drm_client_buffer *buffer, + struct iosys_map *map); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); int drm_client_modeset_create(struct drm_client_dev *client); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index f9f9af8b5448..a70baea0636c 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1145,14 +1145,12 @@ struct drm_crtc { */ spinlock_t commit_lock; -#ifdef CONFIG_DEBUG_FS /** * @debugfs_entry: * * Debugfs directory for this CRTC. */ struct dentry *debugfs_entry; -#endif /** * @crc: diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 35e7f44c2a75..e2941cee14b6 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -39,7 +39,7 @@ #include <drm/drm_vma_manager.h> -struct dma_buf_map; +struct iosys_map; struct drm_gem_object; /** @@ -139,7 +139,7 @@ struct drm_gem_object_funcs { * * This callback is optional. */ - int (*vmap)(struct drm_gem_object *obj, struct dma_buf_map *map); + int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @vunmap: @@ -149,7 +149,7 @@ struct drm_gem_object_funcs { * * This callback is optional. 
*/ - void (*vunmap)(struct drm_gem_object *obj, struct dma_buf_map *map); + void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @mmap: diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 0b1e2dd2ac3f..6e3319e9001a 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -3,7 +3,7 @@ #ifndef __DRM_GEM_ATOMIC_HELPER_H__ #define __DRM_GEM_ATOMIC_HELPER_H__ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> @@ -59,7 +59,7 @@ struct drm_shadow_plane_state { * The memory mappings stored in map should be established in the plane's * prepare_fb callback and removed in the cleanup_fb callback. */ - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; /** * @data: Address of each framebuffer BO's data @@ -67,7 +67,7 @@ struct drm_shadow_plane_state { * The address of the data stored in each mapping. This is different * for framebuffers with non-zero offset fields. */ - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; }; /** diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index adb507a9dbf0..fbda4ce5d5fb 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -38,7 +38,8 @@ void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj); void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, struct drm_printer *p, unsigned int indent); struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj); -int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map); +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, + struct iosys_map *map); int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma); extern const struct vm_operations_struct drm_gem_cma_vm_ops; @@ -106,7 +107,8 @@ static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_ob * Returns: * 0 on success or a negative error code on failure. 
*/ -static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 905727719ead..1091e4fa08cb 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -2,7 +2,7 @@ #define __DRM_GEM_FB_HELPER_H__ #include <linux/dma-buf.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_fourcc.h> @@ -40,10 +40,10 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_gem_fb_vmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]); + struct iosys_map map[static DRM_FORMAT_MAX_PLANES], + struct iosys_map data[DRM_FORMAT_MAX_PLANES]); void drm_gem_fb_vunmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]); + struct iosys_map map[static DRM_FORMAT_MAX_PLANES]); int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 311d66c9cf4b..68347b63fc71 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -113,8 +113,10 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); @@ -226,7 +228,8 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_ * Returns: * 0 on success or a negative error code on failure. */ -static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); @@ -241,7 +244,8 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct d * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should * use it as their &drm_gem_object_funcs.vunmap handler. 
*/ -static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index 78040f6cc6f3..4c003b4f173e 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -10,7 +10,7 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> -struct dma_buf_map; +struct iosys_map; #define drm_gem_ttm_of_gem(gem_obj) \ container_of(gem_obj, struct ttm_buffer_object, base) @@ -18,9 +18,9 @@ struct dma_buf_map; void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, const struct drm_gem_object *gem); int drm_gem_ttm_vmap(struct drm_gem_object *gem, - struct dma_buf_map *map); + struct iosys_map *map); void drm_gem_ttm_vunmap(struct drm_gem_object *gem, - struct dma_buf_map *map); + struct iosys_map *map); int drm_gem_ttm_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma); diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index b4ce27a72773..c083a1d71cf4 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -12,7 +12,7 @@ #include <drm/ttm/ttm_bo_driver.h> #include <linux/container_of.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> struct drm_mode_create_dumb; struct drm_plane; @@ -51,7 +51,7 @@ struct vm_area_struct; */ struct drm_gem_vram_object { struct ttm_buffer_object bo; - struct dma_buf_map map; + struct iosys_map map; /** * @vmap_use_count: @@ -97,8 +97,9 @@ void drm_gem_vram_put(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); -int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map); +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, + struct iosys_map *map); int drm_gem_vram_fill_create_dumb(struct drm_file *file, struct drm_device *dev, diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 54f2c58305d2..2a1d01e5b56b 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -54,7 +54,7 @@ struct device; struct dma_buf_export_info; struct dma_buf; struct dma_buf_attachment; -struct dma_buf_map; +struct iosys_map; enum dma_data_direction; @@ -83,8 +83,8 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir); -int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map); -void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map); +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map); +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map); int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index baf3d1d3d566..533890dc9da1 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -666,6 
+666,12 @@ INTEL_VGA_DEVICE(0x46C2, info), \ INTEL_VGA_DEVICE(0x46C3, info) +/* ADL-N */ +#define INTEL_ADLN_IDS(info) \ + INTEL_VGA_DEVICE(0x46D0, info), \ + INTEL_VGA_DEVICE(0x46D1, info), \ + INTEL_VGA_DEVICE(0x46D2, info) + /* RPL-S */ #define INTEL_RPLS_IDS(info) \ INTEL_VGA_DEVICE(0xA780, info), \ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c17b2df9178b..155b19ee12fb 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -47,7 +47,7 @@ struct ttm_global; struct ttm_device; -struct dma_buf_map; +struct iosys_map; struct drm_mm_node; @@ -481,17 +481,17 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); * ttm_bo_vmap * * @bo: The buffer object. - * @map: pointer to a struct dma_buf_map representing the map. + * @map: pointer to a struct iosys_map representing the map. * * Sets up a kernel virtual mapping, using ioremap or vmap to the * data in the buffer object. The parameter @map returns the virtual - * address as struct dma_buf_map. Unmap the buffer with ttm_bo_vunmap(). + * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap(). * * Returns * -ENOMEM: Out of memory. * -EINVAL: Invalid range. */ -int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map); /** * ttm_bo_vunmap @@ -501,7 +501,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); * * Unmaps a kernel map set up by ttm_bo_vmap(). */ -void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map); /** * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object. diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h index 8bb00fd39d6c..cc5c09a211b4 100644 --- a/include/drm/ttm/ttm_kmap_iter.h +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -8,7 +8,7 @@ #include <linux/types.h> struct ttm_kmap_iter; -struct dma_buf_map; +struct iosys_map; /** * struct ttm_kmap_iter_ops - Ops structure for a struct @@ -24,22 +24,22 @@ struct ttm_kmap_iter_ops { * kmap_local semantics. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. - * @dmap: The struct dma_buf_map holding the virtual address after + * @dmap: The struct iosys_map holding the virtual address after * the operation. * @i: The location within the resource to map. PAGE_SIZE granularity. */ void (*map_local)(struct ttm_kmap_iter *res_iter, - struct dma_buf_map *dmap, pgoff_t i); + struct iosys_map *dmap, pgoff_t i); /** * unmap_local() - Unmap a PAGE_SIZE part of the resource previously * mapped using kmap_local. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. - * @dmap: The struct dma_buf_map holding the virtual address after + * @dmap: The struct iosys_map holding the virtual address after * the operation. 
*/ void (*unmap_local)(struct ttm_kmap_iter *res_iter, - struct dma_buf_map *dmap); + struct iosys_map *dmap); bool maps_tt; }; diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 69eea9d6399b..4fd727b52da1 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -27,7 +27,7 @@ #include <linux/types.h> #include <linux/mutex.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/dma-fence.h> #include <drm/drm_print.h> #include <drm/ttm/ttm_caching.h> @@ -41,7 +41,7 @@ struct ttm_resource; struct ttm_place; struct ttm_buffer_object; struct ttm_placement; -struct dma_buf_map; +struct iosys_map; struct io_mapping; struct sg_table; struct scatterlist; @@ -210,7 +210,7 @@ struct ttm_kmap_iter_iomap { */ struct ttm_kmap_iter_linear_io { struct ttm_kmap_iter base; - struct dma_buf_map dmap; + struct iosys_map dmap; bool needs_unmap; }; diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 7ab50076e7a6..2097760e8e95 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -13,7 +13,7 @@ #ifndef __DMA_BUF_H__ #define __DMA_BUF_H__ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/file.h> #include <linux/err.h> #include <linux/scatterlist.h> @@ -283,8 +283,8 @@ struct dma_buf_ops { */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); - int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); - void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); + int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map); + void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map); }; /** @@ -347,7 +347,7 @@ struct dma_buf { * @vmap_ptr: * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. */ - struct dma_buf_map vmap_ptr; + struct iosys_map vmap_ptr; /** * @exp_name: @@ -628,6 +628,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf, int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); -int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map); -void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map); +int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map); +void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map); #endif /* __DMA_BUF_H__ */ diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h new file mode 100644 index 000000000000..e69a002d5aa4 --- /dev/null +++ b/include/linux/iosys-map.h @@ -0,0 +1,459 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Pointer abstraction for IO/system memory + */ + +#ifndef __IOSYS_MAP_H__ +#define __IOSYS_MAP_H__ + +#include <linux/io.h> +#include <linux/string.h> + +/** + * DOC: overview + * + * When accessing a memory region, depending on its location, users may have to + * access it with I/O operations or memory load/store operations. For example, + * copying to system memory could be done with memcpy(), copying to I/O memory + * would be done with memcpy_toio(). + * + * .. code-block:: c + * + * void *vaddr = ...; // pointer to system memory + * memcpy(vaddr, src, len); + * + * void *vaddr_iomem = ...; // pointer to I/O memory + * memcpy_toio(vaddr_iomem, src, len); + * + * The user of such a pointer may not have information about the mapping of that + * region or may want to have a single code path to handle operations on that + * buffer, regardless of whether it's located in system or IO memory.
The type + * :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so the + * buffer can be passed around to other drivers or have separate duties inside + * the same driver for allocation, read and write operations. + * + * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered + * bad style. Rather than accessing its fields directly, use one of the provided + * helper functions, or implement your own. For example, instances of + * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with + * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These + * helpers will set an address in system memory. + * + * .. code-block:: c + * + * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeaf); + * + * iosys_map_set_vaddr(&map, 0xdeadbeaf); + * + * To set an address in I/O memory, use iosys_map_set_vaddr_iomem(). + * + * .. code-block:: c + * + * iosys_map_set_vaddr_iomem(&map, 0xdeadbeaf); + * + * Instances of struct iosys_map do not have to be cleaned up, but + * can be cleared to NULL with iosys_map_clear(). Cleared mappings + * always refer to system memory. + * + * .. code-block:: c + * + * iosys_map_clear(&map); + * + * Test if a mapping is valid with either iosys_map_is_set() or + * iosys_map_is_null(). + * + * .. code-block:: c + * + * if (iosys_map_is_set(&map) != iosys_map_is_null(&map)) + * // always true + * + * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for + * equality with iosys_map_is_equal(). Mappings that point to different memory + * spaces, system or I/O, are never equal. That's even true if both spaces are + * located in the same address space, both mappings contain the same address + * value, or both mappings refer to NULL. + * + * .. code-block:: c + * + * struct iosys_map sys_map; // refers to system memory + * struct iosys_map io_map; // refers to I/O memory + * + * if (iosys_map_is_equal(&sys_map, &io_map)) + * // always false + * + * A set up instance of struct iosys_map can be used to access or manipulate the + * buffer memory. Depending on the location of the memory, the provided helpers + * will pick the correct operations. Data can be copied into the memory with + * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr(). + * + * .. code-block:: c + * + * const void *src = ...; // source buffer + * size_t len = ...; // length of src + * + * iosys_map_memcpy_to(&map, 0, src, len); + * iosys_map_incr(&map, len); // go to first byte after the memcpy + */ + +/** + * struct iosys_map - Pointer to IO/system memory + * @vaddr_iomem: The buffer's address if in I/O memory + * @vaddr: The buffer's address if in system memory + * @is_iomem: True if the buffer is located in I/O memory, or false + * otherwise. + */ +struct iosys_map { + union { + void __iomem *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +/** + * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory + * @vaddr_: A system-memory address + */ +#define IOSYS_MAP_INIT_VADDR(vaddr_) \ + { \ + .vaddr = (vaddr_), \ + .is_iomem = false, \ + } + +/** + * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map + * @map_: The iosys_map structure to copy from + * @offset_: Offset to add to the other mapping + * + * Initializes a new iosys_map struct based on another passed as argument. It + * does a shallow copy of the struct so it's possible to update the backing storage + * without changing where the original map points to.
It is the equivalent of + * doing: + * + * .. code-block:: c + * + * struct iosys_map map = other_map; + * iosys_map_incr(&map, offset); + * + * Example usage: + * + * .. code-block:: c + * + * void foo(struct device *dev, struct iosys_map *base_map) + * { + * ... + * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET); + * ... + * } + * + * The advantage of using the initializer over just increasing the offset with + * iosys_map_incr() like above is that the new map will always point to the + * right place of the buffer during its scope. It reduces the risk of updating + * the wrong part of the buffer and having no compiler warning about that. If + * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn + * about the use of an uninitialized variable. + */ +#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \ + struct iosys_map copy = *map_; \ + iosys_map_incr(&copy, offset_); \ + copy; \ +}) + +/** + * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory + * @map: The iosys_map structure + * @vaddr: A system-memory address + * + * Sets the address and clears the I/O-memory flag. + */ +static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr) +{ + map->vaddr = vaddr; + map->is_iomem = false; +} + +/** + * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory + * @map: The iosys_map structure + * @vaddr_iomem: An I/O-memory address + * + * Sets the address and the I/O-memory flag. + */ +static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map, + void __iomem *vaddr_iomem) +{ + map->vaddr_iomem = vaddr_iomem; + map->is_iomem = true; +} + +/** + * iosys_map_is_equal - Compares two iosys mapping structures for equality + * @lhs: The iosys_map structure + * @rhs: An iosys_map structure to compare with + * + * Two iosys mapping structures are equal if they both refer to the same type of memory + * and to the same address within that memory. + * + * Returns: + * True if both structures are equal, or false otherwise. + */ +static inline bool iosys_map_is_equal(const struct iosys_map *lhs, + const struct iosys_map *rhs) +{ + if (lhs->is_iomem != rhs->is_iomem) + return false; + else if (lhs->is_iomem) + return lhs->vaddr_iomem == rhs->vaddr_iomem; + else + return lhs->vaddr == rhs->vaddr; +} + +/** + * iosys_map_is_null - Tests for an iosys mapping to be NULL + * @map: The iosys_map structure + * + * Depending on the state of struct iosys_map.is_iomem, tests if the + * mapping is NULL. + * + * Returns: + * True if the mapping is NULL, or false otherwise. + */ +static inline bool iosys_map_is_null(const struct iosys_map *map) +{ + if (map->is_iomem) + return !map->vaddr_iomem; + return !map->vaddr; +} + +/** + * iosys_map_is_set - Tests if the iosys mapping has been set + * @map: The iosys_map structure + * + * Depending on the state of struct iosys_map.is_iomem, tests if the + * mapping has been set. + * + * Returns: + * True if the mapping has been set, or false otherwise. + */ +static inline bool iosys_map_is_set(const struct iosys_map *map) +{ + return !iosys_map_is_null(map); +} + +/** + * iosys_map_clear - Clears an iosys mapping structure + * @map: The iosys_map structure + * + * Clears all fields to zero, including struct iosys_map.is_iomem, so + * mapping structures that were set to point to I/O memory are reset for + * system memory. Pointers are cleared to NULL. This is the default.
+ */ +static inline void iosys_map_clear(struct iosys_map *map) +{ + if (map->is_iomem) { + map->vaddr_iomem = NULL; + map->is_iomem = false; + } else { + map->vaddr = NULL; + } +} + +/** + * iosys_map_memcpy_to - Memcpy into offset of iosys_map + * @dst: The iosys_map structure + * @dst_offset: The offset to which to copy + * @src: The source buffer + * @len: The number of bytes in src + * + * Copies data into an iosys_map with an offset. The source buffer is in + * system memory. Depending on the buffer's location, the helper picks the + * correct method of accessing the memory. + */ +static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset, + const void *src, size_t len) +{ + if (dst->is_iomem) + memcpy_toio(dst->vaddr_iomem + dst_offset, src, len); + else + memcpy(dst->vaddr + dst_offset, src, len); +} + +/** + * iosys_map_memcpy_from - Memcpy from iosys_map into system memory + * @dst: Destination in system memory + * @src: The iosys_map structure + * @src_offset: The offset from which to copy + * @len: The number of bytes in src + * + * Copies data from an iosys_map with an offset. The destination buffer is in + * system memory. Depending on the mapping location, the helper picks the + * correct method of accessing the memory. + */ +static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src, + size_t src_offset, size_t len) +{ + if (src->is_iomem) + memcpy_fromio(dst, src->vaddr_iomem + src_offset, len); + else + memcpy(dst, src->vaddr + src_offset, len); +} + +/** + * iosys_map_incr - Increments the address stored in an iosys mapping + * @map: The iosys_map structure + * @incr: The number of bytes to increment + * + * Increments the address stored in an iosys mapping. Depending on the + * buffer's location, the correct value will be updated. + */ +static inline void iosys_map_incr(struct iosys_map *map, size_t incr) +{ + if (map->is_iomem) + map->vaddr_iomem += incr; + else + map->vaddr += incr; +} + +/** + * iosys_map_memset - Memset iosys_map + * @dst: The iosys_map structure + * @offset: Offset from dst at which to start setting the value + * @value: The value to set + * @len: The number of bytes to set in dst + * + * Sets a value in the iosys_map. Depending on the buffer's location, the helper + * picks the correct method of accessing the memory. + */ +static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, + int value, size_t len) +{ + if (dst->is_iomem) + memset_io(dst->vaddr_iomem + offset, value, len); + else + memset(dst->vaddr + offset, value, len); +} + +/** + * iosys_map_rd - Read a C-type value from the iosys_map + * + * @map__: The iosys_map structure + * @offset__: The offset from which to read + * @type__: Type of the value being read + * + * Read a C type value from iosys_map, handling possible un-aligned accesses to + * the mapping. + * + * Returns: + * The value read from the mapping. + */ +#define iosys_map_rd(map__, offset__, type__) ({ \ + type__ val; \ + iosys_map_memcpy_from(&val, map__, offset__, sizeof(val)); \ + val; \ +}) + +/** + * iosys_map_wr - Write a C-type value to the iosys_map + * + * @map__: The iosys_map structure + * @offset__: The offset from the mapping to write to + * @type__: Type of the value being written + * @val__: Value to write + * + * Write a C-type value to the iosys_map, handling possible un-aligned accesses + * to the mapping.
+ */
+#define iosys_map_wr(map__, offset__, type__, val__) ({		\
+	type__ val = (val__);						\
+	iosys_map_memcpy_to(map__, offset__, &val, sizeof(val));	\
+})
+
+/**
+ * iosys_map_rd_field - Read a member from a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ *                   is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to read
+ *
+ * Reads a value from the iosys_map, assuming its layout is described by a C
+ * struct starting at @struct_offset__. The field offset and size are
+ * calculated and the value is read while handling possibly unaligned memory
+ * accesses. For example: suppose there is a ``struct foo`` defined as below
+ * and the value ``foo.field2.inner2`` needs to be read from the iosys_map:
+ *
+ * .. code-block:: c
+ *
+ *	struct foo {
+ *		int field1;
+ *		struct {
+ *			int inner1;
+ *			int inner2;
+ *		} field2;
+ *		int field3;
+ *	} __packed;
+ *
+ * This is the expected memory layout of a buffer using iosys_map_rd_field():
+ *
+ * +------------------------------+--------------------------+
+ * | Address                      | Content                  |
+ * +==============================+==========================+
+ * | buffer + 0000                | start of mmapped buffer  |
+ * |                              | pointed to by iosys_map  |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + ``struct_offset__`` | start of ``struct foo``  |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + wwww                | ``foo.field2.inner2``    |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + yyyy                | end of ``struct foo``    |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + zzzz                | end of mmapped buffer    |
+ * +------------------------------+--------------------------+
+ *
+ * Offsets that are calculated automatically by this macro, or that are not
+ * needed, are denoted by wwww, yyyy and zzzz. This is the code to read that
+ * value:
+ *
+ * .. code-block:: c
+ *
+ *	x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2);
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({	\
+	struct_type__ *s;							\
+	iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__),\
+		     typeof(s->field__));					\
+})
+
+/**
+ * iosys_map_wr_field - Write to a member of a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ *                   is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to write
+ * @val__: Value to write
+ *
+ * Writes a value to the iosys_map, assuming its layout is described by a C
+ * struct starting at @struct_offset__. The field offset and size are
+ * calculated and @val__ is written while handling possibly unaligned memory
+ * accesses. Refer to iosys_map_rd_field() for expected usage and memory
+ * layout.
+ */
+#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({	\
+	struct_type__ *s;								\
+	iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__),	\
+		     typeof(s->field__), val__);					\
+})
+
+#endif /* __IOSYS_MAP_H__ */
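
As a rough usage sketch of the helpers added above (the function name, buffer addresses and offsets below are illustrative assumptions, not taken from this patch or from any driver), a caller might combine the setters, memcpy/memset helpers and typed accessors like this:

#include <linux/iosys-map.h>
#include <linux/types.h>

/* Illustrative only: "vaddr" is assumed to be a kernel virtual address of a
 * system-memory buffer, "regs" an ioremap()'ed I/O-memory region. */
static u32 iosys_map_usage_sketch(void *vaddr, void __iomem *regs)
{
	struct iosys_map sys_map, io_map;
	const char greeting[] = "hello";
	u32 status;

	iosys_map_set_vaddr(&sys_map, vaddr);		/* is_iomem = false */
	iosys_map_set_vaddr_iomem(&io_map, regs);	/* is_iomem = true  */

	/* The same calls work for both maps; the helpers dispatch to
	 * memcpy()/memset() or memcpy_toio()/memset_io() internally. */
	iosys_map_memcpy_to(&sys_map, 0, greeting, sizeof(greeting));
	iosys_map_memcpy_to(&io_map, 0, greeting, sizeof(greeting));
	iosys_map_memset(&io_map, 64, 0, 16);

	/* Typed accessors handle the copy (and any misalignment) for us. */
	iosys_map_wr(&io_map, 64, u32, 0xdeadbeef);
	status = iosys_map_rd(&io_map, 64, u32);

	if (iosys_map_is_set(&sys_map))
		iosys_map_clear(&sys_map);

	return status;
}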
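
The struct-field accessors could be used along the same lines; ``struct fw_shared``, the 0x40 offset and the ring-index semantics in this sketch are made up purely for illustration:

#include <linux/iosys-map.h>
#include <linux/types.h>

struct fw_shared {
	u32 magic;
	struct {
		u16 head;
		u16 tail;
	} ring;
	u32 flags;
} __packed;

/* Read and update ring indices living at offset 0x40 of the mapped buffer,
 * without any manual offsetof() arithmetic. */
static u16 fw_ring_advance_sketch(struct iosys_map *base)
{
	/* Shallow copy that keeps pointing at the shared struct, as the
	 * IOSYS_MAP_INIT_OFFSET() documentation recommends. */
	struct iosys_map fw = IOSYS_MAP_INIT_OFFSET(base, 0x40);
	u16 head = iosys_map_rd_field(&fw, 0, struct fw_shared, ring.head);

	iosys_map_wr_field(&fw, 0, struct fw_shared, ring.tail, head + 1);

	return head;
}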