Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_display.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 854
1 file changed, 797 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 3cadb0b76f22..dc50c05f23fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
static void amdgpu_display_flip_callback(struct dma_fence *f,
@@ -93,13 +94,13 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
* targeted by the flip
*/
if (amdgpu_crtc->enabled &&
- (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(crtc)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
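
The deferral above relies on a wrap-safe counter comparison: the difference of two u32 vblank counts, cast to int, is positive only while the target count still lies ahead, even across 32-bit wraparound. A minimal, self-contained sketch of the same idiom (the helper name is illustrative, not from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "is the target still in the future?" test, mirroring
     * (int)(work->target_vblank - current_vblank) > 0 above. */
    static int target_is_ahead(uint32_t target, uint32_t current)
    {
            return (int32_t)(target - current) > 0;
    }

    int main(void)
    {
            printf("%d\n", target_is_ahead(5, 0xFFFFFFFEu)); /* 1: 5 is "after" the wrap */
            printf("%d\n", target_is_ahead(100, 200));       /* 0: target already reached */
            return 0;
    }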
@@ -132,10 +133,7 @@ static void amdgpu_display_unpin_work_func(struct work_struct *__work)
/* unpin of the old buffer */
r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
- r = amdgpu_bo_unpin(work->old_abo);
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to unpin buffer after flip\n");
- }
+ amdgpu_bo_unpin(work->old_abo);
amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
@@ -152,7 +150,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
@@ -205,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
- &work->shared_count,
- &work->shared);
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
+ &work->shared_count, &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
@@ -219,7 +216,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
if (!adev->enable_virtual_display)
work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+ amdgpu_get_vblank_counter_kms(crtc);
/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -249,8 +246,7 @@ pflip_cleanup:
}
unpin:
if (!adev->enable_virtual_display)
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
- DRM_ERROR("failed to unpin new abo in error path\n");
+ amdgpu_bo_unpin(new_abo);
unreserve:
amdgpu_bo_unreserve(new_abo);
@@ -282,7 +278,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
- return ret;
+ goto out;
ret = drm_crtc_helper_set_config(set, ctx);
@@ -292,7 +288,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
pm_runtime_mark_last_busy(dev->dev);
- adev = dev->dev_private;
+ adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
take the current one */
if (active && !adev->have_disp_power_ref) {
@@ -306,6 +302,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
adev->have_disp_power_ref = false;
}
+out:
/* drop the power reference we got coming in here */
pm_runtime_put_autosuspend(dev->dev);
return ret;
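
The new "goto out" matters because pm_runtime_get_sync() takes a usage-count reference even when it fails, so an early return would leak it. A hedged sketch of the balanced pattern (do_modeset() is a placeholder, not a real helper):

    ret = pm_runtime_get_sync(dev->dev);
    if (ret < 0)
            goto out;               /* not "return ret": the reference is already held */

    ret = do_modeset();             /* stands in for drm_crtc_helper_set_config() etc. */
out:
    pm_runtime_put_autosuspend(dev->dev);   /* dropped on every path */
    return ret;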
@@ -443,10 +440,6 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
drm_connector_list_iter_end(&iter);
}
-/**
- * amdgpu_display_ddc_probe
- *
- */
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
bool use_aux)
{
@@ -511,33 +504,674 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
* to avoid hang caused by placement of scanout BO in GTT on certain
* APUs. So force the BO placement to VRAM in case this architecture
* will not allow USWC mappings.
- * Also, don't allow GTT domain if the BO doens't have USWC falg set.
+ * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
*/
- if (adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type < CHIP_RAVEN &&
- (adev->flags & AMD_IS_APU) &&
- (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+ if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
- amdgpu_device_asic_has_dc_support(adev->asic_type))
- domain |= AMDGPU_GEM_DOMAIN_GTT;
+ amdgpu_device_asic_has_dc_support(adev->asic_type)) {
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ case CHIP_RAVEN:
+ /* enable S/G on PCO and RV2 */
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO))
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+ case CHIP_RENOIR:
+ case CHIP_VANGOGH:
+ case CHIP_YELLOW_CARP:
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ break;
+
+ default:
+ break;
+ }
+ }
#endif
return domain;
}
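
A hypothetical caller would use the returned mask to decide whether a scanout BO may be placed in GTT (scatter/gather display) or must stay in VRAM; a sketch under that assumption:

    /* Sketch: pick an initial placement for a scanout BO. */
    uint32_t domains = amdgpu_display_supported_domains(adev, bo->flags);
    uint32_t initial_domain = (domains & AMDGPU_GEM_DOMAIN_GTT) ?
            AMDGPU_GEM_DOMAIN_GTT :         /* S/G display is possible */
            AMDGPU_GEM_DOMAIN_VRAM;         /* scanout must come from VRAM */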
-int amdgpu_display_framebuffer_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+static const struct drm_format_info dcc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
+ .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+};
+
+static const struct drm_format_info dcc_retile_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
+ .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+};
+
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+ int num_formats, u32 format)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+const struct drm_format_info *
+amdgpu_lookup_format_info(u32 format, uint64_t modifier)
+{
+ if (!IS_AMD_FMT_MOD(modifier))
+ return NULL;
+
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
+ return lookup_format_info(dcc_retile_formats,
+ ARRAY_SIZE(dcc_retile_formats),
+ format);
+
+ if (AMD_FMT_MOD_GET(DCC, modifier))
+ return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
+ format);
+
+ /* returning NULL will cause the default format structs to be used. */
+ return NULL;
+}
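
As the closing comment notes, a NULL return means the core's default format descriptions apply. A hypothetical caller-side fallback, mirroring what the DRM core's drm_get_format_info() does with driver-supplied info:

    const struct drm_format_info *info =
            amdgpu_lookup_format_info(mode_cmd->pixel_format, mode_cmd->modifier[0]);

    if (!info)
            info = drm_format_info(mode_cmd->pixel_format); /* default structs */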
+
+
+/*
+ * Tries to extract the renderable DCC offset from the opaque metadata attached
+ * to the buffer.
+ */
+static int
+extract_render_dcc_offset(struct amdgpu_device *adev,
+ struct drm_gem_object *obj,
+ uint64_t *offset)
{
+ struct amdgpu_bo *rbo;
+ int r = 0;
+ uint32_t metadata[10]; /* Something that fits a descriptor + header. */
+ uint32_t size;
+
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+ /* Don't show error message when returning -ERESTARTSYS */
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
+ amdgpu_bo_unreserve(rbo);
+
+ if (r)
+ return r;
+
+ /*
+ * The first word is the metadata version, and we need space for at least
+ * the version + pci vendor+device id + 8 words for a descriptor.
+ */
+ if (size < 40 || metadata[0] != 1)
+ return -EINVAL;
+
+ if (adev->family >= AMDGPU_FAMILY_NV) {
+ /* resource word 6/7 META_DATA_ADDRESS{_LO} */
+ *offset = ((u64)metadata[9] << 16u) |
+ ((metadata[8] & 0xFF000000u) >> 16);
+ } else {
+ /* resource word 5/7 META_DATA_ADDRESS */
+ *offset = ((u64)metadata[9] << 8u) |
+ ((u64)(metadata[7] & 0x1FE0000u) << 23);
+ }
+
+ return 0;
+}
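
On GFX10+ (AMDGPU_FAMILY_NV) the address is reassembled from the top byte of word 8 and all of word 9. A self-contained worked example with made-up metadata words (layout assumed to match the branch above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t metadata8 = 0xAB000000u; /* META_DATA_ADDRESS_LO in bits 31:24 */
            uint32_t metadata9 = 0x00001234u; /* high address bits */

            uint64_t offset = ((uint64_t)metadata9 << 16) |
                              ((metadata8 & 0xFF000000u) >> 16);

            printf("render DCC offset: 0x%llx\n", (unsigned long long)offset);
            /* prints: render DCC offset: 0x1234ab00 */
            return 0;
    }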
+
+static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
+{
+ struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
+ uint64_t modifier = 0;
+
+ if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
+ modifier = DRM_FORMAT_MOD_LINEAR;
+ } else {
+ int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
+ bool has_xor = swizzle >= 16;
+ int block_size_bits;
+ int version;
+ int pipe_xor_bits = 0;
+ int bank_xor_bits = 0;
+ int packers = 0;
+ int rb = 0;
+ int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
+ uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);
+
+ switch (swizzle >> 2) {
+ case 0: /* 256B */
+ block_size_bits = 8;
+ break;
+ case 1: /* 4KiB */
+ case 5: /* 4KiB _X */
+ block_size_bits = 12;
+ break;
+ case 2: /* 64KiB */
+ case 4: /* 64 KiB _T */
+ case 6: /* 64 KiB _X */
+ block_size_bits = 16;
+ break;
+ default:
+ /* RESERVED or VAR */
+ return -EINVAL;
+ }
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
+ else if (adev->family == AMDGPU_FAMILY_NV)
+ version = AMD_FMT_MOD_TILE_VER_GFX10;
+ else
+ version = AMD_FMT_MOD_TILE_VER_GFX9;
+
+ switch (swizzle & 3) {
+ case 0: /* Z microtiling */
+ return -EINVAL;
+ case 1: /* S microtiling */
+ if (!has_xor)
+ version = AMD_FMT_MOD_TILE_VER_GFX9;
+ break;
+ case 2:
+ if (!has_xor && afb->base.format->cpp[0] != 4)
+ version = AMD_FMT_MOD_TILE_VER_GFX9;
+ break;
+ case 3:
+ break;
+ }
+
+ if (has_xor) {
+ switch (version) {
+ case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
+ pipe_xor_bits = min(block_size_bits - 8, pipes);
+ packers = min(block_size_bits - 8 - pipe_xor_bits,
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
+ break;
+ case AMD_FMT_MOD_TILE_VER_GFX10:
+ pipe_xor_bits = min(block_size_bits - 8, pipes);
+ break;
+ case AMD_FMT_MOD_TILE_VER_GFX9:
+ rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
+ pipe_xor_bits = min(block_size_bits - 8, pipes +
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
+ bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
+ break;
+ }
+ }
+
+ modifier = AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
+ AMD_FMT_MOD_SET(TILE_VERSION, version) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, packers);
+
+ if (dcc_offset != 0) {
+ bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
+ bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
+ const struct drm_format_info *format_info;
+ u64 render_dcc_offset;
+
+ /* Enable constant encode on RAVEN2 and later. */
+ bool dcc_constant_encode = adev->asic_type > CHIP_RAVEN ||
+ (adev->asic_type == CHIP_RAVEN &&
+ adev->external_rev_id >= 0x81);
+
+ int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
+ dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
+ AMD_FMT_MOD_DCC_BLOCK_256B;
+
+ modifier |= AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);
+
+ afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
+ afb->base.pitches[1] =
+ AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;
+
+ /*
+ * If the userspace driver uses retiling the tiling flags do not contain
+ * info on the renderable DCC buffer. Luckily the opaque metadata contains
+ * the info so we can try to extract it. The kernel does not use this info
+ * but we should convert it to a modifier plane for getfb2, so the
+ * userspace driver that gets it doesn't have to juggle around another DCC
+ * plane internally.
+ */
+ if (extract_render_dcc_offset(adev, afb->base.obj[0],
+ &render_dcc_offset) == 0 &&
+ render_dcc_offset != 0 &&
+ render_dcc_offset != afb->base.offsets[1] &&
+ render_dcc_offset < UINT_MAX) {
+ uint32_t dcc_block_bits; /* of base surface data */
+
+ modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
+ afb->base.offsets[2] = render_dcc_offset;
+
+ if (adev->family >= AMDGPU_FAMILY_NV) {
+ int extra_pipe = 0;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
+ pipes == packers && pipes > 1)
+ extra_pipe = 1;
+
+ dcc_block_bits = max(20, 16 + pipes + extra_pipe);
+ } else {
+ modifier |= AMD_FMT_MOD_SET(RB, rb) |
+ AMD_FMT_MOD_SET(PIPE, pipes);
+ dcc_block_bits = max(20, 18 + rb);
+ }
+
+ dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
+ afb->base.pitches[2] = ALIGN(afb->base.width,
+ 1u << ((dcc_block_bits + 1) / 2));
+ }
+ format_info = amdgpu_lookup_format_info(afb->base.format->format,
+ modifier);
+ if (!format_info)
+ return -EINVAL;
+
+ afb->base.format = format_info;
+ }
+ }
+
+ afb->base.modifier = modifier;
+ afb->base.flags |= DRM_MODE_FB_MODIFIERS;
+ return 0;
+}
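
The packed modifier can be unpacked again with the AMD_FMT_MOD_GET() accessors from drm_fourcc.h; a short sketch (the fields inspected here are just examples):

    /* Sketch: inspect a modifier produced by the conversion above. */
    if (IS_AMD_FMT_MOD(modifier)) {
            unsigned int tile = AMD_FMT_MOD_GET(TILE, modifier);
            unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
            bool has_dcc = AMD_FMT_MOD_GET(DCC, modifier);

            drm_dbg_kms(dev, "tile %u, tile version %u, dcc %d\n", tile, ver, has_dcc);
    }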
+
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+ u64 micro_tile_mode;
+
+ /* Zero swizzle mode means linear */
+ if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+ return 0;
+
+ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+ switch (micro_tile_mode) {
+ case 0: /* DISPLAY */
+ case 3: /* RENDER */
+ return 0;
+ default:
+ drm_dbg_kms(afb->base.dev,
+ "Micro tile mode %llu not supported for scanout\n",
+ micro_tile_mode);
+ return -EINVAL;
+ }
+}
+
+static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+ unsigned int *width, unsigned int *height)
+{
+ unsigned int cpp_log2 = ilog2(cpp);
+ unsigned int pixel_log2 = block_log2 - cpp_log2;
+ unsigned int width_log2 = (pixel_log2 + 1) / 2;
+ unsigned int height_log2 = pixel_log2 - width_log2;
+
+ *width = 1 << width_log2;
+ *height = 1 << height_log2;
+}
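
A worked example: for a 64 KiB block (block_log2 = 16) of 32 bpp pixels (cpp = 4), pixel_log2 = 14, so width_log2 = 7 and height_log2 = 7, i.e. the block covers 128x128 pixels:

    unsigned int w, h;

    get_block_dimensions(16, 4, &w, &h);    /* w == 128, h == 128 */
    get_block_dimensions(16, 2, &w, &h);    /* cpp = 2: w == 256, h == 128 */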
+
+static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
+ bool pipe_aligned)
+{
+ unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+ switch (ver) {
+ case AMD_FMT_MOD_TILE_VER_GFX9: {
+ /*
+ * TODO: for pipe aligned we may need to check the alignment of the
+ * total size of the surface, which may need to be bigger than the
+ * natural alignment due to some HW workarounds
+ */
+ return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
+ }
+ case AMD_FMT_MOD_TILE_VER_GFX10:
+ case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
+ int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+
+ if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
+ AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
+ ++pipes_log2;
+
+ return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
+ }
+ default:
+ return 0;
+ }
+}
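
The return value is the log2 of how many bytes of base surface data one DCC block covers. For instance, on GFX9 with rb_aligned and an RB field of 2, this is max(10 + 2, 12) = 12, i.e. 4 KiB:

    /* Sketch: DCC block coverage for a GFX9 modifier with RB = 2. */
    unsigned int bits = get_dcc_block_size(modifier, true, false);  /* 12 */
    unsigned int bytes = 1u << bits;                                /* 4096 */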
+
+static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
+ const struct drm_format_info *format,
+ unsigned int block_width, unsigned int block_height,
+ unsigned int block_size_log2)
+{
+ unsigned int width = rfb->base.width /
+ ((plane && plane < format->num_planes) ? format->hsub : 1);
+ unsigned int height = rfb->base.height /
+ ((plane && plane < format->num_planes) ? format->vsub : 1);
+ unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
+ unsigned int block_pitch = block_width * cpp;
+ unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
+ unsigned int block_size = 1 << block_size_log2;
+ uint64_t size;
+
+ if (rfb->base.pitches[plane] % block_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is not a multiple of block pitch %d\n",
+ rfb->base.pitches[plane], plane, block_pitch);
+ return -EINVAL;
+ }
+ if (rfb->base.pitches[plane] < min_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is less than minimum pitch %d\n",
+ rfb->base.pitches[plane], plane, min_pitch);
+ return -EINVAL;
+ }
+
+ /* Force at least natural alignment. */
+ if (rfb->base.offsets[plane] % block_size) {
+ drm_dbg_kms(rfb->base.dev,
+ "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
+ rfb->base.offsets[plane], plane, block_size);
+ return -EINVAL;
+ }
+
+ size = rfb->base.offsets[plane] +
+ (uint64_t)rfb->base.pitches[plane] / block_pitch *
+ block_size * DIV_ROUND_UP(height, block_height);
+
+ if (rfb->base.obj[0]->size < size) {
+ drm_dbg_kms(rfb->base.dev,
+ "BO size 0x%zx is less than 0x%llx required for plane %d\n",
+ rfb->base.obj[0]->size, size, plane);
+ return -EINVAL;
+ }
+
+ return 0;
+}
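
A worked example for plane 0 of a 1920x1080 XRGB8888 linear framebuffer (the linear-block parameters come from amdgpu_display_verify_sizes() below): block_width = 256 / 4 = 64 pixels, block_height = 1, block_size = 256 bytes, so:

    unsigned int min_pitch = ALIGN(1920 * 4, 64 * 4);               /* 7680 */
    uint64_t min_size = (uint64_t)min_pitch / 256 * 256 * 1080;     /* 8294400 bytes */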
+
+
+static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+{
+ const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
+ uint64_t modifier = rfb->base.modifier;
int ret;
+ unsigned int i, block_width, block_height, block_size_log2;
+
+ if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+ return 0;
+
+ for (i = 0; i < format_info->num_planes; ++i) {
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ block_width = 256 / format_info->cpp[i];
+ block_height = 1;
+ block_size_log2 = 8;
+ } else {
+ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+ switch ((swizzle & ~3) + 1) {
+ case DC_SW_256B_S:
+ block_size_log2 = 8;
+ break;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ block_size_log2 = 12;
+ break;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_T:
+ case DC_SW_64KB_S_X:
+ block_size_log2 = 16;
+ break;
+ default:
+ drm_dbg_kms(rfb->base.dev,
+ "Swizzle mode with unknown block size: %d\n", swizzle);
+ return -EINVAL;
+ }
+
+ get_block_dimensions(block_size_log2, format_info->cpp[i],
+ &block_width, &block_height);
+ }
+
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ if (AMD_FMT_MOD_GET(DCC, modifier)) {
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+ block_size_log2 = get_dcc_block_size(modifier, false, false);
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height,
+ block_size_log2);
+ if (ret)
+ return ret;
+
+ ++i;
+ block_size_log2 = get_dcc_block_size(modifier, true, true);
+ } else {
+ bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+
+ block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
+ }
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+ uint64_t *tiling_flags, bool *tmz_surface)
+{
+ struct amdgpu_bo *rbo;
+ int r;
+
+ if (!amdgpu_fb) {
+ *tiling_flags = 0;
+ *tmz_surface = false;
+ return 0;
+ }
+
+ rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
+ r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+ /* Don't show error message when returning -ERESTARTSYS */
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ if (tiling_flags)
+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+ if (tmz_surface)
+ *tmz_surface = amdgpu_bo_encrypted(rbo);
+
+ amdgpu_bo_unreserve(rbo);
+
+ return r;
+}
+
+int amdgpu_display_gem_fb_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ if (ret)
+ goto err;
+
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret) {
- rfb->base.obj[0] = NULL;
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
+ rfb->base.obj[0] = NULL;
+ return ret;
+}
+
+int amdgpu_display_gem_fb_verify_and_init(
+ struct drm_device *dev, struct amdgpu_framebuffer *rfb,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ rfb->base.obj[0] = obj;
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ /* Verify that the modifier is supported. */
+ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ drm_dbg_kms(dev,
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ if (ret)
+ goto err;
+
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
+ rfb->base.obj[0] = NULL;
+ return ret;
+}
+
+int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret, i;
+
+ /*
+ * This needs to happen before modifier conversion as that might change
+ * the number of planes.
+ */
+ for (i = 1; i < rfb->base.format->num_planes; ++i) {
+ if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+ drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
+ i, mode_cmd->handles[0], mode_cmd->handles[i]);
+ ret = -EINVAL;
+ return ret;
+ }
+ }
+
+ ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
+ if (ret)
+ return ret;
+
+ if (!dev->mode_config.allow_fb_modifiers) {
+ drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+ "GFX9+ requires FB check based on format modifier\n");
+ ret = check_tiling_flags_gfx6(rfb);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->mode_config.allow_fb_modifiers &&
+ !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
+ ret = convert_tiling_flags_to_modifier(rfb);
+ if (ret) {
+ drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
+ rfb->tiling_flags);
+ return ret;
+ }
+ }
+
+ ret = amdgpu_display_verify_sizes(rfb);
+ if (ret)
return ret;
+
+ for (i = 0; i < rfb->base.format->num_planes; ++i) {
+ drm_gem_object_get(rfb->base.obj[0]);
+ rfb->base.obj[i] = rfb->base.obj[0];
}
+
return 0;
}
@@ -546,36 +1180,43 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
- dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
- "can't create framebuffer\n", mode_cmd->handles[0]);
+ drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
+ "can't create framebuffer\n", mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
- DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ bo = gem_to_amdgpu_bo(obj);
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
}
- ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+ ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
+ mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
+ drm_gem_object_put(obj);
return &amdgpu_fb->base;
}
@@ -607,51 +1248,51 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
int sz;
adev->mode_info.coherent_mode_property =
- drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
if (!adev->mode_info.coherent_mode_property)
return -ENOMEM;
adev->mode_info.load_detect_property =
- drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
if (!adev->mode_info.load_detect_property)
return -ENOMEM;
- drm_mode_create_scaling_mode_property(adev->ddev);
+ drm_mode_create_scaling_mode_property(adev_to_drm(adev));
sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
adev->mode_info.underscan_property =
- drm_property_create_enum(adev->ddev, 0,
- "underscan",
- amdgpu_underscan_enum_list, sz);
+ drm_property_create_enum(adev_to_drm(adev), 0,
+ "underscan",
+ amdgpu_underscan_enum_list, sz);
adev->mode_info.underscan_hborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan hborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan hborder", 0, 128);
if (!adev->mode_info.underscan_hborder_property)
return -ENOMEM;
adev->mode_info.underscan_vborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan vborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan vborder", 0, 128);
if (!adev->mode_info.underscan_vborder_property)
return -ENOMEM;
sz = ARRAY_SIZE(amdgpu_audio_enum_list);
adev->mode_info.audio_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"audio",
amdgpu_audio_enum_list, sz);
sz = ARRAY_SIZE(amdgpu_dither_enum_list);
adev->mode_info.dither_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"dither",
amdgpu_dither_enum_list, sz);
if (amdgpu_device_has_dc_support(adev)) {
adev->mode_info.abm_level_property =
- drm_property_create_range(adev->ddev, 0,
- "abm level", 0, 4);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "abm level", 0, 4);
if (!adev->mode_info.abm_level_property)
return -ENOMEM;
}
@@ -690,7 +1331,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_encoder *amdgpu_encoder;
struct drm_connector *connector;
- struct amdgpu_connector *amdgpu_connector;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
@@ -702,7 +1342,6 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
continue;
amdgpu_encoder = to_amdgpu_encoder(encoder);
connector = amdgpu_get_connector_for_encoder(encoder);
- amdgpu_connector = to_amdgpu_connector(connector);
/* set scaling */
if (amdgpu_encoder->rmx_type == RMX_OFF)
@@ -803,7 +1442,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -916,3 +1555,104 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
return AMDGPU_CRTC_IRQ_NONE;
}
}
+
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int r;
+
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
+ drm_modeset_unlock_all(dev);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+
+ if (fb == NULL || fb->obj[0] == NULL) {
+ continue;
+ }
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ /* don't unpin kernel fb objects */
+ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
+ }
+ }
+ return 0;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ int r;
+
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+ }
+
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+