diff options
Diffstat (limited to 'drivers/gpu/drm/amd')
173 files changed, 4946 insertions, 2562 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 42f882c633ee..bcef6ea4bcf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -81,6 +81,8 @@ #include "amdgpu_job.h" #include "amdgpu_bo_list.h" #include "amdgpu_gem.h" +#include "amdgpu_doorbell.h" +#include "amdgpu_amdkfd.h" #define MAX_GPU_INSTANCE 16 @@ -162,6 +164,7 @@ extern int amdgpu_si_support; extern int amdgpu_cik_support; #endif +#define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */ #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 @@ -234,7 +237,7 @@ enum amdgpu_kiq_irq { #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ -#define MAX_KIQ_REG_TRY 20 +#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */ int amdgpu_device_ip_set_clockgating_state(void *dev, enum amd_ip_block_type block_type, @@ -361,123 +364,6 @@ int amdgpu_fence_slab_init(void); void amdgpu_fence_slab_fini(void); /* - * GPU doorbell structures, functions & helpers - */ -typedef enum _AMDGPU_DOORBELL_ASSIGNMENT -{ - AMDGPU_DOORBELL_KIQ = 0x000, - AMDGPU_DOORBELL_HIQ = 0x001, - AMDGPU_DOORBELL_DIQ = 0x002, - AMDGPU_DOORBELL_MEC_RING0 = 0x010, - AMDGPU_DOORBELL_MEC_RING1 = 0x011, - AMDGPU_DOORBELL_MEC_RING2 = 0x012, - AMDGPU_DOORBELL_MEC_RING3 = 0x013, - AMDGPU_DOORBELL_MEC_RING4 = 0x014, - AMDGPU_DOORBELL_MEC_RING5 = 0x015, - AMDGPU_DOORBELL_MEC_RING6 = 0x016, - AMDGPU_DOORBELL_MEC_RING7 = 0x017, - AMDGPU_DOORBELL_GFX_RING0 = 0x020, - AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0, - AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1, - AMDGPU_DOORBELL_IH = 0x1E8, - AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, - AMDGPU_DOORBELL_INVALID = 0xFFFF -} AMDGPU_DOORBELL_ASSIGNMENT; - -struct amdgpu_doorbell { - /* doorbell mmio */ - resource_size_t base; - resource_size_t size; - u32 __iomem *ptr; - u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ -}; - -/* - * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space - */ -typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT -{ - /* - * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in - * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range. - * Compute related doorbells are allocated from 0x00 to 0x8a - */ - - - /* kernel scheduling */ - AMDGPU_DOORBELL64_KIQ = 0x00, - - /* HSA interface queue and debug queue */ - AMDGPU_DOORBELL64_HIQ = 0x01, - AMDGPU_DOORBELL64_DIQ = 0x02, - - /* Compute engines */ - AMDGPU_DOORBELL64_MEC_RING0 = 0x03, - AMDGPU_DOORBELL64_MEC_RING1 = 0x04, - AMDGPU_DOORBELL64_MEC_RING2 = 0x05, - AMDGPU_DOORBELL64_MEC_RING3 = 0x06, - AMDGPU_DOORBELL64_MEC_RING4 = 0x07, - AMDGPU_DOORBELL64_MEC_RING5 = 0x08, - AMDGPU_DOORBELL64_MEC_RING6 = 0x09, - AMDGPU_DOORBELL64_MEC_RING7 = 0x0a, - - /* User queue doorbell range (128 doorbells) */ - AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b, - AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a, - - /* Graphics engine */ - AMDGPU_DOORBELL64_GFX_RING0 = 0x8b, - - /* - * Other graphics doorbells can be allocated here: from 0x8c to 0xdf - * Graphics voltage island aperture 1 - * default non-graphics QWORD index is 0xe0 - 0xFF inclusive - */ - - /* sDMA engines reserved from 0xe0 -0xef */ - AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0, - AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1, - AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8, - AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9, - - /* For vega10 sriov, the sdma doorbell must be fixed as follow - * to keep the same setting with host driver, or it will - * happen conflicts - */ - AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0, - AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1, - AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2, - AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3, - - /* Interrupt handler */ - AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */ - AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */ - AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */ - - /* VCN engine use 32 bits doorbell */ - AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ - AMDGPU_DOORBELL64_VCN2_3 = 0xF9, - AMDGPU_DOORBELL64_VCN4_5 = 0xFA, - AMDGPU_DOORBELL64_VCN6_7 = 0xFB, - - /* overlap the doorbell assignment with VCN as they are mutually exclusive - * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD - */ - AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, - AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, - AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, - AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, - - AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, - AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, - AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, - AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, - - AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, - AMDGPU_DOORBELL64_INVALID = 0xFFFF -} AMDGPU_DOORBELL64_ASSIGNMENT; - -/* * IRQS. */ @@ -654,6 +540,8 @@ struct amdgpu_asic_funcs { struct amdgpu_ring *ring); /* check if the asic needs a full reset of if soft reset will work */ bool (*need_full_reset)(struct amdgpu_device *adev); + /* initialize doorbell layout for specific asic*/ + void (*init_doorbell_index)(struct amdgpu_device *adev); }; /* @@ -976,6 +864,9 @@ struct amdgpu_device { /* GDS */ struct amdgpu_gds gds; + /* KFD */ + struct amdgpu_kfd_dev kfd; + /* display related functionality */ struct amdgpu_display_manager dm; @@ -989,9 +880,6 @@ struct amdgpu_device { atomic64_t visible_pin_size; atomic64_t gart_pin_size; - /* amdkfd interface */ - struct kfd_dev *kfd; - /* soc15 register offset based on ip, instance and segment */ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; @@ -1023,6 +911,10 @@ struct amdgpu_device { unsigned long last_mm_index; bool in_gpu_reset; struct mutex lock_reset; + struct amdgpu_doorbell_index doorbell_index; + + int asic_reset_res; + struct work_struct xgmi_reset_work; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1047,11 +939,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); -u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); -void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); -u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); -void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); - bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type); bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); @@ -1113,11 +1000,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) -#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) -#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) -#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) -#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v)) - #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK @@ -1159,6 +1041,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) +#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) /* Common functions */ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); @@ -1219,12 +1102,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); - -/* - * functions used by amdgpu_xgmi.c - */ -int amdgpu_xgmi_add_device(struct amdgpu_device *adev); - /* * functions used by amdgpu_encoder.c */ @@ -1252,6 +1129,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, u8 perf_req, bool advertise); int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); + +void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, + struct amdgpu_dm_backlight_caps *caps); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 7f0afc526419..4376b17ca594 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg { }; struct amdgpu_atif_notifications { - bool display_switch; - bool expansion_mode_change; bool thermal_state; bool forced_power_state; bool system_power_state; - bool display_conf_change; - bool px_gfx_switch; bool brightness_change; bool dgpu_display_event; + bool gpu_package_power_limit; }; struct amdgpu_atif_functions { bool system_params; bool sbios_requests; - bool select_active_disp; - bool lid_state; - bool get_tv_standard; - bool set_tv_standard; - bool get_panel_expansion_mode; - bool set_panel_expansion_mode; bool temperature_change; - bool graphics_device_types; + bool query_backlight_transfer_characteristics; + bool ready_to_undock; + bool external_gpu_information; }; struct amdgpu_atif { @@ -72,6 +65,7 @@ struct amdgpu_atif { struct amdgpu_atif_functions functions; struct amdgpu_atif_notification_cfg notification_cfg; struct amdgpu_encoder *encoder_for_bl; + struct amdgpu_dm_backlight_caps backlight_caps; }; /* Call the ATIF method @@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, */ static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask) { - n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED; - n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED; n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; - n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED; - n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED; n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; + n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED; } /** @@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas { f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED; f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED; - f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED; - f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED; - f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED; - f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED; - f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED; - f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED; f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED; - f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED; + f->query_backlight_transfer_characteristics = + mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED; + f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED; + f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED; } /** @@ -311,6 +299,65 @@ out: } /** + * amdgpu_atif_query_backlight_caps - get min and max backlight input signal + * + * @handle: acpi handle + * + * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function + * to determine the acceptable range of backlight values + * + * Backlight_caps.caps_valid will be set to true if the query is successful + * + * The input signals are in range 0-255 + * + * This function assumes the display with backlight is the first LCD + * + * Returns 0 on success, error on failure. + */ +static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif) +{ + union acpi_object *info; + struct atif_qbtc_output characteristics; + struct atif_qbtc_arguments arguments; + struct acpi_buffer params; + size_t size; + int err = 0; + + arguments.size = sizeof(arguments); + arguments.requested_display = ATIF_QBTC_REQUEST_LCD1; + + params.length = sizeof(arguments); + params.pointer = (void *)&arguments; + + info = amdgpu_atif_call(atif, + ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS, + ¶ms); + if (!info) { + err = -EIO; + goto out; + } + + size = *(u16 *) info->buffer.pointer; + if (size < 10) { + err = -EINVAL; + goto out; + } + + memset(&characteristics, 0, sizeof(characteristics)); + size = min(sizeof(characteristics), size); + memcpy(&characteristics, info->buffer.pointer, size); + + atif->backlight_caps.caps_valid = true; + atif->backlight_caps.min_input_signal = + characteristics.min_input_signal; + atif->backlight_caps.max_input_signal = + characteristics.max_input_signal; +out: + kfree(info); + return err; +} + +/** * amdgpu_atif_get_sbios_requests - get requested sbios event * * @handle: acpi handle @@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) } } + if (atif->functions.query_backlight_transfer_characteristics) { + ret = amdgpu_atif_query_backlight_caps(atif); + if (ret) { + DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n", + ret); + atif->backlight_caps.caps_valid = false; + } + } else { + atif->backlight_caps.caps_valid = false; + } + out: adev->acpi_nb.notifier_call = amdgpu_acpi_event; register_acpi_notifier(&adev->acpi_nb); @@ -806,6 +864,18 @@ out: return ret; } +void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, + struct amdgpu_dm_backlight_caps *caps) +{ + if (!adev->atif) { + caps->caps_valid = false; + return; + } + caps->caps_valid = adev->atif->backlight_caps.caps_valid; + caps->min_input_signal = adev->atif->backlight_caps.min_input_signal; + caps->max_input_signal = adev->atif->backlight_caps.max_input_signal; +} + /** * amdgpu_acpi_fini - tear down driver acpi support * @@ -816,6 +886,5 @@ out: void amdgpu_acpi_fini(struct amdgpu_device *adev) { unregister_acpi_notifier(&adev->acpi_nb); - if (adev->atif) - kfree(adev->atif); + kfree(adev->atif); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bcf1666fb31d..2dfaf158ef07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -26,15 +26,26 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" #include <linux/module.h> +#include <linux/dma-buf.h> const struct kgd2kfd_calls *kgd2kfd; static const unsigned int compute_vmid_bitmap = 0xFF00; +/* Total memory size in system memory and all GPU VRAM. Used to + * estimate worst case amount of memory to reserve for page tables + */ +uint64_t amdgpu_amdkfd_total_mem_size; + int amdgpu_amdkfd_init(void) { + struct sysinfo si; int ret; + si_meminfo(&si); + amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh; + amdgpu_amdkfd_total_mem_size *= si.mem_unit; + #ifdef CONFIG_HSA_AMD ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd); if (ret) @@ -73,9 +84,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) case CHIP_FIJI: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); @@ -85,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) return; } - adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev, + adev->pdev, kfd2kgd); + + if (adev->kfd.dev) + amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; } /** @@ -126,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) { int i, n; int last_valid_bit; - if (adev->kfd) { + + if (adev->kfd.dev) { struct kgd2kfd_shared_resources gpu_resources = { .compute_vmid_bitmap = compute_vmid_bitmap, .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec, @@ -165,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) &gpu_resources.doorbell_start_offset); if (adev->asic_type < CHIP_VEGA10) { - kgd2kfd->device_init(adev->kfd, &gpu_resources); + kgd2kfd->device_init(adev->kfd.dev, &gpu_resources); return; } @@ -179,25 +196,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) * process in case of 64-bit doorbells so we * can use each doorbell assignment twice. */ - if (adev->asic_type == CHIP_VEGA10) { - gpu_resources.sdma_doorbell[0][i] = - AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1); - gpu_resources.sdma_doorbell[0][i+1] = - AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1); - gpu_resources.sdma_doorbell[1][i] = - AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1); - gpu_resources.sdma_doorbell[1][i+1] = - AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1); - } else { - gpu_resources.sdma_doorbell[0][i] = - AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1); - gpu_resources.sdma_doorbell[0][i+1] = - AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1); - gpu_resources.sdma_doorbell[1][i] = - AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1); - gpu_resources.sdma_doorbell[1][i+1] = - AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1); - } + gpu_resources.sdma_doorbell[0][i] = + adev->doorbell_index.sdma_engine0 + (i >> 1); + gpu_resources.sdma_doorbell[0][i+1] = + adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1); + gpu_resources.sdma_doorbell[1][i] = + adev->doorbell_index.sdma_engine1 + (i >> 1); + gpu_resources.sdma_doorbell[1][i+1] = + adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1); } /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for * SDMA, IH and VCN. So don't use them for the CP. @@ -205,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) gpu_resources.reserved_doorbell_mask = 0x1e0; gpu_resources.reserved_doorbell_val = 0x0e0; - kgd2kfd->device_init(adev->kfd, &gpu_resources); + kgd2kfd->device_init(adev->kfd.dev, &gpu_resources); } } void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev) { - if (adev->kfd) { - kgd2kfd->device_exit(adev->kfd); - adev->kfd = NULL; + if (adev->kfd.dev) { + kgd2kfd->device_exit(adev->kfd.dev); + adev->kfd.dev = NULL; } } void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry) { - if (adev->kfd) - kgd2kfd->interrupt(adev->kfd, ih_ring_entry); + if (adev->kfd.dev) + kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry); } void amdgpu_amdkfd_suspend(struct amdgpu_device *adev) { - if (adev->kfd) - kgd2kfd->suspend(adev->kfd); + if (adev->kfd.dev) + kgd2kfd->suspend(adev->kfd.dev); } int amdgpu_amdkfd_resume(struct amdgpu_device *adev) { int r = 0; - if (adev->kfd) - r = kgd2kfd->resume(adev->kfd); + if (adev->kfd.dev) + r = kgd2kfd->resume(adev->kfd.dev); return r; } @@ -244,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev) { int r = 0; - if (adev->kfd) - r = kgd2kfd->pre_reset(adev->kfd); + if (adev->kfd.dev) + r = kgd2kfd->pre_reset(adev->kfd.dev); return r; } @@ -254,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev) { int r = 0; - if (adev->kfd) - r = kgd2kfd->post_reset(adev->kfd); + if (adev->kfd.dev) + r = kgd2kfd->post_reset(adev->kfd.dev); return r; } @@ -428,6 +434,62 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) cu_info->lds_size = acu_info.lds_size; } +int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, + struct kgd_dev **dma_buf_kgd, + uint64_t *bo_size, void *metadata_buffer, + size_t buffer_size, uint32_t *metadata_size, + uint32_t *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + struct dma_buf *dma_buf; + struct drm_gem_object *obj; + struct amdgpu_bo *bo; + uint64_t metadata_flags; + int r = -EINVAL; + + dma_buf = dma_buf_get(dma_buf_fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + + if (dma_buf->ops != &amdgpu_dmabuf_ops) + /* Can't handle non-graphics buffers */ + goto out_put; + + obj = dma_buf->priv; + if (obj->dev->driver != adev->ddev->driver) + /* Can't handle buffers from different drivers */ + goto out_put; + + adev = obj->dev->dev_private; + bo = gem_to_amdgpu_bo(obj); + if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT))) + /* Only VRAM and GTT BOs are supported */ + goto out_put; + + r = 0; + if (dma_buf_kgd) + *dma_buf_kgd = (struct kgd_dev *)adev; + if (bo_size) + *bo_size = amdgpu_bo_size(bo); + if (metadata_size) + *metadata_size = bo->metadata_size; + if (metadata_buffer) + r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size, + metadata_size, &metadata_flags); + if (flags) { + *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? + ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT; + + if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) + *flags |= ALLOC_MEM_FLAGS_PUBLIC; + } + +out_put: + dma_buf_put(dma_buf); + return r; +} + uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; @@ -510,7 +572,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) { - if (adev->kfd) { + if (adev->kfd.dev) { if ((1 << vmid) & compute_vmid_bitmap) return true; } @@ -524,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) return false; } -void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo) +void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) { } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index bcf587b4ba98..70429f7aa9a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -27,7 +27,6 @@ #include <linux/types.h> #include <linux/mm.h> -#include <linux/mmu_context.h> #include <linux/workqueue.h> #include <kgd_kfd_interface.h> #include <drm/ttm/ttm_execbuf_util.h> @@ -35,6 +34,7 @@ #include "amdgpu_vm.h" extern const struct kgd2kfd_calls *kgd2kfd; +extern uint64_t amdgpu_amdkfd_total_mem_size; struct amdgpu_device; @@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence { char timeline_name[TASK_COMM_LEN]; }; +struct amdgpu_kfd_dev { + struct kfd_dev *dev; + uint64_t vram_used; +}; + struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, struct mm_struct *mm); bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); @@ -144,6 +149,11 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd); uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd); void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info); +int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, + struct kgd_dev **dmabuf_kgd, + uint64_t *bo_size, void *metadata_buffer, + size_t buffer_size, uint32_t *metadata_size, + uint32_t *flags); uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd); @@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info, int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, struct kfd_vm_fault_info *info); +int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, + struct dma_buf *dmabuf, + uint64_t va, void *vm, + struct kgd_mem **mem, uint64_t *size, + uint64_t *mmap_offset); + void amdgpu_amdkfd_gpuvm_init_mem_limits(void); -void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo); +void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 72a357dae070..ff7fac7df34b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -23,6 +23,7 @@ #include <linux/fdtable.h> #include <linux/uaccess.h> #include <linux/firmware.h> +#include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 0e2a56b6a9b6..56ea929f524b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -24,6 +24,7 @@ #include <linux/fdtable.h> #include <linux/uaccess.h> #include <linux/firmware.h> +#include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 03b604c96d94..5c51d4910650 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -26,6 +26,7 @@ #include <linux/fdtable.h> #include <linux/uaccess.h> #include <linux/firmware.h> +#include <linux/mmu_context.h> #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index df0a059565f9..be1ab43473c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -25,6 +25,7 @@ #include <linux/list.h> #include <linux/pagemap.h> #include <linux/sched/mm.h> +#include <linux/dma-buf.h> #include <drm/drmP.h> #include "amdgpu_object.h" #include "amdgpu_vm.h" @@ -46,9 +47,9 @@ /* Impose limit on how much memory KFD can use */ static struct { uint64_t max_system_mem_limit; - uint64_t max_userptr_mem_limit; + uint64_t max_ttm_mem_limit; int64_t system_mem_used; - int64_t userptr_mem_used; + int64_t ttm_mem_used; spinlock_t mem_limit_lock; } kfd_mem_limit; @@ -90,8 +91,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, } /* Set memory usage limits. Current, limits are - * System (kernel) memory - 3/8th System RAM - * Userptr memory - 3/4th System RAM + * System (TTM + userptr) memory - 3/4th System RAM + * TTM memory - 3/8th System RAM */ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) { @@ -103,48 +104,61 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) mem *= si.mem_unit; spin_lock_init(&kfd_mem_limit.mem_limit_lock); - kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3); - kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2); - pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n", + kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2); + kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3); + pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n", (kfd_mem_limit.max_system_mem_limit >> 20), - (kfd_mem_limit.max_userptr_mem_limit >> 20)); + (kfd_mem_limit.max_ttm_mem_limit >> 20)); } -static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain) +static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, + uint64_t size, u32 domain, bool sg) { - size_t acc_size; + size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed; + uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9; int ret = 0; acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, sizeof(struct amdgpu_bo)); - spin_lock(&kfd_mem_limit.mem_limit_lock); + vram_needed = 0; if (domain == AMDGPU_GEM_DOMAIN_GTT) { - if (kfd_mem_limit.system_mem_used + (acc_size + size) > - kfd_mem_limit.max_system_mem_limit) { - ret = -ENOMEM; - goto err_no_mem; - } - kfd_mem_limit.system_mem_used += (acc_size + size); - } else if (domain == AMDGPU_GEM_DOMAIN_CPU) { - if ((kfd_mem_limit.system_mem_used + acc_size > - kfd_mem_limit.max_system_mem_limit) || - (kfd_mem_limit.userptr_mem_used + (size + acc_size) > - kfd_mem_limit.max_userptr_mem_limit)) { - ret = -ENOMEM; - goto err_no_mem; - } - kfd_mem_limit.system_mem_used += acc_size; - kfd_mem_limit.userptr_mem_used += size; + /* TTM GTT memory */ + system_mem_needed = acc_size + size; + ttm_mem_needed = acc_size + size; + } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { + /* Userptr */ + system_mem_needed = acc_size + size; + ttm_mem_needed = acc_size; + } else { + /* VRAM and SG */ + system_mem_needed = acc_size; + ttm_mem_needed = acc_size; + if (domain == AMDGPU_GEM_DOMAIN_VRAM) + vram_needed = size; + } + + spin_lock(&kfd_mem_limit.mem_limit_lock); + + if ((kfd_mem_limit.system_mem_used + system_mem_needed > + kfd_mem_limit.max_system_mem_limit) || + (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > + kfd_mem_limit.max_ttm_mem_limit) || + (adev->kfd.vram_used + vram_needed > + adev->gmc.real_vram_size - reserved_for_pt)) { + ret = -ENOMEM; + } else { + kfd_mem_limit.system_mem_used += system_mem_needed; + kfd_mem_limit.ttm_mem_used += ttm_mem_needed; + adev->kfd.vram_used += vram_needed; } -err_no_mem: + spin_unlock(&kfd_mem_limit.mem_limit_lock); return ret; } -static void unreserve_system_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain) +static void unreserve_mem_limit(struct amdgpu_device *adev, + uint64_t size, u32 domain, bool sg) { size_t acc_size; @@ -154,35 +168,39 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev, spin_lock(&kfd_mem_limit.mem_limit_lock); if (domain == AMDGPU_GEM_DOMAIN_GTT) { kfd_mem_limit.system_mem_used -= (acc_size + size); - } else if (domain == AMDGPU_GEM_DOMAIN_CPU) { + kfd_mem_limit.ttm_mem_used -= (acc_size + size); + } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { + kfd_mem_limit.system_mem_used -= (acc_size + size); + kfd_mem_limit.ttm_mem_used -= acc_size; + } else { kfd_mem_limit.system_mem_used -= acc_size; - kfd_mem_limit.userptr_mem_used -= size; + kfd_mem_limit.ttm_mem_used -= acc_size; + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + adev->kfd.vram_used -= size; + WARN_ONCE(adev->kfd.vram_used < 0, + "kfd VRAM memory accounting unbalanced"); + } } WARN_ONCE(kfd_mem_limit.system_mem_used < 0, "kfd system memory accounting unbalanced"); - WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0, - "kfd userptr memory accounting unbalanced"); + WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, + "kfd TTM memory accounting unbalanced"); spin_unlock(&kfd_mem_limit.mem_limit_lock); } -void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo) +void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) { - spin_lock(&kfd_mem_limit.mem_limit_lock); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + u32 domain = bo->preferred_domains; + bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { - kfd_mem_limit.system_mem_used -= bo->tbo.acc_size; - kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo); - } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) { - kfd_mem_limit.system_mem_used -= - (bo->tbo.acc_size + amdgpu_bo_size(bo)); + domain = AMDGPU_GEM_DOMAIN_CPU; + sg = false; } - WARN_ONCE(kfd_mem_limit.system_mem_used < 0, - "kfd system memory accounting unbalanced"); - WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0, - "kfd userptr memory accounting unbalanced"); - spin_unlock(&kfd_mem_limit.mem_limit_lock); + unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); } @@ -395,23 +413,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) return 0; } -static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct dma_fence *f) -{ - int ret = amdgpu_sync_fence(adev, sync, f, false); - - /* Sync objects can't handle multiple GPUs (contexts) updating - * sync->last_vm_update. Fortunately we don't need it for - * KFD's purposes, so we can just drop that fence. - */ - if (sync->last_vm_update) { - dma_fence_put(sync->last_vm_update); - sync->last_vm_update = NULL; - } - - return ret; -} - static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) { struct amdgpu_bo *pd = vm->root.base.bo; @@ -422,7 +423,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) if (ret) return ret; - return sync_vm_fence(adev, sync, vm->last_update); + return amdgpu_sync_fence(NULL, sync, vm->last_update, false); } /* add_bo_to_vm - Add a BO to a VM @@ -536,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, struct amdgpu_bo *bo = mem->bo; INIT_LIST_HEAD(&entry->head); - entry->shared = true; + entry->num_shared = 1; entry->bo = &bo->tbo; mutex_lock(&process_info->lock); if (userptr) @@ -677,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, ctx->kfd_bo.priority = 0; ctx->kfd_bo.tv.bo = &bo->tbo; - ctx->kfd_bo.tv.shared = true; + ctx->kfd_bo.tv.num_shared = 1; ctx->kfd_bo.user_pages = NULL; list_add(&ctx->kfd_bo.tv.head, &ctx->list); @@ -741,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, ctx->kfd_bo.priority = 0; ctx->kfd_bo.tv.bo = &bo->tbo; - ctx->kfd_bo.tv.shared = true; + ctx->kfd_bo.tv.num_shared = 1; ctx->kfd_bo.user_pages = NULL; list_add(&ctx->kfd_bo.tv.head, &ctx->list); @@ -826,7 +827,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, /* Add the eviction fence back */ amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true); - sync_vm_fence(adev, sync, bo_va->last_pt_update); + amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); return 0; } @@ -851,7 +852,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, return ret; } - return sync_vm_fence(adev, sync, bo_va->last_pt_update); + return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); } static int map_bo_to_gpuvm(struct amdgpu_device *adev, @@ -886,6 +887,24 @@ update_gpuvm_pte_failed: return ret; } +static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size) +{ + struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL); + + if (!sg) + return NULL; + if (sg_alloc_table(sg, 1, GFP_KERNEL)) { + kfree(sg); + return NULL; + } + sg->sgl->dma_address = addr; + sg->sgl->length = size; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->sgl->dma_length = size; +#endif + return sg; +} + static int process_validate_vms(struct amdkfd_process_info *process_info) { struct amdgpu_vm *peer_vm; @@ -901,6 +920,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info) return 0; } +static int process_sync_pds_resv(struct amdkfd_process_info *process_info, + struct amdgpu_sync *sync) +{ + struct amdgpu_vm *peer_vm; + int ret; + + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) { + struct amdgpu_bo *pd = peer_vm->root.base.bo; + + ret = amdgpu_sync_resv(NULL, + sync, pd->tbo.resv, + AMDGPU_FENCE_OWNER_UNDEFINED, false); + if (ret) + return ret; + } + + return 0; +} + static int process_update_pds(struct amdkfd_process_info *process_info, struct amdgpu_sync *sync) { @@ -1149,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + enum ttm_bo_type bo_type = ttm_bo_type_device; + struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; struct amdgpu_bo_param bp; @@ -1177,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if (!offset || !*offset) return -EINVAL; user_addr = *offset; + } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) { + domain = AMDGPU_GEM_DOMAIN_GTT; + alloc_domain = AMDGPU_GEM_DOMAIN_CPU; + bo_type = ttm_bo_type_sg; + alloc_flags = 0; + if (size > UINT_MAX) + return -EINVAL; + sg = create_doorbell_sg(*offset, size); + if (!sg) + return -ENOMEM; } else { return -EINVAL; } *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); - if (!*mem) - return -ENOMEM; + if (!*mem) { + ret = -ENOMEM; + goto err; + } INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); @@ -1199,7 +1252,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( byte_align = (adev->family == AMDGPU_FAMILY_VI && adev->asic_type != CHIP_FIJI && adev->asic_type != CHIP_POLARIS10 && - adev->asic_type != CHIP_POLARIS11) ? + adev->asic_type != CHIP_POLARIS11 && + adev->asic_type != CHIP_POLARIS12) ? VI_BO_SIZE_ALIGN : 1; mapping_flags = AMDGPU_VM_PAGE_READABLE; @@ -1215,10 +1269,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); if (ret) { pr_debug("Insufficient system memory\n"); - goto err_reserve_system_mem; + goto err_reserve_limit; } pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", @@ -1229,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( bp.byte_align = byte_align; bp.domain = alloc_domain; bp.flags = alloc_flags; - bp.type = ttm_bo_type_device; + bp.type = bo_type; bp.resv = NULL; ret = amdgpu_bo_create(adev, &bp, &bo); if (ret) { @@ -1237,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( domain_string(alloc_domain), ret); goto err_bo_create; } + if (bo_type == ttm_bo_type_sg) { + bo->tbo.sg = sg; + bo->tbo.ttm->sg = sg; + } bo->kfd_bo = *mem; (*mem)->bo = bo; if (user_addr) @@ -1266,12 +1324,17 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( allocate_init_user_pages_failed: amdgpu_bo_unref(&bo); /* Don't unreserve system mem limit twice */ - goto err_reserve_system_mem; + goto err_reserve_limit; err_bo_create: - unreserve_system_mem_limit(adev, size, alloc_domain); -err_reserve_system_mem: + unreserve_mem_limit(adev, size, alloc_domain, !!sg); +err_reserve_limit: mutex_destroy(&(*mem)->lock); kfree(*mem); +err: + if (sg) { + sg_free_table(sg); + kfree(sg); + } return ret; } @@ -1341,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( /* Free the sync object */ amdgpu_sync_free(&mem->sync); + /* If the SG is not NULL, it's one we created for a doorbell + * BO. We need to free it. + */ + if (mem->bo->tbo.sg) { + sg_free_table(mem->bo->tbo.sg); + kfree(mem->bo->tbo.sg); + } + /* Free the BO*/ amdgpu_bo_unref(&mem->bo); mutex_destroy(&mem->lock); @@ -1405,7 +1476,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( * the queues are still stopped and we can leave mapping for * the next restore worker */ - if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM) + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && + bo->tbo.mem.mem_type == TTM_PL_SYSTEM) is_invalid_userptr = true; if (check_if_add_bo_to_vm(avm, mem)) { @@ -1642,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, return 0; } +int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, + struct dma_buf *dma_buf, + uint64_t va, void *vm, + struct kgd_mem **mem, uint64_t *size, + uint64_t *mmap_offset) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + struct drm_gem_object *obj; + struct amdgpu_bo *bo; + struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + + if (dma_buf->ops != &amdgpu_dmabuf_ops) + /* Can't handle non-graphics buffers */ + return -EINVAL; + + obj = dma_buf->priv; + if (obj->dev->dev_private != adev) + /* Can't handle buffers from other devices */ + return -EINVAL; + + bo = gem_to_amdgpu_bo(obj); + if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT))) + /* Only VRAM and GTT BOs are supported */ + return -EINVAL; + + *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); + if (!*mem) + return -ENOMEM; + + if (size) + *size = amdgpu_bo_size(bo); + + if (mmap_offset) + *mmap_offset = amdgpu_bo_mmap_offset(bo); + + INIT_LIST_HEAD(&(*mem)->bo_va_list); + mutex_init(&(*mem)->lock); + (*mem)->mapping_flags = + AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | + AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC; + + (*mem)->bo = amdgpu_bo_ref(bo); + (*mem)->va = va; + (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? + AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT; + (*mem)->mapped_to_gpu_memory = 0; + (*mem)->process_info = avm->process_info; + add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false); + amdgpu_sync_create(&(*mem)->sync); + + return 0; +} + /* Evict a userptr BO by stopping the queues if necessary * * Runs in MMU notifier, may be in RECLAIM_FS context. This means it @@ -1808,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) validate_list.head) { list_add_tail(&mem->resv_list.head, &resv_list); mem->resv_list.bo = mem->validate_list.bo; - mem->resv_list.shared = mem->validate_list.shared; + mem->resv_list.num_shared = mem->validate_list.num_shared; } /* Reserve all BOs and page tables for validation */ @@ -2027,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) list_add_tail(&mem->resv_list.head, &ctx.list); mem->resv_list.bo = mem->validate_list.bo; - mem->resv_list.shared = mem->validate_list.shared; + mem->resv_list.num_shared = mem->validate_list.num_shared; } ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list, @@ -2044,13 +2170,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) if (ret) goto validate_map_fail; - /* Wait for PD/PTs validate to finish */ - /* FIXME: I think this isn't needed */ - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) { - struct amdgpu_bo *bo = peer_vm->root.base.bo; - - ttm_bo_wait(&bo->tbo, false, false); + ret = process_sync_pds_resv(process_info, &sync_obj); + if (ret) { + pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n"); + goto validate_map_fail; } /* Validate BOs and map them to GPUVM (update VM page tables). */ @@ -2066,7 +2189,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) pr_debug("Memory eviction: Validate BOs failed. Try again\n"); goto validate_map_fail; } - + ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false); + if (ret) { + pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); + goto validate_map_fail; + } list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) { ret = update_gpuvm_pte((struct amdgpu_device *) @@ -2087,6 +2214,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) goto validate_map_fail; } + /* Wait for validate and PT updates to finish */ amdgpu_sync_wait(&sync_obj, false); /* Release old eviction fence and create new one, because fence only @@ -2105,10 +2233,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) process_info->eviction_fence = new_fence; *ef = dma_fence_get(&new_fence->base); - /* Wait for validate to finish and attach new eviction fence */ - list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) - ttm_bo_wait(&mem->bo->tbo, false, false); + /* Attach new eviction fence to all BOs */ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list.head) amdgpu_bo_fence(mem->bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 14d2982a47cc..5c79da8e1150 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, entry->priority = min(info[i].bo_priority, AMDGPU_BO_LIST_MAX_PRIORITY); entry->tv.bo = &bo->tbo; - entry->tv.shared = !bo->prime_shared_count; if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) list->gds_obj = bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index ceadeeadfa56..387f1cf1dc20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -381,7 +381,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, (adev->pdev->revision == 0xe7) || (adev->pdev->revision == 0xef))) || ((adev->pdev->device == 0x6fdf) && - (adev->pdev->revision == 0xef))) { + ((adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); } else if ((adev->pdev->device == 0x67df) && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dc54e9efd910..1c49b8266d69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &bo->tbo; - p->uf_entry.tv.shared = true; + /* One for TTM and one for the CS job */ + p->uf_entry.tv.num_shared = 2; p->uf_entry.user_pages = NULL; drm_gem_object_put_unlocked(gobj); @@ -124,14 +125,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs goto free_chunk; } + mutex_lock(&p->ctx->lock); + /* skip guilty context job */ if (atomic_read(&p->ctx->guilty) == 1) { ret = -ECANCELED; goto free_chunk; } - mutex_lock(&p->ctx->lock); - /* get chunks */ chunk_array_user = u64_to_user_ptr(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, @@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, return r; } + /* One for TTM and one for the CS job */ + amdgpu_bo_list_for_each_entry(e, p->bo_list) + e->tv.num_shared = 2; + amdgpu_bo_list_get_list(p->bo_list, &p->validated); if (p->bo_list->first_userptr != p->bo_list->num_entries) p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX); @@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, gws = p->bo_list->gws_obj; oa = p->bo_list->oa_obj; - amdgpu_bo_list_for_each_entry(e, p->bo_list) - e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo)); + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + + /* Make sure we use the exclusive slot for shared BOs */ + if (bo->prime_shared_count) + e->tv.num_shared = 0; + e->bo_va = amdgpu_vm_bo_find(vm, bo); + } if (gds) { p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; @@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1); - if (r) - return r; - p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); if (amdgpu_vm_debug) { @@ -1421,6 +1428,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, if (IS_ERR(fence)) return PTR_ERR(fence); + if (!fence) + fence = dma_fence_get_stub(); + switch (info->in.what) { case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ: r = drm_syncobj_create(&syncobj, 0, fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 0c590ddf250a..7e22be7ca68a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -43,7 +43,7 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, domain, bo, NULL, &ptr); - if (!bo) + if (!*bo) return -ENOMEM; memset(ptr, 0, size); @@ -74,7 +74,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&csa_tv.head); csa_tv.bo = &bo->tbo; - csa_tv.shared = true; + csa_tv.num_shared = 1; list_add(&csa_tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &pd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f9b54236102d..d85184b5b35c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_UVD_ENC] = 1, [AMDGPU_HW_IP_VCN_DEC] = 1, [AMDGPU_HW_IP_VCN_ENC] = 1, + [AMDGPU_HW_IP_VCN_JPEG] = 1, }; static int amdgput_ctx_total_num_entities(void) @@ -247,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, return -ENOMEM; mutex_lock(&mgr->lock); - r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); + r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL); if (r < 0) { mutex_unlock(&mgr->lock); kfree(ctx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 590588a82471..8a078f4ae73d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -59,6 +59,8 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_pm.h" +#include "amdgpu_xgmi.h" + MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) */ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) { + /* No doorbell on SI hardware generation */ if (adev->asic_type < CHIP_BONAIRE) { adev->doorbell.base = 0; @@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) return -EINVAL; + amdgpu_asic_init_doorbell_index(adev); + /* doorbell bar mapping */ adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2); adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), - AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); + adev->doorbell_index.max_assignment+1); if (adev->doorbell.num_doorbells == 0) return -EINVAL; + /* For Vega, reserve and map two pages on doorbell BAR since SDMA + * paging queue doorbell use the second page. The + * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the + * doorbells are in the first page. So with paging queue enabled, + * the max num_doorbells should + 1 page (0x400 in dword) + */ + if (adev->asic_type >= CHIP_VEGA10) + adev->doorbell.num_doorbells += 0x400; + adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32)); @@ -1851,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) { int i, r; + if (adev->gmc.xgmi.num_physical_nodes > 1) + amdgpu_xgmi_remove_device(adev); + amdgpu_amdkfd_device_fini(adev); amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); @@ -2340,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) return amdgpu_device_asic_has_dc_support(adev->asic_type); } + +static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) +{ + struct amdgpu_device *adev = + container_of(__work, struct amdgpu_device, xgmi_reset_work); + + adev->asic_reset_res = amdgpu_asic_reset(adev); + if (adev->asic_reset_res) + DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s", + adev->asic_reset_res, adev->ddev->unique); +} + + /** * amdgpu_device_init - initialize the driver * @@ -2438,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, amdgpu_device_delay_enable_gfx_off); + INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); + adev->gfx.gfx_off_req_count = 1; adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false; @@ -2458,9 +2490,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - /* doorbell bar mapping */ - amdgpu_device_doorbell_init(adev); - /* io port mapping */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) { @@ -2479,6 +2508,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + /* doorbell bar mapping and doorbell index init*/ + amdgpu_device_doorbell_init(adev); + /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ /* this will fail for cards that aren't VGA class devices, just * ignore it */ @@ -3151,86 +3183,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) return 0; } -/** - * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough - * - * @adev: amdgpu device pointer - * - * attempt to do soft-reset or full-reset and reinitialize Asic - * return 0 means succeeded otherwise failed - */ -static int amdgpu_device_reset(struct amdgpu_device *adev) -{ - bool need_full_reset, vram_lost = 0; - int r; - - need_full_reset = amdgpu_device_ip_need_full_reset(adev); - - if (!need_full_reset) { - amdgpu_device_ip_pre_soft_reset(adev); - r = amdgpu_device_ip_soft_reset(adev); - amdgpu_device_ip_post_soft_reset(adev); - if (r || amdgpu_device_ip_check_soft_reset(adev)) { - DRM_INFO("soft reset failed, will fallback to full reset!\n"); - need_full_reset = true; - } - } - - if (need_full_reset) { - r = amdgpu_device_ip_suspend(adev); - -retry: - r = amdgpu_asic_reset(adev); - /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); - - if (!r) { - dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); - r = amdgpu_device_ip_resume_phase1(adev); - if (r) - goto out; - - vram_lost = amdgpu_device_check_vram_lost(adev); - if (vram_lost) { - DRM_ERROR("VRAM is lost!\n"); - atomic_inc(&adev->vram_lost_counter); - } - - r = amdgpu_gtt_mgr_recover( - &adev->mman.bdev.man[TTM_PL_TT]); - if (r) - goto out; - - r = amdgpu_device_fw_loading(adev); - if (r) - return r; - - r = amdgpu_device_ip_resume_phase2(adev); - if (r) - goto out; - - if (vram_lost) - amdgpu_device_fill_reset_magic(adev); - } - } - -out: - if (!r) { - amdgpu_irq_gpu_reset_resume_helper(adev); - r = amdgpu_ib_ring_tests(adev); - if (r) { - dev_err(adev->dev, "ib ring test failed (%d).\n", r); - r = amdgpu_device_ip_suspend(adev); - need_full_reset = true; - goto retry; - } - } - - if (!r) - r = amdgpu_device_recover_vram(adev); - - return r; -} /** * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf @@ -3306,6 +3258,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) if (amdgpu_gpu_recovery == -1) { switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: case CHIP_TOPAZ: case CHIP_TONGA: case CHIP_FIJI: @@ -3329,31 +3283,13 @@ disabled: return false; } -/** - * amdgpu_device_gpu_recover - reset the asic and recover scheduler - * - * @adev: amdgpu device pointer - * @job: which job trigger hang - * - * Attempt to reset the GPU if it has hung (all asics). - * Returns 0 for success or an error on failure. - */ -int amdgpu_device_gpu_recover(struct amdgpu_device *adev, - struct amdgpu_job *job) -{ - int i, r, resched; - - dev_info(adev->dev, "GPU reset begin!\n"); - - mutex_lock(&adev->lock_reset); - atomic_inc(&adev->gpu_reset_counter); - adev->in_gpu_reset = 1; - - /* Block kfd */ - amdgpu_amdkfd_pre_reset(adev); - /* block TTM */ - resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); +static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, + struct amdgpu_job *job, + bool *need_full_reset_arg) +{ + int i, r = 0; + bool need_full_reset = *need_full_reset_arg; /* block all schedulers and reset given job's ring */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -3373,10 +3309,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, amdgpu_fence_driver_force_completion(ring); } - if (amdgpu_sriov_vf(adev)) - r = amdgpu_device_reset_sriov(adev, job ? false : true); - else - r = amdgpu_device_reset(adev); + + + if (!amdgpu_sriov_vf(adev)) { + + if (!need_full_reset) + need_full_reset = amdgpu_device_ip_need_full_reset(adev); + + if (!need_full_reset) { + amdgpu_device_ip_pre_soft_reset(adev); + r = amdgpu_device_ip_soft_reset(adev); + amdgpu_device_ip_post_soft_reset(adev); + if (r || amdgpu_device_ip_check_soft_reset(adev)) { + DRM_INFO("soft reset failed, will fallback to full reset!\n"); + need_full_reset = true; + } + } + + if (need_full_reset) + r = amdgpu_device_ip_suspend(adev); + + *need_full_reset_arg = need_full_reset; + } + + return r; +} + +static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, + struct list_head *device_list_handle, + bool *need_full_reset_arg) +{ + struct amdgpu_device *tmp_adev = NULL; + bool need_full_reset = *need_full_reset_arg, vram_lost = false; + int r = 0; + + /* + * ASIC reset has to be done on all HGMI hive nodes ASAP + * to allow proper links negotiation in FW (within 1 sec) + */ + if (need_full_reset) { + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + /* For XGMI run all resets in parallel to speed up the process */ + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work)) + r = -EALREADY; + } else + r = amdgpu_asic_reset(tmp_adev); + + if (r) { + DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s", + r, tmp_adev->ddev->unique); + break; + } + } + + /* For XGMI wait for all PSP resets to complete before proceed */ + if (!r) { + list_for_each_entry(tmp_adev, device_list_handle, + gmc.xgmi.head) { + if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { + flush_work(&tmp_adev->xgmi_reset_work); + r = tmp_adev->asic_reset_res; + if (r) + break; + } + } + } + } + + + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + if (need_full_reset) { + /* post card */ + if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context)) + DRM_WARN("asic atom init failed!"); + + if (!r) { + dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_device_ip_resume_phase1(tmp_adev); + if (r) + goto out; + + vram_lost = amdgpu_device_check_vram_lost(tmp_adev); + if (vram_lost) { + DRM_ERROR("VRAM is lost!\n"); + atomic_inc(&tmp_adev->vram_lost_counter); + } + + r = amdgpu_gtt_mgr_recover( + &tmp_adev->mman.bdev.man[TTM_PL_TT]); + if (r) + goto out; + + r = amdgpu_device_fw_loading(tmp_adev); + if (r) + return r; + + r = amdgpu_device_ip_resume_phase2(tmp_adev); + if (r) + goto out; + + if (vram_lost) + amdgpu_device_fill_reset_magic(tmp_adev); + + /* Update PSP FW topology after reset */ + if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1) + r = amdgpu_xgmi_update_topology(hive, tmp_adev); + } + } + + +out: + if (!r) { + amdgpu_irq_gpu_reset_resume_helper(tmp_adev); + r = amdgpu_ib_ring_tests(tmp_adev); + if (r) { + dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); + r = amdgpu_device_ip_suspend(tmp_adev); + need_full_reset = true; + r = -EAGAIN; + goto end; + } + } + + if (!r) + r = amdgpu_device_recover_vram(tmp_adev); + else + tmp_adev->asic_reset_res = r; + } + +end: + *need_full_reset_arg = need_full_reset; + return r; +} + +static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev, + struct amdgpu_job *job) +{ + int i; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; @@ -3388,7 +3458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * or all rings (in the case @job is NULL) * after above amdgpu_reset accomplished */ - if ((!job || job->base.sched == &ring->sched) && !r) + if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res) drm_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); @@ -3398,21 +3468,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, drm_helper_resume_force_mode(adev->ddev); } - ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); + adev->asic_reset_res = 0; +} - if (r) { - /* bad news, how to tell it to userspace ? */ - dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); - amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); - } else { - dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter)); - } +static void amdgpu_device_lock_adev(struct amdgpu_device *adev) +{ + mutex_lock(&adev->lock_reset); + atomic_inc(&adev->gpu_reset_counter); + adev->in_gpu_reset = 1; + /* Block kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_pre_reset(adev); +} - /*unlock kfd */ - amdgpu_amdkfd_post_reset(adev); +static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) +{ + /*unlock kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_post_reset(adev); amdgpu_vf_error_trans_all(adev); adev->in_gpu_reset = 0; mutex_unlock(&adev->lock_reset); +} + + +/** + * amdgpu_device_gpu_recover - reset the asic and recover scheduler + * + * @adev: amdgpu device pointer + * @job: which job trigger hang + * + * Attempt to reset the GPU if it has hung (all asics). + * Attempt to do soft-reset or full-reset and reinitialize Asic + * Returns 0 for success or an error on failure. + */ + +int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + struct amdgpu_job *job) +{ + int r; + struct amdgpu_hive_info *hive = NULL; + bool need_full_reset = false; + struct amdgpu_device *tmp_adev = NULL; + struct list_head device_list, *device_list_handle = NULL; + + INIT_LIST_HEAD(&device_list); + + dev_info(adev->dev, "GPU reset begin!\n"); + + /* + * In case of XGMI hive disallow concurrent resets to be triggered + * by different nodes. No point also since the one node already executing + * reset will also reset all the other nodes in the hive. + */ + hive = amdgpu_get_xgmi_hive(adev); + if (hive && adev->gmc.xgmi.num_physical_nodes > 1 && + !mutex_trylock(&hive->hive_lock)) + return 0; + + /* Start with adev pre asic reset first for soft reset check.*/ + amdgpu_device_lock_adev(adev); + r = amdgpu_device_pre_asic_reset(adev, + job, + &need_full_reset); + if (r) { + /*TODO Should we stop ?*/ + DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", + r, adev->ddev->unique); + adev->asic_reset_res = r; + } + + /* Build list of devices to reset */ + if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) { + if (!hive) { + amdgpu_device_unlock_adev(adev); + return -ENODEV; + } + + /* + * In case we are in XGMI hive mode device reset is done for all the + * nodes in the hive to retrain all XGMI links and hence the reset + * sequence is executed in loop on all nodes. + */ + device_list_handle = &hive->device_list; + } else { + list_add_tail(&adev->gmc.xgmi.head, &device_list); + device_list_handle = &device_list; + } + +retry: /* Rest of adevs pre asic reset from XGMI hive. */ + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + + if (tmp_adev == adev) + continue; + + amdgpu_device_lock_adev(tmp_adev); + r = amdgpu_device_pre_asic_reset(tmp_adev, + NULL, + &need_full_reset); + /*TODO Should we stop ?*/ + if (r) { + DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ", + r, tmp_adev->ddev->unique); + tmp_adev->asic_reset_res = r; + } + } + + /* Actual ASIC resets if needed.*/ + /* TODO Implement XGMI hive reset logic for SRIOV */ + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_device_reset_sriov(adev, job ? false : true); + if (r) + adev->asic_reset_res = r; + } else { + r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); + if (r && r == -EAGAIN) + goto retry; + } + + /* Post ASIC reset for all devs .*/ + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL); + + if (r) { + /* bad news, how to tell it to userspace ? */ + dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); + amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); + } else { + dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter)); + } + + amdgpu_device_unlock_adev(tmp_adev); + } + + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) + mutex_unlock(&hive->hive_lock); + + if (r) + dev_info(adev->dev, "GPU reset end with ret = %d\n", r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 686a26de50f9..15ce7e681d67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -631,6 +631,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16); if (!adev->mode_info.max_bpc_property) return -ENOMEM; + adev->mode_info.abm_level_property = + drm_property_create_range(adev->ddev, 0, + "abm level", 0, 4); + if (!adev->mode_info.abm_level_property) + return -ENOMEM; } return 0; @@ -857,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev, /* Inside "upper part" of vblank area? Apply corrective offset if so: */ if (in_vbl && (*vpos >= vbl_start)) { vtotal = mode->crtc_vtotal; - *vpos = *vpos - vtotal; + + /* With variable refresh rate displays the vpos can exceed + * the vtotal value. Clamp to 0 to return -vbl_end instead + * of guessing the remaining number of lines until scanout. + */ + *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0; } /* Correct for shifted end of vbl at vbl_end. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h new file mode 100644 index 000000000000..be620b29f4aa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -0,0 +1,243 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * GPU doorbell structures, functions & helpers + */ +struct amdgpu_doorbell { + /* doorbell mmio */ + resource_size_t base; + resource_size_t size; + u32 __iomem *ptr; + u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ +}; + +/* Reserved doorbells for amdgpu (including multimedia). + * KFD can use all the rest in the 2M doorbell bar. + * For asic before vega10, doorbell is 32-bit, so the + * index/offset is in dword. For vega10 and after, doorbell + * can be 64-bit, so the index defined is in qword. + */ +struct amdgpu_doorbell_index { + uint32_t kiq; + uint32_t mec_ring0; + uint32_t mec_ring1; + uint32_t mec_ring2; + uint32_t mec_ring3; + uint32_t mec_ring4; + uint32_t mec_ring5; + uint32_t mec_ring6; + uint32_t mec_ring7; + uint32_t userqueue_start; + uint32_t userqueue_end; + uint32_t gfx_ring0; + uint32_t sdma_engine0; + uint32_t sdma_engine1; + uint32_t sdma_engine2; + uint32_t sdma_engine3; + uint32_t sdma_engine4; + uint32_t sdma_engine5; + uint32_t sdma_engine6; + uint32_t sdma_engine7; + uint32_t ih; + union { + struct { + uint32_t vcn_ring0_1; + uint32_t vcn_ring2_3; + uint32_t vcn_ring4_5; + uint32_t vcn_ring6_7; + } vcn; + struct { + uint32_t uvd_ring0_1; + uint32_t uvd_ring2_3; + uint32_t uvd_ring4_5; + uint32_t uvd_ring6_7; + uint32_t vce_ring0_1; + uint32_t vce_ring2_3; + uint32_t vce_ring4_5; + uint32_t vce_ring6_7; + } uvd_vce; + }; + uint32_t max_assignment; +}; + +typedef enum _AMDGPU_DOORBELL_ASSIGNMENT +{ + AMDGPU_DOORBELL_KIQ = 0x000, + AMDGPU_DOORBELL_HIQ = 0x001, + AMDGPU_DOORBELL_DIQ = 0x002, + AMDGPU_DOORBELL_MEC_RING0 = 0x010, + AMDGPU_DOORBELL_MEC_RING1 = 0x011, + AMDGPU_DOORBELL_MEC_RING2 = 0x012, + AMDGPU_DOORBELL_MEC_RING3 = 0x013, + AMDGPU_DOORBELL_MEC_RING4 = 0x014, + AMDGPU_DOORBELL_MEC_RING5 = 0x015, + AMDGPU_DOORBELL_MEC_RING6 = 0x016, + AMDGPU_DOORBELL_MEC_RING7 = 0x017, + AMDGPU_DOORBELL_GFX_RING0 = 0x020, + AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0, + AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1, + AMDGPU_DOORBELL_IH = 0x1E8, + AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, + AMDGPU_DOORBELL_INVALID = 0xFFFF +} AMDGPU_DOORBELL_ASSIGNMENT; + +typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT +{ + /* Compute + GFX: 0~255 */ + AMDGPU_VEGA20_DOORBELL_KIQ = 0x000, + AMDGPU_VEGA20_DOORBELL_HIQ = 0x001, + AMDGPU_VEGA20_DOORBELL_DIQ = 0x002, + AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003, + AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004, + AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005, + AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006, + AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007, + AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008, + AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009, + AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A, + AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B, + AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A, + AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B, + /* SDMA:256~335*/ + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100, + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A, + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114, + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E, + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128, + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132, + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C, + AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146, + /* IH: 376~391 */ + AMDGPU_VEGA20_DOORBELL_IH = 0x178, + /* MMSCH: 392~407 + * overlap the doorbell assignment with VCN as they are mutually exclusive + * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD + */ + AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ + AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189, + AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A, + AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B, + + AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188, + AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189, + AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A, + AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B, + + AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C, + AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D, + AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E, + AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F, + AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F, + AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF +} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT; + +/* + * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space + */ +typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT +{ + /* + * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in + * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range. + * Compute related doorbells are allocated from 0x00 to 0x8a + */ + + + /* kernel scheduling */ + AMDGPU_DOORBELL64_KIQ = 0x00, + + /* HSA interface queue and debug queue */ + AMDGPU_DOORBELL64_HIQ = 0x01, + AMDGPU_DOORBELL64_DIQ = 0x02, + + /* Compute engines */ + AMDGPU_DOORBELL64_MEC_RING0 = 0x03, + AMDGPU_DOORBELL64_MEC_RING1 = 0x04, + AMDGPU_DOORBELL64_MEC_RING2 = 0x05, + AMDGPU_DOORBELL64_MEC_RING3 = 0x06, + AMDGPU_DOORBELL64_MEC_RING4 = 0x07, + AMDGPU_DOORBELL64_MEC_RING5 = 0x08, + AMDGPU_DOORBELL64_MEC_RING6 = 0x09, + AMDGPU_DOORBELL64_MEC_RING7 = 0x0a, + + /* User queue doorbell range (128 doorbells) */ + AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b, + AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a, + + /* Graphics engine */ + AMDGPU_DOORBELL64_GFX_RING0 = 0x8b, + + /* + * Other graphics doorbells can be allocated here: from 0x8c to 0xdf + * Graphics voltage island aperture 1 + * default non-graphics QWORD index is 0xe0 - 0xFF inclusive + */ + + /* For vega10 sriov, the sdma doorbell must be fixed as follow + * to keep the same setting with host driver, or it will + * happen conflicts + */ + AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0, + AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1, + AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2, + AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3, + + /* Interrupt handler */ + AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */ + AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */ + AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */ + + /* VCN engine use 32 bits doorbell */ + AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ + AMDGPU_DOORBELL64_VCN2_3 = 0xF9, + AMDGPU_DOORBELL64_VCN4_5 = 0xFA, + AMDGPU_DOORBELL64_VCN6_7 = 0xFB, + + /* overlap the doorbell assignment with VCN as they are mutually exclusive + * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD + */ + AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, + AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, + AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, + AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, + + AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, + AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, + AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, + AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, + + AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, + AMDGPU_DOORBELL64_INVALID = 0xFFFF +} AMDGPU_DOORBELL64_ASSIGNMENT; + +u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); +void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); +u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index); +void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); + +#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) +#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) +#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index)) +#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v)) + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8de55f7f1a3a..c806f984bcc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -454,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444); /** * DOC: param_buf_per_se (int) - * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx). + * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte. + * The default is 0 (depending on gfx). */ -MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)"); +MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)"); module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444); /** @@ -864,6 +865,7 @@ static const struct pci_device_id pciidlist[] = { /* VEGAM */ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, + {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, /* Vega 10 */ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, @@ -872,7 +874,13 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, + {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, /* Vega 12 */ {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, @@ -885,6 +893,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, /* Raven */ @@ -1220,9 +1229,6 @@ static struct drm_driver kms_driver = { .patchlevel = KMS_DRIVER_PATCHLEVEL, }; -static struct drm_driver *driver; -static struct pci_driver *pdriver; - static struct pci_driver amdgpu_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, @@ -1252,16 +1258,14 @@ static int __init amdgpu_init(void) goto error_fence; DRM_INFO("amdgpu kernel modesetting enabled.\n"); - driver = &kms_driver; - pdriver = &amdgpu_kms_pci_driver; - driver->num_ioctls = amdgpu_max_kms_ioctl; + kms_driver.num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ amdgpu_amdkfd_init(); /* let modprobe override vga console setting */ - return pci_register_driver(pdriver); + return pci_register_driver(&amdgpu_kms_pci_driver); error_fence: amdgpu_sync_fini(); @@ -1273,7 +1277,7 @@ error_sync: static void __exit amdgpu_exit(void) { amdgpu_amdkfd_fini(); - pci_unregister_driver(pdriver); + pci_unregister_driver(&amdgpu_kms_pci_driver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); amdgpu_fence_slab_fini(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7b3d1ebda9df..f4f00217546e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, INIT_LIST_HEAD(&duplicates); tv.bo = &bo->tbo; - tv.shared = true; + tv.num_shared = 1; list_add(&tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); @@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -ENOENT; abo = gem_to_amdgpu_bo(gobj); tv.bo = &abo->tbo; - tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID); + if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) + tv.num_shared = 1; + else + tv.num_shared = 0; list_add(&tv.head, &list); } else { gobj = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index d63daba9b17c..f1ddfc50bcc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +extern const struct dma_buf_ops amdgpu_dmabuf_ops; + /* * GEM objects. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 6a70c0b7105f..97a60da62004 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -250,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->adev = NULL; ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = AMDGPU_DOORBELL_KIQ; + ring->doorbell_index = adev->doorbell_index.kiq; r = amdgpu_gfx_kiq_acquire(adev, ring); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 8c57924c075f..81e6070d255b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -99,6 +99,7 @@ struct amdgpu_xgmi { unsigned num_physical_nodes; /* gpu list in the same hive */ struct list_head head; + bool supported; }; struct amdgpu_gmc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 9ce8c93ec19b..f877bb78d10a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -51,14 +51,12 @@ struct amdgpu_ih_ring { struct amdgpu_ih_funcs { /* ring read/write ptr handling, called from interrupt context */ u32 (*get_wptr)(struct amdgpu_device *adev); - bool (*prescreen_iv)(struct amdgpu_device *adev); void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); void (*set_rptr)(struct amdgpu_device *adev); }; #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) -#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 6b6524f04ce0..b7968f426862 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -145,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev, u32 ring_index = ih->rptr >> 2; struct amdgpu_iv_entry entry; - /* Prescreening of high-frequency interrupts */ - if (!amdgpu_ih_prescreen_iv(adev)) - return; - - /* Before dispatching irq to IP blocks, send it to amdkfd */ - amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]); - entry.iv_entry = (const uint32_t *)&ih->ring[ring_index]; amdgpu_ih_decode_iv(adev, &entry); @@ -371,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, unsigned client_id = entry->client_id; unsigned src_id = entry->src_id; struct amdgpu_irq_src *src; + bool handled = false; int r; trace_amdgpu_iv(entry); if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); - return; - } - if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { + } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); - return; - } - if (adev->irq.virq[src_id]) { + } else if (adev->irq.virq[src_id]) { generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); - } else { - if (!adev->irq.client[client_id].sources) { - DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", - client_id, src_id); - return; - } - src = adev->irq.client[client_id].sources[src_id]; - if (!src) { - DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); - return; - } + } else if (!adev->irq.client[client_id].sources) { + DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", + client_id, src_id); + } else if ((src = adev->irq.client[client_id].sources[src_id])) { r = src->funcs->process(adev, src, entry); - if (r) + if (r < 0) DRM_ERROR("error processing interrupt (%d)\n", r); + else if (r) + handled = true; + + } else { + DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); } + + /* Send it to amdkfd as well if it isn't already handled */ + if (!handled) + amdgpu_amdkfd_interrupt(adev, entry->iv_entry); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index e0af44fd6a0c..0a17fb1af204 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -32,6 +32,9 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) { struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); + struct amdgpu_task_info ti; + + memset(&ti, 0, sizeof(struct amdgpu_task_info)); if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", @@ -39,9 +42,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) return; } + amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n", job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), ring->fence_drv.sync_seq); + DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", + ti.process_name, ti.tgid, ti.task_name, ti.pid); if (amdgpu_device_should_recover_gpu(ring->adev)) amdgpu_device_gpu_recover(ring->adev, job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 9b3164c0f861..bc62bf41b7e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (!info->return_size || !info->return_pointer) return -EINVAL; - /* Ensure IB tests are run on ring */ - flush_delayed_work(&adev->late_init_work); - switch (info->query) { case AMDGPU_INFO_ACCEL_WORKING: ui32 = adev->accel_working; @@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) struct amdgpu_fpriv *fpriv; int r, pasid; + /* Ensure IB tests are run on ring */ + flush_delayed_work(&adev->late_init_work); + file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index e55508b39496..3e6823fdd939 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change * * @mn: our notifier - * @mm: the mm this callback is about - * @start: start of updated range - * @end: end of updated range + * @range: mmu notifier context * * Block for operations on BOs to finish and mark pages as accessed and * potentially dirty. */ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - bool blockable) + const struct mmu_notifier_range *range) { struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); struct interval_tree_node *it; + unsigned long end; /* notification is exclusive, but interval is inclusive */ - end -= 1; + end = range->end - 1; /* TODO we should be able to split locking for interval tree and * amdgpu_mn_invalidate_node */ - if (amdgpu_mn_read_lock(amn, blockable)) + if (amdgpu_mn_read_lock(amn, range->blockable)) return -EAGAIN; - it = interval_tree_iter_first(&amn->objects, start, end); + it = interval_tree_iter_first(&amn->objects, range->start, end); while (it) { struct amdgpu_mn_node *node; - if (!blockable) { + if (!range->blockable) { amdgpu_mn_read_unlock(amn); return -EAGAIN; } node = container_of(it, struct amdgpu_mn_node, it); - it = interval_tree_iter_next(it, start, end); + it = interval_tree_iter_next(it, range->start, end); - amdgpu_mn_invalidate_node(node, start, end); + amdgpu_mn_invalidate_node(node, range->start, end); } return 0; @@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, * are restorted in amdgpu_mn_invalidate_range_end_hsa. */ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - bool blockable) + const struct mmu_notifier_range *range) { struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); struct interval_tree_node *it; + unsigned long end; /* notification is exclusive, but interval is inclusive */ - end -= 1; + end = range->end - 1; - if (amdgpu_mn_read_lock(amn, blockable)) + if (amdgpu_mn_read_lock(amn, range->blockable)) return -EAGAIN; - it = interval_tree_iter_first(&amn->objects, start, end); + it = interval_tree_iter_first(&amn->objects, range->start, end); while (it) { struct amdgpu_mn_node *node; struct amdgpu_bo *bo; - if (!blockable) { + if (!range->blockable) { amdgpu_mn_read_unlock(amn); return -EAGAIN; } node = container_of(it, struct amdgpu_mn_node, it); - it = interval_tree_iter_next(it, start, end); + it = interval_tree_iter_next(it, range->start, end); list_for_each_entry(bo, &node->bos, mn_list) { struct kgd_mem *mem = bo->kfd_bo; if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, - start, end)) - amdgpu_amdkfd_evict_userptr(mem, mm); + range->start, + end)) + amdgpu_amdkfd_evict_userptr(mem, range->mm); } } @@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, * Release the lock again to allow new command submissions. */ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end) + const struct mmu_notifier_range *range) { struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 0dc2c5c57015..aadd0fa42e43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -38,7 +38,6 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_plane_helper.h> -#include <drm/drm_fb_helper.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/hrtimer.h> @@ -294,13 +293,6 @@ struct amdgpu_display_funcs { uint16_t connector_object_id, struct amdgpu_hpd *hpd, struct amdgpu_router *router); - /* it is used to enter or exit into free sync mode */ - int (*notify_freesync)(struct drm_device *dev, void *data, - struct drm_file *filp); - /* it is used to allow enablement of freesync mode */ - int (*set_freesync_property)(struct drm_connector *connector, - struct drm_property *property, - uint64_t val); }; @@ -340,6 +332,8 @@ struct amdgpu_mode_info { struct drm_property *dither_property; /* maximum number of bits per channel for monitor color */ struct drm_property *max_bpc_property; + /* Adaptive Backlight Modulation (power feature) */ + struct drm_property *abm_level_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index cf768acb51dc..728e15e5d68a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_subtract_pin_size(bo); if (bo->kfd_bo) - amdgpu_amdkfd_unreserve_system_memory_limit(bo); + amdgpu_amdkfd_unreserve_memory_limit(bo); amdgpu_bo_kunmap(bo); @@ -608,53 +608,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev, } /** - * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object - * @adev: amdgpu device object - * @ring: amdgpu_ring for the engine handling the buffer operations - * @bo: &amdgpu_bo buffer to be backed up - * @resv: reservation object with embedded fence - * @fence: dma_fence associated with the operation - * @direct: whether to submit the job directly - * - * Copies an &amdgpu_bo buffer object to its shadow object. - * Not used for now. - * - * Returns: - * 0 for success or a negative error code on failure. - */ -int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct reservation_object *resv, - struct dma_fence **fence, - bool direct) - -{ - struct amdgpu_bo *shadow = bo->shadow; - uint64_t bo_addr, shadow_addr; - int r; - - if (!shadow) - return -EINVAL; - - bo_addr = amdgpu_bo_gpu_offset(bo); - shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); - - r = reservation_object_reserve_shared(bo->tbo.resv, 1); - if (r) - goto err; - - r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, - amdgpu_bo_size(bo), resv, fence, - direct, false); - if (!r) - amdgpu_bo_fence(bo, *fence, true); - -err: - return r; -} - -/** * amdgpu_bo_validate - validate an &amdgpu_bo buffer object * @bo: pointer to the buffer object * @@ -959,7 +912,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) struct ttm_operation_ctx ctx = { false, false }; int r, i; - if (!bo->pin_count) { + if (WARN_ON_ONCE(!bo->pin_count)) { dev_warn(adev->dev, "%p unpin not necessary\n", bo); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 7d3312d0da11..9291c2f837e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); -int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct reservation_object *resv, - struct dma_fence **fence, bool direct); int amdgpu_bo_validate(struct amdgpu_bo *bo); int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7235cd0b0fa9..1f61ed95727c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -33,6 +33,8 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/nospec.h> +#include "hwmgr.h" +#define WIDTH_4K 3840 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); @@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) return 0; + /* Skip fan attributes on APU */ + if ((adev->flags & AMD_IS_APU) && + (attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || + attr == &sensor_dev_attr_fan1_input.dev_attr.attr || + attr == &sensor_dev_attr_fan1_min.dev_attr.attr || + attr == &sensor_dev_attr_fan1_max.dev_attr.attr || + attr == &sensor_dev_attr_fan1_target.dev_attr.attr || + attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) + return 0; + /* Skip limit attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || @@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); mutex_unlock(&adev->pm.mutex); } + /* enable/disable Low Memory PState for UVD (4k videos) */ + if (adev->asic_type == CHIP_STONEY && + adev->uvd.decode_image_width >= WIDTH_4K) { + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + + if (hwmgr && hwmgr->hwmgr_func && + hwmgr->hwmgr_func->update_nbdpm_pstate) + hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, + !enable, + true); + } } void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 3e44d889f7af..71913a18d142 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -39,8 +39,6 @@ #include <drm/amdgpu_drm.h> #include <linux/dma-buf.h> -static const struct dma_buf_ops amdgpu_dmabuf_ops; - /** * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table * implementation @@ -332,7 +330,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf, return ret; } -static const struct dma_buf_ops amdgpu_dmabuf_ops = { +const struct dma_buf_ops amdgpu_dmabuf_ops = { .attach = amdgpu_gem_map_attach, .detach = amdgpu_gem_map_detach, .map_dma_buf = drm_gem_map_dma_buf, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index e05dc66b1090..8fab0d637ee5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -155,10 +155,14 @@ psp_cmd_submit_buf(struct psp_context *psp, return ret; } -static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd, +static void psp_prep_tmr_cmd_buf(struct psp_context *psp, + struct psp_gfx_cmd_resp *cmd, uint64_t tmr_mc, uint32_t size) { - cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; + if (psp_support_vmr_ring(psp)) + cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; + else + cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); cmd->cmd.cmd_setup_tmr.buf_size = size; @@ -192,7 +196,7 @@ static int psp_tmr_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE); + psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE); DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n", PSP_TMR_SIZE, psp->tmr_mc_addr); @@ -536,8 +540,10 @@ static int psp_load_fw(struct amdgpu_device *adev) int ret; struct psp_context *psp = &adev->psp; - if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0) + if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) { + psp_ring_destroy(psp, PSP_RING_TYPE__KM); goto skip_memalloc; + } psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!psp->cmd) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 9ec5d1a666a6..3ee573b4016e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -83,12 +83,13 @@ struct psp_funcs enum AMDGPU_UCODE_ID ucode_type); bool (*smu_reload_quirk)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp); - uint64_t (*xgmi_get_node_id)(struct psp_context *psp); - uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); + int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id); + int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id); int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology); int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, struct psp_xgmi_topology_info *topology); + bool (*support_vmr_ring)(struct psp_context *psp); }; struct psp_xgmi_context { @@ -192,12 +193,14 @@ struct psp_xgmi_topology_info { ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) +#define psp_support_vmr_ring(psp) \ + ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false) #define psp_mode1_reset(psp) \ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) -#define psp_xgmi_get_node_id(psp) \ - ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0) -#define psp_xgmi_get_hive_id(psp) \ - ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0) +#define psp_xgmi_get_node_id(psp, node_id) \ + ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL) +#define psp_xgmi_get_hive_id(psp, hive_id) \ + ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL) #define psp_xgmi_get_topology_info(psp, num_device, topology) \ ((psp)->funcs->xgmi_get_topology_info ? \ (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL) @@ -217,7 +220,6 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; int psp_gpu_reset(struct amdgpu_device *adev); int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); - extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5b75bdc8dc28..335a0edf114b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { ktime_t deadline = ktime_add_us(ktime_get(), 10000); - if (!ring->funcs->soft_recovery) + if (!ring->funcs->soft_recovery || !fence) return false; atomic_inc(&ring->adev->gpu_reset_counter); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 0beb01fef83f..d87e828a084b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -29,7 +29,7 @@ #include <drm/drm_print.h> /* max number of rings */ -#define AMDGPU_MAX_RINGS 21 +#define AMDGPU_MAX_RINGS 23 #define AMDGPU_MAX_GFX_RINGS 1 #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 69896f451e8a..4e5d13e41f6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, buf_sizes[0x1] = dpb_size; buf_sizes[0x2] = image_size; buf_sizes[0x4] = min_ctx_size; + /* store image width to adjust nb memory pstate */ + adev->uvd.decode_image_width = width; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index a3ab1a41060f..5eb63288d157 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -65,6 +65,8 @@ struct amdgpu_uvd { struct drm_sched_entity entity; struct delayed_work idle_work; unsigned harvest_config; + /* store image width to adjust nb memory state */ + unsigned decode_image_width; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e2e42e3fbcf3..ecf6f96df2ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -262,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev, ring = &adev->vcn.ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); + RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); @@ -322,7 +322,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev, ring = &adev->vcn.ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); + RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); @@ -396,16 +396,26 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { struct dpg_pause_state new_state; + unsigned int fences = 0; + unsigned int i; - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); + } + if (fences) new_state.fw_based = VCN_DPG_STATE__PAUSE; else - new_state.fw_based = adev->vcn.pause_state.fw_based; + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) + if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else - new_state.jpeg = adev->vcn.pause_state.jpeg; + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) + new_state.jpeg = VCN_DPG_STATE__PAUSE; amdgpu_vcn_pause_dpg_mode(adev, &new_state); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index cfee74732edb..462a04e0f5e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -334,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) if (adev->fw_vram_usage.va != NULL) { adev->virt.fw_reserve.p_pf2vf = - (struct amdgim_pf2vf_info_header *)( + (struct amd_sriov_msg_pf2vf_info_header *)( adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET); AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size); AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 0728fbc9a692..722deefc0a7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -63,8 +63,8 @@ struct amdgpu_virt_ops { * Firmware Reserve Frame buffer */ struct amdgpu_virt_fw_reserve { - struct amdgim_pf2vf_info_header *p_pf2vf; - struct amdgim_vf2pf_info_header *p_vf2pf; + struct amd_sriov_msg_pf2vf_info_header *p_pf2vf; + struct amd_sriov_msg_vf2pf_info_header *p_vf2pf; unsigned int checksum_key; }; /* @@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, }; -struct amdgim_pf2vf_info_header { +struct amd_sriov_msg_pf2vf_info_header { /* the total structure size in byte. */ uint32_t size; /* version of this structure, written by the GIM */ uint32_t version; + /* reserved */ + uint32_t reserved[2]; } __aligned(4); struct amdgim_pf2vf_info_v1 { /* header contains size and version */ - struct amdgim_pf2vf_info_header header; + struct amd_sriov_msg_pf2vf_info_header header; /* max_width * max_height */ unsigned int uvd_enc_max_pixels_count; /* 16x16 pixels/sec, codec independent */ @@ -112,7 +114,7 @@ struct amdgim_pf2vf_info_v1 { struct amdgim_pf2vf_info_v2 { /* header contains size and version */ - struct amdgim_pf2vf_info_header header; + struct amd_sriov_msg_pf2vf_info_header header; /* use private key from mailbox 2 to create chueksum */ uint32_t checksum; /* The features flags of the GIM driver supports. */ @@ -137,20 +139,22 @@ struct amdgim_pf2vf_info_v2 { uint64_t vcefw_kboffset; /* VCE FW size in KB */ uint32_t vcefw_ksize; - uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)]; + uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)]; } __aligned(4); -struct amdgim_vf2pf_info_header { +struct amd_sriov_msg_vf2pf_info_header { /* the total structure size in byte. */ uint32_t size; /*version of this structure, written by the guest */ uint32_t version; + /* reserved */ + uint32_t reserved[2]; } __aligned(4); struct amdgim_vf2pf_info_v1 { /* header contains size and version */ - struct amdgim_vf2pf_info_header header; + struct amd_sriov_msg_vf2pf_info_header header; /* driver version */ char driver_version[64]; /* driver certification, 1=WHQL, 0=None */ @@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 { struct amdgim_vf2pf_info_v2 { /* header contains size and version */ - struct amdgim_vf2pf_info_header header; + struct amd_sriov_msg_vf2pf_info_header header; uint32_t checksum; /* driver version */ uint8_t driver_version[64]; @@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 { uint32_t uvd_enc_usage; /* guest uvd engine usage percentage. 0xffff means N/A. */ uint32_t uvd_enc_health; - uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)]; + uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)]; } __aligned(4); #define AMDGPU_FW_VRAM_VF2PF_VER 2 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 58a2363040dd..e73d152659a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, if (level == adev->vm_manager.root_level) /* For the root directory */ - return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift; + return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift; else if (level != AMDGPU_VM_PTB) /* Everything in between */ return 512; @@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, { entry->priority = 0; entry->tv.bo = &vm->root.base.bo->tbo; - entry->tv.shared = true; + /* One for the VM updates, one for TTM and one for the CS job */ + entry->tv.num_shared = 3; entry->user_pages = NULL; list_add(&entry->tv.head, validated); } @@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched); - r = reservation_object_reserve_shared(bo->tbo.resv, 1); - if (r) - return r; - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (r) goto error; @@ -1656,9 +1653,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if (!amdgpu_vm_pt_descendant(adev, &cursor)) return -ENOENT; continue; - } else if (frag >= parent_shift) { + } else if (frag >= parent_shift && + cursor.level - 1 != adev->vm_manager.root_level) { /* If the fragment size is even larger than the parent - * shift we should go up one level and check it again. + * shift we should go up one level and check it again + * unless one level up is the root level. */ if (!amdgpu_vm_pt_ancestor(&cursor)) return -ENOENT; @@ -1666,10 +1665,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, } /* Looks good so far, calculate parameters for the update */ - incr = AMDGPU_GPU_PAGE_SIZE << shift; + incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift; mask = amdgpu_vm_entries_mask(adev, cursor.level); pe_start = ((cursor.pfn >> shift) & mask) * 8; - entry_end = (mask + 1) << shift; + entry_end = (uint64_t)(mask + 1) << shift; entry_end += cursor.pfn & ~(entry_end - 1); entry_end = min(entry_end, end); @@ -1682,7 +1681,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, flags | AMDGPU_PTE_FRAG(frag)); pe_start += nptes * 8; - dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift; + dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift; frag_start = upd_end; if (frag_start >= frag_end) { @@ -1842,10 +1841,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1); - if (r) - goto error_free; - r = amdgpu_vm_update_ptes(¶ms, start, last + 1, addr, flags); if (r) goto error_free; @@ -3026,6 +3021,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) goto error_free_root; + r = reservation_object_reserve_shared(root->tbo.resv, 1); + if (r) + goto error_unreserve; + r = amdgpu_vm_clear_bo(adev, vm, root, adev->vm_manager.root_level, vm->pte_support_ats); @@ -3055,7 +3054,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, } INIT_KFIFO(vm->faults); - vm->fault_credit = 16; return 0; @@ -3268,42 +3266,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } /** - * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID - * - * @adev: amdgpu_device pointer - * @pasid: PASID do identify the VM - * - * This function is expected to be called in interrupt context. - * - * Returns: - * True if there was fault credit, false otherwise - */ -bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, - unsigned int pasid) -{ - struct amdgpu_vm *vm; - - spin_lock(&adev->vm_manager.pasid_lock); - vm = idr_find(&adev->vm_manager.pasid_idr, pasid); - if (!vm) { - /* VM not found, can't track fault credit */ - spin_unlock(&adev->vm_manager.pasid_lock); - return true; - } - - /* No lock needed. only accessed by IRQ handler */ - if (!vm->fault_credit) { - /* Too many faults in this VM */ - spin_unlock(&adev->vm_manager.pasid_lock); - return false; - } - - vm->fault_credit--; - spin_unlock(&adev->vm_manager.pasid_lock); - return true; -} - -/** * amdgpu_vm_manager_init - init the VM manager * * @adev: amdgpu_device pointer diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 2a8898d19c8b..e8dcfd59fc93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -229,9 +229,6 @@ struct amdgpu_vm { /* Up to 128 pending retry page faults */ DECLARE_KFIFO(faults, u64, 128); - /* Limit non-retry fault storms */ - unsigned int fault_credit; - /* Points to the KFD process VM info */ struct amdkfd_process_info *process_info; @@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid); void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); -bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, - unsigned int pasid); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 909216a9b447..8a8bc60cb6b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -23,7 +23,7 @@ */ #include <linux/list.h> #include "amdgpu.h" -#include "amdgpu_psp.h" +#include "amdgpu_xgmi.h" static DEFINE_MUTEX(xgmi_mutex); @@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex); #define AMDGPU_MAX_XGMI_HIVE 8 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4 -struct amdgpu_hive_info { - uint64_t hive_id; - struct list_head device_list; -}; - static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; static unsigned hive_count = 0; -static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) + +void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) +{ + return &hive->device_list; +} + +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) { int i; struct amdgpu_hive_info *tmp; @@ -58,39 +59,73 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) tmp = &xgmi_hives[hive_count++]; tmp->hive_id = adev->gmc.xgmi.hive_id; INIT_LIST_HEAD(&tmp->device_list); + mutex_init(&tmp->hive_lock); + return tmp; } +int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev) +{ + int ret = -EINVAL; + + /* Each psp need to set the latest topology */ + ret = psp_xgmi_set_topology_info(&adev->psp, + hive->number_devices, + &hive->topology_info); + if (ret) + dev_err(adev->dev, + "XGMI: Set topology failure on device %llx, hive %llx, ret %d", + adev->gmc.xgmi.node_id, + adev->gmc.xgmi.hive_id, ret); + else + dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n", + adev->gmc.xgmi.physical_node_id, + adev->gmc.xgmi.hive_id); + + return ret; +} + int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { - struct psp_xgmi_topology_info *tmp_topology; + struct psp_xgmi_topology_info *hive_topology; struct amdgpu_hive_info *hive; struct amdgpu_xgmi *entry; - struct amdgpu_device *tmp_adev; + struct amdgpu_device *tmp_adev = NULL; int count = 0, ret = -EINVAL; - if ((adev->asic_type < CHIP_VEGA20) || - (adev->flags & AMD_IS_APU) ) + if (!adev->gmc.xgmi.supported) return 0; - adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); - adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); - tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL); - if (!tmp_topology) - return -ENOMEM; + ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to get node id\n"); + return ret; + } + + ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to get hive id\n"); + return ret; + } + mutex_lock(&xgmi_mutex); hive = amdgpu_get_xgmi_hive(adev); if (!hive) goto exit; + hive_topology = &hive->topology_info; + list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); list_for_each_entry(entry, &hive->device_list, head) - tmp_topology->nodes[count++].node_id = entry->node_id; + hive_topology->nodes[count++].node_id = entry->node_id; + hive->number_devices = count; /* Each psp need to get the latest topology */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology); + ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology); if (ret) { dev_err(tmp_adev->dev, "XGMI: Get topology failure on device %llx, hive %llx, ret %d", @@ -101,25 +136,33 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) } } - /* Each psp need to set the latest topology */ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); - if (ret) { - dev_err(tmp_adev->dev, - "XGMI: Set topology failure on device %llx, hive %llx, ret %d", - tmp_adev->gmc.xgmi.node_id, - tmp_adev->gmc.xgmi.hive_id, ret); - /* To do : continue with some node failed or disable the whole hive */ + ret = amdgpu_xgmi_update_topology(hive, tmp_adev); + if (ret) break; - } } - if (!ret) - dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n", - adev->gmc.xgmi.physical_node_id, - adev->gmc.xgmi.hive_id); exit: mutex_unlock(&xgmi_mutex); - kfree(tmp_topology); return ret; } + +void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) +{ + struct amdgpu_hive_info *hive; + + if (!adev->gmc.xgmi.supported) + return; + + mutex_lock(&xgmi_mutex); + + hive = amdgpu_get_xgmi_hive(adev); + if (!hive) + goto exit; + + if (!(hive->number_devices--)) + mutex_destroy(&hive->hive_lock); + +exit: + mutex_unlock(&xgmi_mutex); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h new file mode 100644 index 000000000000..6151eb9c8ad3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -0,0 +1,40 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __AMDGPU_XGMI_H__ +#define __AMDGPU_XGMI_H__ + +#include "amdgpu_psp.h" + +struct amdgpu_hive_info { + uint64_t hive_id; + struct list_head device_list; + struct psp_xgmi_topology_info topology_info; + int number_devices; + struct mutex hive_lock; +}; + +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev); +int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); +int amdgpu_xgmi_add_device(struct amdgpu_device *adev); +void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index f41f5f57e9f3..71c50d8900e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .flush_hdp = &cik_flush_hdp, .invalidate_hdp = &cik_invalidate_hdp, .need_full_reset = &cik_need_full_reset, + .init_doorbell_index = &legacy_doorbell_index_init, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index e49c6f15a0a0..54c625a2e570 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int cik_set_ip_blocks(struct amdgpu_device *adev); +void legacy_doorbell_index_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index b5775c6a857b..8a8b4967a101 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev) * [127:96] - reserved */ -/** - * cik_ih_prescreen_iv - prescreen an interrupt vector - * - * @adev: amdgpu_device pointer - * - * Returns true if the interrupt vector should be further processed. - */ -static bool cik_ih_prescreen_iv(struct amdgpu_device *adev) -{ - u32 ring_index = adev->irq.ih.rptr >> 2; - u16 pasid; - - switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { - case 146: - case 147: - pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; - if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) - return true; - break; - default: - /* Not a VM fault */ - return true; - } - - adev->irq.ih.rptr += 16; - return false; -} - /** * cik_ih_decode_iv - decode an interrupt vector * @@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = { static const struct amdgpu_ih_funcs cik_ih_funcs = { .get_wptr = cik_ih_get_wptr, - .prescreen_iv = cik_ih_prescreen_iv, .decode_iv = cik_ih_decode_iv, .set_rptr = cik_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index df5ac4d85a00..9d3ea298e116 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -208,34 +208,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev) } /** - * cz_ih_prescreen_iv - prescreen an interrupt vector - * - * @adev: amdgpu_device pointer - * - * Returns true if the interrupt vector should be further processed. - */ -static bool cz_ih_prescreen_iv(struct amdgpu_device *adev) -{ - u32 ring_index = adev->irq.ih.rptr >> 2; - u16 pasid; - - switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { - case 146: - case 147: - pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; - if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) - return true; - break; - default: - /* Not a VM fault */ - return true; - } - - adev->irq.ih.rptr += 16; - return false; -} - -/** * cz_ih_decode_iv - decode an interrupt vector * * @adev: amdgpu_device pointer @@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = { static const struct amdgpu_ih_funcs cz_ih_funcs = { .get_wptr = cz_ih_get_wptr, - .prescreen_iv = cz_ih_prescreen_iv, .decode_iv = cz_ih_decode_iv, .set_rptr = cz_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index f467b9bd090d..3a9fb6018c16 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4363,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id; + ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id; sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index cb066a8dccd7..381f593b0cda 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -44,7 +44,6 @@ #include "gca/gfx_8_0_d.h" #include "gca/gfx_8_0_enum.h" #include "gca/gfx_8_0_sh_mask.h" -#include "gca/gfx_8_0_enum.h" #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" @@ -1891,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id; + ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX8_MEC_HPD_SIZE); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); @@ -2002,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle) /* no gfx doorbells on iceland */ if (adev->asic_type != CHIP_TOPAZ) { ring->use_doorbell = true; - ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; + ring->doorbell_index = adev->doorbell_index.gfx_ring0; } r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, @@ -4069,6 +4068,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) { + gfx_v8_0_init_csb(adev); + return 0; + } + adev->gfx.rlc.funcs->stop(adev); adev->gfx.rlc.funcs->reset(adev); gfx_v8_0_init_pg(adev); @@ -4216,7 +4220,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, DOORBELL_RANGE_LOWER, - AMDGPU_DOORBELL_GFX_RING0); + adev->doorbell_index.gfx_ring0); WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp); WREG32(mmCP_RB_DOORBELL_RANGE_UPPER, @@ -4645,8 +4649,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev) { if (adev->asic_type > CHIP_TONGA) { - WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); - WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2); + WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2); + WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2); } /* enable doorbells */ WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1); @@ -4948,14 +4952,13 @@ static bool gfx_v8_0_check_soft_reset(void *handle) static int gfx_v8_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 grbm_soft_reset = 0; if ((!adev->gfx.grbm_soft_reset) && (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; - srbm_soft_reset = adev->gfx.srbm_soft_reset; /* stop the rlc */ adev->gfx.rlc.funcs->stop(adev); @@ -5052,14 +5055,13 @@ static int gfx_v8_0_soft_reset(void *handle) static int gfx_v8_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 grbm_soft_reset = 0; if ((!adev->gfx.grbm_soft_reset) && (!adev->gfx.srbm_soft_reset)) return 0; grbm_soft_reset = adev->gfx.grbm_soft_reset; - srbm_soft_reset = adev->gfx.srbm_soft_reset; if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c27caa144c57..7556716038d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin"); MODULE_FIRMWARE("amdgpu/picasso_mec.bin"); MODULE_FIRMWARE("amdgpu/picasso_mec2.bin"); MODULE_FIRMWARE("amdgpu/picasso_rlc.bin"); +MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin"); MODULE_FIRMWARE("amdgpu/raven2_ce.bin"); MODULE_FIRMWARE("amdgpu/raven2_pfp.bin"); @@ -645,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); + /* + * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin + * instead of picasso_rlc.bin. + * Judgment method: + * PCO AM4: revision >= 0xC8 && revision <= 0xCF + * or revision >= 0xD8 && revision <= 0xDF + * otherwise is PCO FP5 + */ + if (!strcmp(chip_name, "picasso") && + (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) || + ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF)))) + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name); + else + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); if (err) goto out; @@ -1566,7 +1580,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->ring_obj = NULL; ring->use_doorbell = true; - ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1; + ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); @@ -1655,7 +1669,7 @@ static int gfx_v9_0_sw_init(void *handle) else sprintf(ring->name, "gfx_%d", i); ring->use_doorbell = true; - ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1; + ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); if (r) @@ -2326,12 +2340,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev) #endif WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); + udelay(50); /* carrizo do enable cp interrupt after cp inited */ - if (!(adev->flags & AMD_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) { gfx_v9_0_enable_gui_idle_interrupt(adev, true); - - udelay(50); + udelay(50); + } #ifdef AMDGPU_RLC_DEBUG_RETRY /* RLC_GPM_GENERAL_6 : RLC Ucode version */ @@ -2981,9 +2996,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) /* enable the doorbell if requested */ if (ring->use_doorbell) { WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, - (AMDGPU_DOORBELL64_KIQ *2) << 2); + (adev->doorbell_index.kiq * 2) << 2); WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, - (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2); + (adev->doorbell_index.userqueue_end * 2) << 2); } WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 531aaf377592..1ad7e6b8ed1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) chip_name = "tonga"; break; case CHIP_POLARIS11: - chip_name = "polaris11"; + if (((adev->pdev->device == 0x67ef) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe5))) || + ((adev->pdev->device == 0x67ff) && + ((adev->pdev->revision == 0xcf) || + (adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) + chip_name = "polaris11_k"; + else if ((adev->pdev->device == 0x67ef) && + (adev->pdev->revision == 0xe2)) + chip_name = "polaris11_k"; + else + chip_name = "polaris11"; break; case CHIP_POLARIS10: - chip_name = "polaris10"; + if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe1) || + (adev->pdev->revision == 0xf7))) + chip_name = "polaris10_k"; + else + chip_name = "polaris10"; break; case CHIP_POLARIS12: - chip_name = "polaris12"; + if (((adev->pdev->device == 0x6987) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc3))) || + ((adev->pdev->device == 0x6981) && + ((adev->pdev->revision == 0x00) || + (adev->pdev->revision == 0x01) || + (adev->pdev->revision == 0x10)))) + chip_name = "polaris12_k"; + else + chip_name = "polaris12"; break; case CHIP_FIJI: case CHIP_CARRIZO: @@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 data, vbios_version; + u32 data; int i, ucode_size, regs_size; /* Skip MC ucode loading on SR-IOV capable boards. @@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) if (amdgpu_sriov_bios(adev)) return 0; - WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); - data = RREG32(mmMC_SEQ_IO_DEBUG_DATA); - vbios_version = data & 0xf; - - if (vbios_version == 0) - return 0; - if (!adev->gmc.fw) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 811231e4ec53..bacdaef77b6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, return 0; } +/** + * vega10_ih_prescreen_iv - prescreen an interrupt vector + * + * @adev: amdgpu_device pointer + * + * Returns true if the interrupt vector should be further processed. + */ +static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry, + uint64_t addr) +{ + struct amdgpu_vm *vm; + u64 key; + int r; + + /* No PASID, can't identify faulting process */ + if (!entry->pasid) + return true; + + /* Not a retry fault */ + if (!(entry->src_data[1] & 0x80)) + return true; + + /* Track retry faults in per-VM fault FIFO. */ + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid); + if (!vm) { + /* VM not found, process it normally */ + spin_unlock(&adev->vm_manager.pasid_lock); + return true; + } + + key = AMDGPU_VM_FAULT(entry->pasid, addr); + r = amdgpu_vm_add_fault(vm->fault_hash, key); + + /* Hash table is full or the fault is already being processed, + * ignore further page faults + */ + if (r != 0) { + spin_unlock(&adev->vm_manager.pasid_lock); + return false; + } + /* No locking required with single writer and single reader */ + r = kfifo_put(&vm->faults, key); + if (!r) { + /* FIFO is full. Ignore it until there is space */ + amdgpu_vm_clear_fault(vm->fault_hash, key); + spin_unlock(&adev->vm_manager.pasid_lock); + return false; + } + + spin_unlock(&adev->vm_manager.pasid_lock); + /* It's the first fault for this address, process it normally */ + return true; +} + static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; + if (!gmc_v9_0_prescreen_iv(adev, entry, addr)) + return 1; /* This also prevents sending it to KFD */ + if (!amdgpu_sriov_vf(adev)) { status = RREG32(hub->vm_l2_pro_fault_status); WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); @@ -338,9 +397,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, struct amdgpu_vmhub *hub = &adev->vmhub[i]; u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); - if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset && - adev->gfx.kiq.ring.sched.ready && - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { + /* This is necessary for a HW workaround under SRIOV as well + * as GFXOFF under bare metal + */ + if (adev->gfx.kiq.ring.sched.ready && + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && + !adev->in_gpu_reset) { uint32_t req = hub->vm_inv_eng0_req + eng; uint32_t ack = hub->vm_inv_eng0_ack + eng; @@ -656,37 +718,46 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) } } -static int gmc_v9_0_late_init(void *handle) +static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* - * The latest engine allocation on gfx9 is: - * Engine 0, 1: idle - * Engine 2, 3: firmware - * Engine 4~13: amdgpu ring, subject to change when ring number changes - * Engine 14~15: idle - * Engine 16: kfd tlb invalidation - * Engine 17: Gart flushes - */ - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; + struct amdgpu_ring *ring; + unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = + {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP}; unsigned i; - int r; + unsigned vmhub, inv_eng; - if (!gmc_v9_0_keep_stolen_memory(adev)) - amdgpu_bo_late_init(adev); + for (i = 0; i < adev->num_rings; ++i) { + ring = adev->rings[i]; + vmhub = ring->funcs->vmhub; + + inv_eng = ffs(vm_inv_engs[vmhub]); + if (!inv_eng) { + dev_err(adev->dev, "no VM inv eng for ring %s\n", + ring->name); + return -EINVAL; + } - for(i = 0; i < adev->num_rings; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - unsigned vmhub = ring->funcs->vmhub; + ring->vm_inv_eng = inv_eng - 1; + change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); - ring->vm_inv_eng = vm_inv_eng[vmhub]++; dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", ring->name, ring->vm_inv_eng, ring->funcs->vmhub); } - /* Engine 16 is used for KFD and 17 for GART flushes */ - for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 16); + return 0; +} + +static int gmc_v9_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + if (!gmc_v9_0_keep_stolen_memory(adev)) + amdgpu_bo_late_init(adev); + + r = gmc_v9_0_allocate_vm_inv_eng(adev); + if (r) + return r; if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) { r = gmc_v9_0_ecc_available(adev); @@ -899,6 +970,9 @@ static int gmc_v9_0_sw_init(void *handle) /* This interrupt is VMC page fault.*/ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, &adev->gmc.vm_fault); + if (r) + return r; + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, &adev->gmc.vm_fault); @@ -931,7 +1005,7 @@ static int gmc_v9_0_sw_init(void *handle) } adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); - if (adev->asic_type == CHIP_VEGA20) { + if (adev->gmc.xgmi.supported) { r = gfxhub_v1_1_get_xgmi_info(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index b030ca5ea107..5c8deac65580 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -24,6 +24,16 @@ #ifndef __GMC_V9_0_H__ #define __GMC_V9_0_H__ + /* + * The latest engine allocation on gfx9 is: + * Engine 2, 3: firmware + * Engine 0, 1, 4~16: amdgpu ring, + * subject to change when ring number changes + * Engine 17: Gart flushes + */ +#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 +#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 + extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index cf0fc61aebe6..a3984d10b604 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -208,34 +208,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) } /** - * iceland_ih_prescreen_iv - prescreen an interrupt vector - * - * @adev: amdgpu_device pointer - * - * Returns true if the interrupt vector should be further processed. - */ -static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev) -{ - u32 ring_index = adev->irq.ih.rptr >> 2; - u16 pasid; - - switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { - case 146: - case 147: - pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; - if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) - return true; - break; - default: - /* Not a VM fault */ - return true; - } - - adev->irq.ih.rptr += 16; - return false; -} - -/** * iceland_ih_decode_iv - decode an interrupt vector * * @adev: amdgpu_device pointer @@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = { static const struct amdgpu_ih_funcs iceland_ih_funcs = { .get_wptr = iceland_ih_get_wptr, - .prescreen_iv = iceland_ih_prescreen_iv, .decode_iv = iceland_ih_decode_iv, .set_rptr = iceland_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 64e875d528dd..6a0fcd67662a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -37,7 +37,6 @@ #include "gmc/gmc_8_2_sh_mask.h" #include "oss/oss_3_0_d.h" #include "oss/oss_3_0_sh_mask.h" -#include "gca/gfx_8_0_sh_mask.h" #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" #include "smu/smu_7_1_3_d.h" diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 6f9c54978cc1..accdedd63c98 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -32,6 +32,7 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 #define smnPCIE_CONFIG_CNTL 0x11180044 +#define smnPCIE_CI_CNTL 0x11180080 static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { @@ -270,6 +271,12 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_CI_CNTL); + data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); + + if (def != data) + WREG32_PCIE(smnPCIE_CI_CNTL, data); } const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index f8cee95d61cc..4cd31a276dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -31,6 +31,7 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 +#define smnPCIE_CI_CNTL 0x11180080 static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev) { @@ -222,7 +223,13 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_CI_CNTL); + data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); + if (def != data) + WREG32_PCIE(smnPCIE_CI_CNTL, data); } const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 882bd83a28c4..0de00fbe9233 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */ GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */ GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */ + GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */ + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */ GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */ }; @@ -89,7 +91,8 @@ enum psp_gfx_cmd_id GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */ GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */ GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */ - + GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */ + GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */ }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 295c2205485a..d78b4306a36f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; - struct psp_ring *ring; unsigned int psp_ring_reg = 0; struct amdgpu_device *adev = psp->adev; - ring = &psp->km_ring; - /* Write the ring destroy command to C2PMSG_64 */ psp_ring_reg = 3 << 16; WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index e5dd052d9e06..0c6e7f9b143f 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -34,6 +34,7 @@ #include "nbio/nbio_7_4_offset.h" MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); +MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); /* address block */ @@ -100,6 +101,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) char fw_name[30]; int err = 0; const struct psp_firmware_header_v1_0 *sos_hdr; + const struct psp_firmware_header_v1_0 *asd_hdr; const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); @@ -132,14 +134,30 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + le32_to_cpu(sos_hdr->sos_offset_bytes); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); + err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + if (err) + goto out1; + + err = amdgpu_ucode_validate(adev->psp.asd_fw); + if (err) + goto out1; + + asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; + adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); + adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); + adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); + adev->psp.asd_start_addr = (uint8_t *)asd_hdr + + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) - goto out; + goto out2; err = amdgpu_ucode_validate(adev->psp.ta_fw); if (err) - goto out; + goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); @@ -148,14 +166,18 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); return 0; + +out2: + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; +out1: + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; out: - if (err) { - dev_err(adev->dev, - "psp v11.0: Failed to load firmware \"%s\"\n", - fw_name); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - } + dev_err(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); + release_firmware(adev->psp.sos_fw); + adev->psp.sos_fw = NULL; return err; } @@ -171,8 +193,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) * are already been loaded. */ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) + if (sol_reg) { + psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); + printk("sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; + } /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), @@ -288,6 +313,13 @@ static int psp_v11_0_ring_init(struct psp_context *psp, return 0; } +static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) +{ + if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) + return true; + return false; +} + static int psp_v11_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -296,26 +328,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct psp_ring *ring = &psp->km_ring; struct amdgpu_device *adev = psp->adev; - /* Write low address of the ring to C2PMSG_69 */ - psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); - /* Write high address of the ring to C2PMSG_70 */ - psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg); - /* Write size of ring to C2PMSG_71 */ - psp_ring_reg = ring->ring_size; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg); - /* Write the ring initialization command to C2PMSG_64 */ - psp_ring_reg = ring_type; - psp_ring_reg = psp_ring_reg << 16; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + if (psp_v11_0_support_vmr_ring(psp)) { + /* Write low address of the ring to C2PMSG_102 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); + /* Write high address of the ring to C2PMSG_103 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg); + + /* Write the ring initialization command to C2PMSG_101 */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_INIT_GPCOM_RING); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_101 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x8000FFFF, false); + + } else { + /* Write low address of the ring to C2PMSG_69 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); + /* Write high address of the ring to C2PMSG_70 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg); + /* Write size of ring to C2PMSG_71 */ + psp_ring_reg = ring->ring_size; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg); + /* Write the ring initialization command to C2PMSG_64 */ + psp_ring_reg = ring_type; + psp_ring_reg = psp_ring_reg << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, false); + } return ret; } @@ -326,15 +379,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, int ret = 0; struct amdgpu_device *adev = psp->adev; - /* Write the ring destroy command to C2PMSG_64 */ - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS); + /* Write the ring destroy command*/ + if (psp_v11_0_support_vmr_ring(psp)) + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); /* there might be handshake issue with hardware which needs delay */ mdelay(20); - /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + /* Wait for response flag (bit 31) */ + if (psp_v11_0_support_vmr_ring(psp)) + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); return ret; } @@ -373,7 +435,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp, uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; /* KM (GPCOM) prepare write pointer */ - psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + if (psp_v11_0_support_vmr_ring(psp)) + psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + else + psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); /* Update KM RB frame pointer to new frame */ /* write_frame ptr increments by size of rb_frame in bytes */ @@ -402,7 +467,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp, /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); + if (psp_v11_0_support_vmr_ring(psp)) { + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + } else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); return 0; } @@ -547,7 +616,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) /*send the mode 1 reset command*/ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST); - mdelay(1000); + msleep(500); offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); @@ -640,7 +709,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); } -static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) +static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) { struct ta_xgmi_shared_memory *xgmi_cmd; int ret; @@ -653,12 +722,14 @@ static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) /* Invoke xgmi ta to get hive id */ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); if (ret) - return 0; - else - return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; + return ret; + + *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; + + return 0; } -static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) +static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) { struct ta_xgmi_shared_memory *xgmi_cmd; int ret; @@ -671,9 +742,11 @@ static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) /* Invoke xgmi ta to get the node id */ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); if (ret) - return 0; - else - return xgmi_cmd->xgmi_out_message.get_node_id.node_id; + return ret; + + *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; + + return 0; } static const struct psp_funcs psp_v11_0_funcs = { @@ -692,6 +765,7 @@ static const struct psp_funcs psp_v11_0_funcs = { .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id, + .support_vmr_ring = psp_v11_0_support_vmr_ring, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 9cea0bbe4525..79694ff16969 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -240,8 +240,11 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) * are already been loaded. */ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) + if (sol_reg) { + psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); + printk("sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; + } /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), @@ -356,12 +359,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; - struct psp_ring *ring; unsigned int psp_ring_reg = 0; struct amdgpu_device *adev = psp->adev; - ring = &psp->km_ring; - /* Write the ring destroy command to C2PMSG_64 */ psp_ring_reg = 3 << 16; WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); @@ -593,9 +593,9 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) } /*send the mode 1 reset command*/ - WREG32(offset, 0x70000); + WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST); - mdelay(1000); + msleep(500); offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index b6a25f92d566..1bccc5fe2d9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1146,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle) if (!amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; ring->doorbell_index = (i == 0) ? - AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1; + adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1; } else { ring->use_pollmem = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index f4490cdd9804..fd0bfe140ee0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -925,11 +925,9 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) OFFSET, ring->doorbell_index); WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell); WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset); - /* TODO: enable doorbell support */ - /*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index);*/ - sdma_v4_0_ring_set_wptr(ring); + /* paging queue doorbell range is setup at sdma_v4_0_gfx_resume */ + sdma_v4_0_page_ring_set_wptr(ring); /* set minor_ptr_update to 0 after wptr programed */ WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0); @@ -1449,23 +1447,45 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10); } +static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) +{ + uint fw_version = adev->sdma.instance[0].fw_version; + + switch (adev->asic_type) { + case CHIP_VEGA10: + return fw_version >= 430; + case CHIP_VEGA12: + /*return fw_version >= 31;*/ + return false; + case CHIP_VEGA20: + return fw_version >= 123; + default: + return false; + } +} + static int sdma_v4_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; - if (adev->asic_type == CHIP_RAVEN) { + if (adev->asic_type == CHIP_RAVEN) adev->sdma.num_instances = 1; - adev->sdma.has_page_queue = false; - } else { + else adev->sdma.num_instances = 2; - /* TODO: Page queue breaks driver reload under SRIOV */ - if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) - adev->sdma.has_page_queue = false; - else if (adev->asic_type != CHIP_VEGA20 && - adev->asic_type != CHIP_VEGA12) - adev->sdma.has_page_queue = true; + + r = sdma_v4_0_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load sdma firmware!\n"); + return r; } + /* TODO: Page queue breaks driver reload under SRIOV */ + if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) + adev->sdma.has_page_queue = false; + else if (sdma_v4_0_fw_support_paging_queue(adev)) + adev->sdma.has_page_queue = true; + sdma_v4_0_set_ring_funcs(adev); sdma_v4_0_set_buffer_funcs(adev); sdma_v4_0_set_vm_pte_funcs(adev); @@ -1474,7 +1494,6 @@ static int sdma_v4_0_early_init(void *handle) return 0; } - static int sdma_v4_0_sw_init(void *handle) { struct amdgpu_ring *ring; @@ -1493,12 +1512,6 @@ static int sdma_v4_0_sw_init(void *handle) if (r) return r; - r = sdma_v4_0_init_microcode(adev); - if (r) { - DRM_ERROR("Failed to load sdma firmware!\n"); - return r; - } - for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; @@ -1507,15 +1520,10 @@ static int sdma_v4_0_sw_init(void *handle) DRM_INFO("use_doorbell being set to: [%s]\n", ring->use_doorbell?"true":"false"); - if (adev->asic_type == CHIP_VEGA10) - ring->doorbell_index = (i == 0) ? - (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset - : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset - else - ring->doorbell_index = (i == 0) ? - (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset - : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset - + /* doorbell size is 2 dwords, get DWORD offset */ + ring->doorbell_index = (i == 0) ? + (adev->doorbell_index.sdma_engine0 << 1) + : (adev->doorbell_index.sdma_engine1 << 1); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, @@ -1529,7 +1537,15 @@ static int sdma_v4_0_sw_init(void *handle) if (adev->sdma.has_page_queue) { ring = &adev->sdma.instance[i].page; ring->ring_obj = NULL; - ring->use_doorbell = false; + ring->use_doorbell = true; + + /* paging queue use same doorbell index/routing as gfx queue + * with 0x400 (4096 dwords) offset on second doorbell page + */ + ring->doorbell_index = (i == 0) ? + (adev->doorbell_index.sdma_engine0 << 1) + : (adev->doorbell_index.sdma_engine1 << 1); + ring->doorbell_index += 0x400; sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, @@ -1689,13 +1705,15 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, amdgpu_fence_process(&adev->sdma.instance[instance].ring); break; case 1: - /* XXX compute */ + if (adev->asic_type == CHIP_VEGA20) + amdgpu_fence_process(&adev->sdma.instance[instance].page); break; case 2: /* XXX compute */ break; case 3: - amdgpu_fence_process(&adev->sdma.instance[instance].page); + if (adev->asic_type != CHIP_VEGA20) + amdgpu_fence_process(&adev->sdma.instance[instance].page); break; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index b3d7d9f83202..2938fb9f17cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev) return (wptr & adev->irq.ih.ptr_mask); } -/** - * si_ih_prescreen_iv - prescreen an interrupt vector - * - * @adev: amdgpu_device pointer - * - * Returns true if the interrupt vector should be further processed. - */ -static bool si_ih_prescreen_iv(struct amdgpu_device *adev) -{ - /* Process all interrupts */ - return true; -} - static void si_ih_decode_iv(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { @@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = { static const struct amdgpu_ih_funcs si_ih_funcs = { .get_wptr = si_ih_get_wptr, - .prescreen_iv = si_ih_prescreen_iv, .decode_iv = si_ih_decode_iv, .set_rptr = si_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4cc0dcb1a187..8849b74078d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -507,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) return -EINVAL; } + if (adev->asic_type == CHIP_VEGA20) + adev->gmc.xgmi.supported = true; + if (adev->flags & AMD_IS_APU) adev->nbio_funcs = &nbio_v7_0_funcs; else if (adev->asic_type == CHIP_VEGA20) @@ -613,6 +616,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .flush_hdp = &soc15_flush_hdp, .invalidate_hdp = &soc15_invalidate_hdp, .need_full_reset = &soc15_need_full_reset, + .init_doorbell_index = &vega10_doorbell_index_init, +}; + +static const struct amdgpu_asic_funcs vega20_asic_funcs = +{ + .read_disabled_bios = &soc15_read_disabled_bios, + .read_bios_from_rom = &soc15_read_bios_from_rom, + .read_register = &soc15_read_register, + .reset = &soc15_asic_reset, + .set_vga_state = &soc15_vga_set_state, + .get_xclk = &soc15_get_xclk, + .set_uvd_clocks = &soc15_set_uvd_clocks, + .set_vce_clocks = &soc15_set_vce_clocks, + .get_config_memsize = &soc15_get_config_memsize, + .flush_hdp = &soc15_flush_hdp, + .invalidate_hdp = &soc15_invalidate_hdp, + .need_full_reset = &soc15_need_full_reset, + .init_doorbell_index = &vega20_doorbell_index_init, }; static int soc15_common_early_init(void *handle) @@ -632,11 +653,11 @@ static int soc15_common_early_init(void *handle) adev->se_cac_rreg = &soc15_se_cac_rreg; adev->se_cac_wreg = &soc15_se_cac_wreg; - adev->asic_funcs = &soc15_asic_funcs; adev->external_rev_id = 0xFF; switch (adev->asic_type) { case CHIP_VEGA10: + adev->asic_funcs = &soc15_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_RLC_LS | @@ -660,6 +681,7 @@ static int soc15_common_early_init(void *handle) adev->external_rev_id = 0x1; break; case CHIP_VEGA12: + adev->asic_funcs = &soc15_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CGCG | @@ -682,6 +704,7 @@ static int soc15_common_early_init(void *handle) adev->external_rev_id = adev->rev_id + 0x14; break; case CHIP_VEGA20: + adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CGCG | @@ -704,6 +727,7 @@ static int soc15_common_early_init(void *handle) adev->external_rev_id = adev->rev_id + 0x28; break; case CHIP_RAVEN: + adev->asic_funcs = &soc15_asic_funcs; if (adev->rev_id >= 0x8) adev->external_rev_id = adev->rev_id + 0x81; else if (adev->pdev->device == 0x15d8) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index f8ad7804dc40..a66c8bfbbaa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); +void vega10_doorbell_index_init(struct amdgpu_device *adev); +void vega20_doorbell_index_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 958b10a57073..49c262540940 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -49,14 +49,19 @@ #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \ do { \ + uint32_t old_ = 0; \ uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ uint32_t loop = adev->usec_timeout; \ while ((tmp_ & (mask)) != (expected_value)) { \ - udelay(2); \ + if (old_ != tmp_) { \ + loop = adev->usec_timeout; \ + old_ = tmp_; \ + } else \ + udelay(1); \ tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ loop--; \ if (!loop) { \ - DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ + DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \ ret = -ETIMEDOUT; \ break; \ diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 3abffd06b5c7..15da06ddeb75 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -219,34 +219,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) } /** - * tonga_ih_prescreen_iv - prescreen an interrupt vector - * - * @adev: amdgpu_device pointer - * - * Returns true if the interrupt vector should be further processed. - */ -static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev) -{ - u32 ring_index = adev->irq.ih.rptr >> 2; - u16 pasid; - - switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) { - case 146: - case 147: - pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16; - if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid)) - return true; - break; - default: - /* Not a VM fault */ - return true; - } - - adev->irq.ih.rptr += 16; - return false; -} - -/** * tonga_ih_decode_iv - decode an interrupt vector * * @adev: amdgpu_device pointer @@ -322,7 +294,7 @@ static int tonga_ih_sw_init(void *handle) return r; adev->irq.ih.use_doorbell = true; - adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH; + adev->irq.ih.doorbell_index = adev->doorbell_index.ih; r = amdgpu_irq_init(adev); @@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = { static const struct amdgpu_ih_funcs tonga_ih_funcs = { .get_wptr = tonga_ih_get_wptr, - .prescreen_iv = tonga_ih_prescreen_iv, .decode_iv = tonga_ih_decode_iv, .set_rptr = tonga_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 90bbcee00f28..d69c8f6daaf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 1c5e12703103..ee8cd06ddc38 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle) if (r) return r; - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index f184842ef2a2..d4f4a66f8324 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -400,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle) DRM_INFO("UVD ENC is disabled\n"); } - r = amdgpu_uvd_resume(adev); - if (r) - return r; - ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; + r = amdgpu_uvd_resume(adev); + if (r) + return r; + if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst->ring_enc[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 8a4595968d98..aef924026a28 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -430,16 +430,12 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - r = amdgpu_uvd_resume(adev); - if (r) - return r; - for (j = 0; j < adev->uvd.num_uvd_inst; j++) { if (adev->uvd.harvest_config & (1 << j)) continue; if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; - sprintf(ring->name, "uvd<%d>", j); + sprintf(ring->name, "uvd_%d", ring->me); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); if (r) return r; @@ -447,7 +443,7 @@ static int uvd_v7_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst[j].ring_enc[i]; - sprintf(ring->name, "uvd_enc%d<%d>", i, j); + sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; @@ -455,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle) * sriov, so set unused location for other unused rings. */ if (i == 0) - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2; else - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; + ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); if (r) @@ -465,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle) } } + r = amdgpu_uvd_resume(adev); + if (r) + return r; + r = amdgpu_uvd_entity_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 3e84840859a7..2668effadd27 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -37,7 +37,6 @@ #include "gca/gfx_8_0_d.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" -#include "gca/gfx_8_0_d.h" #include "gca/gfx_8_0_sh_mask.h" #include "ivsrcid/ivsrcid_vislands30.h" diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 0054ba1b9a68..9fb34b7d8e03 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle) * so set unused location for other unused rings. */ if (i == 0) - ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2; + ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2; else - ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1; + ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index c1a03505f956..89bb2fef90eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr); +static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); /** * vcn_v1_0_early_init - set function pointers @@ -213,8 +214,9 @@ static int vcn_v1_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->vcn.ring_dec; - if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) - vcn_v1_0_stop(adev); + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || + RREG32_SOC15(VCN, 0, mmUVD_STATUS)) + vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); ring->sched.ready = false; @@ -1086,7 +1088,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - /* initialize wptr */ + /* initialize JPEG wptr */ + ring = &adev->vcn.ring_jpeg; ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); /* copy patch commands to the jpeg ring */ @@ -1158,21 +1161,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev) { int ret_code = 0; + uint32_t tmp; /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); - if (!ret_code) { - int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; - /* wait for read ptr to be equal to write ptr */ - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, - UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); - } + tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); + + tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + + tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); /* disable dynamic power gating mode */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index a0fda6f9252a..2c250b01a903 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -220,90 +220,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) } /** - * vega10_ih_prescreen_iv - prescreen an interrupt vector - * - * @adev: amdgpu_device pointer - * - * Returns true if the interrupt vector should be further processed. - */ -static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev) -{ - u32 ring_index = adev->irq.ih.rptr >> 2; - u32 dw0, dw3, dw4, dw5; - u16 pasid; - u64 addr, key; - struct amdgpu_vm *vm; - int r; - - dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); - dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); - dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]); - dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]); - - /* Filter retry page faults, let only the first one pass. If - * there are too many outstanding faults, ignore them until - * some faults get cleared. - */ - switch (dw0 & 0xff) { - case SOC15_IH_CLIENTID_VMC: - case SOC15_IH_CLIENTID_UTCL2: - break; - default: - /* Not a VM fault */ - return true; - } - - pasid = dw3 & 0xffff; - /* No PASID, can't identify faulting process */ - if (!pasid) - return true; - - /* Not a retry fault, check fault credit */ - if (!(dw5 & 0x80)) { - if (!amdgpu_vm_pasid_fault_credit(adev, pasid)) - goto ignore_iv; - return true; - } - - /* Track retry faults in per-VM fault FIFO. */ - spin_lock(&adev->vm_manager.pasid_lock); - vm = idr_find(&adev->vm_manager.pasid_idr, pasid); - addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12); - key = AMDGPU_VM_FAULT(pasid, addr); - if (!vm) { - /* VM not found, process it normally */ - spin_unlock(&adev->vm_manager.pasid_lock); - return true; - } else { - r = amdgpu_vm_add_fault(vm->fault_hash, key); - - /* Hash table is full or the fault is already being processed, - * ignore further page faults - */ - if (r != 0) { - spin_unlock(&adev->vm_manager.pasid_lock); - goto ignore_iv; - } - } - /* No locking required with single writer and single reader */ - r = kfifo_put(&vm->faults, key); - if (!r) { - /* FIFO is full. Ignore it until there is space */ - amdgpu_vm_clear_fault(vm->fault_hash, key); - spin_unlock(&adev->vm_manager.pasid_lock); - goto ignore_iv; - } - - spin_unlock(&adev->vm_manager.pasid_lock); - /* It's the first fault for this address, process it normally */ - return true; - -ignore_iv: - adev->irq.ih.rptr += 32; - return false; -} - -/** * vega10_ih_decode_iv - decode an interrupt vector * * @adev: amdgpu_device pointer @@ -385,7 +301,7 @@ static int vega10_ih_sw_init(void *handle) return r; adev->irq.ih.use_doorbell = true; - adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1; + adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; r = amdgpu_irq_init(adev); @@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega10_ih_funcs = { .get_wptr = vega10_ih_get_wptr, - .prescreen_iv = vega10_ih_prescreen_iv, .decode_iv = vega10_ih_decode_iv, .set_rptr = vega10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c index c5c9b2bc190d..422674bb3cdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c @@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev) return 0; } +void vega10_doorbell_index_init(struct amdgpu_device *adev) +{ + adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ; + adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0; + adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1; + adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2; + adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3; + adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4; + adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5; + adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6; + adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7; + adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START; + adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END; + adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0; + adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0; + adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1; + adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH; + adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1; + adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3; + adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5; + adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7; + adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1; + adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3; + adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5; + adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7; + /* In unit of dword doorbell */ + adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1; +} diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index d13fc4fcb517..edce413fda9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -54,4 +54,37 @@ int vega20_reg_base_init(struct amdgpu_device *adev) return 0; } +void vega20_doorbell_index_init(struct amdgpu_device *adev) +{ + adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ; + adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0; + adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1; + adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2; + adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3; + adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4; + adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5; + adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6; + adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7; + adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START; + adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END; + adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0; + adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0; + adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1; + adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2; + adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3; + adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4; + adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5; + adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6; + adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7; + adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH; + adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1; + adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3; + adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5; + adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7; + adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1; + adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3; + adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5; + adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7; + adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1; +} diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 07880d35e9de..77e367459101 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -87,9 +87,9 @@ static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg) u32 r; spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(mmPCIE_INDEX, reg); - (void)RREG32(mmPCIE_INDEX); - r = RREG32(mmPCIE_DATA); + WREG32_NO_KIQ(mmPCIE_INDEX, reg); + (void)RREG32_NO_KIQ(mmPCIE_INDEX); + r = RREG32_NO_KIQ(mmPCIE_DATA); spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); return r; } @@ -99,10 +99,10 @@ static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) unsigned long flags; spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(mmPCIE_INDEX, reg); - (void)RREG32(mmPCIE_INDEX); - WREG32(mmPCIE_DATA, v); - (void)RREG32(mmPCIE_DATA); + WREG32_NO_KIQ(mmPCIE_INDEX, reg); + (void)RREG32_NO_KIQ(mmPCIE_INDEX); + WREG32_NO_KIQ(mmPCIE_DATA, v); + (void)RREG32_NO_KIQ(mmPCIE_DATA); spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } @@ -123,8 +123,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) unsigned long flags; spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_11, (reg)); - WREG32(mmSMC_IND_DATA_11, (v)); + WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg)); + WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v)); spin_unlock_irqrestore(&adev->smc_idx_lock, flags); } @@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .flush_hdp = &vi_flush_hdp, .invalidate_hdp = &vi_invalidate_hdp, .need_full_reset = &vi_need_full_reset, + .init_doorbell_index = &legacy_doorbell_index_init, }; #define CZ_REV_BRISTOL(rev) \ @@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) return 0; } + +void legacy_doorbell_index_init(struct amdgpu_device *adev) +{ + adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ; + adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0; + adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1; + adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2; + adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3; + adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4; + adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5; + adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6; + adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7; + adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0; + adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0; + adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1; + adev->doorbell_index.ih = AMDGPU_DOORBELL_IH; + adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT; +} diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 0429fe332269..8de0772f986c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int vi_set_ip_blocks(struct amdgpu_device *adev); +void legacy_doorbell_index_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 5f4062b41add..083bd8114db1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -33,6 +33,7 @@ #include <linux/time.h> #include <linux/mm.h> #include <linux/mman.h> +#include <linux/dma-buf.h> #include <asm/processor.h> #include "kfd_priv.h" #include "kfd_device_queue_manager.h" @@ -157,8 +158,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, } if ((args->ring_base_address) && - (!access_ok(VERIFY_WRITE, - (const void __user *) args->ring_base_address, + (!access_ok((const void __user *) args->ring_base_address, sizeof(uint64_t)))) { pr_err("Can't access ring base address\n"); return -EFAULT; @@ -169,31 +169,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, return -EINVAL; } - if (!access_ok(VERIFY_WRITE, - (const void __user *) args->read_pointer_address, + if (!access_ok((const void __user *) args->read_pointer_address, sizeof(uint32_t))) { pr_err("Can't access read pointer\n"); return -EFAULT; } - if (!access_ok(VERIFY_WRITE, - (const void __user *) args->write_pointer_address, + if (!access_ok((const void __user *) args->write_pointer_address, sizeof(uint32_t))) { pr_err("Can't access write pointer\n"); return -EFAULT; } if (args->eop_buffer_address && - !access_ok(VERIFY_WRITE, - (const void __user *) args->eop_buffer_address, + !access_ok((const void __user *) args->eop_buffer_address, sizeof(uint32_t))) { pr_debug("Can't access eop buffer"); return -EFAULT; } if (args->ctx_save_restore_address && - !access_ok(VERIFY_WRITE, - (const void __user *) args->ctx_save_restore_address, + !access_ok((const void __user *) args->ctx_save_restore_address, sizeof(uint32_t))) { pr_debug("Can't access ctx save restore buffer"); return -EFAULT; @@ -364,8 +360,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, } if ((args->ring_base_address) && - (!access_ok(VERIFY_WRITE, - (const void __user *) args->ring_base_address, + (!access_ok((const void __user *) args->ring_base_address, sizeof(uint64_t)))) { pr_err("Can't access ring base address\n"); return -EFAULT; @@ -1273,6 +1268,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, return -EINVAL; } + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { + if (args->size != kfd_doorbell_process_slice(dev)) + return -EINVAL; + offset = kfd_get_process_doorbells(dev, p); + } + mutex_lock(&p->mutex); pdd = kfd_bind_process_to_device(dev, p); @@ -1550,6 +1551,115 @@ copy_from_user_failed: return err; } +static int kfd_ioctl_get_dmabuf_info(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_get_dmabuf_info_args *args = data; + struct kfd_dev *dev = NULL; + struct kgd_dev *dma_buf_kgd; + void *metadata_buffer = NULL; + uint32_t flags; + unsigned int i; + int r; + + /* Find a KFD GPU device that supports the get_dmabuf_info query */ + for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++) + if (dev) + break; + if (!dev) + return -EINVAL; + + if (args->metadata_ptr) { + metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL); + if (!metadata_buffer) + return -ENOMEM; + } + + /* Get dmabuf info from KGD */ + r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd, + &dma_buf_kgd, &args->size, + metadata_buffer, args->metadata_size, + &args->metadata_size, &flags); + if (r) + goto exit; + + /* Reverse-lookup gpu_id from kgd pointer */ + dev = kfd_device_by_kgd(dma_buf_kgd); + if (!dev) { + r = -EINVAL; + goto exit; + } + args->gpu_id = dev->id; + args->flags = flags; + + /* Copy metadata buffer to user mode */ + if (metadata_buffer) { + r = copy_to_user((void __user *)args->metadata_ptr, + metadata_buffer, args->metadata_size); + if (r != 0) + r = -EFAULT; + } + +exit: + kfree(metadata_buffer); + + return r; +} + +static int kfd_ioctl_import_dmabuf(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_import_dmabuf_args *args = data; + struct kfd_process_device *pdd; + struct dma_buf *dmabuf; + struct kfd_dev *dev; + int idr_handle; + uint64_t size; + void *mem; + int r; + + dev = kfd_device_by_id(args->gpu_id); + if (!dev) + return -EINVAL; + + dmabuf = dma_buf_get(args->dmabuf_fd); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + mutex_lock(&p->mutex); + + pdd = kfd_bind_process_to_device(dev, p); + if (IS_ERR(pdd)) { + r = PTR_ERR(pdd); + goto err_unlock; + } + + r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf, + args->va_addr, pdd->vm, + (struct kgd_mem **)&mem, &size, + NULL); + if (r) + goto err_unlock; + + idr_handle = kfd_process_device_create_obj_handle(pdd, mem); + if (idr_handle < 0) { + r = -EFAULT; + goto err_free; + } + + mutex_unlock(&p->mutex); + + args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); + + return 0; + +err_free: + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); +err_unlock: + mutex_unlock(&p->mutex); + return r; +} + #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ .cmd_drv = 0, .name = #ioctl} @@ -1635,7 +1745,13 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { kfd_ioctl_set_cu_mask, 0), AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE, - kfd_ioctl_get_queue_wave_state, 0) + kfd_ioctl_get_queue_wave_state, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO, + kfd_ioctl_get_dmabuf_info, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF, + kfd_ioctl_import_dmabuf, 0), }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 3783d122f283..b7bc7d7d048f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -133,6 +133,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { #define fiji_cache_info carrizo_cache_info #define polaris10_cache_info carrizo_cache_info #define polaris11_cache_info carrizo_cache_info +#define polaris12_cache_info carrizo_cache_info /* TODO - check & update Vega10 cache details */ #define vega10_cache_info carrizo_cache_info #define raven_cache_info carrizo_cache_info @@ -647,7 +648,12 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = polaris11_cache_info; num_of_cache_types = ARRAY_SIZE(polaris11_cache_info); break; + case CHIP_POLARIS12: + pcache_info = polaris12_cache_info; + num_of_cache_types = ARRAY_SIZE(polaris12_cache_info); + break; case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: pcache_info = vega10_cache_info; num_of_cache_types = ARRAY_SIZE(vega10_cache_info); @@ -847,7 +853,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, */ pgdat = NODE_DATA(numa_node_id); for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) - mem_in_bytes += pgdat->node_zones[zone_type].managed_pages; + mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]); mem_in_bytes <<= PAGE_SHIFT; sub_type_hdr->length_low = lower_32_bits(mem_in_bytes); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index c004647c8cb4..8be9677c0c07 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -205,6 +205,22 @@ static const struct kfd_device_info polaris11_device_info = { .num_sdma_queues_per_engine = 2, }; +static const struct kfd_device_info polaris12_device_info = { + .asic_family = CHIP_POLARIS12, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 4, + .ih_ring_entry_size = 4 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_cik, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = true, + .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, +}; + static const struct kfd_device_info vega10_device_info = { .asic_family = CHIP_VEGA10, .max_pasid_bits = 16, @@ -237,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = { .num_sdma_queues_per_engine = 2, }; +static const struct kfd_device_info vega12_device_info = { + .asic_family = CHIP_VEGA12, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, +}; + static const struct kfd_device_info vega20_device_info = { .asic_family = CHIP_VEGA20, .max_pasid_bits = 16, @@ -331,6 +363,14 @@ static const struct kfd_deviceid supported_devices[] = { { 0x67EB, &polaris11_device_info }, /* Polaris11 */ { 0x67EF, &polaris11_device_info }, /* Polaris11 */ { 0x67FF, &polaris11_device_info }, /* Polaris11 */ + { 0x6980, &polaris12_device_info }, /* Polaris12 */ + { 0x6981, &polaris12_device_info }, /* Polaris12 */ + { 0x6985, &polaris12_device_info }, /* Polaris12 */ + { 0x6986, &polaris12_device_info }, /* Polaris12 */ + { 0x6987, &polaris12_device_info }, /* Polaris12 */ + { 0x6995, &polaris12_device_info }, /* Polaris12 */ + { 0x6997, &polaris12_device_info }, /* Polaris12 */ + { 0x699F, &polaris12_device_info }, /* Polaris12 */ { 0x6860, &vega10_device_info }, /* Vega10 */ { 0x6861, &vega10_device_info }, /* Vega10 */ { 0x6862, &vega10_device_info }, /* Vega10 */ @@ -338,12 +378,24 @@ static const struct kfd_deviceid supported_devices[] = { { 0x6864, &vega10_device_info }, /* Vega10 */ { 0x6867, &vega10_device_info }, /* Vega10 */ { 0x6868, &vega10_device_info }, /* Vega10 */ + { 0x6869, &vega10_device_info }, /* Vega10 */ + { 0x686A, &vega10_device_info }, /* Vega10 */ + { 0x686B, &vega10_device_info }, /* Vega10 */ { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ + { 0x686D, &vega10_device_info }, /* Vega10 */ + { 0x686E, &vega10_device_info }, /* Vega10 */ + { 0x686F, &vega10_device_info }, /* Vega10 */ { 0x687F, &vega10_device_info }, /* Vega10 */ + { 0x69A0, &vega12_device_info }, /* Vega12 */ + { 0x69A1, &vega12_device_info }, /* Vega12 */ + { 0x69A2, &vega12_device_info }, /* Vega12 */ + { 0x69A3, &vega12_device_info }, /* Vega12 */ + { 0x69AF, &vega12_device_info }, /* Vega12 */ { 0x66a0, &vega20_device_info }, /* Vega20 */ { 0x66a1, &vega20_device_info }, /* Vega20 */ { 0x66a2, &vega20_device_info }, /* Vega20 */ { 0x66a3, &vega20_device_info }, /* Vega20 */ + { 0x66a4, &vega20_device_info }, /* Vega20 */ { 0x66a7, &vega20_device_info }, /* Vega20 */ { 0x66af, &vega20_device_info } /* Vega20 */ }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index fb9d66ea13b7..8372556b52eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1547,7 +1547,7 @@ static int get_wave_state(struct device_queue_manager *dqm, u32 *ctl_stack_used_size, u32 *save_area_used_size) { - struct mqd_manager *mqd; + struct mqd_manager *mqd_mgr; int r; dqm_lock(dqm); @@ -1558,19 +1558,19 @@ static int get_wave_state(struct device_queue_manager *dqm, goto dqm_unlock; } - mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); - if (!mqd) { + mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + if (!mqd_mgr) { r = -ENOMEM; goto dqm_unlock; } - if (!mqd->get_wave_state) { + if (!mqd_mgr->get_wave_state) { r = -EINVAL; goto dqm_unlock; } - r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size, - save_area_used_size); + r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, + ctl_stack_used_size, save_area_used_size); dqm_unlock: dqm_unlock(dqm); @@ -1741,10 +1741,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case CHIP_FIJI: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: device_queue_manager_init_vi_tonga(&dqm->asic_ops); break; case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: device_queue_manager_init_v9(&dqm->asic_ops); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index fd60a116be37..c3a5dcfe877a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -24,7 +24,6 @@ #include "kfd_device_queue_manager.h" #include "gca/gfx_8_0_enum.h" #include "gca/gfx_8_0_sh_mask.h" -#include "gca/gfx_8_0_enum.h" #include "oss/oss_3_0_sh_mask.h" static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 3d66cec414af..213ea5454d11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -397,9 +397,11 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_FIJI: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: kfd_init_apertures_vi(pdd, id); break; case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: kfd_init_apertures_v9(pdd, id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index f836897bbf58..a85904ad0d5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -23,7 +23,7 @@ #include "kfd_priv.h" #include "kfd_events.h" #include "soc15_int.h" - +#include "kfd_device_queue_manager.h" static bool event_interrupt_isr_v9(struct kfd_dev *dev, const uint32_t *ih_ring_entry, @@ -39,20 +39,39 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, vmid > dev->vm_info.last_vmid_kfd) return 0; - /* If there is no valid PASID, it's likely a firmware bug */ - pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); - if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt")) - return 0; - source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + + /* This is a known issue for gfx9. Under non HWS, pasid is not set + * in the interrupt payload, so we need to find out the pasid on our + * own. + */ + if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + const uint32_t pasid_mask = 0xffff; - pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n", - client_id, source_id, pasid); + *patched_flag = true; + memcpy(patched_ihre, ih_ring_entry, + dev->device_info->ih_ring_entry_size); + + pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid( + dev->kgd, vmid); + + /* Patch the pasid field */ + patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3]) + & ~pasid_mask) | pasid); + } + + pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", + client_id, source_id, vmid, pasid); pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]); + /* If there is no valid PASID, it's likely a bug */ + if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + return 0; + /* Interrupt types we care about: various signals and faults. * They will be forwarded to a work queue (see below). */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 6c31f7370193..f1596881f20a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -313,6 +313,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, case CHIP_FIJI: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: kernel_queue_init_vi(&kq->ops_asic_specific); break; @@ -322,6 +323,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, break; case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: kernel_queue_init_v9(&kq->ops_asic_specific); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 6910028010d6..aed9b9b82213 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -38,8 +38,10 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, case CHIP_FIJI: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: return mqd_manager_init_vi_tonga(type, dev); case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: return mqd_manager_init_v9(type, dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index c6080ed3b6a7..045a229436a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -226,9 +226,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_FIJI: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: pm->pmf = &kfd_vi_pm_funcs; break; case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: pm->pmf = &kfd_v9_pm_funcs; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index dec8e64f36bd..0689d4ccbbc0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -793,6 +793,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); +struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd); int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c73b4ff61f99..5f5b2acedbac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -101,7 +101,25 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) down_read(&topology_lock); list_for_each_entry(top_dev, &topology_device_list, list) - if (top_dev->gpu->pdev == pdev) { + if (top_dev->gpu && top_dev->gpu->pdev == pdev) { + device = top_dev->gpu; + break; + } + + up_read(&topology_lock); + + return device; +} + +struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd) +{ + struct kfd_topology_device *top_dev; + struct kfd_dev *device = NULL; + + down_read(&topology_lock); + + list_for_each_entry(top_dev, &topology_device_list, list) + if (top_dev->gpu && top_dev->gpu->kgd == kgd) { device = top_dev->gpu; break; } @@ -1272,12 +1290,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu) case CHIP_FIJI: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: pr_debug("Adding doorbell packet type capability\n"); dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); break; case CHIP_VEGA10: + case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index c97dc9613325..cfde1568c79a 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -32,11 +32,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 -DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet +DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d8d0b206a79c..8a626d16e8e3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -23,6 +23,9 @@ * */ +/* The caprices of the preprocessor require that this be declared right here */ +#define CREATE_TRACE_POINTS + #include "dm_services_types.h" #include "dc.h" #include "dc/inc/core_types.h" @@ -38,7 +41,6 @@ #include "amd_shared.h" #include "amdgpu_dm_irq.h" #include "dm_helpers.h" -#include "dm_services_types.h" #include "amdgpu_dm_mst_types.h" #if defined(CONFIG_DEBUG_FS) #include "amdgpu_dm_debugfs.h" @@ -55,6 +57,7 @@ #include <drm/drmP.h> #include <drm/drm_atomic.h> +#include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> #include <drm/drm_fb_helper.h> @@ -72,6 +75,8 @@ #endif #include "modules/inc/mod_freesync.h" +#include "modules/power/power_helpers.h" +#include "modules/inc/mod_info_packet.h" #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); @@ -129,6 +134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); +static void handle_cursor_update(struct drm_plane *plane, + struct drm_plane_state *old_plane_state); @@ -324,12 +331,29 @@ static void dm_crtc_high_irq(void *interrupt_params) struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; struct amdgpu_crtc *acrtc; + struct dm_crtc_state *acrtc_state; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); if (acrtc) { drm_crtc_handle_vblank(&acrtc->base); amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + + acrtc_state = to_dm_crtc_state(acrtc->base.state); + + if (acrtc_state->stream && + acrtc_state->vrr_params.supported && + acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc_state->stream, + &acrtc_state->vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc_state->stream, + &acrtc_state->vrr_params.adjust); + } } } @@ -398,6 +422,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); + mutex_init(&adev->dm.dc_lock); + if(amdgpu_dm_irq_init(adev)) { DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); goto error; @@ -512,6 +538,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); + + mutex_destroy(&adev->dm.dc_lock); + return; } @@ -643,6 +672,26 @@ static int dm_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct dmcu_iram_parameters params; + unsigned int linear_lut[16]; + int i; + struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; + bool ret; + + for (i = 0; i < 16; i++) + linear_lut[i] = 0xFFFF * i / 15; + + params.set = 0; + params.backlight_ramping_start = 0xCCCC; + params.backlight_ramping_reduction = 0xCCCCCCCC; + params.backlight_lut_array_size = 16; + params.backlight_lut_array = linear_lut; + + ret = dmcu_load_iram(dmcu, params); + + if (!ret) + return -EINVAL; + return detect_mst_link_for_all_connectors(adev->ddev); } @@ -969,45 +1018,6 @@ const struct amdgpu_ip_block_version dm_ip_block = }; -static struct drm_atomic_state * -dm_atomic_state_alloc(struct drm_device *dev) -{ - struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL); - - if (!state) - return NULL; - - if (drm_atomic_state_init(dev, &state->base) < 0) - goto fail; - - return &state->base; - -fail: - kfree(state); - return NULL; -} - -static void -dm_atomic_state_clear(struct drm_atomic_state *state) -{ - struct dm_atomic_state *dm_state = to_dm_atomic_state(state); - - if (dm_state->context) { - dc_release_state(dm_state->context); - dm_state->context = NULL; - } - - drm_atomic_state_default_clear(state); -} - -static void -dm_atomic_state_alloc_free(struct drm_atomic_state *state) -{ - struct dm_atomic_state *dm_state = to_dm_atomic_state(state); - drm_atomic_state_default_release(state); - kfree(dm_state); -} - /** * DOC: atomic * @@ -1019,9 +1029,6 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { .output_poll_changed = drm_fb_helper_output_poll_changed, .atomic_check = amdgpu_dm_atomic_check, .atomic_commit = amdgpu_dm_atomic_commit, - .atomic_state_alloc = dm_atomic_state_alloc, - .atomic_state_clear = dm_atomic_state_clear, - .atomic_state_free = dm_atomic_state_alloc_free }; static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { @@ -1543,8 +1550,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) } #endif +/* + * Acquires the lock for the atomic state object and returns + * the new atomic state. + * + * This should only be called during atomic check. + */ +static int dm_atomic_get_state(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_private_state *priv_state; + int ret; + + if (*dm_state) + return 0; + + ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx); + if (ret) + return ret; + + priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + + *dm_state = to_dm_atomic_state(priv_state); + + return 0; +} + +struct dm_atomic_state * +dm_atomic_get_new_state(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_private_obj *obj; + struct drm_private_state *new_obj_state; + int i; + + for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { + if (obj->funcs == dm->atomic_obj.funcs) + return to_dm_atomic_state(new_obj_state); + } + + return NULL; +} + +struct dm_atomic_state * +dm_atomic_get_old_state(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_private_obj *obj; + struct drm_private_state *old_obj_state; + int i; + + for_each_old_private_obj_in_state(state, obj, old_obj_state, i) { + if (obj->funcs == dm->atomic_obj.funcs) + return to_dm_atomic_state(old_obj_state); + } + + return NULL; +} + +static struct drm_private_state * +dm_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct dm_atomic_state *old_state, *new_state; + + new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); + if (!new_state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); + + new_state->context = dc_create_state(); + if (!new_state->context) { + kfree(new_state); + return NULL; + } + + old_state = to_dm_atomic_state(obj->state); + if (old_state && old_state->context) + dc_resource_state_copy_construct(old_state->context, + new_state->context); + + return &new_state->base; +} + +static void dm_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dm_atomic_state *dm_state = to_dm_atomic_state(state); + + if (dm_state && dm_state->context) + dc_release_state(dm_state->context); + + kfree(dm_state); +} + +static struct drm_private_state_funcs dm_atomic_state_funcs = { + .atomic_duplicate_state = dm_atomic_duplicate_state, + .atomic_destroy_state = dm_atomic_destroy_state, +}; + static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) { + struct dm_atomic_state *state; int r; adev->mode_info.mode_config_initialized = true; @@ -1562,6 +1678,25 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) adev->ddev->mode_config.fb_base = adev->gmc.aper_base; + drm_modeset_lock_init(&adev->dm.atomic_obj_lock); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->context = dc_create_state(); + if (!state->context) { + kfree(state); + return -ENOMEM; + } + + dc_resource_state_copy_construct_current(adev->dm.dc, state->context); + + drm_atomic_private_obj_init(adev->ddev, + &adev->dm.atomic_obj, + &state->base, + &dm_atomic_state_funcs); + r = amdgpu_display_modeset_create_props(adev); if (r) return r; @@ -1569,27 +1704,60 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) return 0; } +#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 +#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 + #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) +static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) +{ +#if defined(CONFIG_ACPI) + struct amdgpu_dm_backlight_caps caps; + + if (dm->backlight_caps.caps_valid) + return; + + amdgpu_acpi_get_backlight_caps(dm->adev, &caps); + if (caps.caps_valid) { + dm->backlight_caps.min_input_signal = caps.min_input_signal; + dm->backlight_caps.max_input_signal = caps.max_input_signal; + dm->backlight_caps.caps_valid = true; + } else { + dm->backlight_caps.min_input_signal = + AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + dm->backlight_caps.max_input_signal = + AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + } +#else + dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; +#endif +} + static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); + struct amdgpu_dm_backlight_caps caps; + uint32_t brightness = bd->props.brightness; - /* backlight_pwm_u16_16 parameter is in unsigned 32 bit, 16 bit integer - * and 16 bit fractional, where 1.0 is max backlight value. - * bd->props.brightness is 8 bit format and needs to be converted by - * scaling via copy lower byte to upper byte of 16 bit value. - */ - uint32_t brightness = bd->props.brightness * 0x101; - + amdgpu_dm_update_backlight_caps(dm); + caps = dm->backlight_caps; /* - * PWM interperts 0 as 100% rather than 0% because of HW - * limitation for level 0. So limiting minimum brightness level - * to 1. + * The brightness input is in the range 0-255 + * It needs to be rescaled to be between the + * requested min and max input signal + * + * It also needs to be scaled up by 0x101 to + * match the DC interface which has a range of + * 0 to 0xffff */ - if (bd->props.brightness < 1) - brightness = 0x101; + brightness = + brightness + * 0x101 + * (caps.max_input_signal - caps.min_input_signal) + / AMDGPU_MAX_BL_LEVEL + + caps.min_input_signal * 0x101; if (dc_link_set_backlight_level(dm->backlight_link, brightness, 0, 0)) @@ -1619,6 +1787,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) char bl_name[16]; struct backlight_properties props = { 0 }; + amdgpu_dm_update_backlight_caps(dm); + props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; @@ -1850,6 +2020,7 @@ fail: static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { drm_mode_config_cleanup(dm->ddev); + drm_atomic_private_obj_fini(&dm->atomic_obj); return; } @@ -1869,73 +2040,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev) /* TODO: implement later */ } -static int amdgpu_notify_freesync(struct drm_device *dev, void *data, - struct drm_file *filp) -{ - struct drm_atomic_state *state; - struct drm_modeset_acquire_ctx ctx; - struct drm_crtc *crtc; - struct drm_connector *connector; - struct drm_connector_state *old_con_state, *new_con_state; - int ret = 0; - uint8_t i; - bool enable = false; - - drm_modeset_acquire_init(&ctx, 0); - - state = drm_atomic_state_alloc(dev); - if (!state) { - ret = -ENOMEM; - goto out; - } - state->acquire_ctx = &ctx; - -retry: - drm_for_each_crtc(crtc, dev) { - ret = drm_atomic_add_affected_connectors(state, crtc); - if (ret) - goto fail; - - /* TODO rework amdgpu_dm_commit_planes so we don't need this */ - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - goto fail; - } - - for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { - struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); - struct drm_crtc_state *new_crtc_state; - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct dm_crtc_state *dm_new_crtc_state; - - if (!acrtc) { - ASSERT(0); - continue; - } - - new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - - dm_new_crtc_state->freesync_enabled = enable; - } - - ret = drm_atomic_commit(state); - -fail: - if (ret == -EDEADLK) { - drm_atomic_state_clear(state); - drm_modeset_backoff(&ctx); - goto retry; - } - - drm_atomic_state_put(state); - -out: - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - return ret; -} - static const struct amdgpu_display_funcs dm_display_funcs = { .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ @@ -1948,8 +2052,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = { dm_crtc_get_scanoutpos,/* called unconditionally */ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ .add_connector = NULL, /* VBIOS parsing. DAL does it. */ - .notify_freesync = amdgpu_notify_freesync, - }; #if defined(CONFIG_DEBUG_KERNEL_DC) @@ -2550,7 +2652,8 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ static void fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, const struct drm_display_mode *mode_in, - const struct drm_connector *connector) + const struct drm_connector *connector, + const struct dc_stream_state *old_stream) { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; @@ -2576,7 +2679,18 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, connector); timing_out->scan_type = SCANNING_TYPE_NODATA; timing_out->hdmi_vic = 0; - timing_out->vic = drm_match_cea_mode(mode_in); + + if(old_stream) { + timing_out->vic = old_stream->timing.vic; + timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; + timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; + } else { + timing_out->vic = drm_match_cea_mode(mode_in); + if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) + timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; + if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) + timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; + } timing_out->h_addressable = mode_in->crtc_hdisplay; timing_out->h_total = mode_in->crtc_htotal; @@ -2592,10 +2706,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; timing_out->pix_clk_khz = mode_in->crtc_clock; timing_out->aspect_ratio = get_aspect_ratio(mode_in); - if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) - timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; - if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) - timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; stream->output_color_space = get_output_color_space(timing_out); @@ -2618,9 +2728,9 @@ static void fill_audio_info(struct audio_info *audio_info, cea_revision = drm_connector->display_info.cea_rev; - strncpy(audio_info->display_name, + strscpy(audio_info->display_name, edid_caps->display_name, - AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1); + AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); if (cea_revision >= 3) { audio_info->mode_count = edid_caps->audio_mode_count; @@ -2758,13 +2868,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) static struct dc_stream_state * create_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, - const struct dm_connector_state *dm_state) + const struct dm_connector_state *dm_state, + const struct dc_stream_state *old_stream) { struct drm_display_mode *preferred_mode = NULL; struct drm_connector *drm_connector; struct dc_stream_state *stream = NULL; struct drm_display_mode mode = *drm_mode; bool native_mode_found = false; + bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; + int mode_refresh; + int preferred_refresh = 0; + struct dc_sink *sink = NULL; if (aconnector == NULL) { DRM_ERROR("aconnector is NULL!\n"); @@ -2803,6 +2918,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct drm_display_mode, head); + mode_refresh = drm_mode_vrefresh(&mode); + if (preferred_mode == NULL) { /* * This may not be an error, the use case is when we have no @@ -2815,13 +2932,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, dm_state ? (dm_state->scaling != RMX_OFF) : false); + preferred_refresh = drm_mode_vrefresh(preferred_mode); } if (!dm_state) drm_mode_set_crtcinfo(&mode, 0); - fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base); + /* + * If scaling is enabled and refresh rate didn't change + * we copy the vic and polarities of the old timings + */ + if (!scale || mode_refresh != preferred_refresh) + fill_stream_properties_from_drm_display_mode(stream, + &mode, &aconnector->base, NULL); + else + fill_stream_properties_from_drm_display_mode(stream, + &mode, &aconnector->base, old_stream); + update_stream_scaling_settings(&mode, dm_state, stream); fill_audio_info( @@ -2833,6 +2960,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (dm_state && dm_state->freesync_capable) stream->ignore_msa_timing_param = true; + finish: if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) dc_sink_release(sink); @@ -2899,9 +3027,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) dc_stream_retain(state->stream); } - state->adjust = cur->adjust; + state->vrr_params = cur->vrr_params; state->vrr_infopacket = cur->vrr_infopacket; - state->freesync_enabled = cur->freesync_enabled; + state->abm_level = cur->abm_level; + state->vrr_supported = cur->vrr_supported; + state->freesync_config = cur->freesync_config; + state->crc_enabled = cur->crc_enabled; /* TODO Duplicate dc_stream after objects are stream object is flattened */ @@ -3017,6 +3148,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.max_bpc_property) { dm_new_state->max_bpc = val; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + dm_new_state->abm_level = val; + ret = 0; } return ret; @@ -3062,7 +3196,11 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.max_bpc_property) { *val = dm_state->max_bpc; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + *val = dm_state->abm_level; + ret = 0; } + return ret; } @@ -3106,6 +3244,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_enable = false; state->underscan_hborder = 0; state->underscan_vborder = 0; + state->max_bpc = 8; __drm_atomic_helper_connector_reset(connector, &state->base); } @@ -3126,7 +3265,12 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); new_state->freesync_capable = state->freesync_capable; - new_state->freesync_enable = state->freesync_enable; + new_state->abm_level = state->abm_level; + new_state->scaling = state->scaling; + new_state->underscan_enable = state->underscan_enable; + new_state->underscan_hborder = state->underscan_hborder; + new_state->underscan_vborder = state->underscan_vborder; + new_state->max_bpc = state->max_bpc; return &new_state->base; } @@ -3228,7 +3372,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec goto fail; } - stream = create_stream_for_sink(aconnector, mode, NULL); + stream = create_stream_for_sink(aconnector, mode, NULL, NULL); if (stream == NULL) { DRM_ERROR("Failed to create stream for sink!\n"); goto fail; @@ -3499,10 +3643,53 @@ static int dm_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } +static int dm_plane_atomic_async_check(struct drm_plane *plane, + struct drm_plane_state *new_plane_state) +{ + struct drm_plane_state *old_plane_state = + drm_atomic_get_old_plane_state(new_plane_state->state, plane); + + /* Only support async updates on cursor planes. */ + if (plane->type != DRM_PLANE_TYPE_CURSOR) + return -EINVAL; + + /* + * DRM calls prepare_fb and cleanup_fb on new_plane_state for + * async commits so don't allow fb changes. + */ + if (old_plane_state->fb != new_plane_state->fb) + return -EINVAL; + + return 0; +} + +static void dm_plane_atomic_async_update(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_plane_state *old_state = + drm_atomic_get_old_plane_state(new_state->state, plane); + + if (plane->state->fb != new_state->fb) + drm_atomic_set_fb_for_plane(plane->state, new_state->fb); + + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_w = new_state->src_w; + plane->state->src_h = new_state->src_h; + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_w = new_state->crtc_w; + plane->state->crtc_h = new_state->crtc_h; + + handle_cursor_update(plane, old_state); +} + static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { .prepare_fb = dm_plane_helper_prepare_fb, .cleanup_fb = dm_plane_helper_cleanup_fb, .atomic_check = dm_plane_atomic_check, + .atomic_async_check = dm_plane_atomic_async_check, + .atomic_async_update = dm_plane_atomic_async_update }; /* @@ -3716,7 +3903,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder, mode->hdisplay = hdisplay; mode->vdisplay = vdisplay; mode->type &= ~DRM_MODE_TYPE_PREFERRED; - strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); + strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); return mode; @@ -3876,6 +4063,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, adev->mode_info.max_bpc_property, 0); + if (connector_type == DRM_MODE_CONNECTOR_eDP && + dc_is_dmcu_initialized(adev->dm.dc)) { + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.abm_level_property, 0); + } + + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + drm_connector_attach_vrr_capable_property( + &aconnector->base); + } } static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, @@ -4180,6 +4378,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, static void handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state) { + struct amdgpu_device *adev = plane->dev->dev_private; struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; @@ -4204,9 +4403,12 @@ static void handle_cursor_update(struct drm_plane *plane, if (!position.enable) { /* turn off cursor */ - if (crtc_state && crtc_state->stream) + if (crtc_state && crtc_state->stream) { + mutex_lock(&adev->dm.dc_lock); dc_stream_set_cursor_position(crtc_state->stream, &position); + mutex_unlock(&adev->dm.dc_lock); + } return; } @@ -4224,6 +4426,7 @@ static void handle_cursor_update(struct drm_plane *plane, attributes.pitch = attributes.width; if (crtc_state->stream) { + mutex_lock(&adev->dm.dc_lock); if (!dc_stream_set_cursor_attributes(crtc_state->stream, &attributes)) DRM_ERROR("DC failed to set cursor attributes\n"); @@ -4231,6 +4434,7 @@ static void handle_cursor_update(struct drm_plane *plane, if (!dc_stream_set_cursor_position(crtc_state->stream, &position)) DRM_ERROR("DC failed to set cursor position\n"); + mutex_unlock(&adev->dm.dc_lock); } } @@ -4252,6 +4456,102 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc) acrtc->crtc_id); } +struct dc_stream_status *dc_state_get_stream_status( + struct dc_state *state, + struct dc_stream_state *stream) +{ + uint8_t i; + + for (i = 0; i < state->stream_count; i++) { + if (stream == state->streams[i]) + return &state->stream_status[i]; + } + + return NULL; +} + +static void update_freesync_state_on_stream( + struct amdgpu_display_manager *dm, + struct dm_crtc_state *new_crtc_state, + struct dc_stream_state *new_stream, + struct dc_plane_state *surface, + u32 flip_timestamp_in_us) +{ + struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; + struct dc_info_packet vrr_infopacket = {0}; + struct mod_freesync_config config = new_crtc_state->freesync_config; + + if (!new_stream) + return; + + /* + * TODO: Determine why min/max totals and vrefresh can be 0 here. + * For now it's sufficient to just guard against these conditions. + */ + + if (!new_stream->timing.h_total || !new_stream->timing.v_total) + return; + + if (new_crtc_state->vrr_supported && + config.min_refresh_in_uhz && + config.max_refresh_in_uhz) { + config.state = new_crtc_state->base.vrr_enabled ? + VRR_STATE_ACTIVE_VARIABLE : + VRR_STATE_INACTIVE; + } else { + config.state = VRR_STATE_UNSUPPORTED; + } + + mod_freesync_build_vrr_params(dm->freesync_module, + new_stream, + &config, &vrr_params); + + if (surface) { + mod_freesync_handle_preflip( + dm->freesync_module, + surface, + new_stream, + flip_timestamp_in_us, + &vrr_params); + } + + mod_freesync_build_vrr_infopacket( + dm->freesync_module, + new_stream, + &vrr_params, + PACKET_TYPE_VRR, + TRANSFER_FUNC_UNKNOWN, + &vrr_infopacket); + + new_crtc_state->freesync_timing_changed = + (memcmp(&new_crtc_state->vrr_params.adjust, + &vrr_params.adjust, + sizeof(vrr_params.adjust)) != 0); + + new_crtc_state->freesync_vrr_info_changed = + (memcmp(&new_crtc_state->vrr_infopacket, + &vrr_infopacket, + sizeof(vrr_infopacket)) != 0); + + new_crtc_state->vrr_params = vrr_params; + new_crtc_state->vrr_infopacket = vrr_infopacket; + + new_stream->adjust = new_crtc_state->vrr_params.adjust; + new_stream->vrr_infopacket = vrr_infopacket; + + if (new_crtc_state->freesync_vrr_info_changed) + DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", + new_crtc_state->base.crtc->base.id, + (int)new_crtc_state->base.vrr_enabled, + (int)vrr_params.state); + + if (new_crtc_state->freesync_timing_changed) + DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n", + new_crtc_state->base.crtc->base.id, + vrr_params.adjust.v_total_min, + vrr_params.adjust.v_total_max); +} + /* * Executes flip * @@ -4263,6 +4563,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, struct dc_state *state) { unsigned long flags; + uint64_t timestamp_ns; uint32_t target_vblank; int r, vpos, hpos; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); @@ -4273,8 +4574,10 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, struct dc_flip_addrs addr = { {0} }; /* TODO eliminate or rename surface_update */ struct dc_surface_update surface_updates[1] = { {0} }; + struct dc_stream_update stream_update = {0}; struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); struct dc_stream_status *stream_status; + struct dc_plane_state *surface; /* Prepare wait for target vblank early - before the fence-waits */ @@ -4324,6 +4627,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, addr.address.grph.addr.high_part = upper_32_bits(afb->address); addr.flip_immediate = async_flip; + timestamp_ns = ktime_get_ns(); + addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000); + if (acrtc->base.state->event) prepare_flip_isr(acrtc); @@ -4337,21 +4643,51 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, return; } - surface_updates->surface = stream_status->plane_states[0]; - if (!surface_updates->surface) { + surface = stream_status->plane_states[0]; + surface_updates->surface = surface; + + if (!surface) { DRM_ERROR("No surface for CRTC: id=%d\n", acrtc->crtc_id); return; } surface_updates->flip_addr = &addr; + if (acrtc_state->stream) { + update_freesync_state_on_stream( + &adev->dm, + acrtc_state, + acrtc_state->stream, + surface, + addr.flip_timestamp_in_us); + + if (acrtc_state->freesync_timing_changed) + stream_update.adjust = + &acrtc_state->stream->adjust; + + if (acrtc_state->freesync_vrr_info_changed) + stream_update.vrr_infopacket = + &acrtc_state->stream->vrr_infopacket; + } + + /* Update surface timing information. */ + surface->time.time_elapsed_in_us[surface->time.index] = + addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us; + surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us; + surface->time.index++; + if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) + surface->time.index = 0; + + mutex_lock(&adev->dm.dc_lock); + dc_commit_updates_for_stream(adev->dm.dc, surface_updates, 1, acrtc_state->stream, - NULL, + &stream_update, &surface_updates->surface, state); + mutex_unlock(&adev->dm.dc_lock); DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n", __func__, @@ -4366,6 +4702,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, * with a dc_plane_state and follow the atomic model a bit more closely here. */ static bool commit_planes_to_stream( + struct amdgpu_display_manager *dm, struct dc *dc, struct dc_plane_state **plane_states, uint8_t new_plane_count, @@ -4382,6 +4719,7 @@ static bool commit_planes_to_stream( struct dc_stream_state *dc_stream = dm_new_crtc_state->stream; struct dc_stream_update *stream_update = kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL); + unsigned int abm_level; if (!stream_update) { BREAK_TO_DEBUGGER(); @@ -4409,9 +4747,9 @@ static bool commit_planes_to_stream( stream_update->dst = dc_stream->dst; stream_update->out_transfer_func = dc_stream->out_transfer_func; - if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) { - stream_update->vrr_infopacket = &dc_stream->vrr_infopacket; - stream_update->adjust = &dc_stream->adjust; + if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) { + abm_level = dm_new_crtc_state->abm_level; + stream_update->abm_level = &abm_level; } for (i = 0; i < new_plane_count; i++) { @@ -4441,11 +4779,13 @@ static bool commit_planes_to_stream( updates[i].scaling_info = &scaling_info[i]; } + mutex_lock(&dm->dc_lock); dc_commit_updates_for_stream( dc, updates, new_plane_count, dc_stream, stream_update, plane_states, state); + mutex_unlock(&dm->dc_lock); kfree(flip_addr); kfree(plane_info); @@ -4455,6 +4795,7 @@ static bool commit_planes_to_stream( } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, + struct dc_state *dc_state, struct drm_device *dev, struct amdgpu_display_manager *dm, struct drm_crtc *pcrtc, @@ -4471,7 +4812,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); - struct dm_atomic_state *dm_state = to_dm_atomic_state(state); int planes_count = 0; unsigned long flags; @@ -4532,7 +4872,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, crtc, fb, (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, - dm_state->context); + dc_state); } } @@ -4549,15 +4889,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - dc_stream_attach->adjust = acrtc_state->adjust; - dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket; + dc_stream_attach->abm_level = acrtc_state->abm_level; - if (false == commit_planes_to_stream(dm->dc, + if (false == commit_planes_to_stream(dm, + dm->dc, plane_states_constructed, planes_count, acrtc_state, dm_old_crtc_state, - dm_state->context)) + dc_state)) dm_error("%s: Failed to attach plane!\n", __func__); } else { /*TODO BUG Here should go disable planes on CRTC. */ @@ -4625,6 +4965,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_device *adev = dev->dev_private; struct amdgpu_display_manager *dm = &adev->dm; struct dm_atomic_state *dm_state; + struct dc_state *dc_state = NULL, *dc_state_temp = NULL; uint32_t i, j; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; @@ -4637,7 +4978,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_update_legacy_modeset_state(dev, state); - dm_state = to_dm_atomic_state(state); + dm_state = dm_atomic_get_new_state(state); + if (dm_state && dm_state->context) { + dc_state = dm_state->context; + } else { + /* No state changes, retain current state. */ + dc_state_temp = dc_create_state(); + ASSERT(dc_state_temp); + dc_state = dc_state_temp; + dc_resource_state_copy_construct_current(dm->dc, dc_state); + } /* update changed items */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -4710,9 +5060,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } } /* for_each_crtc_in_state() */ - if (dm_state->context) { - dm_enable_per_frame_crtc_master_sync(dm_state->context); - WARN_ON(!dc_commit_state(dm->dc, dm_state->context)); + if (dc_state) { + dm_enable_per_frame_crtc_master_sync(dc_state); + mutex_lock(&dm->dc_lock); + WARN_ON(!dc_commit_state(dm->dc, dc_state)); + mutex_unlock(&dm->dc_lock); } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { @@ -4725,13 +5077,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_stream_get_status(dm_new_crtc_state->stream); if (!status) + status = dc_state_get_stream_status(dc_state, + dm_new_crtc_state->stream); + + if (!status) DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); else acrtc->otg_inst = status->primary_otg_inst; } } - /* Handle scaling and underscan changes*/ + /* Handle scaling, underscan, and abm changes*/ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); @@ -4747,11 +5103,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; - /* Skip anything that is not scaling or underscan changes */ - if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) - continue; dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + /* Skip anything that is not scaling or underscan changes */ + if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) && + (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level)) + continue; update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); @@ -4763,17 +5122,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) WARN_ON(!status); WARN_ON(!status->plane_count); - dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust; - dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket; + dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; /*TODO How it works with MPO ?*/ if (!commit_planes_to_stream( + dm, dm->dc, status->plane_states, status->plane_count, dm_new_crtc_state, to_dm_crtc_state(old_crtc_state), - dm_state->context)) + dc_state)) dm_error("%s: Failed to update stream scaling!\n", __func__); } @@ -4806,7 +5165,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) - amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank); + amdgpu_dm_commit_planes(state, dc_state, dev, + dm, crtc, &wait_for_vblank); } @@ -4846,6 +5206,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for (i = 0; i < crtc_disable_count; i++) pm_runtime_put_autosuspend(dev->dev); pm_runtime_mark_last_busy(dev->dev); + + if (dc_state_temp) + dc_release_state(dc_state_temp); } @@ -4989,20 +5352,18 @@ static int do_aquire_global_lock(struct drm_device *dev, return ret < 0 ? ret : 0; } -void set_freesync_on_stream(struct amdgpu_display_manager *dm, - struct dm_crtc_state *new_crtc_state, - struct dm_connector_state *new_con_state, - struct dc_stream_state *new_stream) +static void get_freesync_config_for_crtc( + struct dm_crtc_state *new_crtc_state, + struct dm_connector_state *new_con_state) { struct mod_freesync_config config = {0}; - struct mod_vrr_params vrr = {0}; - struct dc_info_packet vrr_infopacket = {0}; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); - if (new_con_state->freesync_capable && - new_con_state->freesync_enable) { - config.state = new_crtc_state->freesync_enabled ? + new_crtc_state->vrr_supported = new_con_state->freesync_capable; + + if (new_con_state->freesync_capable) { + config.state = new_crtc_state->base.vrr_enabled ? VRR_STATE_ACTIVE_VARIABLE : VRR_STATE_INACTIVE; config.min_refresh_in_uhz = @@ -5010,21 +5371,21 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm, config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; config.vsif_supported = true; + config.btr = true; } - mod_freesync_build_vrr_params(dm->freesync_module, - new_stream, - &config, &vrr); + new_crtc_state->freesync_config = config; +} - mod_freesync_build_vrr_infopacket(dm->freesync_module, - new_stream, - &vrr, - packet_type_fs1, - NULL, - &vrr_infopacket); +static void reset_freesync_config_for_crtc( + struct dm_crtc_state *new_crtc_state) +{ + new_crtc_state->vrr_supported = false; - new_crtc_state->adjust = vrr.adjust; - new_crtc_state->vrr_infopacket = vrr_infopacket; + memset(&new_crtc_state->vrr_params, 0, + sizeof(new_crtc_state->vrr_params)); + memset(&new_crtc_state->vrr_infopacket, 0, + sizeof(new_crtc_state->vrr_infopacket)); } static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, @@ -5032,11 +5393,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, bool enable, bool *lock_and_validation_needed) { + struct dm_atomic_state *dm_state = NULL; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; int i; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; - struct dm_atomic_state *dm_state = to_dm_atomic_state(state); struct dc_stream_state *new_stream; int ret = 0; @@ -5084,7 +5445,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, new_stream = create_stream_for_sink(aconnector, &new_crtc_state->mode, - dm_new_conn_state); + dm_new_conn_state, + dm_old_crtc_state->stream); /* * we can have no stream on ACTION_SET if a display @@ -5099,8 +5461,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, break; } - set_freesync_on_stream(dm, dm_new_crtc_state, - dm_new_conn_state, new_stream); + dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { @@ -5110,9 +5471,6 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, } } - if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled) - new_crtc_state->mode_changed = true; - if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto next_crtc; @@ -5134,6 +5492,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto next_crtc; + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto fail; + DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", crtc->base.id); @@ -5149,6 +5511,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, dc_stream_release(dm_old_crtc_state->stream); dm_new_crtc_state->stream = NULL; + reset_freesync_config_for_crtc(dm_new_crtc_state); + *lock_and_validation_needed = true; } else {/* Add stream for any updated/enabled CRTC */ @@ -5168,6 +5532,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, WARN_ON(dm_new_crtc_state->stream); + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto fail; + dm_new_crtc_state->stream = new_stream; dc_stream_retain(new_stream); @@ -5226,7 +5594,9 @@ next_crtc: amdgpu_dm_set_ctm(dm_new_crtc_state); } - + /* Update Freesync settings. */ + get_freesync_config_for_crtc(dm_new_crtc_state, + dm_new_conn_state); } return ret; @@ -5242,12 +5612,13 @@ static int dm_update_planes_state(struct dc *dc, bool enable, bool *lock_and_validation_needed) { + + struct dm_atomic_state *dm_state = NULL; struct drm_crtc *new_plane_crtc, *old_plane_crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; - struct dm_atomic_state *dm_state = to_dm_atomic_state(state); struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; int i ; /* TODO return page_flip_needed() function */ @@ -5285,6 +5656,10 @@ static int dm_update_planes_state(struct dc *dc, DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", plane->base.id, old_plane_crtc->base.id); + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + return ret; + if (!dc_remove_plane_from_context( dc, dm_old_crtc_state->stream, @@ -5339,6 +5714,12 @@ static int dm_update_planes_state(struct dc *dc, return ret; } + ret = dm_atomic_get_state(state, &dm_state); + if (ret) { + dc_plane_state_release(dc_new_plane_state); + return ret; + } + /* * Any atomic check errors that occur after this will * not need a release. The plane state will be attached @@ -5370,11 +5751,14 @@ static int dm_update_planes_state(struct dc *dc, return ret; } -enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state) -{ - - int i, j, num_plane; +static int +dm_determine_update_type_for_commit(struct dc *dc, + struct drm_atomic_state *state, + enum surface_update_type *out_type) +{ + struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; + int i, j, num_plane, ret = 0; struct drm_plane_state *old_plane_state, *new_plane_state; struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; struct drm_crtc *new_plane_crtc, *old_plane_crtc; @@ -5394,7 +5778,7 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru DRM_ERROR("Plane or surface update failed to allocate"); /* Set type to FULL to avoid crashing in DC*/ update_type = UPDATE_TYPE_FULL; - goto ret; + goto cleanup; } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -5448,27 +5832,40 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru } if (num_plane > 0) { - status = dc_stream_get_status(new_dm_crtc_state->stream); + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto cleanup; + + old_dm_state = dm_atomic_get_old_state(state); + if (!old_dm_state) { + ret = -EINVAL; + goto cleanup; + } + + status = dc_state_get_stream_status(old_dm_state->context, + new_dm_crtc_state->stream); + update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, &stream_update, status); if (update_type > UPDATE_TYPE_MED) { update_type = UPDATE_TYPE_FULL; - goto ret; + goto cleanup; } } } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) { update_type = UPDATE_TYPE_FULL; - goto ret; + goto cleanup; } } -ret: +cleanup: kfree(updates); kfree(surface); - return update_type; + *out_type = update_type; + return ret; } /** @@ -5500,8 +5897,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { struct amdgpu_device *adev = dev->dev_private; + struct dm_atomic_state *dm_state = NULL; struct dc *dc = adev->dm.dc; - struct dm_atomic_state *dm_state = to_dm_atomic_state(state); struct drm_connector *connector; struct drm_connector_state *old_con_state, *new_con_state; struct drm_crtc *crtc; @@ -5522,12 +5919,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed && - (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled)) + !new_crtc_state->vrr_enabled) continue; if (!new_crtc_state->enable) @@ -5542,10 +5936,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } - dm_state->context = dc_create_state(); - ASSERT(dm_state->context); - dc_resource_state_copy_construct_current(dc, dm_state->context); - /* Remove exiting planes if they are modified */ ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed); if (ret) { @@ -5598,7 +5988,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } - update_type = dm_determine_update_type_for_commit(dc, state); + ret = dm_determine_update_type_for_commit(dc, state, &update_type); + if (ret) + goto fail; if (overall_update_type < update_type) overall_update_type = update_type; @@ -5616,6 +6008,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (overall_update_type > UPDATE_TYPE_FAST) { + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto fail; ret = do_aquire_global_lock(dev, state); if (ret) @@ -5625,6 +6020,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = -EINVAL; goto fail; } + } else if (state->legacy_cursor_update) { + /* + * This is a fast cursor update coming from the plane update + * helper, check if it can be done asynchronously for better + * performance. + */ + state->async_update = !drm_atomic_helper_async_check(dev, state); } /* Must be success */ @@ -5670,14 +6072,15 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - struct dm_connector_state *dm_con_state; + struct dm_connector_state *dm_con_state = NULL; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; + bool freesync_capable = false; if (!connector->state) { DRM_ERROR("%s - Connector has no state", __func__); - return; + goto update; } if (!edid) { @@ -5687,9 +6090,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector->max_vfreq = 0; amdgpu_dm_connector->pixel_clock_mhz = 0; - dm_con_state->freesync_capable = false; - dm_con_state->freesync_enable = false; - return; + goto update; } dm_con_state = to_dm_connector_state(connector->state); @@ -5697,10 +6098,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, edid_check_required = false; if (!amdgpu_dm_connector->dc_sink) { DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); - return; + goto update; } if (!adev->dm.freesync_module) - return; + goto update; /* * if edid non zero restrict freesync only for dp and edp */ @@ -5712,7 +6113,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, amdgpu_dm_connector); } } - dm_con_state->freesync_capable = false; if (edid_check_required == true && (edid->version > 1 || (edid->version == 1 && edid->revision > 1))) { for (i = 0; i < 4; i++) { @@ -5744,8 +6144,16 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) { - dm_con_state->freesync_capable = true; + freesync_capable = true; } } + +update: + if (dm_con_state) + dm_con_state->freesync_capable = freesync_capable; + + if (connector->vrr_capable_property) + drm_connector_set_vrr_capable_property(connector, + freesync_capable); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 607c3cdd7d0c..fbd161ddc3f4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -84,6 +84,18 @@ struct dm_comressor_info { }; /** + * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI + * @min_input_signal: minimum possible input in range 0-255 + * @max_input_signal: maximum possible input in range 0-255 + * @caps_valid: true if these values are from the ACPI interface + */ +struct amdgpu_dm_backlight_caps { + int min_input_signal; + int max_input_signal; + bool caps_valid; +}; + +/** * struct amdgpu_display_manager - Central amdgpu display manager device * * @dc: Display Core control structure @@ -112,6 +124,25 @@ struct amdgpu_display_manager { u16 display_indexes_num; /** + * @atomic_obj + * + * In combination with &dm_atomic_state it helps manage + * global atomic state that doesn't map cleanly into existing + * drm resources, like &dc_context. + */ + struct drm_private_obj atomic_obj; + + struct drm_modeset_lock atomic_obj_lock; + + /** + * @dc_lock: + * + * Guards access to DC functions that can issue register write + * sequences. + */ + struct mutex dc_lock; + + /** * @irq_handler_list_low_tab: * * Low priority IRQ handler table. @@ -158,6 +189,7 @@ struct amdgpu_display_manager { struct backlight_device *backlight_dev; const struct dc_link *backlight_link; + struct amdgpu_dm_backlight_caps backlight_caps; struct mod_freesync *freesync_module; @@ -231,15 +263,21 @@ struct dm_crtc_state { int crc_skip_count; bool crc_enabled; - bool freesync_enabled; - struct dc_crtc_timing_adjust adjust; + bool freesync_timing_changed; + bool freesync_vrr_info_changed; + + bool vrr_supported; + struct mod_freesync_config freesync_config; + struct mod_vrr_params vrr_params; struct dc_info_packet vrr_infopacket; + + int abm_level; }; #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) struct dm_atomic_state { - struct drm_atomic_state base; + struct drm_private_state base; struct dc_state *context; }; @@ -254,8 +292,8 @@ struct dm_connector_state { uint8_t underscan_hborder; uint8_t max_bpc; bool underscan_enable; - bool freesync_enable; bool freesync_capable; + uint8_t abm_level; }; #define to_dm_connector_state(x)\ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9fdeca096407..5e7ca1f3a8d1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -342,10 +342,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, master->connector_id); aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); + drm_connector_attach_encoder(&aconnector->base, + &aconnector->mst_encoder->base); - /* - * TODO: understand why this one is needed - */ drm_object_attach_property( &connector->base, dev->mode_config.path_property, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h new file mode 100644 index 000000000000..d898981684d5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -0,0 +1,104 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM amdgpu_dm + +#if !defined(_AMDGPU_DM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _AMDGPU_DM_TRACE_H_ + +#include <linux/tracepoint.h> + +TRACE_EVENT(amdgpu_dc_rreg, + TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value), + TP_ARGS(read_count, reg, value), + TP_STRUCT__entry( + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->reg = reg; + __entry->value = value; + *read_count = *read_count + 1; + ), + TP_printk("reg=0x%08lx, value=0x%08lx", + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + +TRACE_EVENT(amdgpu_dc_wreg, + TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value), + TP_ARGS(write_count, reg, value), + TP_STRUCT__entry( + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->reg = reg; + __entry->value = value; + *write_count = *write_count + 1; + ), + TP_printk("reg=0x%08lx, value=0x%08lx", + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + + +TRACE_EVENT(amdgpu_dc_performance, + TP_PROTO(unsigned long read_count, unsigned long write_count, + unsigned long *last_read, unsigned long *last_write, + const char *func, unsigned int line), + TP_ARGS(read_count, write_count, last_read, last_write, func, line), + TP_STRUCT__entry( + __field(uint32_t, reads) + __field(uint32_t, writes) + __field(uint32_t, read_delta) + __field(uint32_t, write_delta) + __string(func, func) + __field(uint32_t, line) + ), + TP_fast_assign( + __entry->reads = read_count; + __entry->writes = write_count; + __entry->read_delta = read_count - *last_read; + __entry->write_delta = write_count - *last_write; + __assign_str(func, func); + __entry->line = line; + *last_read = read_count; + *last_write = write_count; + ), + TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)", + __get_str(func), __entry->line, + (unsigned long)__entry->read_delta, + (unsigned long)__entry->reads, + (unsigned long)__entry->write_delta, + (unsigned long)__entry->writes) +); +#endif /* _AMDGPU_DM_TRACE_H_ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE amdgpu_dm_trace +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 751bb614fc0e..c513ab6f3843 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -638,6 +638,7 @@ static enum bp_result get_ss_info_v4_1( { enum bp_result result = BP_RESULT_OK; struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL; + struct atom_smu_info_v3_3 *smu_info = NULL; if (!ss_info) return BP_RESULT_BADINPUT; @@ -650,6 +651,7 @@ static enum bp_result get_ss_info_v4_1( if (!disp_cntl_tbl) return BP_RESULT_BADBIOSTABLE; + ss_info->type.STEP_AND_DELAY_INFO = false; ss_info->spread_percentage_divider = 1000; /* BIOS no longer uses target clock. Always enable for now */ @@ -688,6 +690,19 @@ static enum bp_result get_ss_info_v4_1( */ result = BP_RESULT_UNSUPPORTED; break; + case AS_SIGNAL_TYPE_XGMI: + smu_info = GET_IMAGE(struct atom_smu_info_v3_3, + DATA_TABLES(smu_info)); + if (!smu_info) + return BP_RESULT_BADBIOSTABLE; + + ss_info->spread_spectrum_percentage = + smu_info->waflclk_ss_percentage; + ss_info->spread_spectrum_range = + smu_info->gpuclk_ss_rate_10hz * 10; + if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) + ss_info->type.CENTER_MODE = true; + break; default: result = BP_RESULT_UNSUPPORTED; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 65b006ad372e..8196f3bb10c7 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -67,6 +67,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( return true; #endif case DCE_VERSION_12_0: + case DCE_VERSION_12_1: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 3279e26c3440..5fd52094d459 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -62,6 +62,55 @@ const static char DC_BUILD_ID[] = "production-build"; +/** + * DOC: Overview + * + * DC is the OS-agnostic component of the amdgpu DC driver. + * + * DC maintains and validates a set of structs representing the state of the + * driver and writes that state to AMD hardware + * + * Main DC HW structs: + * + * struct dc - The central struct. One per driver. Created on driver load, + * destroyed on driver unload. + * + * struct dc_context - One per driver. + * Used as a backpointer by most other structs in dc. + * + * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP + * plugpoints). Created on driver load, destroyed on driver unload. + * + * struct dc_sink - One per display. Created on boot or hotplug. + * Destroyed on shutdown or hotunplug. A dc_link can have a local sink + * (the display directly attached). It may also have one or more remote + * sinks (in the Multi-Stream Transport case) + * + * struct resource_pool - One per driver. Represents the hw blocks not in the + * main pipeline. Not directly accessible by dm. + * + * Main dc state structs: + * + * These structs can be created and destroyed as needed. There is a full set of + * these structs in dc->current_state representing the currently programmed state. + * + * struct dc_state - The global DC state to track global state information, + * such as bandwidth values. + * + * struct dc_stream_state - Represents the hw configuration for the pipeline from + * a framebuffer to a display. Maps one-to-one with dc_sink. + * + * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, + * and may have more in the Multi-Plane Overlay case. + * + * struct resource_context - Represents the programmable state of everything in + * the resource_pool. Not directly accessible by dm. + * + * struct pipe_ctx - A member of struct resource_context. Represents the + * internal hardware pipeline components. Each dc_plane_state has either + * one or two (in the pipe-split case). + */ + /******************************************************************************* * Private functions ******************************************************************************/ @@ -102,10 +151,6 @@ static bool create_links( return false; } - if (connectors_num == 0 && num_virtual_links == 0) { - dm_error("DC: Number of connectors is zero!\n"); - } - dm_output_to_console( "DC: %s: connectors_num: physical:%d, virtual:%d\n", __func__, @@ -175,6 +220,17 @@ failed_alloc: return false; } +static struct dc_perf_trace *dc_perf_trace_create(void) +{ + return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); +} + +static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) +{ + kfree(*perf_trace); + *perf_trace = NULL; +} + /** ***************************************************************************** * Function: dc_stream_adjust_vmin_vmax @@ -240,7 +296,7 @@ bool dc_stream_get_crtc_position(struct dc *dc, } /** - * dc_stream_configure_crc: Configure CRC capture for the given stream. + * dc_stream_configure_crc() - Configure CRC capture for the given stream. * @dc: DC Object * @stream: The stream to configure CRC on. * @enable: Enable CRC if true, disable otherwise. @@ -292,7 +348,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, } /** - * dc_stream_get_crc: Get CRC values for the given stream. + * dc_stream_get_crc() - Get CRC values for the given stream. * @dc: DC object * @stream: The DC stream state of the stream to get CRCs from. * @r_cr, g_y, b_cb: CRC values for the three channels are stored here. @@ -328,7 +384,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option) { struct bit_depth_reduction_params params; - struct dc_link *link = stream->status.link; + struct dc_link *link = stream->sink->link; struct pipe_ctx *pipes = NULL; int i; @@ -536,6 +592,8 @@ static void destruct(struct dc *dc) if (dc->ctx->created_bios) dal_bios_parser_destroy(&dc->ctx->dc_bios); + dc_perf_trace_destroy(&dc->ctx->perf_trace); + kfree(dc->ctx); dc->ctx = NULL; @@ -659,6 +717,12 @@ static bool construct(struct dc *dc, goto fail; } + dc_ctx->perf_trace = dc_perf_trace_create(); + if (!dc_ctx->perf_trace) { + ASSERT_CRITICAL(false); + goto fail; + } + /* Create GPIO service */ dc_ctx->gpio_service = dal_gpio_service_create( dc_version, @@ -1329,6 +1393,11 @@ static enum surface_update_type check_update_surfaces_for_stream( return overall_type; } +/** + * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) + * + * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types + */ enum surface_update_type dc_check_update_surfaces_for_stream( struct dc *dc, struct dc_surface_update *updates, @@ -1398,7 +1467,8 @@ static void commit_planes_do_stream_update(struct dc *dc, if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || - stream_update->vsc_infopacket) { + stream_update->vsc_infopacket || + stream_update->vsp_infopacket) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); } @@ -1409,6 +1479,14 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->output_csc_transform) dc_stream_program_csc_matrix(dc, stream); + if (stream_update->dither_option) { + resource_build_bit_depth_reduction_params(pipe_ctx->stream, + &pipe_ctx->stream->bit_depth_params); + pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, + &stream->bit_depth_params, + &stream->clamping); + } + /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -1492,9 +1570,6 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (update_type == UPDATE_TYPE_FULL) - context_timing_trace(dc, &context->res_ctx); - // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { /* Lock the top pipe while updating plane addrs, since freesync requires @@ -1631,6 +1706,9 @@ enum dc_irq_source dc_interrupt_to_irq_source( return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); } +/** + * dc_interrupt_set() - Enable/disable an AMD hw interrupt source + */ bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) { @@ -1686,6 +1764,15 @@ void dc_resume(struct dc *dc) core_link_resume(dc->links[i]); } +bool dc_is_dmcu_initialized(struct dc *dc) +{ + struct dmcu *dmcu = dc->res_pool->dmcu; + + if (dmcu) + return dmcu->funcs->is_dmcu_initialized(dmcu); + return false; +} + bool dc_submit_i2c( struct dc *dc, uint32_t link_index, @@ -1715,6 +1802,11 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink return true; } +/** + * dc_link_add_remote_sink() - Create a sink and attach it to an existing link + * + * EDID length is in bytes + */ struct dc_sink *dc_link_add_remote_sink( struct dc_link *link, const uint8_t *edid, @@ -1773,6 +1865,12 @@ fail_add_sink: return NULL; } +/** + * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link + * + * Note that this just removes the struct dc_sink - it doesn't + * program hardware or alter other members of dc_link + */ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) { int i; @@ -1810,4 +1908,4 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz; info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz; info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz; -}
\ No newline at end of file +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 7ee9c033acbd..52deacf39841 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -198,6 +198,13 @@ static bool program_hpd_filter( return result; } +/** + * dc_link_detect_sink() - Determine if there is a sink connected + * + * @type: Returned connection type + * Does not detect downstream devices, such as MST sinks + * or display connected through active dongles + */ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) { uint32_t is_hpd_high = 0; @@ -208,6 +215,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) return true; } + if (link->connector_signal == SIGNAL_TYPE_EDP) + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + /* todo: may need to lock gpio access */ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); if (hpd_pin == NULL) @@ -324,15 +334,15 @@ static enum signal_type get_basic_signal_type( return SIGNAL_TYPE_NONE; } -/* - * @brief - * Check whether there is a dongle on DP connector +/** + * dc_link_is_dp_sink_present() - Check if there is a native DP + * or passive DP-HDMI dongle connected */ bool dc_link_is_dp_sink_present(struct dc_link *link) { enum gpio_result gpio_result; uint32_t clock_pin = 0; - + uint8_t retry = 0; struct ddc *ddc; enum connector_id connector_id = @@ -361,11 +371,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) return present; } - /* Read GPIO: DP sink is present if both clock and data pins are zero */ - /* [anaumov] in DAL2, there was no check for GPIO failure */ - - gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); - ASSERT(gpio_result == GPIO_RESULT_OK); + /* + * Read GPIO: DP sink is present if both clock and data pins are zero + * + * [W/A] plug-unplug DP cable, sometimes customer board has + * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI + * then monitor can't br light up. Add retry 3 times + * But in real passive dongle, it need additional 3ms to detect + */ + do { + gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); + ASSERT(gpio_result == GPIO_RESULT_OK); + if (clock_pin) + udelay(1000); + else + break; + } while (retry++ < 3); present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; @@ -593,6 +614,14 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); } +/** + * dc_link_detect() - Detect if a sink is attached to a given link + * + * link->local_sink is created or destroyed as needed. + * + * This does not create remote sinks but will trigger DM + * to start MST detection if a branch is detected. + */ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; @@ -688,12 +717,26 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) same_dpcd = false; } - /* Active dongle downstream unplug */ + /* Active dongle plug in without display or downstream unplug*/ if (link->type == dc_connection_active_dongle && link->dpcd_caps.sink_count. bits.SINK_COUNT == 0) { - if (prev_sink != NULL) + if (prev_sink != NULL) { + /* Downstream unplug */ dc_sink_release(prev_sink); + } else { + /* Empty dongle plug in */ + for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) { + int fail_count = 0; + + dp_verify_link_cap(link, + &link->reported_link_cap, + &fail_count); + + if (fail_count == 0) + break; + } + } return true; } @@ -1396,8 +1439,6 @@ static enum dc_status enable_link_dp( else status = DC_FAIL_DP_LINK_TRAINING; - enable_stream_features(pipe_ctx); - return status; } @@ -2175,11 +2216,11 @@ bool dc_link_set_backlight_level(const struct dc_link *link, backlight_pwm_u16_16, backlight_pwm_u16_16); if (dc_is_embedded_signal(link->connector_signal)) { - if (stream != NULL) { - for (i = 0; i < MAX_PIPES; i++) { + for (i = 0; i < MAX_PIPES; i++) { + if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { if (core_dc->current_state->res_ctx. - pipe_ctx[i].stream - == stream) + pipe_ctx[i].stream->sink->link + == link) /* DMCU -1 for all controller id values, * therefore +1 here */ @@ -2218,7 +2259,7 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) struct dc *core_dc = link->ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; - if (dmcu != NULL && link->psr_enabled) + if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled) dmcu->funcs->set_psr_enable(dmcu, enable, wait); return true; @@ -2594,6 +2635,9 @@ void core_link_enable_stream( core_dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->sink->link->cur_link_settings); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + enable_stream_features(pipe_ctx); + dc_link_set_backlight_level(pipe_ctx->stream->sink->link, pipe_ctx->stream->bl_pwm_level, 0, @@ -2606,11 +2650,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + core_dc->hwss.blank_stream(pipe_ctx); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); - core_dc->hwss.blank_stream(pipe_ctx); - core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index d91df5ef0cb3..0caacb60b02f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1089,6 +1089,121 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) return max_link_cap; } +static enum dc_status read_hpd_rx_irq_data( + struct dc_link *link, + union hpd_irq_data *irq_data) +{ + static enum dc_status retval; + + /* The HW reads 16 bytes from 200h on HPD, + * but if we get an AUX_DEFER, the HW cannot retry + * and this causes the CTS tests 4.3.2.1 - 3.2.4 to + * fail, so we now explicitly read 6 bytes which is + * the req from the above mentioned test cases. + * + * For DP 1.4 we need to read those from 2002h range. + */ + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT, + irq_data->raw, + sizeof(union hpd_irq_data)); + else { + /* Read 14 bytes in a single read and then copy only the required fields. + * This is more efficient than doing it in two separate AUX reads. */ + + uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; + + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT_ESI, + tmp, + sizeof(tmp)); + + if (retval != DC_OK) + return retval; + + irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; + } + + return retval; +} + +static bool hpd_rx_irq_check_link_loss_status( + struct dc_link *link, + union hpd_irq_data *hpd_irq_dpcd_data) +{ + uint8_t irq_reg_rx_power_state = 0; + enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; + union lane_status lane_status; + uint32_t lane; + bool sink_status_changed; + bool return_code; + + sink_status_changed = false; + return_code = false; + + if (link->cur_link_settings.lane_count == 0) + return return_code; + + /*1. Check that Link Status changed, before re-training.*/ + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* check status of lanes 0,1 + * changed DpcdAddress_Lane01Status (0x202) + */ + lane_status.raw = get_nibble_at_index( + &hpd_irq_dpcd_data->bytes.lane01_status.raw, + lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + sink_status_changed = true; + break; + } + } + + /* Check interlane align.*/ + if (sink_status_changed || + !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + + DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); + + return_code = true; + + /*2. Check that we can handle interrupt: Not in FS DOS, + * Not in "Display Timeout" state, Link is trained. + */ + dpcd_result = core_link_read_dpcd(link, + DP_SET_POWER, + &irq_reg_rx_power_state, + sizeof(irq_reg_rx_power_state)); + + if (dpcd_result != DC_OK) { + DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", + __func__); + } else { + if (irq_reg_rx_power_state != DP_SET_POWER_D0) + return_code = false; + } + } + + return return_code; +} + bool dp_verify_link_cap( struct dc_link *link, struct dc_link_settings *known_limit_link_setting, @@ -1104,12 +1219,14 @@ bool dp_verify_link_cap( struct clock_source *dp_cs; enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; enum link_training_result status; + union hpd_irq_data irq_data; if (link->dc->debug.skip_detection_link_training) { link->verified_link_cap = *known_limit_link_setting; return true; } + memset(&irq_data, 0, sizeof(irq_data)); success = false; skip_link_training = false; @@ -1168,9 +1285,15 @@ bool dp_verify_link_cap( (*fail_count)++; } - if (success) + if (success) { link->verified_link_cap = *cur; - + udelay(1000); + if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK) + if (hpd_rx_irq_check_link_loss_status( + link, + &irq_data)) + (*fail_count)++; + } /* always disable the link before trying another * setting or before returning we'll enable it later * based on the actual mode we're driving @@ -1572,122 +1695,6 @@ void decide_link_settings(struct dc_stream_state *stream, } /*************************Short Pulse IRQ***************************/ - -static bool hpd_rx_irq_check_link_loss_status( - struct dc_link *link, - union hpd_irq_data *hpd_irq_dpcd_data) -{ - uint8_t irq_reg_rx_power_state = 0; - enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; - union lane_status lane_status; - uint32_t lane; - bool sink_status_changed; - bool return_code; - - sink_status_changed = false; - return_code = false; - - if (link->cur_link_settings.lane_count == 0) - return return_code; - - /*1. Check that Link Status changed, before re-training.*/ - - /*parse lane status*/ - for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { - /* check status of lanes 0,1 - * changed DpcdAddress_Lane01Status (0x202) - */ - lane_status.raw = get_nibble_at_index( - &hpd_irq_dpcd_data->bytes.lane01_status.raw, - lane); - - if (!lane_status.bits.CHANNEL_EQ_DONE_0 || - !lane_status.bits.CR_DONE_0 || - !lane_status.bits.SYMBOL_LOCKED_0) { - /* if one of the channel equalization, clock - * recovery or symbol lock is dropped - * consider it as (link has been - * dropped) dp sink status has changed - */ - sink_status_changed = true; - break; - } - } - - /* Check interlane align.*/ - if (sink_status_changed || - !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { - - DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); - - return_code = true; - - /*2. Check that we can handle interrupt: Not in FS DOS, - * Not in "Display Timeout" state, Link is trained. - */ - dpcd_result = core_link_read_dpcd(link, - DP_SET_POWER, - &irq_reg_rx_power_state, - sizeof(irq_reg_rx_power_state)); - - if (dpcd_result != DC_OK) { - DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", - __func__); - } else { - if (irq_reg_rx_power_state != DP_SET_POWER_D0) - return_code = false; - } - } - - return return_code; -} - -static enum dc_status read_hpd_rx_irq_data( - struct dc_link *link, - union hpd_irq_data *irq_data) -{ - static enum dc_status retval; - - /* The HW reads 16 bytes from 200h on HPD, - * but if we get an AUX_DEFER, the HW cannot retry - * and this causes the CTS tests 4.3.2.1 - 3.2.4 to - * fail, so we now explicitly read 6 bytes which is - * the req from the above mentioned test cases. - * - * For DP 1.4 we need to read those from 2002h range. - */ - if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) - retval = core_link_read_dpcd( - link, - DP_SINK_COUNT, - irq_data->raw, - sizeof(union hpd_irq_data)); - else { - /* Read 14 bytes in a single read and then copy only the required fields. - * This is more efficient than doing it in two separate AUX reads. */ - - uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; - - retval = core_link_read_dpcd( - link, - DP_SINK_COUNT_ESI, - tmp, - sizeof(tmp)); - - if (retval != DC_OK) - return retval; - - irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; - irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; - irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; - } - - return retval; -} - static bool allow_hpd_rx_irq(const struct dc_link *link) { /* @@ -2196,7 +2203,7 @@ static void get_active_converter_info( } if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { - uint8_t det_caps[4]; + uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ union dwnstream_port_caps_byte0 *port_caps = (union dwnstream_port_caps_byte0 *)det_caps; core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, @@ -2240,7 +2247,8 @@ static void get_active_converter_info( translate_dpcd_max_bpc( hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); - link->dpcd_caps.dongle_caps.extendedCapValid = true; + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; } break; @@ -2371,11 +2379,22 @@ static bool retrieve_link_cap(struct dc_link *link) dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) { - core_link_read_dpcd( + uint8_t ext_cap_data[16]; + + memset(ext_cap_data, '\0', sizeof(ext_cap_data)); + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( link, DP_DP13_DPCD_REV, - dpcd_data, - sizeof(dpcd_data)); + ext_cap_data, + sizeof(ext_cap_data)); + if (status == DC_OK) { + memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); + break; + } + } + if (status != DC_OK) + dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 82cd1d6e6e59..0065ec7d5330 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -96,6 +96,7 @@ void dp_enable_link_phy( link_settings, clock_source); } + link->cur_link_settings = *link_settings; dp_receiver_power_ctrl(link, true); } @@ -307,6 +308,7 @@ void dp_retrain_link_dp_test(struct dc_link *link, link->link_enc, link_setting, pipes[i].clock_source->id); + link->cur_link_settings = *link_setting; dp_receiver_power_ctrl(link, true); @@ -316,7 +318,6 @@ void dp_retrain_link_dp_test(struct dc_link *link, skip_video_pattern, LINK_TRAINING_ATTEMPTS); - link->cur_link_settings = *link_setting; link->dc->hwss.enable_stream(&pipes[i]); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index fc65b0055167..76137df74a53 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -83,7 +83,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) dc_version = DCE_VERSION_11_22; break; case FAMILY_AI: - dc_version = DCE_VERSION_12_0; + if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev)) + dc_version = DCE_VERSION_12_1; + else + dc_version = DCE_VERSION_12_0; break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case FAMILY_RV: @@ -136,6 +139,7 @@ struct resource_pool *dc_create_resource_pool( num_virtual_links, dc); break; case DCE_VERSION_12_0: + case DCE_VERSION_12_1: res_pool = dce120_create_resource_pool( num_virtual_links, dc); break; @@ -478,10 +482,29 @@ static enum pixel_format convert_pixel_format_to_dalsurface( return dal_pixel_format; } -static void rect_swap_helper(struct rect *rect) +static inline void get_vp_scan_direction( + enum dc_rotation_angle rotation, + bool horizontal_mirror, + bool *orthogonal_rotation, + bool *flip_vert_scan_dir, + bool *flip_horz_scan_dir) { - swap(rect->height, rect->width); - swap(rect->x, rect->y); + *orthogonal_rotation = false; + *flip_vert_scan_dir = false; + *flip_horz_scan_dir = false; + if (rotation == ROTATION_ANGLE_180) { + *flip_vert_scan_dir = true; + *flip_horz_scan_dir = true; + } else if (rotation == ROTATION_ANGLE_90) { + *orthogonal_rotation = true; + *flip_horz_scan_dir = true; + } else if (rotation == ROTATION_ANGLE_270) { + *orthogonal_rotation = true; + *flip_vert_scan_dir = true; + } + + if (horizontal_mirror) + *flip_horz_scan_dir = !*flip_horz_scan_dir; } static void calculate_viewport(struct pipe_ctx *pipe_ctx) @@ -490,33 +513,14 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) const struct dc_stream_state *stream = pipe_ctx->stream; struct scaler_data *data = &pipe_ctx->plane_res.scl_data; struct rect surf_src = plane_state->src_rect; - struct rect clip = { 0 }; + struct rect clip, dest; int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; bool pri_split = pipe_ctx->bottom_pipe && pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; bool sec_split = pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; - bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; - - - /* - * We need take horizontal mirror into account. On an unrotated surface this means - * that the viewport offset is actually the offset from the other side of source - * image so we have to subtract the right edge of the viewport from the right edge of - * the source window. Similar to mirror we need to take into account how offset is - * affected for 270/180 rotations - */ - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { - flip_vert_scan_dir = true; - flip_horz_scan_dir = true; - } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90) - flip_vert_scan_dir = true; - else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) - flip_horz_scan_dir = true; - - if (pipe_ctx->plane_state->horizontal_mirror) - flip_horz_scan_dir = !flip_horz_scan_dir; + bool orthogonal_rotation, flip_y_start, flip_x_start; if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { @@ -524,13 +528,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) sec_split = false; } - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || - pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) - rect_swap_helper(&surf_src); - /* The actual clip is an intersection between stream * source and surface clip */ + dest = plane_state->dst_rect; clip.x = stream->src.x > plane_state->clip_rect.x ? stream->src.x : plane_state->clip_rect.x; @@ -547,66 +548,77 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) stream->src.y + stream->src.height - clip.y : plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; - /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio - * note: surf_src.ofs should be added after rotation/mirror offset direction - * adjustment since it is already in viewport space - * num_pixels = clip.num_pix * scl_ratio + /* + * Need to calculate how scan origin is shifted in vp space + * to correctly rotate clip and dst */ - data->viewport.x = (clip.x - plane_state->dst_rect.x) * - surf_src.width / plane_state->dst_rect.width; - data->viewport.width = clip.width * - surf_src.width / plane_state->dst_rect.width; - - data->viewport.y = (clip.y - plane_state->dst_rect.y) * - surf_src.height / plane_state->dst_rect.height; - data->viewport.height = clip.height * - surf_src.height / plane_state->dst_rect.height; + get_vp_scan_direction( + plane_state->rotation, + plane_state->horizontal_mirror, + &orthogonal_rotation, + &flip_y_start, + &flip_x_start); - if (flip_vert_scan_dir) - data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; - if (flip_horz_scan_dir) - data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; + if (orthogonal_rotation) { + swap(clip.x, clip.y); + swap(clip.width, clip.height); + swap(dest.x, dest.y); + swap(dest.width, dest.height); + } + if (flip_x_start) { + clip.x = dest.x + dest.width - clip.x - clip.width; + dest.x = 0; + } + if (flip_y_start) { + clip.y = dest.y + dest.height - clip.y - clip.height; + dest.y = 0; + } - data->viewport.x += surf_src.x; - data->viewport.y += surf_src.y; + /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio + * num_pixels = clip.num_pix * scl_ratio + */ + data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width; + data->viewport.width = clip.width * surf_src.width / dest.width; + + data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height; + data->viewport.height = clip.height * surf_src.height / dest.height; + + /* Handle split */ + if (pri_split || sec_split) { + if (orthogonal_rotation) { + if (flip_y_start != pri_split) + data->viewport.height /= 2; + else { + data->viewport.y += data->viewport.height / 2; + /* Ceil offset pipe */ + data->viewport.height = (data->viewport.height + 1) / 2; + } + } else { + if (flip_x_start != pri_split) + data->viewport.width /= 2; + else { + data->viewport.x += data->viewport.width / 2; + /* Ceil offset pipe */ + data->viewport.width = (data->viewport.width + 1) / 2; + } + } + } /* Round down, compensate in init */ data->viewport_c.x = data->viewport.x / vpc_div; data->viewport_c.y = data->viewport.y / vpc_div; - data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? - dc_fixpt_half : dc_fixpt_zero; - data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? - dc_fixpt_half : dc_fixpt_zero; + data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero; + data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero; + /* Round up, assume original video size always even dimensions */ data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div; data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div; - - /* Handle hsplit */ - if (sec_split) { - data->viewport.x += data->viewport.width / 2; - data->viewport_c.x += data->viewport_c.width / 2; - /* Ceil offset pipe */ - data->viewport.width = (data->viewport.width + 1) / 2; - data->viewport_c.width = (data->viewport_c.width + 1) / 2; - } else if (pri_split) { - if (data->viewport.width > 1) - data->viewport.width /= 2; - if (data->viewport_c.width > 1) - data->viewport_c.width /= 2; - } - - if (plane_state->rotation == ROTATION_ANGLE_90 || - plane_state->rotation == ROTATION_ANGLE_270) { - rect_swap_helper(&data->viewport_c); - rect_swap_helper(&data->viewport); - } } -static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full) +static void calculate_recout(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; const struct dc_stream_state *stream = pipe_ctx->stream; - struct rect surf_src = plane_state->src_rect; struct rect surf_clip = plane_state->clip_rect; bool pri_split = pipe_ctx->bottom_pipe && pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; @@ -614,10 +626,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM; - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || - pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) - rect_swap_helper(&surf_src); - pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x; if (stream->src.x < surf_clip.x) pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x @@ -646,7 +654,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full stream->dst.y + stream->dst.height - pipe_ctx->plane_res.scl_data.recout.y; - /* Handle h & vsplit */ + /* Handle h & v split, handle rotation using viewport */ if (sec_split && top_bottom_split) { pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2; @@ -655,44 +663,14 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2; } else if (pri_split && top_bottom_split) pipe_ctx->plane_res.scl_data.recout.height /= 2; - else if (pri_split || sec_split) { - /* HMirror XOR Secondary_pipe XOR Rotation_180 */ - bool right_view = (sec_split != plane_state->horizontal_mirror) != - (plane_state->rotation == ROTATION_ANGLE_180); - - if (plane_state->rotation == ROTATION_ANGLE_90 - || plane_state->rotation == ROTATION_ANGLE_270) - /* Secondary_pipe XOR Rotation_270 */ - right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split; - - if (right_view) { - pipe_ctx->plane_res.scl_data.recout.x += - pipe_ctx->plane_res.scl_data.recout.width / 2; - /* Ceil offset pipe */ - pipe_ctx->plane_res.scl_data.recout.width = - (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; - } else { - if (pipe_ctx->plane_res.scl_data.recout.width > 1) - pipe_ctx->plane_res.scl_data.recout.width /= 2; - } - } - /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset) - * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl - * ratio) - */ - recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x) - * stream->dst.width / stream->src.width - - surf_src.x * plane_state->dst_rect.width / surf_src.width - * stream->dst.width / stream->src.width; - recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y) - * stream->dst.height / stream->src.height - - surf_src.y * plane_state->dst_rect.height / surf_src.height - * stream->dst.height / stream->src.height; - - recout_full->width = plane_state->dst_rect.width - * stream->dst.width / stream->src.width; - recout_full->height = plane_state->dst_rect.height - * stream->dst.height / stream->src.height; + else if (sec_split) { + pipe_ctx->plane_res.scl_data.recout.x += + pipe_ctx->plane_res.scl_data.recout.width / 2; + /* Ceil offset pipe */ + pipe_ctx->plane_res.scl_data.recout.width = + (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; + } else if (pri_split) + pipe_ctx->plane_res.scl_data.recout.width /= 2; } static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) @@ -705,9 +683,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) const int out_w = stream->dst.width; const int out_h = stream->dst.height; + /*Swap surf_src height and width since scaling ratios are in recout rotation*/ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) - rect_swap_helper(&surf_src); + swap(surf_src.height, surf_src.width); pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction( surf_src.width, @@ -744,351 +723,202 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.ratios.vert_c, 19); } -static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full) +static inline void adjust_vp_and_init_for_seamless_clip( + bool flip_scan_dir, + int recout_skip, + int src_size, + int taps, + struct fixed31_32 ratio, + struct fixed31_32 *init, + int *vp_offset, + int *vp_size) { - struct scaler_data *data = &pipe_ctx->plane_res.scl_data; - struct rect src = pipe_ctx->plane_state->src_rect; - int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 - || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; - bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; - - /* - * Need to calculate the scan direction for viewport to make adjustments - */ - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { - flip_vert_scan_dir = true; - flip_horz_scan_dir = true; - } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90) - flip_vert_scan_dir = true; - else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) - flip_horz_scan_dir = true; - - if (pipe_ctx->plane_state->horizontal_mirror) - flip_horz_scan_dir = !flip_horz_scan_dir; - - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || - pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { - rect_swap_helper(&src); - rect_swap_helper(&data->viewport_c); - rect_swap_helper(&data->viewport); - } - - /* - * Init calculated according to formula: - * init = (scaling_ratio + number_of_taps + 1) / 2 - * init_bot = init + scaling_ratio - * init_c = init + truncated_vp_c_offset(from calculate viewport) - */ - data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19); - - data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19); - - data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19); - - data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19); - - if (!flip_horz_scan_dir) { + if (!flip_scan_dir) { /* Adjust for viewport end clip-off */ - if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) { - int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x; - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.h, data->ratios.horz)); + if ((*vp_offset + *vp_size) < src_size) { + int vp_clip = src_size - *vp_size - *vp_offset; + int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio)); int_part = int_part > 0 ? int_part : 0; - data->viewport.width += int_part < vp_clip ? int_part : vp_clip; - } - if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) { - int vp_clip = (src.x + src.width) / vpc_div - - data->viewport_c.width - data->viewport_c.x; - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c)); - - int_part = int_part > 0 ? int_part : 0; - data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip; + *vp_size += int_part < vp_clip ? int_part : vp_clip; } /* Adjust for non-0 viewport offset */ - if (data->viewport.x) { + if (*vp_offset) { int int_part; - data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int( - data->ratios.horz, data->recout.x - recout_full->x)); - int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x; - if (int_part < data->taps.h_taps) { - int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ? - (data->taps.h_taps - int_part) : data->viewport.x; - data->viewport.x -= int_adj; - data->viewport.width += int_adj; + *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip)); + int_part = dc_fixpt_floor(*init) - *vp_offset; + if (int_part < taps) { + int int_adj = *vp_offset >= (taps - int_part) ? + (taps - int_part) : *vp_offset; + *vp_offset -= int_adj; + *vp_size += int_adj; int_part += int_adj; - } else if (int_part > data->taps.h_taps) { - data->viewport.x += int_part - data->taps.h_taps; - data->viewport.width -= int_part - data->taps.h_taps; - int_part = data->taps.h_taps; + } else if (int_part > taps) { + *vp_offset += int_part - taps; + *vp_size -= int_part - taps; + int_part = taps; } - data->inits.h.value &= 0xffffffff; - data->inits.h = dc_fixpt_add_int(data->inits.h, int_part); - } - - if (data->viewport_c.x) { - int int_part; - - data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int( - data->ratios.horz_c, data->recout.x - recout_full->x)); - int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x; - if (int_part < data->taps.h_taps_c) { - int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ? - (data->taps.h_taps_c - int_part) : data->viewport_c.x; - data->viewport_c.x -= int_adj; - data->viewport_c.width += int_adj; - int_part += int_adj; - } else if (int_part > data->taps.h_taps_c) { - data->viewport_c.x += int_part - data->taps.h_taps_c; - data->viewport_c.width -= int_part - data->taps.h_taps_c; - int_part = data->taps.h_taps_c; - } - data->inits.h_c.value &= 0xffffffff; - data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part); + init->value &= 0xffffffff; + *init = dc_fixpt_add_int(*init, int_part); } } else { /* Adjust for non-0 viewport offset */ - if (data->viewport.x) { - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.h, data->ratios.horz)); + if (*vp_offset) { + int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio)); int_part = int_part > 0 ? int_part : 0; - data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x; - data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x; - } - if (data->viewport_c.x) { - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c)); - - int_part = int_part > 0 ? int_part : 0; - data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x; - data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x; + *vp_size += int_part < *vp_offset ? int_part : *vp_offset; + *vp_offset -= int_part < *vp_offset ? int_part : *vp_offset; } /* Adjust for viewport end clip-off */ - if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) { - int int_part; - int end_offset = src.x + src.width - - data->viewport.x - data->viewport.width; - - /* - * this is init if vp had no offset, keep in mind this is from the - * right side of vp due to scan direction - */ - data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int( - data->ratios.horz, data->recout.x - recout_full->x)); - /* - * this is the difference between first pixel of viewport available to read - * and init position, takning into account scan direction - */ - int_part = dc_fixpt_floor(data->inits.h) - end_offset; - if (int_part < data->taps.h_taps) { - int int_adj = end_offset >= (data->taps.h_taps - int_part) ? - (data->taps.h_taps - int_part) : end_offset; - data->viewport.width += int_adj; - int_part += int_adj; - } else if (int_part > data->taps.h_taps) { - data->viewport.width += int_part - data->taps.h_taps; - int_part = data->taps.h_taps; - } - data->inits.h.value &= 0xffffffff; - data->inits.h = dc_fixpt_add_int(data->inits.h, int_part); - } - - if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) { + if ((*vp_offset + *vp_size) < src_size) { int int_part; - int end_offset = (src.x + src.width) / vpc_div - - data->viewport_c.x - data->viewport_c.width; + int end_offset = src_size - *vp_offset - *vp_size; /* * this is init if vp had no offset, keep in mind this is from the * right side of vp due to scan direction */ - data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int( - data->ratios.horz_c, data->recout.x - recout_full->x)); + *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip)); /* * this is the difference between first pixel of viewport available to read * and init position, takning into account scan direction */ - int_part = dc_fixpt_floor(data->inits.h_c) - end_offset; - if (int_part < data->taps.h_taps_c) { - int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ? - (data->taps.h_taps_c - int_part) : end_offset; - data->viewport_c.width += int_adj; + int_part = dc_fixpt_floor(*init) - end_offset; + if (int_part < taps) { + int int_adj = end_offset >= (taps - int_part) ? + (taps - int_part) : end_offset; + *vp_size += int_adj; int_part += int_adj; - } else if (int_part > data->taps.h_taps_c) { - data->viewport_c.width += int_part - data->taps.h_taps_c; - int_part = data->taps.h_taps_c; + } else if (int_part > taps) { + *vp_size += int_part - taps; + int_part = taps; } - data->inits.h_c.value &= 0xffffffff; - data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part); + init->value &= 0xffffffff; + *init = dc_fixpt_add_int(*init, int_part); } - } - if (!flip_vert_scan_dir) { - /* Adjust for viewport end clip-off */ - if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) { - int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y; - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.v, data->ratios.vert)); - - int_part = int_part > 0 ? int_part : 0; - data->viewport.height += int_part < vp_clip ? int_part : vp_clip; - } - if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) { - int vp_clip = (src.y + src.height) / vpc_div - - data->viewport_c.height - data->viewport_c.y; - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c)); - - int_part = int_part > 0 ? int_part : 0; - data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip; - } - - /* Adjust for non-0 viewport offset */ - if (data->viewport.y) { - int int_part; - - data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int( - data->ratios.vert, data->recout.y - recout_full->y)); - int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y; - if (int_part < data->taps.v_taps) { - int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ? - (data->taps.v_taps - int_part) : data->viewport.y; - data->viewport.y -= int_adj; - data->viewport.height += int_adj; - int_part += int_adj; - } else if (int_part > data->taps.v_taps) { - data->viewport.y += int_part - data->taps.v_taps; - data->viewport.height -= int_part - data->taps.v_taps; - int_part = data->taps.v_taps; - } - data->inits.v.value &= 0xffffffff; - data->inits.v = dc_fixpt_add_int(data->inits.v, int_part); - } - - if (data->viewport_c.y) { - int int_part; +} - data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int( - data->ratios.vert_c, data->recout.y - recout_full->y)); - int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y; - if (int_part < data->taps.v_taps_c) { - int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ? - (data->taps.v_taps_c - int_part) : data->viewport_c.y; - data->viewport_c.y -= int_adj; - data->viewport_c.height += int_adj; - int_part += int_adj; - } else if (int_part > data->taps.v_taps_c) { - data->viewport_c.y += int_part - data->taps.v_taps_c; - data->viewport_c.height -= int_part - data->taps.v_taps_c; - int_part = data->taps.v_taps_c; - } - data->inits.v_c.value &= 0xffffffff; - data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part); - } - } else { - /* Adjust for non-0 viewport offset */ - if (data->viewport.y) { - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.v, data->ratios.vert)); +static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) +{ + const struct dc_plane_state *plane_state = pipe_ctx->plane_state; + const struct dc_stream_state *stream = pipe_ctx->stream; + struct scaler_data *data = &pipe_ctx->plane_res.scl_data; + struct rect src = pipe_ctx->plane_state->src_rect; + int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v; + int vpc_div = (data->format == PIXEL_FORMAT_420BPP8 + || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1; + bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir; - int_part = int_part > 0 ? int_part : 0; - data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y; - data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y; - } - if (data->viewport_c.y) { - int int_part = dc_fixpt_floor( - dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c)); + /* + * Need to calculate the scan direction for viewport to make adjustments + */ + get_vp_scan_direction( + plane_state->rotation, + plane_state->horizontal_mirror, + &orthogonal_rotation, + &flip_vert_scan_dir, + &flip_horz_scan_dir); + + /* Calculate src rect rotation adjusted to recout space */ + surf_size_h = src.x + src.width; + surf_size_v = src.y + src.height; + if (flip_horz_scan_dir) + src.x = 0; + if (flip_vert_scan_dir) + src.y = 0; + if (orthogonal_rotation) { + swap(src.x, src.y); + swap(src.width, src.height); + } - int_part = int_part > 0 ? int_part : 0; - data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y; - data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y; - } + /* Recout matching initial vp offset = recout_offset - (stream dst offset + + * ((surf dst offset - stream src offset) * 1/ stream scaling ratio) + * - (surf surf_src offset * 1/ full scl ratio)) + */ + recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x) + * stream->dst.width / stream->src.width - + src.x * plane_state->dst_rect.width / src.width + * stream->dst.width / stream->src.width); + recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y) + * stream->dst.height / stream->src.height - + src.y * plane_state->dst_rect.height / src.height + * stream->dst.height / stream->src.height); + if (orthogonal_rotation) + swap(recout_skip_h, recout_skip_v); + /* + * Init calculated according to formula: + * init = (scaling_ratio + number_of_taps + 1) / 2 + * init_bot = init + scaling_ratio + * init_c = init + truncated_vp_c_offset(from calculate viewport) + */ + data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19); - /* Adjust for viewport end clip-off */ - if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) { - int int_part; - int end_offset = src.y + src.height - - data->viewport.y - data->viewport.height; + data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19); - /* - * this is init if vp had no offset, keep in mind this is from the - * right side of vp due to scan direction - */ - data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int( - data->ratios.vert, data->recout.y - recout_full->y)); - /* - * this is the difference between first pixel of viewport available to read - * and init position, taking into account scan direction - */ - int_part = dc_fixpt_floor(data->inits.v) - end_offset; - if (int_part < data->taps.v_taps) { - int int_adj = end_offset >= (data->taps.v_taps - int_part) ? - (data->taps.v_taps - int_part) : end_offset; - data->viewport.height += int_adj; - int_part += int_adj; - } else if (int_part > data->taps.v_taps) { - data->viewport.height += int_part - data->taps.v_taps; - int_part = data->taps.v_taps; - } - data->inits.v.value &= 0xffffffff; - data->inits.v = dc_fixpt_add_int(data->inits.v, int_part); - } + data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19); - if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) { - int int_part; - int end_offset = (src.y + src.height) / vpc_div - - data->viewport_c.y - data->viewport_c.height; + data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19); - /* - * this is init if vp had no offset, keep in mind this is from the - * right side of vp due to scan direction - */ - data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int( - data->ratios.vert_c, data->recout.y - recout_full->y)); - /* - * this is the difference between first pixel of viewport available to read - * and init position, taking into account scan direction - */ - int_part = dc_fixpt_floor(data->inits.v_c) - end_offset; - if (int_part < data->taps.v_taps_c) { - int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ? - (data->taps.v_taps_c - int_part) : end_offset; - data->viewport_c.height += int_adj; - int_part += int_adj; - } else if (int_part > data->taps.v_taps_c) { - data->viewport_c.height += int_part - data->taps.v_taps_c; - int_part = data->taps.v_taps_c; - } - data->inits.v_c.value &= 0xffffffff; - data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part); - } - } + /* + * Taps, inits and scaling ratios are in recout space need to rotate + * to viewport rotation before adjustment + */ + adjust_vp_and_init_for_seamless_clip( + flip_horz_scan_dir, + recout_skip_h, + surf_size_h, + orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps, + orthogonal_rotation ? data->ratios.vert : data->ratios.horz, + orthogonal_rotation ? &data->inits.v : &data->inits.h, + &data->viewport.x, + &data->viewport.width); + adjust_vp_and_init_for_seamless_clip( + flip_horz_scan_dir, + recout_skip_h, + surf_size_h / vpc_div, + orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c, + orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c, + orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c, + &data->viewport_c.x, + &data->viewport_c.width); + adjust_vp_and_init_for_seamless_clip( + flip_vert_scan_dir, + recout_skip_v, + surf_size_v, + orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps, + orthogonal_rotation ? data->ratios.horz : data->ratios.vert, + orthogonal_rotation ? &data->inits.h : &data->inits.v, + &data->viewport.y, + &data->viewport.height); + adjust_vp_and_init_for_seamless_clip( + flip_vert_scan_dir, + recout_skip_v, + surf_size_v / vpc_div, + orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c, + orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c, + orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c, + &data->viewport_c.y, + &data->viewport_c.height); /* Interlaced inits based on final vert inits */ data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert); data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); - if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || - pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { - rect_swap_helper(&data->viewport_c); - rect_swap_helper(&data->viewport); - } } bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; - struct rect recout_full = { 0 }; bool res = false; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Important: scaling ratio calculation requires pixel format, @@ -1105,7 +935,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) return false; - calculate_recout(pipe_ctx, &recout_full); + calculate_recout(pipe_ctx); /** * Setting line buffer pixel depth to 24bpp yields banding @@ -1146,7 +976,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) if (res) /* May need to re-check lb size after this in some obscure scenario */ - calculate_inits_and_adj_vp(pipe_ctx, &recout_full); + calculate_inits_and_adj_vp(pipe_ctx); DC_LOG_SCALER( "%s: Viewport:\nheight:%d width:%d x:%d " @@ -1356,6 +1186,9 @@ bool dc_add_plane_to_context( return false; } + tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream); + ASSERT(tail_pipe); + free_pipe = acquire_free_pipe_for_stream(context, pool, stream); #if defined(CONFIG_DRM_AMD_DC_DCN1_0) @@ -1373,10 +1206,6 @@ bool dc_add_plane_to_context( free_pipe->plane_state = plane_state; if (head_pipe != free_pipe) { - - tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream); - ASSERT(tail_pipe); - free_pipe->stream_res.tg = tail_pipe->stream_res.tg; free_pipe->stream_res.abm = tail_pipe->stream_res.abm; free_pipe->stream_res.opp = tail_pipe->stream_res.opp; @@ -1622,6 +1451,14 @@ static bool are_stream_backends_same( return true; } +/** + * dc_is_stream_unchanged() - Compare two stream states for equivalence. + * + * Checks if there a difference between the two states + * that would require a mode change. + * + * Does not compare cursor position or attributes. + */ bool dc_is_stream_unchanged( struct dc_stream_state *old_stream, struct dc_stream_state *stream) { @@ -1632,6 +1469,9 @@ bool dc_is_stream_unchanged( return true; } +/** + * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams. + */ bool dc_is_stream_scaling_unchanged( struct dc_stream_state *old_stream, struct dc_stream_state *stream) { @@ -1791,16 +1631,19 @@ bool resource_is_stream_unchanged( return false; } +/** + * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state. + */ enum dc_status dc_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream) { - struct dc_context *dc_ctx = dc->ctx; enum dc_status res; + DC_LOGGER_INIT(dc->ctx->logger); if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { - DC_ERROR("Max streams reached, can't add stream %p !\n", stream); + DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); return DC_ERROR_UNEXPECTED; } @@ -1810,11 +1653,14 @@ enum dc_status dc_add_stream_to_ctx( res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream); if (res != DC_OK) - DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res); + DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); return res; } +/** + * dc_remove_stream_from_ctx() - Remove a stream from a dc_state. + */ enum dc_status dc_remove_stream_from_ctx( struct dc *dc, struct dc_state *new_ctx, @@ -1976,6 +1822,8 @@ enum dc_status resource_map_pool_resources( } */ + calculate_phy_pix_clks(stream); + /* acquire new resources */ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); @@ -2033,6 +1881,12 @@ enum dc_status resource_map_pool_resources( return DC_ERROR_UNEXPECTED; } +/** + * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state + * Is a shallow copy. Increments refcounts on existing streams and planes. + * @dc: copy out of dc->current_state + * @dst_ctx: copy into this + */ void dc_resource_state_copy_construct_current( const struct dc *dc, struct dc_state *dst_ctx) @@ -2048,6 +1902,14 @@ void dc_resource_state_construct( dst_ctx->dccg = dc->res_pool->clk_mgr; } +/** + * dc_validate_global_state() - Determine if HW can support a given state + * Checks HW resource availability and bandwidth requirement. + * @dc: dc struct for this driver + * @new_ctx: state to be validated + * + * Return: DC_OK if the result can be programmed. Otherwise, an error code. + */ enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx) @@ -2375,113 +2237,15 @@ static void set_vendor_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { - uint32_t length = 0; - bool hdmi_vic_mode = false; - uint8_t checksum = 0; - uint32_t i = 0; - enum dc_timing_3d_format format; - // Can be different depending on packet content /*todo*/ - // unsigned int length = pPathMode->dolbyVision ? 24 : 5; - - info_packet->valid = false; - - format = stream->timing.timing_3d_format; - if (stream->view_format == VIEW_3D_FORMAT_NONE) - format = TIMING_3D_FORMAT_NONE; - - /* Can be different depending on packet content */ - length = 5; - - if (stream->timing.hdmi_vic != 0 - && stream->timing.h_total >= 3840 - && stream->timing.v_total >= 2160) - hdmi_vic_mode = true; - - /* According to HDMI 1.4a CTS, VSIF should be sent - * for both 3D stereo and HDMI VIC modes. - * For all other modes, there is no VSIF sent. */ + /* SPD info packet for FreeSync */ - if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode) + /* Check if Freesync is supported. Return if false. If true, + * set the corresponding bit in the info packet + */ + if (!stream->vsp_infopacket.valid) return; - /* 24bit IEEE Registration identifier (0x000c03). LSB first. */ - info_packet->sb[1] = 0x03; - info_packet->sb[2] = 0x0C; - info_packet->sb[3] = 0x00; - - /*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format. - * The value for HDMI_Video_Format are: - * 0x0 (0b000) - No additional HDMI video format is presented in this - * packet - * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC - * parameter follows - * 0x2 (0b010) - 3D format indication present. 3D_Structure and - * potentially 3D_Ext_Data follows - * 0x3..0x7 (0b011..0b111) - reserved for future use */ - if (format != TIMING_3D_FORMAT_NONE) - info_packet->sb[4] = (2 << 5); - else if (hdmi_vic_mode) - info_packet->sb[4] = (1 << 5); - - /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2): - * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure. - * The value for 3D_Structure are: - * 0x0 - Frame Packing - * 0x1 - Field Alternative - * 0x2 - Line Alternative - * 0x3 - Side-by-Side (full) - * 0x4 - L + depth - * 0x5 - L + depth + graphics + graphics-depth - * 0x6 - Top-and-Bottom - * 0x7 - Reserved for future use - * 0x8 - Side-by-Side (Half) - * 0x9..0xE - Reserved for future use - * 0xF - Not used */ - switch (format) { - case TIMING_3D_FORMAT_HW_FRAME_PACKING: - case TIMING_3D_FORMAT_SW_FRAME_PACKING: - info_packet->sb[5] = (0x0 << 4); - break; - - case TIMING_3D_FORMAT_SIDE_BY_SIDE: - case TIMING_3D_FORMAT_SBS_SW_PACKED: - info_packet->sb[5] = (0x8 << 4); - length = 6; - break; - - case TIMING_3D_FORMAT_TOP_AND_BOTTOM: - case TIMING_3D_FORMAT_TB_SW_PACKED: - info_packet->sb[5] = (0x6 << 4); - break; - - default: - break; - } - - /*PB5: If PB4 is set to 0x1 (extended resolution format) - * fill PB5 with the correct HDMI VIC code */ - if (hdmi_vic_mode) - info_packet->sb[5] = stream->timing.hdmi_vic; - - /* Header */ - info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */ - info_packet->hb1 = 0x01; /* Version */ - - /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */ - info_packet->hb2 = (uint8_t) (length); - - /* Calculate checksum */ - checksum = 0; - checksum += info_packet->hb0; - checksum += info_packet->hb1; - checksum += info_packet->hb2; - - for (i = 1; i <= length; i++) - checksum += info_packet->sb[i]; - - info_packet->sb[0] = (uint8_t) (0x100 - checksum); - - info_packet->valid = true; + *info_packet = stream->vsp_infopacket; } static void set_spd_info_packet( @@ -2537,10 +2301,6 @@ void dc_resource_state_destruct(struct dc_state *context) } } -/* - * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced - * by the src_ctx - */ void dc_resource_state_copy_construct( const struct dc_state *src_ctx, struct dc_state *dst_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index e113439aaa86..66e5c4623a49 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -100,8 +100,6 @@ static void construct(struct dc_stream_state *stream, /* EDID CAP translation for HDMI 2.0 */ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; - stream->status.link = stream->sink->link; - update_stream_signal(stream); stream->out_transfer_func = dc_create_transfer_func(); @@ -172,7 +170,7 @@ struct dc_stream_status *dc_stream_get_status( } /** - * Update the cursor attributes and set cursor surface address + * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address */ bool dc_stream_set_cursor_attributes( struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index d16a20c84792..4b5bbb13ce7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -36,9 +36,10 @@ #include "inc/hw_sequencer.h" #include "inc/compressor.h" +#include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.04" +#define DC_VER "3.2.08" #define MAX_SURFACES 3 #define MAX_STREAMS 6 @@ -47,13 +48,6 @@ /******************************************************************************* * Display Core Interfaces ******************************************************************************/ -struct dmcu_version { - unsigned int date; - unsigned int month; - unsigned int year; - unsigned int interface_version; -}; - struct dc_versions { const char *dc_ver; struct dmcu_version dmcu_version; @@ -748,5 +742,6 @@ void dc_set_power_state( struct dc *dc, enum dc_acpi_cm_power_state power_state); void dc_resume(struct dc *dc); +bool dc_is_dmcu_initialized(struct dc *dc); #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index fcfd50b5dba0..4842d2378bbf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -234,14 +234,14 @@ uint32_t generic_reg_wait(const struct dc_context *ctx, if (field_value == condition_value) { if (i * delay_between_poll_us > 1000 && !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) - dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n", + DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", delay_between_poll_us * i / 1000, func_name, line); return reg_val; } } - dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n", + DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n", delay_between_poll_us, time_out_num_tries, func_name, line); diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 7825e4b5e97c..e72fce4eca65 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -192,7 +192,6 @@ enum surface_pixel_format { /*swaped & float*/ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F, /*grow graphics here if necessary */ - SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr = SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, @@ -200,6 +199,7 @@ enum surface_pixel_format { SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr, SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb, SURFACE_PIXEL_FORMAT_SUBSAMPLE_END, + SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, SURFACE_PIXEL_FORMAT_INVALID /*grow 444 video here if necessary */ @@ -358,15 +358,16 @@ union dc_tiling_info { } gfx8; struct { + enum swizzle_mode_values swizzle; unsigned int num_pipes; - unsigned int num_banks; + unsigned int max_compressed_frags; unsigned int pipe_interleave; + + unsigned int num_banks; unsigned int num_shader_engines; unsigned int num_rb_per_se; - unsigned int max_compressed_frags; bool shaderEnable; - enum swizzle_mode_values swizzle; bool meta_linear; bool rb_aligned; bool pipe_aligned; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 8738f27a8708..29f19d57ff7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -128,8 +128,10 @@ struct dc_link { const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); -/* - * Return an enumerated dc_link. dc_link order is constant and determined at +/** + * dc_get_link_at_index() - Return an enumerated dc_link. + * + * dc_link order is constant and determined at * boot time. They cannot be created or destroyed. * Use dc_get_caps() to get number of links. */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index c5bd1fbb6982..be34d638e15d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -56,6 +56,7 @@ struct dc_stream_state { struct dc_crtc_timing_adjust adjust; struct dc_info_packet vrr_infopacket; struct dc_info_packet vsc_infopacket; + struct dc_info_packet vsp_infopacket; struct rect src; /* composition area */ struct rect dst; /* stream addressable area */ @@ -104,8 +105,6 @@ struct dc_stream_state { bool dpms_off; bool apply_edp_fast_boot_optimization; - struct dc_stream_status status; - struct dc_cursor_attributes cursor_attributes; struct dc_cursor_position cursor_position; uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode @@ -131,11 +130,13 @@ struct dc_stream_update { struct dc_crtc_timing_adjust *adjust; struct dc_info_packet *vrr_infopacket; struct dc_info_packet *vsc_infopacket; + struct dc_info_packet *vsp_infopacket; bool *dpms_off; struct colorspace_transform *gamut_remap; enum dc_color_space *output_color_space; + enum dc_dither_option *dither_option; struct dc_csc_transform *output_csc_transform; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 6e12d640d020..0b20ae23f169 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -73,10 +73,18 @@ struct hw_asic_id { void *atombios_base_address; }; +struct dc_perf_trace { + unsigned long read_count; + unsigned long write_count; + unsigned long last_entry_read; + unsigned long last_entry_write; +}; + struct dc_context { struct dc *dc; void *driver_context; /* e.g. amdgpu_device */ + struct dc_perf_trace *perf_trace; void *cgs_device; enum dce_environment dce_environment; @@ -191,7 +199,6 @@ union display_content_support { }; struct dc_panel_patch { - unsigned int disconnect_delay; unsigned int dppowerup_delay; unsigned int extra_t12_ms; }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index 9a28a04417d1..afd287f08bc9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -94,7 +94,7 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = { /*ClocksStatePerformance*/ { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; -static int dentist_get_divider_from_did(int did) +int dentist_get_divider_from_did(int did) { if (did < DENTIST_BASE_DID_1) did = DENTIST_BASE_DID_1; @@ -277,7 +277,8 @@ static int dce_set_clock( if (requested_clk_khz == 0) clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; - dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) + dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); return actual_clock; } @@ -324,9 +325,11 @@ int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz) bp->funcs->set_dce_clock(bp, &dce_clk_params); if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { - if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) - dmcu->funcs->set_psr_wait_loop(dmcu, - actual_clock / 1000 / 7); + if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { + if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) + dmcu->funcs->set_psr_wait_loop(dmcu, + actual_clock / 1000 / 7); + } } clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; @@ -588,6 +591,8 @@ static void dce11_pplib_apply_display_requirements( dc, context->bw.dce.sclk_khz); + pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; + pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dce.sclk_deep_sleep_khz; @@ -671,6 +676,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; + int unpatched_disp_clk = context->bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ @@ -685,6 +695,8 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr, clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); + + context->bw.dce.dispclk_khz = unpatched_disp_clk; } static void dce12_update_clocks(struct clk_mgr *clk_mgr, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h index 046077797416..3bceb31d910d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h @@ -165,4 +165,6 @@ struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx); void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); +int dentist_get_divider_from_did(int did); + #endif /* _DCE_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index c47c81883d3c..cce0d18f91da 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -908,7 +908,6 @@ static void dce110_stream_encoder_dp_blank( struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - uint32_t retries = 0; uint32_t reg1 = 0; uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; @@ -926,30 +925,28 @@ static void dce110_stream_encoder_dp_blank( * (2 = start of the next vertical blank) */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2); /* Larger delay to wait until VBLANK - use max retry of - * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode + - * a little more because we may not trust delay accuracy. - */ + * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode + + * a little more because we may not trust delay accuracy. + */ max_retries = DP_BLANK_MAX_RETRY * 150; /* disable DP stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); /* the encoder stops sending the video stream - * at the start of the vertical blanking. - * Poll for DP_VID_STREAM_STATUS == 0 - */ + * at the start of the vertical blanking. + * Poll for DP_VID_STREAM_STATUS == 0 + */ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, max_retries); - ASSERT(retries <= max_retries); - /* Tell the DP encoder to ignore timing from CRTC, must be done after - * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is - * complete, stream status will be stuck in video stream enabled state, - * i.e. DP_VID_STREAM_STATUS stuck at 1. - */ + * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is + * complete, stream status will be stuck in video stream enabled state, + * i.e. DP_VID_STREAM_STATUS stuck at 1. + */ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c index bc50a8e25f4f..87771676acac 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c @@ -117,6 +117,18 @@ void dce100_prepare_bandwidth( false); } +void dce100_optimize_bandwidth( + struct dc *dc, + struct dc_state *context) +{ + dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); + + dc->res_pool->clk_mgr->funcs->update_clocks( + dc->res_pool->clk_mgr, + context, + true); +} + /**************************************************************************/ void dce100_hw_sequencer_construct(struct dc *dc) @@ -125,6 +137,6 @@ void dce100_hw_sequencer_construct(struct dc *dc) dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; - dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; + dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c index 1f7f25013217..52d50e24a995 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c @@ -64,65 +64,37 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = { static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600; -enum fbc_idle_force { - /* Bit 0 - Display registers updated */ - FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001, - - /* Bit 2 - FBC_GRPH_COMP_EN register updated */ - FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002, - /* Bit 3 - FBC_SRC_SEL register updated */ - FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004, - /* Bit 4 - FBC_MIN_COMPRESSION register updated */ - FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008, - /* Bit 5 - FBC_ALPHA_COMP_EN register updated */ - FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010, - /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */ - FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020, - /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */ - FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040, - - /* Bit 24 - Memory write to region 0 defined by MC registers. */ - FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000, - /* Bit 25 - Memory write to region 1 defined by MC registers */ - FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000, - /* Bit 26 - Memory write to region 2 defined by MC registers */ - FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000, - /* Bit 27 - Memory write to region 3 defined by MC registers. */ - FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000, - - /* Bit 28 - Memory write from any client other than MCIF */ - FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000, - /* Bit 29 - CG statics screen signal is inactive */ - FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000, -}; - - static uint32_t align_to_chunks_number_per_line(uint32_t pixels) { return 256 * ((pixels + 255) / 256); } -static void reset_lb_on_vblank(struct dc_context *ctx) +static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst) { - uint32_t value, frame_count; + uint32_t value; + uint32_t frame_count; + uint32_t status_pos; uint32_t retry = 0; - uint32_t status_pos = - dm_read_reg(ctx, mmCRTC_STATUS_POSITION); + struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor); + + cp110->offsets = reg_offsets[crtc_inst]; + + status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION)); /* Only if CRTC is enabled and counter is moving we wait for one frame. */ - if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) { + if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) { /* Resetting LB on VBlank */ - value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL); + value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL)); set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL); set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2); - dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value); + dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value); - frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT); + frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)); for (retry = 10000; retry > 0; retry--) { - if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT)) + if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT))) break; udelay(10); } @@ -130,13 +102,11 @@ static void reset_lb_on_vblank(struct dc_context *ctx) dm_error("Frame count did not increase for 100ms.\n"); /* Resetting LB on VBlank */ - value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL); + value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL)); set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL); set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2); - dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value); - + dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value); } - } static void wait_for_fbc_state_changed( @@ -226,10 +196,10 @@ void dce110_compressor_enable_fbc( uint32_t addr; uint32_t value, misc_value; - addr = mmFBC_CNTL; value = dm_read_reg(compressor->ctx, addr); set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN); + /* params->inst is valid HW CRTC instance start from 0 */ set_reg_field_value( value, params->inst, @@ -238,8 +208,10 @@ void dce110_compressor_enable_fbc( /* Keep track of enum controller_id FBC is attached to */ compressor->is_enabled = true; - compressor->attached_inst = params->inst; - cp110->offsets = reg_offsets[params->inst]; + /* attached_inst is SW CRTC instance start from 1 + * 0 = CONTROLLER_ID_UNDEFINED means not attached crtc + */ + compressor->attached_inst = params->inst + CONTROLLER_ID_D0; /* Toggle it as there is bug in HW */ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN); @@ -268,9 +240,10 @@ void dce110_compressor_enable_fbc( void dce110_compressor_disable_fbc(struct compressor *compressor) { struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor); + uint32_t crtc_inst = 0; if (compressor->options.bits.FBC_SUPPORT) { - if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) { + if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) { uint32_t reg_data; /* Turn off compression */ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL); @@ -284,8 +257,10 @@ void dce110_compressor_disable_fbc(struct compressor *compressor) wait_for_fbc_state_changed(cp110, false); } - /* Sync line buffer - dce100/110 only*/ - reset_lb_on_vblank(compressor->ctx); + /* Sync line buffer which fbc was attached to dce100/110 only */ + if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3) + reset_lb_on_vblank(compressor, + crtc_inst - CONTROLLER_ID_D0); } } @@ -328,6 +303,8 @@ void dce110_compressor_program_compressed_surface_address_and_pitch( uint32_t compressed_surf_address_low_part = compressor->compr_surface_address.addr.low_part; + cp110->offsets = reg_offsets[params->inst]; + /* Clear content first. */ dm_write_reg( compressor->ctx, @@ -410,13 +387,7 @@ void dce110_compressor_set_fbc_invalidation_triggers( value = dm_read_reg(compressor->ctx, addr); set_reg_field_value( value, - fbc_trigger | - FBC_IDLE_FORCE_GRPH_COMP_EN | - FBC_IDLE_FORCE_SRC_SEL_CHANGE | - FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE | - FBC_IDLE_FORCE_ALPHA_COMP_EN | - FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN | - FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF, + fbc_trigger, FBC_IDLE_FORCE_CLEAR_MASK, FBC_IDLE_FORCE_CLEAR_MASK); dm_write_reg(compressor->ctx, addr, value); @@ -549,7 +520,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor, compressor->base.channel_interleave_size = 0; compressor->base.dram_channels_num = 0; compressor->base.lpt_channels_num = 0; - compressor->base.attached_inst = 0; + compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED; compressor->base.is_enabled = false; compressor->base.funcs = &dce110_compressor_funcs; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 9724a17e352b..4bf24758217f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1267,10 +1267,19 @@ static void program_scaler(const struct dc *dc, pipe_ctx->plane_res.scl_data.lb_params.depth, &pipe_ctx->stream->bit_depth_params); - if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) + if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) { + /* + * The way 420 is packed, 2 channels carry Y component, 1 channel + * alternate between Cb and Cr, so both channels need the pixel + * value for Y + */ + if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + color.color_r_cr = color.color_g_y; + pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color( pipe_ctx->stream_res.tg, &color); + } pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data); @@ -1766,12 +1775,13 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx, * Check if FBC can be enabled */ static bool should_enable_fbc(struct dc *dc, - struct dc_state *context, - uint32_t *pipe_idx) + struct dc_state *context, + uint32_t *pipe_idx) { uint32_t i; struct pipe_ctx *pipe_ctx = NULL; struct resource_context *res_ctx = &context->res_ctx; + unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; ASSERT(dc->fbc_compressor); @@ -1786,14 +1796,28 @@ static bool should_enable_fbc(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { if (res_ctx->pipe_ctx[i].stream) { + pipe_ctx = &res_ctx->pipe_ctx[i]; - *pipe_idx = i; - break; + + if (!pipe_ctx) + continue; + + /* fbc not applicable on underlay pipe */ + if (pipe_ctx->pipe_idx != underlay_idx) { + *pipe_idx = i; + break; + } } } - /* Pipe context should be found */ - ASSERT(pipe_ctx); + if (i == dc->res_pool->pipe_count) + return false; + + if (!pipe_ctx->stream->sink) + return false; + + if (!pipe_ctx->stream->sink->link) + return false; /* Only supports eDP */ if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) @@ -1817,8 +1841,9 @@ static bool should_enable_fbc(struct dc *dc, /* * Enable FBC */ -static void enable_fbc(struct dc *dc, - struct dc_state *context) +static void enable_fbc( + struct dc *dc, + struct dc_state *context) { uint32_t pipe_idx = 0; @@ -1828,10 +1853,9 @@ static void enable_fbc(struct dc *dc, struct compressor *compr = dc->fbc_compressor; struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; - params.source_view_width = pipe_ctx->stream->timing.h_addressable; params.source_view_height = pipe_ctx->stream->timing.v_addressable; - + params.inst = pipe_ctx->stream_res.tg->inst; compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr; compr->funcs->surface_address_and_pitch(compr, ¶ms); @@ -2046,10 +2070,10 @@ enum dc_status dce110_apply_ctx_to_hw( return status; } - dcb->funcs->set_scratch_critical_state(dcb, false); - if (dc->fbc_compressor) - enable_fbc(dc, context); + enable_fbc(dc, dc->current_state); + + dcb->funcs->set_scratch_critical_state(dcb, false); return DC_OK; } @@ -2282,7 +2306,7 @@ static void dce110_enable_per_frame_crtc_position_reset( int i; gsl_params.gsl_group = 0; - gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst; + gsl_params.gsl_master = 0; for (i = 0; i < group_size; i++) grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock( @@ -2408,7 +2432,6 @@ static void dce110_program_front_end_for_pipe( struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; - unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; unsigned int i; DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); @@ -2449,15 +2472,6 @@ static void dce110_program_front_end_for_pipe( program_scaler(dc, pipe_ctx); - /* fbc not applicable on Underlay pipe */ - if (dc->fbc_compressor && old_pipe->stream && - pipe_ctx->pipe_idx != underlay_idx) { - if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) - dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); - else - enable_fbc(dc, dc->current_state); - } - mi->funcs->mem_input_program_surface_config( mi, plane_state->format, @@ -2534,6 +2548,9 @@ static void dce110_apply_ctx_for_surface( if (num_planes == 0) return; + if (dc->fbc_compressor) + dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -2576,6 +2593,9 @@ static void dce110_apply_ctx_for_surface( (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) dc->hwss.pipe_control_lock(dc, pipe_ctx, false); } + + if (dc->fbc_compressor) + enable_fbc(dc, dc->current_state); } static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 6d40b3d54ac1..cdd1d6b7b9f2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -41,7 +41,6 @@ #include "dce/dce_mem_input.h" #include "dce/dce_link_encoder.h" #include "dce/dce_stream_encoder.h" -#include "dce/dce_mem_input.h" #include "dce/dce_ipp.h" #include "dce/dce_transform.h" #include "dce/dce_opp.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c index 20f531d27e2b..54abedbf1b43 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c @@ -223,7 +223,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, &dc->res_pool->pp_smu_req; struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; - struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; + uint32_t requested_dcf_clock_in_khz = 0; bool send_request_to_increase = false; bool send_request_to_lower = false; int display_count; @@ -263,8 +263,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, // F Clock if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; - clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz); @@ -293,10 +291,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, */ if (send_request_to_increase) { /*use dcfclk to request voltage*/ - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; - clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); + requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); - notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); + notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz); if (pp_smu->set_display_requirement) pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); @@ -317,10 +314,9 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, if (!send_request_to_increase && send_request_to_lower) { /*use dcfclk to request voltage*/ - clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; - clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); + requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); - notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); + notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz); if (pp_smu->set_display_requirement) pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); @@ -332,12 +328,10 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, *smu_req_cur = smu_req; } - static const struct clk_mgr_funcs dcn1_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = dcn1_update_clocks }; - struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx) { struct dc_debug_options *debug = &ctx->dc->debug; @@ -377,3 +371,5 @@ struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx) return &clk_mgr_dce->base; } + + diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h index 9dbaf6578006..a995eda443a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h @@ -28,6 +28,12 @@ #include "../dce/dce_clk_mgr.h" +struct clk_bypass { + uint32_t dcfclk_bypass; + uint32_t dispclk_pypass; + uint32_t dprefclk_bypass; +}; + void dcn1_pplib_apply_display_requirements( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 3eea44092a04..7469333a2c8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -324,7 +324,7 @@ bool cm_helper_translate_curve_to_hw_format( if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) return false; - PERF_TRACE(); + PERF_TRACE_CTX(output_tf->ctx); corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; @@ -513,7 +513,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS) return false; - PERF_TRACE(); + PERF_TRACE_CTX(output_tf->ctx); corner_points = lut_params->corner_points; rgb_resulted = lut_params->rgb_resulted; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 4254e7e1a509..c7d1e678ebf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -100,7 +100,7 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub) REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable); - return true ? false : enable; + return enable ? true : false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 74132a1f3046..345af015d061 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -99,6 +99,14 @@ static unsigned int hubp1_get_underflow_status(struct hubp *hubp) return hubp_underflow; } + +void hubp1_clear_underflow(struct hubp *hubp) +{ + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + + REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1); +} + static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); @@ -565,19 +573,6 @@ void hubp1_program_deadline( REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler, DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler); - if (REG(PREFETCH_SETTINS)) - REG_SET_2(PREFETCH_SETTINS, 0, - DST_Y_PREFETCH, dlg_attr->dst_y_prefetch, - VRATIO_PREFETCH, dlg_attr->vratio_prefetch); - else - REG_SET_2(PREFETCH_SETTINGS, 0, - DST_Y_PREFETCH, dlg_attr->dst_y_prefetch, - VRATIO_PREFETCH, dlg_attr->vratio_prefetch); - - REG_SET_2(VBLANK_PARAMETERS_0, 0, - DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank, - DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank); - REG_SET(REF_FREQ_TO_PIX_FREQ, 0, REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq); @@ -585,9 +580,6 @@ void hubp1_program_deadline( REG_SET(VBLANK_PARAMETERS_1, 0, REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l); - REG_SET(VBLANK_PARAMETERS_3, 0, - REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l); - if (REG(NOM_PARAMETERS_0)) REG_SET(NOM_PARAMETERS_0, 0, DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l); @@ -602,27 +594,13 @@ void hubp1_program_deadline( REG_SET(NOM_PARAMETERS_5, 0, REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l); - REG_SET_2(PER_LINE_DELIVERY_PRE, 0, - REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l, - REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c); - REG_SET_2(PER_LINE_DELIVERY, 0, REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l, REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c); - if (REG(PREFETCH_SETTINS_C)) - REG_SET(PREFETCH_SETTINS_C, 0, - VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c); - else - REG_SET(PREFETCH_SETTINGS_C, 0, - VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c); - REG_SET(VBLANK_PARAMETERS_2, 0, REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c); - REG_SET(VBLANK_PARAMETERS_4, 0, - REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c); - if (REG(NOM_PARAMETERS_2)) REG_SET(NOM_PARAMETERS_2, 0, DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c); @@ -642,10 +620,6 @@ void hubp1_program_deadline( QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm, QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm); - REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0, - MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank, - QoS_LEVEL_FLIP, ttu_attr->qos_level_flip); - /* TTU - per luma/chroma */ /* Assumed surf0 is luma and 1 is chroma */ @@ -654,25 +628,15 @@ void hubp1_program_deadline( QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l, QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l); - REG_SET(DCN_SURF0_TTU_CNTL1, 0, - REFCYC_PER_REQ_DELIVERY_PRE, - ttu_attr->refcyc_per_req_delivery_pre_l); - REG_SET_3(DCN_SURF1_TTU_CNTL0, 0, REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c, QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c, QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c); - REG_SET(DCN_SURF1_TTU_CNTL1, 0, - REFCYC_PER_REQ_DELIVERY_PRE, - ttu_attr->refcyc_per_req_delivery_pre_c); - REG_SET_3(DCN_CUR0_TTU_CNTL0, 0, REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0, QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0, QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0); - REG_SET(DCN_CUR0_TTU_CNTL1, 0, - REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0); } static void hubp1_setup( @@ -690,6 +654,48 @@ static void hubp1_setup( hubp1_vready_workaround(hubp, pipe_dest); } +static void hubp1_setup_interdependent( + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *ttu_attr) +{ + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + + REG_SET_2(PREFETCH_SETTINS, 0, + DST_Y_PREFETCH, dlg_attr->dst_y_prefetch, + VRATIO_PREFETCH, dlg_attr->vratio_prefetch); + + REG_SET(PREFETCH_SETTINS_C, 0, + VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c); + + REG_SET_2(VBLANK_PARAMETERS_0, 0, + DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank, + DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank); + + REG_SET(VBLANK_PARAMETERS_3, 0, + REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l); + + REG_SET(VBLANK_PARAMETERS_4, 0, + REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c); + + REG_SET_2(PER_LINE_DELIVERY_PRE, 0, + REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c); + + REG_SET(DCN_SURF0_TTU_CNTL1, 0, + REFCYC_PER_REQ_DELIVERY_PRE, + ttu_attr->refcyc_per_req_delivery_pre_l); + REG_SET(DCN_SURF1_TTU_CNTL1, 0, + REFCYC_PER_REQ_DELIVERY_PRE, + ttu_attr->refcyc_per_req_delivery_pre_c); + REG_SET(DCN_CUR0_TTU_CNTL1, 0, + REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0); + + REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0, + MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank, + QoS_LEVEL_FLIP, ttu_attr->qos_level_flip); +} + bool hubp1_is_flip_pending(struct hubp *hubp) { uint32_t flip_pending = 0; @@ -1178,6 +1184,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { hubp1_program_surface_config, .hubp_is_flip_pending = hubp1_is_flip_pending, .hubp_setup = hubp1_setup, + .hubp_setup_interdependent = hubp1_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings, .hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings, .set_blank = hubp1_set_blank, @@ -1190,6 +1197,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = { .hubp_clk_cntl = hubp1_clk_cntl, .hubp_vtg_sel = hubp1_vtg_sel, .hubp_read_state = hubp1_read_state, + .hubp_clear_underflow = hubp1_clear_underflow, .hubp_disable_control = hubp1_disable_control, .hubp_get_underflow_status = hubp1_get_underflow_status, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 4890273b632b..62d4232e7796 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -251,6 +251,7 @@ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\ @@ -435,6 +436,7 @@ type HUBP_NO_OUTSTANDING_REQ;\ type HUBP_VTG_SEL;\ type HUBP_UNDERFLOW_STATUS;\ + type HUBP_UNDERFLOW_CLEAR;\ type NUM_PIPES;\ type NUM_BANKS;\ type PIPE_INTERLEAVE;\ @@ -739,6 +741,7 @@ void dcn10_hubp_construct( const struct dcn_mi_mask *hubp_mask); void hubp1_read_state(struct hubp *hubp); +void hubp1_clear_underflow(struct hubp *hubp); enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 87495dea45ec..91e015e14355 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1227,7 +1227,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, tf = plane_state->in_transfer_func; if (plane_state->gamma_correction && - !plane_state->gamma_correction->is_identity + !dpp_base->ctx->dc->debug.always_use_regamma + && !plane_state->gamma_correction->is_identity && dce_use_lut(plane_state->format)) dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); @@ -1400,7 +1401,7 @@ static void dcn10_enable_per_frame_crtc_position_reset( if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset) grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset( grouped_pipes[i]->stream_res.tg, - grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst, + 0, &grouped_pipes[i]->stream->triggered_crtc_reset); DC_SYNC_INFO("Waiting for trigger\n"); @@ -1770,7 +1771,7 @@ bool is_rgb_cspace(enum dc_color_space output_color_space) } } -static void dcn10_get_surface_visual_confirm_color( +void dcn10_get_surface_visual_confirm_color( const struct pipe_ctx *pipe_ctx, struct tg_color *color) { @@ -1806,7 +1807,7 @@ static void dcn10_get_surface_visual_confirm_color( } } -static void dcn10_get_hdr_visual_confirm_color( +void dcn10_get_hdr_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) { @@ -2067,6 +2068,10 @@ void update_dchubp_dpp( &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param); + hubp->funcs->hubp_setup_interdependent( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs); } size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport; @@ -2154,6 +2159,15 @@ static void dcn10_blank_pixel_data( color_space = stream->output_color_space; color_space_to_black_color(dc, color_space, &black_color); + /* + * The way 420 is packed, 2 channels carry Y component, 1 channel + * alternate between Cb and Cr, so both channels need the pixel + * value for Y + */ + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + black_color.color_r_cr = black_color.color_g_y; + + if (stream_res->tg->funcs->set_blank_color) stream_res->tg->funcs->set_blank_color( stream_res->tg, @@ -2337,6 +2351,34 @@ static void dcn10_apply_ctx_for_surface( dcn10_pipe_control_lock(dc, top_pipe_to_program, false); + if (top_pipe_to_program->plane_state && + top_pipe_to_program->plane_state->update_flags.bits.full_update) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + /* Skip inactive pipes and ones already updated */ + if (!pipe_ctx->stream || pipe_ctx->stream == stream + || !pipe_ctx->plane_state) + continue; + + pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); + + pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( + pipe_ctx->plane_res.hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx->stream || pipe_ctx->stream == stream + || !pipe_ctx->plane_state) + continue; + + dcn10_pipe_control_lock(dc, pipe_ctx, false); + } + if (num_planes == 0) false_optc_underflow_wa(dc, stream, tg); @@ -2710,6 +2752,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .set_avmute = dce110_set_avmute, .log_hw_state = dcn10_log_hw_state, .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, .edp_backlight_control = hwss_edp_backlight_control, .edp_power_control = hwss_edp_power_control, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 5e5610c9e600..f8eea10e4c64 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -51,6 +51,8 @@ void dcn10_get_hw_state( char *pBuf, unsigned int bufSize, unsigned int mask); +void dcn10_clear_status_bits(struct dc *dc, unsigned int mask); + bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); @@ -61,6 +63,14 @@ void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); +void dcn10_get_surface_visual_confirm_color( + const struct pipe_ctx *pipe_ctx, + struct tg_color *color); + +void dcn10_get_hdr_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color); + void update_dchubp_dpp( struct dc *dc, struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 64158900730f..cd469014baa3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -44,6 +44,7 @@ #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "dcn10_cm_common.h" +#include "dcn10_clk_mgr.h" static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) { @@ -454,12 +455,6 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int remaining_buffer -= chars_printed; pBuf += chars_printed; - - // Clear underflow for debug purposes - // We want to keep underflow sticky bit on for the longevity tests outside of test environment. - // This function is called only from Windows or Diags test environment, hence it's safe to clear - // it from here without affecting the original intent. - tg->funcs->clear_optc_underflow(tg); } } @@ -469,19 +464,75 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize) { unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; - chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz," - "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n" - "%d,%d,%d,%d,%d,%d,%d\n", + chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk," + "dppclk,fclk,socclk\n" + "%d,%d,%d,%d,%d,%d\n", dc->current_state->bw.dcn.clk.dcfclk_khz, dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz, dc->current_state->bw.dcn.clk.dispclk_khz, dc->current_state->bw.dcn.clk.dppclk_khz, - dc->current_state->bw.dcn.clk.max_supported_dppclk_khz, dc->current_state->bw.dcn.clk.fclk_khz, dc->current_state->bw.dcn.clk.socclk_khz); - return chars_printed; + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + return bufSize - remaining_buffer; +} + +static void dcn10_clear_otpc_underflow(struct dc *dc) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + for (i = 0; i < pool->timing_generator_count; i++) { + struct timing_generator *tg = pool->timing_generators[i]; + struct dcn_otg_state s = {0}; + + optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); + + if (s.otg_enabled & 1) + tg->funcs->clear_optc_underflow(tg); + } +} + +static void dcn10_clear_hubp_underflow(struct dc *dc) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + for (i = 0; i < pool->pipe_count; i++) { + struct hubp *hubp = pool->hubps[i]; + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); + + hubp->funcs->hubp_read_state(hubp); + + if (!s->blank_en) + hubp->funcs->hubp_clear_underflow(hubp); + } +} + +void dcn10_clear_status_bits(struct dc *dc, unsigned int mask) +{ + /* + * Mask Format + * Bit 0 - 31: Status bit to clear + * + * Mask = 0x0 means clear all status bits + */ + const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW = 0x1; + const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW = 0x2; + + if (mask == 0x0) + mask = 0xFFFFFFFF; + + if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW) + dcn10_clear_hubp_underflow(dc); + + if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW) + dcn10_clear_otpc_underflow(dc); } void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask) @@ -491,16 +542,16 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne * Bit 0 - 15: Hardware block mask * Bit 15: 1 = Invariant Only, 0 = All */ - const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1; - const unsigned int DC_HW_STATE_MASK_HUBP = 0x2; - const unsigned int DC_HW_STATE_MASK_RQ = 0x4; - const unsigned int DC_HW_STATE_MASK_DLG = 0x8; - const unsigned int DC_HW_STATE_MASK_TTU = 0x10; - const unsigned int DC_HW_STATE_MASK_CM = 0x20; - const unsigned int DC_HW_STATE_MASK_MPCC = 0x40; - const unsigned int DC_HW_STATE_MASK_OTG = 0x80; - const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100; - const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000; + const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1; + const unsigned int DC_HW_STATE_MASK_HUBP = 0x2; + const unsigned int DC_HW_STATE_MASK_RQ = 0x4; + const unsigned int DC_HW_STATE_MASK_DLG = 0x8; + const unsigned int DC_HW_STATE_MASK_TTU = 0x10; + const unsigned int DC_HW_STATE_MASK_CM = 0x20; + const unsigned int DC_HW_STATE_MASK_MPCC = 0x40; + const unsigned int DC_HW_STATE_MASK_OTG = 0x80; + const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100; + const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000; unsigned int chars_printed = 0; unsigned int remaining_buf_size = bufSize; @@ -556,6 +607,9 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne remaining_buf_size -= chars_printed; } - if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) + if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) { chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 7d1f66797cb3..7c138615f17d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -335,9 +335,8 @@ void optc1_program_timing( /* Enable stereo - only when we need to pack 3D frame. Other types * of stereo handled in explicit call */ - h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ? - 1 : 0; + h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing); REG_UPDATE(OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, h_div_2); @@ -360,20 +359,19 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab static void optc1_unblank_crtc(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t vertical_interrupt_enable = 0; - - REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL, - OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable); - - /* temporary work around for vertical interrupt, once vertical interrupt enabled, - * this check will be removed. - */ - if (vertical_interrupt_enable) - optc1_set_blank_data_double_buffer(optc, true); REG_UPDATE_2(OTG_BLANK_CONTROL, OTG_BLANK_DATA_EN, 0, OTG_BLANK_DE_MODE, 0); + + /* W/A for automated testing + * Automated testing will fail underflow test as there + * sporadic underflows which occur during the optc blank + * sequence. As a w/a, clear underflow on unblank. + * This prevents the failure, but will not mask actual + * underflow that affect real use cases. + */ + optc1_clear_optc_underflow(optc); } /** @@ -1422,3 +1420,9 @@ void dcn10_timing_generator_init(struct optc *optc1) optc1->min_h_sync_width = 8; optc1->min_v_sync_width = 1; } + +bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index c1b114209fe8..8bacf0b6e27e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -565,4 +565,6 @@ bool optc1_configure_crc(struct timing_generator *optc, bool optc1_get_crc(struct timing_generator *optc, uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb); +bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); + #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 47dbe4bb294a..5d4772dec0ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -202,7 +202,6 @@ enum dcn10_clk_src_array_id { #define MMHUB_SR(reg_name)\ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ mm ## reg_name - /* macros to expend register list macro defined in HW object header file * end *********************/ @@ -436,7 +435,6 @@ static const struct dcn_optc_mask tg_mask = { TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; - static const struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_0), NBIO_SR(BIOS_SCRATCH_3), @@ -497,7 +495,6 @@ static const struct dce110_clk_src_mask cs_mask = { CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK) }; - static const struct resource_caps res_cap = { .num_timing_generator = 4, .num_opp = 4, @@ -1277,7 +1274,6 @@ static bool construct( goto fail; } } - pool->base.clk_mgr = dcn1_clk_mgr_create(ctx); if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 6f9078f3c4d3..b8b5525a389a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -766,7 +766,6 @@ void enc1_stream_encoder_dp_blank( struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - uint32_t retries = 0; uint32_t reg1 = 0; uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; @@ -803,8 +802,6 @@ void enc1_stream_encoder_dp_blank( 0, 10, max_retries); - ASSERT(retries <= max_retries); - /* Tell the DP encoder to ignore timing from CRTC, must be done after * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is * complete, stream status will be stuck in video stream enabled state, diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h index 34a701ca879e..65663f4d93e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h +++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h @@ -33,6 +33,7 @@ #define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data) #define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data) +#define EVENT_LOG_CUST_MSG(tag, a, ...) #endif diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index beb08fd12b1d..0029a39efb1c 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -102,7 +102,7 @@ struct pp_smu_funcs_rv { */ void (*set_display_count)(struct pp_smu *pp, int count); - /* which SMU message? are reader and writer WM separate SMU msg? */ + /* reader and writer WM's are sent together as part of one table*/ /* * PPSMC_MSG_SetDriverDramAddrHigh * PPSMC_MSG_SetDriverDramAddrLow diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 28128c02de00..1961cc6d9143 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -31,6 +31,8 @@ #define __DM_SERVICES_H__ +#include "amdgpu_dm_trace.h" + /* TODO: remove when DC is complete. */ #include "dm_services_types.h" #include "logger_interface.h" @@ -70,6 +72,7 @@ static inline uint32_t dm_read_reg_func( } #endif value = cgs_read_register(ctx->cgs_device, address); + trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); return value; } @@ -90,6 +93,7 @@ static inline void dm_write_reg_func( } #endif cgs_write_register(ctx->cgs_device, address, value); + trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); } static inline uint32_t dm_read_index_reg( @@ -351,8 +355,12 @@ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx, /* * performance tracing */ -void dm_perf_trace_timestamp(const char *func_name, unsigned int line); -#define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__) +#define PERF_TRACE() trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\ + CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\ + &CTX->perf_trace->last_entry_write, __func__, __LINE__) +#define PERF_TRACE_CTX(__CTX) trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\ + __CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\ + &__CTX->perf_trace->last_entry_write, __func__, __LINE__) /* diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index f20161c5706d..dada04296025 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create( struct dc_context *ctx) { struct gpio_service *service; - uint32_t index_of_id; service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL); @@ -78,44 +77,33 @@ struct gpio_service *dal_gpio_service_create( goto failure_1; } - /* allocate and initialize business storage */ + /* allocate and initialize busyness storage */ { - const uint32_t bits_per_uint = sizeof(uint32_t) << 3; - index_of_id = 0; service->ctx = ctx; do { uint32_t number_of_bits = service->factory.number_of_pins[index_of_id]; + uint32_t i = 0; - uint32_t number_of_uints = - (number_of_bits + bits_per_uint - 1) / - bits_per_uint; - - uint32_t *slot; - - if (number_of_bits) { - uint32_t index_of_uint = 0; + if (number_of_bits) { + service->busyness[index_of_id] = + kcalloc(number_of_bits, sizeof(char), + GFP_KERNEL); - slot = kcalloc(number_of_uints, - sizeof(uint32_t), - GFP_KERNEL); - - if (!slot) { + if (!service->busyness[index_of_id]) { BREAK_TO_DEBUGGER(); goto failure_2; } do { - slot[index_of_uint] = 0; - - ++index_of_uint; - } while (index_of_uint < number_of_uints); - } else - slot = NULL; - - service->busyness[index_of_id] = slot; + service->busyness[index_of_id][i] = 0; + ++i; + } while (i < number_of_bits); + } else { + service->busyness[index_of_id] = NULL; + } ++index_of_id; } while (index_of_id < GPIO_ID_COUNT); @@ -125,13 +113,8 @@ struct gpio_service *dal_gpio_service_create( failure_2: while (index_of_id) { - uint32_t *slot; - --index_of_id; - - slot = service->busyness[index_of_id]; - - kfree(slot); + kfree(service->busyness[index_of_id]); } failure_1: @@ -169,9 +152,7 @@ void dal_gpio_service_destroy( uint32_t index_of_id = 0; do { - uint32_t *slot = (*ptr)->busyness[index_of_id]; - - kfree(slot); + kfree((*ptr)->busyness[index_of_id]); ++index_of_id; } while (index_of_id < GPIO_ID_COUNT); @@ -192,11 +173,7 @@ static bool is_pin_busy( enum gpio_id id, uint32_t en) { - const uint32_t bits_per_uint = sizeof(uint32_t) << 3; - - const uint32_t *slot = service->busyness[id] + (en / bits_per_uint); - - return 0 != (*slot & (1 << (en % bits_per_uint))); + return service->busyness[id][en]; } static void set_pin_busy( @@ -204,10 +181,7 @@ static void set_pin_busy( enum gpio_id id, uint32_t en) { - const uint32_t bits_per_uint = sizeof(uint32_t) << 3; - - service->busyness[id][en / bits_per_uint] |= - (1 << (en % bits_per_uint)); + service->busyness[id][en] = true; } static void set_pin_free( @@ -215,10 +189,7 @@ static void set_pin_free( enum gpio_id id, uint32_t en) { - const uint32_t bits_per_uint = sizeof(uint32_t) << 3; - - service->busyness[id][en / bits_per_uint] &= - ~(1 << (en % bits_per_uint)); + service->busyness[id][en] = false; } enum gpio_result dal_gpio_service_open( diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h index c7f3081f59cc..1d501a43d13b 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h @@ -36,10 +36,9 @@ struct gpio_service { /* * @brief * Business storage. - * For each member of 'enum gpio_id', - * store array of bits (packed into uint32_t slots), - * index individual bit by 'en' value */ - uint32_t *busyness[GPIO_ID_COUNT]; + * one byte For each member of 'enum gpio_id' + */ + char *busyness[GPIO_ID_COUNT]; }; enum gpio_result dal_gpio_service_open( diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index a683f4102e65..c2028c4744a6 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -79,6 +79,7 @@ bool dal_hw_factory_init( dal_hw_factory_dce110_init(factory); return true; case DCE_VERSION_12_0: + case DCE_VERSION_12_1: dal_hw_factory_dce120_init(factory); return true; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 096f45628630..236ca28784a9 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -76,6 +76,7 @@ bool dal_hw_translate_init( dal_hw_translate_dce110_init(translate); return true; case DCE_VERSION_12_0: + case DCE_VERSION_12_1: dal_hw_translate_dce120_init(translate); return true; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c index e56093f26eed..1ad6e49102ff 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c @@ -90,6 +90,7 @@ struct i2caux *dal_i2caux_create( case DCE_VERSION_10_0: return dal_i2caux_dce100_create(ctx); case DCE_VERSION_12_0: + case DCE_VERSION_12_1: return dal_i2caux_dce120_create(ctx); #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case DCN_VERSION_1_0: diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h index bcb18f5e1e60..7a147a9762a0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/compressor.h +++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h @@ -77,6 +77,7 @@ struct compressor_funcs { }; struct compressor { struct dc_context *ctx; + /* CONTROLLER_ID_D0 + instance, CONTROLLER_ID_UNDEFINED = 0 */ uint32_t attached_inst; bool is_enabled; const struct compressor_funcs *funcs; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index e3ee96afa60e..b168a5e9dd9d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -272,6 +272,17 @@ union bw_context { struct dce_bw_output dce; }; +/** + * struct dc_state - The full description of a state requested by a user + * + * @streams: Stream properties + * @stream_status: The planes on a given stream + * @res_ctx: Persistent state of resources + * @bw: The output from bandwidth and watermark calculations + * @pp_display_cfg: PowerPlay clocks and settings + * @dcn_bw_vars: non-stack memory to support bandwidth calculations + * + */ struct dc_state { struct dc_stream_state *streams[MAX_PIPES]; struct dc_stream_status stream_status[MAX_PIPES]; @@ -279,7 +290,6 @@ struct dc_state { struct resource_context res_ctx; - /* The output from BW and WM calculations. */ union bw_context bw; /* Note: these are big structures, do *not* put on stack! */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index 4550747fb61c..cb85eaa9857f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -32,6 +32,13 @@ enum dmcu_state { DMCU_RUNNING = 1 }; +struct dmcu_version { + unsigned int date; + unsigned int month; + unsigned int year; + unsigned int interface_version; +}; + struct dmcu { struct dc_context *ctx; const struct dmcu_funcs *funcs; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 334c48cdafdc..04c6989aac58 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -63,6 +63,11 @@ struct hubp_funcs { struct _vcs_dpi_display_rq_regs_st *rq_regs, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); + void (*hubp_setup_interdependent)( + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs); + void (*dcc_control)(struct hubp *hubp, bool enable, bool independent_64b_blks); void (*mem_program_viewport)( @@ -121,6 +126,7 @@ struct hubp_funcs { void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); void (*hubp_read_state)(struct hubp *hubp); + void (*hubp_clear_underflow)(struct hubp *hubp); void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index e9b702ce02dd..d6a85f48b6d1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -200,6 +200,7 @@ struct hw_sequencer_funcs { void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx); void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); + void (*clear_status_bits)(struct dc *dc, unsigned int mask); void (*wait_for_mpcc_disconnect)(struct dc *dc, struct resource_pool *res_pool, diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index f8dbfa5b89f2..7fd78a696800 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -41,6 +41,7 @@ enum as_signal_type { AS_SIGNAL_TYPE_LVDS, AS_SIGNAL_TYPE_DISPLAY_PORT, AS_SIGNAL_TYPE_GPU_PLL, + AS_SIGNAL_TYPE_XGMI, AS_SIGNAL_TYPE_UNKNOWN }; diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index 89627133e188..f5bd869d4320 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -42,6 +42,7 @@ enum dce_version { DCE_VERSION_11_2, DCE_VERSION_11_22, DCE_VERSION_12_0, + DCE_VERSION_12_1, DCE_VERSION_MAX, DCN_VERSION_1_0, #if defined(CONFIG_DRM_AMD_DC_DCN1_01) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 7480f072c375..479b77c2e89e 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -813,20 +813,26 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, const struct hw_x_point *coord_x = coordinate_x; struct fixed31_32 scaledX = dc_fixpt_zero; struct fixed31_32 scaledX1 = dc_fixpt_zero; - struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display); - struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); - struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content); - struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); + struct fixed31_32 max_display; + struct fixed31_32 min_display; + struct fixed31_32 max_content; + struct fixed31_32 min_content; struct fixed31_32 clip = dc_fixpt_one; struct fixed31_32 output; bool use_eetf = false; bool is_clipped = false; - struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); + struct fixed31_32 sdr_white_level; if (fs_params == NULL || fs_params->max_content == 0 || fs_params->max_display == 0) return false; + max_display = dc_fixpt_from_int(fs_params->max_display); + min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); + max_content = dc_fixpt_from_int(fs_params->max_content); + min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); + sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); + if (fs_params->min_display > 1000) // cap at 0.1 at the bottom min_display = dc_fixpt_from_fraction(1, 10); if (fs_params->max_display < 100) // cap at 100 at the top @@ -1755,7 +1761,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, struct pwl_float_data *rgb_user = NULL; struct pwl_float_data_ex *curve = NULL; - struct gamma_pixel *axix_x = NULL; + struct gamma_pixel *axis_x = NULL; struct pixel_gamma_point *coeff = NULL; enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; bool ret = false; @@ -1781,10 +1787,10 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, GFP_KERNEL); if (!curve) goto curve_alloc_fail; - axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x), + axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x), GFP_KERNEL); - if (!axix_x) - goto axix_x_alloc_fail; + if (!axis_x) + goto axis_x_alloc_fail; coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), GFP_KERNEL); if (!coeff) @@ -1797,7 +1803,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, tf = input_tf->tf; build_evenly_distributed_points( - axix_x, + axis_x, ramp->num_entries, dividers); @@ -1822,7 +1828,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, tf_pts->x_point_at_y1_blue = 1; map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axix_x, curve, + coordinates_x, axis_x, curve, MAX_HW_POINTS, tf_pts, mapUserRamp && ramp->type != GAMMA_CUSTOM); if (ramp->type == GAMMA_CUSTOM) @@ -1832,8 +1838,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, kvfree(coeff); coeff_alloc_fail: - kvfree(axix_x); -axix_x_alloc_fail: + kvfree(axis_x); +axis_x_alloc_fail: kvfree(curve); curve_alloc_fail: kvfree(rgb_user); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 620a171620ee..1544ed3f1747 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -608,12 +608,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr, static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf, struct dc_info_packet *infopacket) { - if (app_tf != transfer_func_unknown) { + if (app_tf != TRANSFER_FUNC_UNKNOWN) { infopacket->valid = true; infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active] - if (app_tf == transfer_func_gamma_22) { + if (app_tf == TRANSFER_FUNC_GAMMA_22) { infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active] } } @@ -688,11 +688,11 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, return; switch (packet_type) { - case packet_type_fs2: + case PACKET_TYPE_FS2: build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket); break; - case packet_type_vrr: - case packet_type_fs1: + case PACKET_TYPE_VRR: + case PACKET_TYPE_FS1: default: build_vrr_infopacket_v1(stream->signal, vrr, infopacket); } diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index 786b34380f85..5b1c9a4c7643 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -26,15 +26,13 @@ #ifndef MOD_INFO_PACKET_H_ #define MOD_INFO_PACKET_H_ -struct info_packet_inputs { - const struct dc_stream_state *pStream; -}; +#include "mod_shared.h" -struct info_packets { - struct dc_info_packet *pVscInfoPacket; -}; +//Forward Declarations +struct dc_stream_state; +struct dc_info_packet; -void mod_build_infopackets(struct info_packet_inputs *inputs, - struct info_packets *info_packets); +void mod_build_vsc_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet); #endif diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h index 238c431ae483..1bd02c0ac30c 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h @@ -23,27 +23,26 @@ * */ - #ifndef MOD_SHARED_H_ #define MOD_SHARED_H_ enum color_transfer_func { - transfer_func_unknown, - transfer_func_srgb, - transfer_func_bt709, - transfer_func_pq2084, - transfer_func_pq2084_interim, - transfer_func_linear_0_1, - transfer_func_linear_0_125, - transfer_func_dolbyvision, - transfer_func_gamma_22, - transfer_func_gamma_26 + TRANSFER_FUNC_UNKNOWN, + TRANSFER_FUNC_SRGB, + TRANSFER_FUNC_BT709, + TRANSFER_FUNC_PQ2084, + TRANSFER_FUNC_PQ2084_INTERIM, + TRANSFER_FUNC_LINEAR_0_1, + TRANSFER_FUNC_LINEAR_0_125, + TRANSFER_FUNC_GAMMA_22, + TRANSFER_FUNC_GAMMA_26 }; enum vrr_packet_type { - packet_type_vrr, - packet_type_fs1, - packet_type_fs2 + PACKET_TYPE_VRR, + PACKET_TYPE_FS1, + PACKET_TYPE_FS2 }; + #endif /* MOD_SHARED_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index ff8bfb9b43b0..db06fab2ad5c 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -25,6 +25,10 @@ #include "mod_info_packet.h" #include "core_types.h" +#include "dc_types.h" +#include "mod_shared.h" + +#define HDMI_INFOFRAME_TYPE_VENDOR 0x81 enum ColorimetryRGBDP { ColorimetryRGB_DP_sRGB = 0, @@ -41,7 +45,7 @@ enum ColorimetryYCCDP { ColorimetryYCC_DP_ITU2020YCbCr = 7, }; -static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, +void mod_build_vsc_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet) { unsigned int vscPacketRevision = 0; @@ -159,7 +163,7 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, * DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4 * (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate * the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and - * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).) + * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").) */ if (vscPacketRevision == 0x5) { /* Secondary-data Packet ID = 0 */ @@ -320,10 +324,3 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } -void mod_build_infopackets(struct info_packet_inputs *inputs, - struct info_packets *info_packets) -{ - if (info_packets->pVscInfoPacket != NULL) - mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket); -} - diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile b/drivers/gpu/drm/amd/display/modules/power/Makefile new file mode 100644 index 000000000000..87851f892a52 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/power/Makefile @@ -0,0 +1,31 @@ +# +# Copyright 2017 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'power' sub-module of DAL. +# + +MOD_POWER = power_helpers.o + +AMD_DAL_MOD_POWER = $(addprefix $(AMDDALPATH)/modules/power/,$(MOD_POWER)) +#$(info ************ DAL POWER MODULE MAKEFILE ************) + +AMD_DISPLAY_FILES += $(AMD_DAL_MOD_POWER)
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c new file mode 100644 index 000000000000..00f63b7dd32f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -0,0 +1,326 @@ +/* Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "power_helpers.h" +#include "dc/inc/hw/dmcu.h" + +#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b)) + +/* Possible Min Reduction config from least aggressive to most aggressive + * 0 1 2 3 4 5 6 7 8 9 10 11 12 + * 100 98.0 94.1 94.1 85.1 80.3 75.3 69.4 60.0 57.6 50.2 49.8 40.0 % + */ +static const unsigned char min_reduction_table[13] = { +0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66}; + +/* Possible Max Reduction configs from least aggressive to most aggressive + * 0 1 2 3 4 5 6 7 8 9 10 11 12 + * 96.1 89.8 85.1 80.3 69.4 64.7 64.7 50.2 39.6 30.2 30.2 30.2 19.6 % + */ +static const unsigned char max_reduction_table[13] = { +0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32}; + +/* Predefined ABM configuration sets. We may have different configuration sets + * in order to satisfy different power/quality requirements. + */ +static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = { +/* ABM Level 1, ABM Level 2, ABM Level 3, ABM Level 4 */ +{ 2, 5, 7, 8 }, /* Default - Medium aggressiveness */ +{ 2, 5, 8, 11 }, /* Alt #1 - Increased aggressiveness */ +{ 0, 2, 4, 8 }, /* Alt #2 - Minimal aggressiveness */ +{ 3, 6, 10, 12 }, /* Alt #3 - Super aggressiveness */ +}; + +#define NUM_AMBI_LEVEL 5 +#define NUM_AGGR_LEVEL 4 +#define NUM_POWER_FN_SEGS 8 +#define NUM_BL_CURVE_SEGS 16 + +/* NOTE: iRAM is 256B in size */ +struct iram_table_v_2 { + /* flags */ + uint16_t flags; /* 0x00 U16 */ + + /* parameters for ABM2.0 algorithm */ + uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */ + uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */ + uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */ + uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */ + uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x52 U2.6 */ + uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x66 U2.6 */ + uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x7a U0.8 */ + uint8_t deviation_gain; /* 0x7f U0.8 */ + + /* parameters for crgb conversion */ + uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ + uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ + uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ + + /* parameters for custom curve */ + /* thresholds for brightness --> backlight */ + uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ + /* offsets for brightness --> backlight */ + uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ + + /* For reading PSR State directly from IRAM */ + uint8_t psr_state; /* 0xf0 */ + uint8_t dmcu_interface_version; /* 0xf1 */ + uint8_t dmcu_date_version_year_b0; /* 0xf2 */ + uint8_t dmcu_date_version_year_b1; /* 0xf3 */ + uint8_t dmcu_date_version_month; /* 0xf4 */ + uint8_t dmcu_date_version_day; /* 0xf5 */ + uint8_t dmcu_state; /* 0xf6 */ + + uint16_t blRampReduction; /* 0xf7 */ + uint16_t blRampStart; /* 0xf9 */ + uint8_t dummy5; /* 0xfb */ + uint8_t dummy6; /* 0xfc */ + uint8_t dummy7; /* 0xfd */ + uint8_t dummy8; /* 0xfe */ + uint8_t dummy9; /* 0xff */ +}; + +static uint16_t backlight_8_to_16(unsigned int backlight_8bit) +{ + return (uint16_t)(backlight_8bit * 0x101); +} + +static void fill_backlight_transform_table(struct dmcu_iram_parameters params, + struct iram_table_v_2 *table) +{ + unsigned int i; + unsigned int num_entries = NUM_BL_CURVE_SEGS; + unsigned int query_input_8bit; + unsigned int query_output_8bit; + unsigned int lut_index; + + table->backlight_thresholds[0] = 0; + table->backlight_offsets[0] = params.backlight_lut_array[0]; + table->backlight_thresholds[num_entries-1] = 0xFFFF; + table->backlight_offsets[num_entries-1] = + params.backlight_lut_array[params.backlight_lut_array_size - 1]; + + /* Setup all brightness levels between 0% and 100% exclusive + * Fills brightness-to-backlight transform table. Backlight custom curve + * describes transform from brightness to backlight. It will be defined + * as set of thresholds and set of offsets, together, implying + * extrapolation of custom curve into 16 uniformly spanned linear + * segments. Each threshold/offset represented by 16 bit entry in + * format U4.10. + */ + for (i = 1; i+1 < num_entries; i++) { + query_input_8bit = DIV_ROUNDUP((i * 256), num_entries); + + lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1); + ASSERT(lut_index < params.backlight_lut_array_size); + query_output_8bit = params.backlight_lut_array[lut_index] >> 8; + + table->backlight_thresholds[i] = + backlight_8_to_16(query_input_8bit); + table->backlight_offsets[i] = + backlight_8_to_16(query_output_8bit); + } +} + +bool dmcu_load_iram(struct dmcu *dmcu, + struct dmcu_iram_parameters params) +{ + struct iram_table_v_2 ram_table; + unsigned int set = params.set; + + if (dmcu == NULL) + return false; + + if (!dmcu->funcs->is_dmcu_initialized(dmcu)) + return true; + + memset(&ram_table, 0, sizeof(ram_table)); + + ram_table.flags = 0x0; + ram_table.deviation_gain = 0xb3; + + ram_table.blRampReduction = + cpu_to_be16(params.backlight_ramping_reduction); + ram_table.blRampStart = + cpu_to_be16(params.backlight_ramping_start); + + ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]]; + ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]]; + ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]]; + ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]]; + ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]]; + ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]]; + ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]]; + ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]]; + ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]]; + ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]]; + + ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]]; + ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]]; + ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]]; + ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]]; + ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]]; + ram_table.max_reduction[0][1] = max_reduction_table[abm_config[set][1]]; + ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]]; + ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]]; + ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]]; + ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]]; + + ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]]; + ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]]; + ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]]; + ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]]; + ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]]; + ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]]; + ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]]; + ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]]; + ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]]; + ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]]; + + ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]]; + ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]]; + ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]]; + ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]]; + ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]]; + ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]]; + ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]]; + ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]]; + ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]]; + ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]]; + + ram_table.bright_pos_gain[0][0] = 0x20; + ram_table.bright_pos_gain[0][1] = 0x20; + ram_table.bright_pos_gain[0][2] = 0x20; + ram_table.bright_pos_gain[0][3] = 0x20; + ram_table.bright_pos_gain[1][0] = 0x20; + ram_table.bright_pos_gain[1][1] = 0x20; + ram_table.bright_pos_gain[1][2] = 0x20; + ram_table.bright_pos_gain[1][3] = 0x20; + ram_table.bright_pos_gain[2][0] = 0x20; + ram_table.bright_pos_gain[2][1] = 0x20; + ram_table.bright_pos_gain[2][2] = 0x20; + ram_table.bright_pos_gain[2][3] = 0x20; + ram_table.bright_pos_gain[3][0] = 0x20; + ram_table.bright_pos_gain[3][1] = 0x20; + ram_table.bright_pos_gain[3][2] = 0x20; + ram_table.bright_pos_gain[3][3] = 0x20; + ram_table.bright_pos_gain[4][0] = 0x20; + ram_table.bright_pos_gain[4][1] = 0x20; + ram_table.bright_pos_gain[4][2] = 0x20; + ram_table.bright_pos_gain[4][3] = 0x20; + ram_table.bright_neg_gain[0][1] = 0x00; + ram_table.bright_neg_gain[0][2] = 0x00; + ram_table.bright_neg_gain[0][3] = 0x00; + ram_table.bright_neg_gain[1][0] = 0x00; + ram_table.bright_neg_gain[1][1] = 0x00; + ram_table.bright_neg_gain[1][2] = 0x00; + ram_table.bright_neg_gain[1][3] = 0x00; + ram_table.bright_neg_gain[2][0] = 0x00; + ram_table.bright_neg_gain[2][1] = 0x00; + ram_table.bright_neg_gain[2][2] = 0x00; + ram_table.bright_neg_gain[2][3] = 0x00; + ram_table.bright_neg_gain[3][0] = 0x00; + ram_table.bright_neg_gain[3][1] = 0x00; + ram_table.bright_neg_gain[3][2] = 0x00; + ram_table.bright_neg_gain[3][3] = 0x00; + ram_table.bright_neg_gain[4][0] = 0x00; + ram_table.bright_neg_gain[4][1] = 0x00; + ram_table.bright_neg_gain[4][2] = 0x00; + ram_table.bright_neg_gain[4][3] = 0x00; + ram_table.dark_pos_gain[0][0] = 0x00; + ram_table.dark_pos_gain[0][1] = 0x00; + ram_table.dark_pos_gain[0][2] = 0x00; + ram_table.dark_pos_gain[0][3] = 0x00; + ram_table.dark_pos_gain[1][0] = 0x00; + ram_table.dark_pos_gain[1][1] = 0x00; + ram_table.dark_pos_gain[1][2] = 0x00; + ram_table.dark_pos_gain[1][3] = 0x00; + ram_table.dark_pos_gain[2][0] = 0x00; + ram_table.dark_pos_gain[2][1] = 0x00; + ram_table.dark_pos_gain[2][2] = 0x00; + ram_table.dark_pos_gain[2][3] = 0x00; + ram_table.dark_pos_gain[3][0] = 0x00; + ram_table.dark_pos_gain[3][1] = 0x00; + ram_table.dark_pos_gain[3][2] = 0x00; + ram_table.dark_pos_gain[3][3] = 0x00; + ram_table.dark_pos_gain[4][0] = 0x00; + ram_table.dark_pos_gain[4][1] = 0x00; + ram_table.dark_pos_gain[4][2] = 0x00; + ram_table.dark_pos_gain[4][3] = 0x00; + ram_table.dark_neg_gain[0][0] = 0x00; + ram_table.dark_neg_gain[0][1] = 0x00; + ram_table.dark_neg_gain[0][2] = 0x00; + ram_table.dark_neg_gain[0][3] = 0x00; + ram_table.dark_neg_gain[1][0] = 0x00; + ram_table.dark_neg_gain[1][1] = 0x00; + ram_table.dark_neg_gain[1][2] = 0x00; + ram_table.dark_neg_gain[1][3] = 0x00; + ram_table.dark_neg_gain[2][0] = 0x00; + ram_table.dark_neg_gain[2][1] = 0x00; + ram_table.dark_neg_gain[2][2] = 0x00; + ram_table.dark_neg_gain[2][3] = 0x00; + ram_table.dark_neg_gain[3][0] = 0x00; + ram_table.dark_neg_gain[3][1] = 0x00; + ram_table.dark_neg_gain[3][2] = 0x00; + ram_table.dark_neg_gain[3][3] = 0x00; + ram_table.dark_neg_gain[4][0] = 0x00; + ram_table.dark_neg_gain[4][1] = 0x00; + ram_table.dark_neg_gain[4][2] = 0x00; + ram_table.dark_neg_gain[4][3] = 0x00; + ram_table.iir_curve[0] = 0x65; + ram_table.iir_curve[1] = 0x65; + ram_table.iir_curve[2] = 0x65; + ram_table.iir_curve[3] = 0x65; + ram_table.iir_curve[4] = 0x65; + ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6); + ram_table.crgb_thresh[1] = cpu_to_be16(0x1648); + ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3); + ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41); + ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46); + ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21); + ram_table.crgb_thresh[6] = cpu_to_be16(0x2167); + ram_table.crgb_thresh[7] = cpu_to_be16(0x2384); + ram_table.crgb_offset[0] = cpu_to_be16(0x2999); + ram_table.crgb_offset[1] = cpu_to_be16(0x3999); + ram_table.crgb_offset[2] = cpu_to_be16(0x4666); + ram_table.crgb_offset[3] = cpu_to_be16(0x5999); + ram_table.crgb_offset[4] = cpu_to_be16(0x6333); + ram_table.crgb_offset[5] = cpu_to_be16(0x7800); + ram_table.crgb_offset[6] = cpu_to_be16(0x8c00); + ram_table.crgb_offset[7] = cpu_to_be16(0xa000); + ram_table.crgb_slope[0] = cpu_to_be16(0x3147); + ram_table.crgb_slope[1] = cpu_to_be16(0x2978); + ram_table.crgb_slope[2] = cpu_to_be16(0x23a2); + ram_table.crgb_slope[3] = cpu_to_be16(0x1f55); + ram_table.crgb_slope[4] = cpu_to_be16(0x1c63); + ram_table.crgb_slope[5] = cpu_to_be16(0x1a0f); + ram_table.crgb_slope[6] = cpu_to_be16(0x178d); + ram_table.crgb_slope[7] = cpu_to_be16(0x15ab); + + fill_backlight_transform_table( + params, &ram_table); + + return dmcu->funcs->load_iram( + dmcu, 0, (char *)(&ram_table), sizeof(ram_table)); +} diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h new file mode 100644 index 000000000000..da5df00fedce --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -0,0 +1,47 @@ +/* Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef MODULES_POWER_POWER_HELPERS_H_ +#define MODULES_POWER_POWER_HELPERS_H_ + +#include "dc/inc/hw/dmcu.h" + + +enum abm_defines { + abm_defines_max_level = 4, + abm_defines_max_config = 4, +}; + +struct dmcu_iram_parameters { + unsigned int *backlight_lut_array; + unsigned int backlight_lut_array_size; + unsigned int backlight_ramping_reduction; + unsigned int backlight_ramping_start; + unsigned int set; +}; + +bool dmcu_load_iram(struct dmcu *dmcu, + struct dmcu_iram_parameters params); + +#endif /* MODULES_POWER_POWER_HELPERS_H_ */ diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h index 9b9699fc433f..c72cbfe8f684 100644 --- a/drivers/gpu/drm/amd/include/amd_acpi.h +++ b/drivers/gpu/drm/amd/include/amd_acpi.h @@ -52,6 +52,30 @@ struct atif_sbios_requests { u8 backlight_level; /* panel backlight level (0-255) */ } __packed; +struct atif_qbtc_arguments { + u16 size; /* structure size in bytes (includes size field) */ + u8 requested_display; /* which display is requested */ +} __packed; + +#define ATIF_QBTC_MAX_DATA_POINTS 99 + +struct atif_qbtc_data_point { + u8 luminance; /* luminance in percent */ + u8 ipnut_signal; /* input signal in range 0-255 */ +} __packed; + +struct atif_qbtc_output { + u16 size; /* structure size in bytes (includes size field) */ + u16 flags; /* all zeroes */ + u8 error_code; /* error code */ + u8 ac_level; /* default brightness on AC power */ + u8 dc_level; /* default brightness on DC power */ + u8 min_input_signal; /* max input signal in range 0-255 */ + u8 max_input_signal; /* min input signal in range 0-255 */ + u8 number_of_points; /* number of data points */ + struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS]; +} __packed; + #define ATIF_NOTIFY_MASK 0x3 #define ATIF_NOTIFY_NONE 0 #define ATIF_NOTIFY_81 1 @@ -126,26 +150,18 @@ struct atcs_pref_req_output { * DWORD - supported functions bit vector */ /* Notifications mask */ -# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0) -# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1) # define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2) # define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3) # define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4) -# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5) -# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6) # define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7) # define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8) +# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED (1 << 12) /* supported functions vector */ # define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0) # define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1) -# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2) -# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3) -# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4) -# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5) -# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6) -# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7) # define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12) -# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14) +# define ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED (1 << 15) +# define ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED (1 << 16) # define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED (1 << 20) #define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1 /* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS @@ -170,6 +186,10 @@ struct atcs_pref_req_output { * n (0xd0-0xd9) is specified in notify command code. * bit 2: * 1 - lid changes not reported though int10 + * bit 3: + * 1 - system bios controls overclocking + * bit 4: + * 1 - enable overclocking */ #define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2 /* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS @@ -177,28 +197,23 @@ struct atcs_pref_req_output { * OUTPUT: * WORD - structure size in bytes (includes size field) * DWORD - pending sbios requests - * BYTE - panel expansion mode + * BYTE - reserved (all zeroes) * BYTE - thermal state: target gfx controller * BYTE - thermal state: state id (0: exit state, non-0: state) * BYTE - forced power state: target gfx controller - * BYTE - forced power state: state id + * BYTE - forced power state: state id (0: forced state, non-0: state) * BYTE - system power source * BYTE - panel backlight level (0-255) + * BYTE - GPU package power limit: target gfx controller + * DWORD - GPU package power limit: value (24:8 fractional format, Watts) */ /* pending sbios requests */ -# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0) -# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1) # define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2) # define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3) # define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4) -# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5) -# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6) # define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7) # define ATIF_DGPU_DISPLAY_EVENT (1 << 8) -/* panel expansion mode */ -# define ATIF_PANEL_EXPANSION_DISABLE 0 -# define ATIF_PANEL_EXPANSION_FULL 1 -# define ATIF_PANEL_EXPANSION_ASPECT 2 +# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST (1 << 12) /* target gfx controller */ # define ATIF_TARGET_GFX_SINGLE 0 # define ATIF_TARGET_GFX_PX_IGPU 1 @@ -208,76 +223,6 @@ struct atcs_pref_req_output { # define ATIF_POWER_SOURCE_DC 2 # define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3 # define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4 -#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3 -/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - selected displays - * WORD - connected displays - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * WORD - selected displays - */ -# define ATIF_LCD1 (1 << 0) -# define ATIF_CRT1 (1 << 1) -# define ATIF_TV (1 << 2) -# define ATIF_DFP1 (1 << 3) -# define ATIF_CRT2 (1 << 4) -# define ATIF_LCD2 (1 << 5) -# define ATIF_DFP2 (1 << 7) -# define ATIF_CV (1 << 8) -# define ATIF_DFP3 (1 << 9) -# define ATIF_DFP4 (1 << 10) -# define ATIF_DFP5 (1 << 11) -# define ATIF_DFP6 (1 << 12) -#define ATIF_FUNCTION_GET_LID_STATE 0x4 -/* ARG0: ATIF_FUNCTION_GET_LID_STATE - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - lid state (0: open, 1: closed) - * - * GET_LID_STATE only works at boot and resume, for general lid - * status, use the kernel provided status - */ -#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5 -/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - 0 - * BYTE - TV standard - */ -# define ATIF_TV_STD_NTSC 0 -# define ATIF_TV_STD_PAL 1 -# define ATIF_TV_STD_PALM 2 -# define ATIF_TV_STD_PAL60 3 -# define ATIF_TV_STD_NTSCJ 4 -# define ATIF_TV_STD_PALCN 5 -# define ATIF_TV_STD_PALN 6 -# define ATIF_TV_STD_SCART_RGB 9 -#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6 -/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS - * ARG1: - * WORD - structure size in bytes (includes size field) - * BYTE - 0 - * BYTE - TV standard - * OUTPUT: none - */ -#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7 -/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - panel expansion mode - */ -#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8 -/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS - * ARG1: - * WORD - structure size in bytes (includes size field) - * BYTE - panel expansion mode - * OUTPUT: none - */ #define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD /* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION * ARG1: @@ -286,21 +231,43 @@ struct atcs_pref_req_output { * BYTE - current temperature (degress Celsius) * OUTPUT: none */ -#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF -/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES - * ARG1: none +#define ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS 0x10 +/* ARG0: ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS + * ARG1: + * WORD - structure size in bytes (includes size field) + * BYTE - requested display * OUTPUT: - * WORD - number of gfx devices - * WORD - device structure size in bytes (excludes device size field) - * DWORD - flags \ - * WORD - bus number } repeated structure - * WORD - device number / + * WORD - structure size in bytes (includes size field) + * WORD - flags (currently all 16 bits are reserved) + * BYTE - error code (on failure, disregard all below fields) + * BYTE - AC level (default brightness in percent when machine has full power) + * BYTE - DC level (default brightness in percent when machine is on battery) + * BYTE - min input signal, in range 0-255, corresponding to 0% backlight + * BYTE - max input signal, in range 0-255, corresponding to 100% backlight + * BYTE - number of reported data points + * BYTE - luminance level in percent \ repeated structure + * BYTE - input signal in range 0-255 / does not have entries for 0% and 100% + */ +/* requested display */ +# define ATIF_QBTC_REQUEST_LCD1 0 +# define ATIF_QBTC_REQUEST_CRT1 1 +# define ATIF_QBTC_REQUEST_DFP1 3 +# define ATIF_QBTC_REQUEST_CRT2 4 +# define ATIF_QBTC_REQUEST_LCD2 5 +# define ATIF_QBTC_REQUEST_DFP2 7 +# define ATIF_QBTC_REQUEST_DFP3 9 +# define ATIF_QBTC_REQUEST_DFP4 10 +# define ATIF_QBTC_REQUEST_DFP5 11 +# define ATIF_QBTC_REQUEST_DFP6 12 +/* error code */ +# define ATIF_QBTC_ERROR_CODE_SUCCESS 0 +# define ATIF_QBTC_ERROR_CODE_FAILURE 1 +# define ATIF_QBTC_ERROR_CODE_DEVICE_NOT_SUPPORTED 2 +#define ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION 0x11 +/* ARG0: ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION + * ARG1: none + * OUTPUT: none */ -/* flags */ -# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0) -# define ATIF_XGP_PORT (1 << 1) -# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2) -# define ATIF_XGP_PORT_IN_DOCK (1 << 3) #define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION 0x15 /* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION * ARG1: none diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h new file mode 100644 index 000000000000..8f515875a34d --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _mmhub_9_4_0_OFFSET_HEADER +#define _mmhub_9_4_0_OFFSET_HEADER + + +// addressBlock: mmhub_utcl2_vmsharedpfdec +// base address: 0x6a040 +#define mmMC_VM_XGMI_LFB_CNTL 0x0823 +#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0 +#define mmMC_VM_XGMI_LFB_SIZE 0x0824 +#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h new file mode 100644 index 000000000000..0a6b072d191e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _mmhub_9_4_0_SH_MASK_HEADER +#define _mmhub_9_4_0_SH_MASK_HEADER + + +// addressBlock: mmhub_utcl2_vmsharedpfdec +//MC_VM_XGMI_LFB_CNTL +#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 +#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L +//MC_VM_XGMI_LFB_SIZE +#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 +#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 58ac0b90c310..8154d67388cc 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -188,8 +188,8 @@ struct tile_config { */ #define ALLOC_MEM_FLAGS_VRAM (1 << 0) #define ALLOC_MEM_FLAGS_GTT (1 << 1) -#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */ -#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */ +#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) +#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* * Allocation flags attributes/access options. diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 980e696989b1..1479ea1dc3e7 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -276,6 +276,10 @@ struct amd_pm_funcs { struct amd_pp_simple_clock_info *clocks); int (*notify_smu_enable_pwe)(void *handle); int (*enable_mgpu_fan_boost)(void *handle); + int (*set_active_display_count)(void *handle, uint32_t count); + int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock); + int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock); + int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock); }; #endif diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index d6aa1d414320..9bc27f468d5b 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -300,7 +300,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) return -EINVAL; if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } @@ -387,7 +387,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low) return 0; if (hwmgr->hwmgr_func->get_sclk == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -405,7 +405,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low) return 0; if (hwmgr->hwmgr_func->get_mclk == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -422,7 +422,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate) return; if (hwmgr->hwmgr_func->powergate_vce == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return; } mutex_lock(&hwmgr->smu_lock); @@ -438,7 +438,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate) return; if (hwmgr->hwmgr_func->powergate_uvd == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return; } mutex_lock(&hwmgr->smu_lock); @@ -505,7 +505,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) return; if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return; } mutex_lock(&hwmgr->smu_lock); @@ -522,7 +522,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle) return 0; if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -540,7 +540,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) return -EINVAL; if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -558,7 +558,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) return -EINVAL; if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } @@ -594,7 +594,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) return -EINVAL; if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -720,12 +720,12 @@ static int pp_dpm_force_clock_level(void *handle, return -EINVAL; if (hwmgr->hwmgr_func->force_clock_level == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { - pr_info("force clock level is for dpm manual mode only.\n"); + pr_debug("force clock level is for dpm manual mode only.\n"); return -EINVAL; } @@ -745,7 +745,7 @@ static int pp_dpm_print_clock_levels(void *handle, return -EINVAL; if (hwmgr->hwmgr_func->print_clock_levels == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -763,7 +763,7 @@ static int pp_dpm_get_sclk_od(void *handle) return -EINVAL; if (hwmgr->hwmgr_func->get_sclk_od == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -781,7 +781,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) return -EINVAL; if (hwmgr->hwmgr_func->set_sclk_od == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } @@ -800,7 +800,7 @@ static int pp_dpm_get_mclk_od(void *handle) return -EINVAL; if (hwmgr->hwmgr_func->get_mclk_od == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -818,7 +818,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) return -EINVAL; if (hwmgr->hwmgr_func->set_mclk_od == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } mutex_lock(&hwmgr->smu_lock); @@ -878,7 +878,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf) return -EINVAL; if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return snprintf(buf, PAGE_SIZE, "\n"); } @@ -894,12 +894,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) return ret; if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return ret; } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { - pr_info("power profile setting is for manual dpm mode only.\n"); + pr_debug("power profile setting is for manual dpm mode only.\n"); return ret; } @@ -917,7 +917,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3 return -EINVAL; if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return -EINVAL; } @@ -935,7 +935,7 @@ static int pp_dpm_switch_power_profile(void *handle, return -EINVAL; if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return -EINVAL; } @@ -972,7 +972,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit) return -EINVAL; if (hwmgr->hwmgr_func->set_power_limit == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return -EINVAL; } @@ -1072,7 +1072,7 @@ static int pp_get_current_clocks(void *handle, &hw_clocks, PHM_PerformanceLevelDesignation_Activity); if (ret) { - pr_info("Error in phm_get_clock_info \n"); + pr_debug("Error in phm_get_clock_info \n"); mutex_unlock(&hwmgr->smu_lock); return -EINVAL; } @@ -1212,7 +1212,7 @@ static int pp_dpm_powergate_mmhub(void *handle) return -EINVAL; if (hwmgr->hwmgr_func->powergate_mmhub == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } @@ -1227,7 +1227,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate) return 0; if (hwmgr->hwmgr_func->powergate_gfx == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } @@ -1242,7 +1242,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate) return; if (hwmgr->hwmgr_func->powergate_acp == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return; } @@ -1257,7 +1257,7 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate) return; if (hwmgr->hwmgr_func->powergate_sdma == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return; } @@ -1303,7 +1303,7 @@ static int pp_notify_smu_enable_pwe(void *handle) return -EINVAL; if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_info_ratelimited("%s was not implemented.\n", __func__); return -EINVAL;; } @@ -1332,6 +1332,78 @@ static int pp_enable_mgpu_fan_boost(void *handle) return 0; } +static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) { + pr_debug("%s was not implemented.\n", __func__); + return -EINVAL;; + } + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) { + pr_debug("%s was not implemented.\n", __func__); + return -EINVAL;; + } + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) { + pr_debug("%s was not implemented.\n", __func__); + return -EINVAL;; + } + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_set_active_display_count(void *handle, uint32_t count) +{ + struct pp_hwmgr *hwmgr = handle; + int ret = 0; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + mutex_lock(&hwmgr->smu_lock); + ret = phm_set_active_display_count(hwmgr, count); + mutex_unlock(&hwmgr->smu_lock); + + return ret; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1378,4 +1450,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks, .notify_smu_enable_pwe = pp_notify_smu_enable_pwe, .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost, + .set_active_display_count = pp_set_active_display_count, + .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk, + .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq, + .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 85119c2bdcc8..1f92a9f4c9e3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) PHM_FUNC_CHECK(hwmgr); adev = hwmgr->adev; - if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) { + /* Skip for suspend/resume case */ + if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev) + && adev->in_suspend) { pr_info("dpm has been enabled\n"); return 0; } @@ -286,8 +288,8 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, if (display_config == NULL) return -EINVAL; - if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk) - hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk); + if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk) + hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk); for (index = 0; index < display_config->num_path_including_non_display; index++) { if (display_config->displays[index].controller_id != 0) @@ -478,3 +480,44 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr) return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr); } + +int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) +{ + PHM_FUNC_CHECK(hwmgr); + + if (!hwmgr->hwmgr_func->set_active_display_count) + return -EINVAL; + + return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count); +} + +int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) +{ + PHM_FUNC_CHECK(hwmgr); + + if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk) + return -EINVAL; + + return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock); +} + +int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock) +{ + PHM_FUNC_CHECK(hwmgr); + + if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq) + return -EINVAL; + + return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock); +} + +int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock) +{ + PHM_FUNC_CHECK(hwmgr); + + if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq) + return -EINVAL; + + return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock); +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 47ac92369739..0173d0480024 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: + ret = phm_pre_display_configuration_changed(hwmgr); + if (ret) + return ret; ret = phm_set_cpu_power_state(hwmgr); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 91ffb7bc4ee7..56437866d120 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, if (skip) return 0; - phm_pre_display_configuration_changed(hwmgr); - phm_display_configuration_changed(hwmgr); if (hwmgr->ps) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index dd18cb710391..f95c5f50eb0f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -216,12 +216,12 @@ static inline uint32_t convert_10k_to_mhz(uint32_t clock) return (clock + 99) / 100; } -static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) +static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); if (smu10_data->need_min_deep_sleep_dcefclk && - smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) { + smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) { smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, @@ -230,6 +230,34 @@ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) return 0; } +static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock) +{ + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + + if (smu10_data->dcf_actual_hard_min_freq && + smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) { + smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinDcefclkByFreq, + smu10_data->dcf_actual_hard_min_freq); + } + return 0; +} + +static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock) +{ + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + + if (smu10_data->f_actual_hard_min_freq && + smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) { + smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinFclkByFreq, + smu10_data->f_actual_hard_min_freq); + } + return 0; +} + static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); @@ -1206,7 +1234,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .get_max_high_clocks = smu10_get_max_high_clocks, .read_sensor = smu10_read_sensor, .set_active_display_count = smu10_set_active_display_count, - .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk, + .set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk, .dynamic_state_management_enable = smu10_enable_dpm_tasks, .power_off_asic = smu10_power_off_asic, .asic_setup = smu10_setup_asic_task, @@ -1217,6 +1245,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .display_clock_voltage_request = smu10_display_clock_voltage_request, .powergate_gfx = smu10_gfx_off_control, .powergate_sdma = smu10_powergate_sdma, + .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq, + .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 88f6b35ea6fe..d91390459326 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -269,7 +269,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) hwmgr->dyn_state.mvdd_dependency_on_mclk); PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 MVDD table from dependancy table.", + "Failed to retrieve SVI2 MVDD table from dependency table.", return result;); } @@ -288,7 +288,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table), hwmgr->dyn_state.vddci_dependency_on_mclk); PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDCI table from dependancy table.", + "Failed to retrieve SVI2 VDDCI table from dependency table.", return result); } @@ -317,7 +317,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) table_info->vddc_lookup_table); PP_ASSERT_WITH_CODE((0 == result), - "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;); + "Failed to retrieve SVI2 VDDC table from dependency table.", return result;); } tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC); @@ -2859,7 +2859,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: - switch_limit_us = data->is_memory_gddr5 ? 190 : 150; + if (hwmgr->is_kicker) + switch_limit_us = data->is_memory_gddr5 ? 450 : 150; + else + switch_limit_us = data->is_memory_gddr5 ? 190 : 150; break; case CHIP_VEGAM: switch_limit_us = 30; @@ -3589,8 +3592,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons } if (i >= sclk_table->count) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - sclk_table->dpm_levels[i-1].value = sclk; + if (sclk > sclk_table->dpm_levels[i-1].value) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + sclk_table->dpm_levels[i-1].value = sclk; + } } else { /* TODO: Check SCLK in DAL's minimum clocks * in case DeepSleep divider update is required. @@ -3607,8 +3612,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons } if (i >= mclk_table->count) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - mclk_table->dpm_levels[i-1].value = mclk; + if (mclk > mclk_table->dpm_levels[i-1].value) { + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + mclk_table->dpm_levels[i-1].value = mclk; + } } if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) @@ -4219,9 +4226,17 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) if (tmp & (1 << 23)) { data->mem_latency_high = MEM_LATENCY_HIGH; data->mem_latency_low = MEM_LATENCY_LOW; + if ((hwmgr->chip_id == CHIP_POLARIS10) || + (hwmgr->chip_id == CHIP_POLARIS11) || + (hwmgr->chip_id == CHIP_POLARIS12)) + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC); } else { data->mem_latency_high = 330; data->mem_latency_low = 330; + if ((hwmgr->chip_id == CHIP_POLARIS10) || + (hwmgr->chip_id == CHIP_POLARIS11) || + (hwmgr->chip_id == CHIP_POLARIS12)) + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC); } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index fef111ddb736..553a203ac47c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -1228,17 +1228,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) { - if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { - smu8_nbdpm_pstate_enable_disable(hwmgr, true, true); + if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); - } return 0; } static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) { if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { - smu8_nbdpm_pstate_enable_disable(hwmgr, false, true); return smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_UVDPowerON, @@ -1995,6 +1992,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = { .power_state_set = smu8_set_power_state_tasks, .dynamic_state_management_disable = smu8_disable_dpm_tasks, .notify_cac_buffer_info = smu8_notify_cac_buffer_info, + .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable, .get_thermal_temperature_range = smu8_get_thermal_temperature_range, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index e2bc6e0c229f..79c86247d0ac 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co } if (i >= sclk_table->count) { - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - sclk_table->dpm_levels[i-1].value = sclk; + if (sclk > sclk_table->dpm_levels[i-1].value) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + sclk_table->dpm_levels[i-1].value = sclk; + } } for (i = 0; i < mclk_table->count; i++) { @@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co } if (i >= mclk_table->count) { - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - mclk_table->dpm_levels[i-1].value = mclk; + if (mclk > mclk_table->dpm_levels[i-1].value) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + mclk_table->dpm_levels[i-1].value = mclk; + } } if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 2679d1240fa1..26154f9b2178 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -49,6 +49,10 @@ #include "soc15_common.h" #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" +#include "nbio/nbio_7_4_sh_mask.h" + +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) { @@ -130,7 +134,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.disable_auto_wattman = 1; data->registry_data.auto_wattman_debug = 0; data->registry_data.auto_wattman_sample_period = 100; - data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD; + data->registry_data.fclk_gfxclk_ratio = 0; data->registry_data.auto_wattman_threshold = 50; data->registry_data.gfxoff_controlled_by_driver = 1; data->gfxoff_allowed = false; @@ -1660,14 +1664,15 @@ static uint32_t vega20_find_highest_dpm_level( return i; } -static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) +static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); uint32_t min_freq; int ret = 0; - if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + if (data->smu_features[GNLD_DPM_GFXCLK].enabled && + (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, @@ -1676,7 +1681,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_UCLK].enabled) { + if (data->smu_features[GNLD_DPM_UCLK].enabled && + (feature_mask & FEATURE_DPM_UCLK_MASK)) { min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, @@ -1692,7 +1698,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_UVD].enabled) { + if (data->smu_features[GNLD_DPM_UVD].enabled && + (feature_mask & FEATURE_DPM_UVD_MASK)) { min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -1710,7 +1717,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_VCE].enabled) { + if (data->smu_features[GNLD_DPM_VCE].enabled && + (feature_mask & FEATURE_DPM_VCE_MASK)) { min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -1720,7 +1728,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + if (data->smu_features[GNLD_DPM_SOCCLK].enabled && + (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -1733,14 +1742,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) return ret; } -static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) +static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); uint32_t max_freq; int ret = 0; - if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + if (data->smu_features[GNLD_DPM_GFXCLK].enabled && + (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -1750,7 +1760,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_UCLK].enabled) { + if (data->smu_features[GNLD_DPM_UCLK].enabled && + (feature_mask & FEATURE_DPM_UCLK_MASK)) { max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -1760,7 +1771,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_UVD].enabled) { + if (data->smu_features[GNLD_DPM_UVD].enabled && + (feature_mask & FEATURE_DPM_UVD_MASK)) { max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -1777,7 +1789,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_VCE].enabled) { + if (data->smu_features[GNLD_DPM_VCE].enabled && + (feature_mask & FEATURE_DPM_VCE_MASK)) { max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -1787,7 +1800,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) return ret); } - if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + if (data->smu_features[GNLD_DPM_SOCCLK].enabled && + (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( @@ -2126,12 +2140,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) data->dpm_table.mem_table.dpm_state.soft_max_level = data->dpm_table.mem_table.dpm_levels[soft_level].value; - ret = vega20_upload_dpm_min_level(hwmgr); + ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr); + ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2158,12 +2172,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) data->dpm_table.mem_table.dpm_state.soft_max_level = data->dpm_table.mem_table.dpm_levels[soft_level].value; - ret = vega20_upload_dpm_min_level(hwmgr); + ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr); + ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2176,12 +2190,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { int ret = 0; - ret = vega20_upload_dpm_min_level(hwmgr); + ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Bootup Levels!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr); + ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Max Levels!", return ret); @@ -2239,12 +2253,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, data->dpm_table.gfx_table.dpm_state.soft_max_level = data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; - ret = vega20_upload_dpm_min_level(hwmgr); + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to lowest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2259,12 +2273,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, data->dpm_table.mem_table.dpm_state.soft_max_level = data->dpm_table.mem_table.dpm_levels[soft_max_level].value; - ret = vega20_upload_dpm_min_level(hwmgr); + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to lowest!", return ret); - ret = vega20_upload_dpm_max_level(hwmgr); + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK); PP_ASSERT_WITH_CODE(!ret, "Failed to upload dpm max level to highest!", return ret); @@ -2272,6 +2286,18 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, break; case PP_PCIE: + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + if (soft_min_level >= NUM_LINK_LEVELS || + soft_max_level >= NUM_LINK_LEVELS) + return -EINVAL; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level); + PP_ASSERT_WITH_CODE(!ret, + "Failed to set min link dpm level!", + return ret); + break; default: @@ -2748,9 +2774,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; + struct amdgpu_device *adev = hwmgr->adev; struct pp_clock_levels_with_latency clocks; int i, now, size = 0; int ret = 0; + uint32_t gen_speed, lane_width; switch (type) { case PP_SCLK: @@ -2788,6 +2819,28 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case PP_PCIE: + gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; + lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + for (i = 0; i < NUM_LINK_LEVELS; i++) + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, + (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : + (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : + (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : + (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", + (pptable->PcieLaneCount[i] == 1) ? "x1" : + (pptable->PcieLaneCount[i] == 2) ? "x2" : + (pptable->PcieLaneCount[i] == 3) ? "x4" : + (pptable->PcieLaneCount[i] == 4) ? "x8" : + (pptable->PcieLaneCount[i] == 5) ? "x12" : + (pptable->PcieLaneCount[i] == 6) ? "x16" : "", + pptable->LclkFreq[i], + (gen_speed == pptable->PcieGenSpeed[i]) && + (lane_width == pptable->PcieLaneCount[i]) ? + "*" : ""); break; case OD_SCLK: diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 54fd0125d9cf..f4dab979a3a1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -463,5 +463,8 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr); + +extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count); + #endif /* _HARDWARE_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 07d180ce4d18..0d298a0409f5 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -309,7 +309,7 @@ struct pp_hwmgr_func { int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable); int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr); int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); - int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr, uint32_t virtual_addr_low, @@ -317,6 +317,9 @@ struct pp_hwmgr_func { uint32_t mc_addr_low, uint32_t mc_addr_hi, uint32_t size); + int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr, + bool enable, + bool lock); int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf); @@ -329,6 +332,8 @@ struct pp_hwmgr_func { int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr); int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate); int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr); + int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); }; struct pp_table_func { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h index 62f36ba2435b..6e19f4c7cf8f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h @@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) #define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) +#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415) + #define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280) #define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281) #define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282) @@ -395,6 +397,9 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306) +#define PPSMC_MSG_EnableFFC ((uint16_t) 0x307) +#define PPSMC_MSG_DisableFFC ((uint16_t) 0x308) + #define PPSMC_MSG_EnableDpmDidt ((uint16_t) 0x309) #define PPSMC_MSG_DisableDpmDidt ((uint16_t) 0x30A) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 2b2c26616902..52abca065764 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -1528,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) efuse = efuse >> 24; if (hwmgr->chip_id == CHIP_POLARIS10) { - min = 1000; - max = 2300; + if (hwmgr->is_kicker) { + min = 1200; + max = 2500; + } else { + min = 1000; + max = 2300; + } + } else if (hwmgr->chip_id == CHIP_POLARIS11) { + if (hwmgr->is_kicker) { + min = 900; + max = 2100; + } else { + min = 1100; + max = 2100; + } } else { min = 1100; max = 2100; @@ -1626,6 +1639,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); int result = 0; @@ -1646,6 +1660,59 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) result = atomctrl_get_avfs_information(hwmgr, &avfs_params); if (0 == result) { + if (((adev->pdev->device == 0x67ef) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe5))) || + ((adev->pdev->device == 0x67ff) && + ((adev->pdev->revision == 0xcf) || + (adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) { + avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1; + if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) || + (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) { + if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) && + (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) && + (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) && + (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) && + (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) && + (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) { + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF718F1D4; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x323FD; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x1E455; + avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0; + avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0; + avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x23; + } + } + } else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) { + avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF6B024DD; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x3005E; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x18A5F; + avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315; + avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1; + avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x3B; + } else if (((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe3) || + (adev->pdev->revision == 0xe4) || + (adev->pdev->revision == 0xe5) || + (adev->pdev->revision == 0xe7) || + (adev->pdev->revision == 0xef))) || + ((adev->pdev->device == 0x6fdf) && + ((adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) { + avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF843B66B; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x59CB5; + avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0xFFFF287F; + avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0; + avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23; + avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x58; + } + } + + if (0 == result) { table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); @@ -1984,6 +2051,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + /* Apply avfs cks-off voltages to avoid the overshoot + * when switching to the highest sclk frequency + */ + if (data->apply_avfs_cks_off_voltage) + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); + return 0; } |