Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 1697 |
1 file changed, 1249 insertions, 448 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 404483437bd3..08133de21fdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -27,11 +27,20 @@ #include <linux/uaccess.h> #include <linux/reboot.h> #include <linux/syscalls.h> +#include <linux/pm_runtime.h> #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" +#include "amdgpu_xgmi.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" +#include "atom.h" +#ifdef CONFIG_X86_MCE_AMD +#include <asm/mce.h> + +static bool notifier_registered; +#endif +static const char *RAS_FS_NAME = "ras"; const char *ras_error_string[] = { "none", @@ -56,18 +65,40 @@ const char *ras_block_string[] = { "mp0", "mp1", "fuse", + "mca", +}; + +const char *ras_mca_block_string[] = { + "mca_mp0", + "mca_mp1", + "mca_mpio", + "mca_iohc", }; +const char *get_ras_block_str(struct ras_common_if *ras_block) +{ + if (!ras_block) + return "NULL"; + + if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT) + return "OUT OF RANGE"; + + if (ras_block->block == AMDGPU_RAS_BLOCK__MCA) + return ras_mca_block_string[ras_block->sub_block_index]; + + return ras_block_string[ras_block->block]; +} + #define ras_err_str(i) (ras_error_string[ffs(i)]) -#define ras_block_str(i) (ras_block_string[i]) -#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1 -#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) /* inject address is 52 bits */ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) +/* typical ECC bad page rate is 1 bad page per 100MB VRAM */ +#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) + enum amdgpu_ras_retire_page_reservation { AMDGPU_RAS_RETIRE_PAGE_RESERVED, AMDGPU_RAS_RETIRE_PAGE_PENDING, @@ -76,8 +107,75 @@ enum amdgpu_ras_retire_page_reservation { atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); +static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, + uint64_t addr); static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, uint64_t addr); +#ifdef CONFIG_X86_MCE_AMD +static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); +struct mce_notifier_adev_list { + struct amdgpu_device *devs[MAX_GPU_INSTANCE]; + int num_gpu; +}; +static struct mce_notifier_adev_list mce_adev_list; +#endif + +void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) +{ + if (adev && amdgpu_ras_get_context(adev)) + amdgpu_ras_get_context(adev)->error_query_ready = ready; +} + +static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) +{ + if (adev && amdgpu_ras_get_context(adev)) + return amdgpu_ras_get_context(adev)->error_query_ready; + + return false; +} + +static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) +{ + struct ras_err_data err_data = {0, 0, 0, NULL}; + struct eeprom_table_record err_rec; + + if ((address >= adev->gmc.mc_vram_size) || + (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + dev_warn(adev->dev, + "RAS WARN: input address 0x%llx is invalid.\n", + address); + return -EINVAL; + } + + if (amdgpu_ras_check_bad_page(adev, address)) { + dev_warn(adev->dev, + "RAS WARN: 0x%llx has already been marked as bad page!\n", + address); + return 0; + } + + memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); + + err_rec.address = address; + err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT; + err_rec.ts = (uint64_t)ktime_get_real_seconds(); + err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + + err_data.err_addr = &err_rec; + 
err_data.err_addr_cnt = 1; + + if (amdgpu_bad_page_threshold != 0) { + amdgpu_ras_add_bad_pages(adev, err_data.err_addr, + err_data.err_addr_cnt); + amdgpu_ras_save_bad_pages(adev); + } + + dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n"); + dev_warn(adev->dev, "Clear EEPROM:\n"); + dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n"); + + return 0; +} static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) @@ -89,7 +187,7 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, ssize_t s; char val[128]; - if (amdgpu_ras_error_query(obj->adev, &info)) + if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", @@ -123,7 +221,7 @@ static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { *block_id = i; - if (strcmp(name, ras_block_str(i)) == 0) + if (strcmp(name, ras_block_string[i]) == 0) return 0; } return -EINVAL; @@ -158,11 +256,24 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, op = 1; else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) op = 2; + else if (strstr(str, "retire_page") != NULL) + op = 3; else if (str[0] && str[1] && str[2] && str[3]) /* ascii string, but commands are not matched. */ return -EINVAL; if (op != -1) { + if (op == 3) { + if (sscanf(str, "%*s 0x%llx", &address) != 1 && + sscanf(str, "%*s %llu", &address) != 1) + return -EINVAL; + + data->op = op; + data->inject.address = address; + + return 0; + } + if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) return -EINVAL; @@ -178,11 +289,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, data->op = op; if (op == 2) { - if (sscanf(str, "%*s %*s %*s %u %llu %llu", - &sub_block, &address, &value) != 3) - if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", - &sub_block, &address, &value) != 3) - return -EINVAL; + if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", + &sub_block, &address, &value) != 3 && + sscanf(str, "%*s %*s %*s %u %llu %llu", + &sub_block, &address, &value) != 3) + return -EINVAL; data->head.sub_block_index = sub_block; data->inject.address = address; data->inject.value = value; @@ -198,13 +309,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return 0; } -static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, - struct ras_common_if *head); - /** * DOC: AMDGPU RAS debugfs control interface * - * It accepts struct ras_debug_if who has two members. + * The control interface accepts struct ras_debug_if which has two members. * * First member: ras_debug_if::head or ras_debug_if::inject. * @@ -229,32 +337,36 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, * * How to use the interface? * - * Programs + * In a program * - * Copy the struct ras_debug_if in your codes and initialize it. - * Write the struct to the control node. + * Copy the struct ras_debug_if in your code and initialize it. + * Write the struct to the control interface. * - * Shells + * From shell * * .. 
code-block:: bash * - * echo op block [error [sub_block address value]] > .../ras/ras_ctrl + * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * echo "inject <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl + * + * Where N is the card which you want to affect. * - * Parameters: + * "disable" requires only the block. + * "enable" requires the block and error type. + * "inject" requires the block, error type, sub-block, address, and value. * - * op: disable, enable, inject - * disable: only block is needed - * enable: block and error are needed - * inject: error, address, value are needed - * block: umc, sdma, gfx, ......... + * The block is one of: umc, sdma, gfx, etc. * see ras_block_string[] for details - * error: ue, ce - * ue: multi_uncorrectable - * ce: single_correctable - * sub_block: - * sub block index, pass 0 if there is no sub block * - * here are some examples for bash commands: + * The error type is one of: ue, ce, where + * ue is multi-uncorrectable and + * ce is single-correctable. + * + * The sub-block is the sub-block index, pass 0 if there is no sub-block. + * The address and value are hexadecimal numbers, leading 0x is optional. + * + * For instance, + * * .. code-block:: bash * * @@ -262,30 +374,45 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl * - * How to check the result? + * How to check the result of the operation? * - * For disable/enable, please check ras features at + * To check disable/enable, see the "ras" features at * /sys/class/drm/card[0/1/2...]/device/ras/features * - * For inject, please check corresponding err count at - * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count + * To check inject, see the corresponding error count at + * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count * * .. note:: * Operations are only allowed on blocks which are supported. - * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask * to see which blocks support RAS on a particular asic. 
* */ -static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) +static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, + const char __user *buf, + size_t size, loff_t *pos) { struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; struct ras_debug_if data; int ret = 0; + if (!amdgpu_ras_get_error_query_ready(adev)) { + dev_warn(adev->dev, "RAS WARN: error injection " + "currently inaccessible\n"); + return size; + } + ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); if (ret) - return -EINVAL; + return ret; + + if (data.op == 3) { + ret = amdgpu_reserve_page_direct(adev, data.inject.address); + if (!ret) + return size; + else + return ret; + } if (!amdgpu_ras_is_supported(adev, data.head.block)) return -EINVAL; @@ -300,6 +427,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * case 2: if ((data.inject.address >= adev->gmc.mc_vram_size) || (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + dev_warn(adev->dev, "RAS WARN: input address " + "0x%llx is invalid.", + data.inject.address); ret = -EINVAL; break; } @@ -307,8 +437,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * /* umc ce/ue error injection for a bad page is not allowed */ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && amdgpu_ras_check_bad_page(adev, data.inject.address)) { - DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", - data.inject.address); + dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has " + "already been marked as bad!\n", + data.inject.address); break; } @@ -318,7 +449,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * default: ret = -EINVAL; break; - }; + } if (ret) return -EINVAL; @@ -342,15 +473,25 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * * will reset EEPROM table to 0 entries. * */ -static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) +static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, + const char __user *buf, + size_t size, loff_t *pos) { - struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct amdgpu_device *adev = + (struct amdgpu_device *)file_inode(f)->i_private; int ret; - ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); + ret = amdgpu_ras_eeprom_reset_table( + &(amdgpu_ras_get_context(adev)->eeprom_control)); - return ret == 1 ? size : -EIO; + if (!ret) { + /* Something was written to EEPROM. 
+ */ + amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; + return size; + } else { + return ret; + } } static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { @@ -396,12 +537,19 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, .head = obj->head, }; - if (amdgpu_ras_error_query(obj->adev, &info)) + if (!amdgpu_ras_get_error_query_ready(obj->adev)) + return sysfs_emit(buf, "Query currently inaccessible\n"); + + if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n", - "ue", info.ue_count, - "ce", info.ce_count); + if (obj->adev->asic_type == CHIP_ALDEBARAN) { + if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) + DRM_WARN("Failed to reset error counter and error status"); + } + + return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, + "ce", info.ce_count); } /* obj begin */ @@ -411,11 +559,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, static inline void put_obj(struct ras_manager *obj) { - if (obj && --obj->use == 0) + if (obj && (--obj->use == 0)) list_del(&obj->node); - if (obj && obj->use < 0) { - DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name); - } + if (obj && (obj->use < 0)) + DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head)); } /* make one obj and return it. */ @@ -425,13 +572,20 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; - if (!con) + if (!adev->ras_enabled || !con) return NULL; if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; + + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; + /* already exist. return obj? 
*/ if (alive_obj(obj)) return NULL; @@ -445,33 +599,35 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, } /* return an obj equal to head, or the first when head is NULL */ -static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, +struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, struct ras_common_if *head) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; int i; - if (!con) + if (!adev->ras_enabled || !con) return NULL; if (head) { if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; + + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; - if (alive_obj(obj)) { - WARN_ON(head->block != obj->head.block); + if (alive_obj(obj)) return obj; - } } else { - for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { obj = &con->objs[i]; - if (alive_obj(obj)) { - WARN_ON(i != obj->head.block); + if (alive_obj(obj)) return obj; - } } } @@ -481,11 +637,9 @@ static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, /* feature ctl begin */ static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, - struct ras_common_if *head) + struct ras_common_if *head) { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - - return con->hw_supported & BIT(head->block); + return adev->ras_hw_enabled & BIT(head->block); } static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, @@ -514,8 +668,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, */ if (!amdgpu_ras_is_feature_allowed(adev, head)) return 0; - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) - return 0; if (enable) { if (!obj) { @@ -542,19 +694,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - union ta_ras_cmd_input info; + union ta_ras_cmd_input *info; int ret; if (!con) return -EINVAL; + info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); + if (!info) + return -ENOMEM; + if (!enable) { - info.disable_features = (struct ta_ras_disable_features_input) { + info->disable_features = (struct ta_ras_disable_features_input) { .block_id = amdgpu_ras_block_to_ta(head->block), .error_type = amdgpu_ras_error_to_ta(head->type), }; } else { - info.enable_features = (struct ta_ras_enable_features_input) { + info->enable_features = (struct ta_ras_enable_features_input) { .block_id = amdgpu_ras_block_to_ta(head->block), .error_type = amdgpu_ras_error_to_ta(head->type), }; @@ -562,27 +718,24 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, /* Do not enable if it is not allowed. */ WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); - /* Are we alerady in that state we are going to set? */ - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) - return 0; if (!amdgpu_ras_intr_triggered()) { - ret = psp_ras_enable_features(&adev->psp, &info, enable); + ret = psp_ras_enable_features(&adev->psp, info, enable); if (ret) { - DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", - enable ? 
"enable":"disable", - ras_block_str(head->block), - ret); - if (ret == TA_RAS_STATUS__RESET_NEEDED) - return -EAGAIN; - return -EINVAL; + dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n", + enable ? "enable":"disable", + get_ras_block_str(head), + amdgpu_ras_is_poison_mode_supported(adev), ret); + goto out; } } /* setup the obj */ __amdgpu_ras_feature_enable(adev, head, enable); - - return 0; + ret = 0; +out: + kfree(info); + return ret; } /* Only used in device probe stage and called only once. */ @@ -611,8 +764,9 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (ret == -EINVAL) { ret = __amdgpu_ras_feature_enable(adev, head, 1); if (!ret) - DRM_INFO("RAS INFO: %s setup object\n", - ras_block_str(head->block)); + dev_info(adev->dev, + "RAS INFO: %s setup object\n", + get_ras_block_str(head)); } } else { /* setup the object then issue a ras TA disable cmd.*/ @@ -620,7 +774,15 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (ret) return ret; + /* gfx block ras dsiable cmd must send to ras-ta */ + if (head->block == AMDGPU_RAS_BLOCK__GFX) + con->features |= BIT(head->block); + ret = amdgpu_ras_feature_enable(adev, head, 0); + + /* clean gfx block ras features flag */ + if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) + con->features &= ~BIT(head->block); } } else ret = amdgpu_ras_feature_enable(adev, head, enable); @@ -654,18 +816,39 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, bool bypass) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; int i; - const enum amdgpu_ras_error_type default_ras_type = - AMDGPU_RAS_ERROR__NONE; + const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; - for (i = 0; i < ras_block_count; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { struct ras_common_if head = { .block = i, .type = default_ras_type, .sub_block_index = 0, }; - strcpy(head.name, ras_block_str(i)); + + if (i == AMDGPU_RAS_BLOCK__MCA) + continue; + + if (bypass) { + /* + * bypass psp. vbios enable ras for us. + * so just create the obj + */ + if (__amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } else { + if (amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } + } + + for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { + struct ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__MCA, + .type = default_ras_type, + .sub_block_index = i, + }; + if (bypass) { /* * bypass psp. vbios enable ras for us. 
@@ -683,37 +866,97 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, } /* feature ctl end */ + +void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_err_data *err_data) +{ + switch (ras_block->sub_block_index) { + case AMDGPU_RAS_MCA_BLOCK__MP0: + if (adev->mca.mp0.ras_funcs && + adev->mca.mp0.ras_funcs->query_ras_error_count) + adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MP1: + if (adev->mca.mp1.ras_funcs && + adev->mca.mp1.ras_funcs->query_ras_error_count) + adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MPIO: + if (adev->mca.mpio.ras_funcs && + adev->mca.mpio.ras_funcs->query_ras_error_count) + adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data); + break; + default: + break; + } +} + /* query/inject/cure begin */ -int amdgpu_ras_error_query(struct amdgpu_device *adev, - struct ras_query_if *info) +int amdgpu_ras_query_error_status(struct amdgpu_device *adev, + struct ras_query_if *info) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; + int i; if (!obj) return -EINVAL; switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: - if (adev->umc.funcs->query_ras_error_count) - adev->umc.funcs->query_ras_error_count(adev, &err_data); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, &err_data); /* umc query_ras_error_address is also responsible for clearing * error status */ - if (adev->umc.funcs->query_ras_error_address) - adev->umc.funcs->query_ras_error_address(adev, &err_data); + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address) + adev->umc.ras_funcs->query_ras_error_address(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__SDMA: + if (adev->sdma.funcs->query_ras_error_count) { + for (i = 0; i < adev->sdma.num_instances; i++) + adev->sdma.funcs->query_ras_error_count(adev, i, + &err_data); + } break; case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.funcs->query_ras_error_count) - adev->gfx.funcs->query_ras_error_count(adev, &err_data); + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->query_ras_error_count) + adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data); + + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->query_ras_error_status) + adev->gfx.ras_funcs->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub.funcs->query_ras_error_count) - adev->mmhub.funcs->query_ras_error_count(adev, &err_data); + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->query_ras_error_count) + adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data); + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->query_ras_error_status) + adev->mmhub.ras_funcs->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__PCIE_BIF: - if (adev->nbio.funcs->query_ras_error_count) - adev->nbio.funcs->query_ras_error_count(adev, &err_data); + if (adev->nbio.ras_funcs && + adev->nbio.ras_funcs->query_ras_error_count) + adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + if (adev->gmc.xgmi.ras_funcs && + adev->gmc.xgmi.ras_funcs->query_ras_error_count) + adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__HDP: + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->query_ras_error_count) + 
adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__MCA: + amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data); break; default: break; @@ -726,17 +969,117 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, info->ce_count = obj->err_data.ce_count; if (err_data.ce_count) { - dev_info(adev->dev, "%ld correctable errors detected in %s block\n", - obj->err_data.ce_count, ras_block_str(info->head.block)); + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { + dev_info(adev->dev, "socket: %d, die: %d " + "%ld correctable hardware errors " + "detected in %s block, no user " + "action is needed.\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + obj->err_data.ce_count, + get_ras_block_str(&info->head)); + } else { + dev_info(adev->dev, "%ld correctable hardware errors " + "detected in %s block, no user " + "action is needed.\n", + obj->err_data.ce_count, + get_ras_block_str(&info->head)); + } } if (err_data.ue_count) { - dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", - obj->err_data.ue_count, ras_block_str(info->head.block)); + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { + dev_info(adev->dev, "socket: %d, die: %d " + "%ld uncorrectable hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + obj->err_data.ue_count, + get_ras_block_str(&info->head)); + } else { + dev_info(adev->dev, "%ld uncorrectable hardware errors " + "detected in %s block\n", + obj->err_data.ue_count, + get_ras_block_str(&info->head)); + } } + if (!amdgpu_persistent_edc_harvesting_supported(adev)) + amdgpu_ras_reset_error_status(adev, info->head.block); + return 0; } +int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, + enum amdgpu_ras_block block) +{ + if (!amdgpu_ras_is_supported(adev, block)) + return -EINVAL; + + switch (block) { + case AMDGPU_RAS_BLOCK__GFX: + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->reset_ras_error_count) + adev->gfx.ras_funcs->reset_ras_error_count(adev); + + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->reset_ras_error_status) + adev->gfx.ras_funcs->reset_ras_error_status(adev); + break; + case AMDGPU_RAS_BLOCK__MMHUB: + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_count) + adev->mmhub.ras_funcs->reset_ras_error_count(adev); + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_status) + adev->mmhub.ras_funcs->reset_ras_error_status(adev); + break; + case AMDGPU_RAS_BLOCK__SDMA: + if (adev->sdma.funcs->reset_ras_error_count) + adev->sdma.funcs->reset_ras_error_count(adev); + break; + case AMDGPU_RAS_BLOCK__HDP: + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->reset_ras_error_count) + adev->hdp.ras_funcs->reset_ras_error_count(adev); + break; + default: + break; + } + + return 0; +} + +/* Trigger XGMI/WAFL error */ +static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, + struct ta_ras_trigger_error_input *block_info) +{ + int ret; + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + dev_warn(adev->dev, "Failed to disallow df cstate"); + + if (amdgpu_dpm_allow_xgmi_power_down(adev, false)) + dev_warn(adev->dev, "Failed to disallow XGMI power down"); + + ret = psp_ras_trigger_error(&adev->psp, block_info); + + if (amdgpu_ras_intr_triggered()) + return ret; + + if (amdgpu_dpm_allow_xgmi_power_down(adev, true)) + 
dev_warn(adev->dev, "Failed to allow XGMI power down"); + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + dev_warn(adev->dev, "Failed to allow df cstate"); + + return ret; +} + /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -754,64 +1097,94 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + /* Calculate XGMI relative offset */ + if (adev->gmc.xgmi.num_physical_nodes > 1) { + block_info.address = + amdgpu_xgmi_get_relative_phy_addr(adev, + block_info.address); + } + switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.funcs->ras_error_inject) - ret = adev->gfx.funcs->ras_error_inject(adev, info); + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->ras_error_inject) + ret = adev->gfx.ras_funcs->ras_error_inject(adev, info); else ret = -EINVAL; break; case AMDGPU_RAS_BLOCK__UMC: + case AMDGPU_RAS_BLOCK__SDMA: case AMDGPU_RAS_BLOCK__MMHUB: - case AMDGPU_RAS_BLOCK__XGMI_WAFL: case AMDGPU_RAS_BLOCK__PCIE_BIF: + case AMDGPU_RAS_BLOCK__MCA: ret = psp_ras_trigger_error(&adev->psp, &block_info); break; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + ret = amdgpu_ras_error_inject_xgmi(adev, &block_info); + break; default: - DRM_INFO("%s error injection is not supported yet\n", - ras_block_str(info->head.block)); + dev_info(adev->dev, "%s error injection is not supported yet\n", + get_ras_block_str(&info->head)); ret = -EINVAL; } if (ret) - DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", - ras_block_str(info->head.block), - ret); + dev_err(adev->dev, "ras inject %s failed %d\n", + get_ras_block_str(&info->head), ret); return ret; } -int amdgpu_ras_error_cure(struct amdgpu_device *adev, - struct ras_cure_if *info) -{ - /* psp fw has no cure interface for now. */ - return 0; -} - -/* get the total error counts on all IPs */ -unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, - bool is_ce) +/** + * amdgpu_ras_query_error_count -- Get error counts of all IPs + * @adev: pointer to AMD GPU device + * @ce_count: pointer to an integer to be set to the count of correctable errors. + * @ue_count: pointer to an integer to be set to the count of uncorrectable + * errors. + * + * If @ce_count or @ue_count is set, count the corresponding + * errors and return the counts in those integer pointers. Return 0 if the device + * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. + */ +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; - struct ras_err_data data = {0, 0}; + unsigned long ce, ue; - if (!con) + if (!adev->ras_enabled || !con) + return -EOPNOTSUPP; + + /* Don't count since no reporting. + */ + if (!ce_count && !ue_count) return 0; + ce = 0; + ue = 0; list_for_each_entry(obj, &con->head, node) { struct ras_query_if info = { .head = obj->head, }; + int res; - if (amdgpu_ras_error_query(adev, &info)) - return 0; + res = amdgpu_ras_query_error_status(adev, &info); + if (res) + return res; - data.ce_count += info.ce_count; - data.ue_count += info.ue_count; + ce += info.ce_count; + ue += info.ue_count; } - return is_ce ? 
data.ce_count : data.ue_count; + if (ce_count) + *ce_count = ce; + + if (ue_count) + *ue_count = ue; + + return 0; } /* query/inject/cure end */ @@ -831,7 +1204,7 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags) case AMDGPU_RAS_RETIRE_PAGE_FAULT: default: return "F"; - }; + } } /** @@ -905,45 +1278,13 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); } -static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev) +static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct attribute *attrs[] = { - &con->features_attr.attr, - NULL - }; - struct bin_attribute *bin_attrs[] = { - &con->badpages_attr, - NULL - }; - struct attribute_group group = { - .name = "ras", - .attrs = attrs, - .bin_attrs = bin_attrs, - }; - - con->features_attr = (struct device_attribute) { - .attr = { - .name = "features", - .mode = S_IRUGO, - }, - .show = amdgpu_ras_sysfs_features_read, - }; - - con->badpages_attr = (struct bin_attribute) { - .attr = { - .name = "gpu_vram_bad_pages", - .mode = S_IRUGO, - }, - .size = 0, - .private = NULL, - .read = amdgpu_ras_sysfs_badpages_read, - }; - sysfs_attr_init(attrs[0]); - sysfs_bin_attr_init(bin_attrs[0]); - - return sysfs_create_group(&adev->dev->kobj, &group); + sysfs_remove_file_from_group(&adev->dev->kobj, + &con->badpages_attr.attr, + RAS_FS_NAME); } static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) @@ -953,14 +1294,9 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) &con->features_attr.attr, NULL }; - struct bin_attribute *bin_attrs[] = { - &con->badpages_attr, - NULL - }; struct attribute_group group = { - .name = "ras", + .name = RAS_FS_NAME, .attrs = attrs, - .bin_attrs = bin_attrs, }; sysfs_remove_group(&adev->dev->kobj, &group); @@ -993,7 +1329,7 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, if (sysfs_add_file_to_group(&adev->dev->kobj, &obj->sysfs_attr.attr, - "ras")) { + RAS_FS_NAME)) { put_obj(obj); return -EINVAL; } @@ -1013,7 +1349,7 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, sysfs_remove_file_from_group(&adev->dev->kobj, &obj->sysfs_attr.attr, - "ras"); + RAS_FS_NAME); obj->attr_inuse = 0; put_obj(obj); @@ -1029,6 +1365,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_sysfs_remove(adev, &obj->head); } + if (amdgpu_bad_page_threshold != 0) + amdgpu_ras_sysfs_remove_bad_page_node(adev); + amdgpu_ras_sysfs_remove_feature_node(adev); return 0; @@ -1054,16 +1393,27 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) * */ /* debugfs begin */ -static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) +static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct drm_minor *minor = adev->ddev->primary; - - con->dir = debugfs_create_dir("ras", minor->debugfs_root); - debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, - adev, &amdgpu_ras_debugfs_ctrl_ops); - debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, - adev, &amdgpu_ras_debugfs_eeprom_ops); + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *dir; + + dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); + debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_ctrl_ops); + 
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_eeprom_ops); + debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, + &con->bad_page_cnt_threshold); + debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled); + debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled); + debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_eeprom_size_ops); + con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", + S_IRUGO, dir, adev, + &amdgpu_ras_debugfs_eeprom_table_ops); + amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); /* * After one uncorrectable error happens, usually GPU recovery will @@ -1073,17 +1423,24 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine * will never be called. */ - debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, - &con->reboot); + debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); + + /* + * User could set this not to clean up hardware's error count register + * of RAS IPs during ras recovery. + */ + debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, + &con->disable_ras_err_cnt_harvest); + return dir; } -void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, - struct ras_fs_if *head) +static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, + struct ras_fs_if *head, + struct dentry *dir) { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); - if (!obj || obj->ent) + if (!obj || !dir) return; get_obj(obj); @@ -1092,51 +1449,94 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, head->debugfs_name, sizeof(obj->fs_data.debugfs_name)); - obj->ent = debugfs_create_file(obj->fs_data.debugfs_name, - S_IWUGO | S_IRUGO, con->dir, obj, - &amdgpu_ras_debugfs_ops); + debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, + obj, &amdgpu_ras_debugfs_ops); } -void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, - struct ras_common_if *head) +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) { - struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct dentry *dir; + struct ras_manager *obj; + struct ras_fs_if fs_info; - if (!obj || !obj->ent) + /* + * it won't be called in resume path, no need to check + * suspend and gpu reset status + */ + if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) return; - debugfs_remove(obj->ent); - obj->ent = NULL; - put_obj(obj); -} + dir = amdgpu_ras_debugfs_create_ctrl_node(adev); -static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) -{ - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_manager *obj, *tmp; - - list_for_each_entry_safe(obj, tmp, &con->head, node) { - amdgpu_ras_debugfs_remove(adev, &obj->head); + list_for_each_entry(obj, &con->head, node) { + if (amdgpu_ras_is_supported(adev, obj->head.block) && + (obj->attr_inuse == 1)) { + sprintf(fs_info.debugfs_name, "%s_err_inject", + get_ras_block_str(&obj->head)); + fs_info.head = obj->head; + amdgpu_ras_debugfs_create(adev, &fs_info, dir); + } } - - debugfs_remove_recursive(con->dir); - con->dir = NULL; } + /* debugfs end */ /* ras fs */ - +static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, + amdgpu_ras_sysfs_badpages_read, NULL, 0); +static DEVICE_ATTR(features, S_IRUGO, + amdgpu_ras_sysfs_features_read, NULL); static int 
amdgpu_ras_fs_init(struct amdgpu_device *adev) { - amdgpu_ras_sysfs_create_feature_node(adev); - amdgpu_ras_debugfs_create_ctrl_node(adev); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct attribute_group group = { + .name = RAS_FS_NAME, + }; + struct attribute *attrs[] = { + &con->features_attr.attr, + NULL + }; + struct bin_attribute *bin_attrs[] = { + NULL, + NULL, + }; + int r; + + /* add features entry */ + con->features_attr = dev_attr_features; + group.attrs = attrs; + sysfs_attr_init(attrs[0]); + + if (amdgpu_bad_page_threshold != 0) { + /* add bad_page_features entry */ + bin_attr_gpu_vram_bad_pages.private = NULL; + con->badpages_attr = bin_attr_gpu_vram_bad_pages; + bin_attrs[0] = &con->badpages_attr; + group.bin_attrs = bin_attrs; + sysfs_bin_attr_init(bin_attrs[0]); + } + + r = sysfs_create_group(&adev->dev->kobj, &group); + if (r) + dev_err(adev->dev, "Failed to create RAS sysfs group!"); return 0; } static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) { - amdgpu_ras_debugfs_remove_all(adev); + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *con_obj, *ip_obj, *tmp; + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + list_for_each_entry_safe(con_obj, tmp, &con->head, node) { + ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); + if (ip_obj) + put_obj(ip_obj); + } + } + amdgpu_ras_sysfs_remove_all(adev); return 0; } @@ -1159,22 +1559,28 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) data->rptr = (data->aligned_element_size + data->rptr) % data->ring_size; - /* Let IP handle its data, maybe we need get the output - * from the callback to udpate the error type/count, etc - */ if (data->cb) { - ret = data->cb(obj->adev, &err_data, &entry); - /* ue will trigger an interrupt, and in that case - * we need do a reset to recovery the whole system. - * But leave IP do that recovery, here we just dispatch - * the error. - */ - if (ret == AMDGPU_RAS_SUCCESS) { - /* these counts could be left as 0 if - * some blocks do not count error number + if (amdgpu_ras_is_poison_mode_supported(obj->adev) && + obj->head.block == AMDGPU_RAS_BLOCK__UMC) + dev_info(obj->adev->dev, + "Poison is created, no user action is needed.\n"); + else { + /* Let IP handle its data, maybe we need get the output + * from the callback to update the error type/count, etc */ + ret = data->cb(obj->adev, &err_data, &entry); + /* ue will trigger an interrupt, and in that case + * we need to do a reset to recover the whole system. + * But leave the IP to do that recovery; here we just dispatch + * the error. + */ + if (ret == AMDGPU_RAS_SUCCESS) { + /* these counts could be left as 0 if + * some blocks do not count error number + */ + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + } } } } @@ -1294,6 +1700,74 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) } /* ih end */ +/* traverse all IPs except NBIO to query error counters */ +static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj; + + if (!adev->ras_enabled || !con) + return; + + list_for_each_entry(obj, &con->head, node) { + struct ras_query_if info = { + .head = obj->head, + }; + + /* + * The PCIE_BIF IP has a different isr for the ras controller + * interrupt, so the specific ras counter query will be + * done in that isr. Skip such blocks from the common + * sync flood interrupt isr. */ + if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) + continue; + + amdgpu_ras_query_error_status(adev, &info); + } +} + +/* Parse RdRspStatus and WrRspStatus */ +static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, + struct ras_query_if *info) +{ + /* + * Only two blocks need to query the read/write + * RspStatus at the current state + */ + switch (info->head.block) { + case AMDGPU_RAS_BLOCK__GFX: + if (adev->gfx.ras_funcs && + adev->gfx.ras_funcs->query_ras_error_status) + adev->gfx.ras_funcs->query_ras_error_status(adev); + break; + case AMDGPU_RAS_BLOCK__MMHUB: + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->query_ras_error_status) + adev->mmhub.ras_funcs->query_ras_error_status(adev); + break; + default: + break; + } +} + +static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj; + + if (!adev->ras_enabled || !con) + return; + + list_for_each_entry(obj, &con->head, node) { + struct ras_query_if info = { + .head = obj->head, + }; + + amdgpu_ras_error_status_query(adev, &info); + } +} + /* recovery begin */ /* return 0 on success. @@ -1305,7 +1779,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; int i = 0; - int ret = 0; + int ret = 0, status; if (!con || !con->eh_data || !bps || !count) return -EINVAL; @@ -1314,6 +1788,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, data = con->eh_data; if (!data || data->count == 0) { *bps = NULL; + ret = -EINVAL; goto out; } @@ -1329,10 +1804,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, .size = AMDGPU_GPU_PAGE_SIZE, .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; - - if (data->last_reserved <= i) + status = amdgpu_vram_mgr_query_page_status( + ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + data->bps[i].retired_page); + if (status == -EBUSY) (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; - else if (data->bps_bo[i] == NULL) + else if (status == -ENOENT) (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; } @@ -1346,8 +1823,33 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) { struct amdgpu_ras *ras = container_of(work, struct amdgpu_ras, recovery_work); + struct amdgpu_device *remote_adev = NULL; + struct amdgpu_device *adev = ras->adev; + struct list_head device_list, *device_list_handle = NULL; + + if (!ras->disable_ras_err_cnt_harvest) { + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + + /* Build list of devices to query RAS related errors */ + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { + device_list_handle = &hive->device_list; + } else { + INIT_LIST_HEAD(&device_list); + list_add_tail(&adev->gmc.xgmi.head, &device_list); + device_list_handle = &device_list; + } + + list_for_each_entry(remote_adev, + device_list_handle, gmc.xgmi.head) { + amdgpu_ras_query_err_status(remote_adev); + amdgpu_ras_log_on_err_counter(remote_adev); + } - amdgpu_device_gpu_recover(ras->adev, 0); + amdgpu_put_xgmi_hive(hive); + } + + if (amdgpu_device_should_recover_gpu(ras->adev)) + amdgpu_device_gpu_recover(ras->adev, NULL); atomic_set(&ras->in_recovery, 0); } @@ -1359,12 +1861,9 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, unsigned int new_space = old_space + pages; unsigned int align_space = ALIGN(new_space, 512); void *bps = kmalloc(align_space * 
sizeof(*data->bps), GFP_KERNEL); - struct amdgpu_bo **bps_bo = - kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL); - if (!bps || !bps_bo) { + if (!bps) { kfree(bps); - kfree(bps_bo); return -ENOMEM; } @@ -1373,14 +1872,8 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, data->count * sizeof(*data->bps)); kfree(data->bps); } - if (data->bps_bo) { - memcpy(bps_bo, data->bps_bo, - data->count * sizeof(*data->bps_bo)); - kfree(data->bps_bo); - } data->bps = bps; - data->bps_bo = bps_bo; data->space_left += align_space - old_space; return 0; } @@ -1392,6 +1885,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; int ret = 0; + uint32_t i; if (!con || !con->eh_data || !bps || pages <= 0) return 0; @@ -1401,16 +1895,26 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, if (!data) goto out; - if (data->space_left <= pages) - if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) { + for (i = 0; i < pages; i++) { + if (amdgpu_ras_check_bad_page_unlock(con, + bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) + continue; + + if (!data->space_left && + amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { ret = -ENOMEM; goto out; } - memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); - data->count += pages; - data->space_left -= pages; + amdgpu_vram_mgr_reserve_range( + ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE); + memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); + data->count++; + data->space_left--; + } out: mutex_unlock(&con->recovery_lock); @@ -1421,7 +1925,7 @@ out: * write error record array to eeprom, the function should be * protected by recovery_lock */ -static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) +int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; @@ -1433,17 +1937,19 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) control = &con->eeprom_control; data = con->eh_data; - save_count = data->count - control->num_recs; + save_count = data->count - control->ras_num_recs; /* only new entries are saved */ - if (save_count > 0) - if (amdgpu_ras_eeprom_process_recods(control, - &data->bps[control->num_recs], - true, - save_count)) { - DRM_ERROR("Failed to save EEPROM table data!"); + if (save_count > 0) { + if (amdgpu_ras_eeprom_append(control, + &data->bps[control->ras_num_recs], + save_count)) { + dev_err(adev->dev, "Failed to save EEPROM table data!"); return -EIO; } + dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); + } + return 0; } @@ -1454,148 +1960,120 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) { struct amdgpu_ras_eeprom_control *control = - &adev->psp.ras.ras->eeprom_control; - struct eeprom_table_record *bps = NULL; - int ret = 0; + &adev->psp.ras_context.ras->eeprom_control; + struct eeprom_table_record *bps; + int ret; /* no bad page record, skip eeprom access */ - if (!control->num_recs) - return ret; + if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) + return 0; - bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); + bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL); if (!bps) return -ENOMEM; - if (amdgpu_ras_eeprom_process_recods(control, bps, false, - 
control->num_recs)) { - DRM_ERROR("Failed to load EEPROM table records!"); - ret = -EIO; - goto out; - } - - ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); + ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); + if (ret) + dev_err(adev->dev, "Failed to load EEPROM table records!"); + else + ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs); -out: kfree(bps); return ret; } -/* - * check if an address belongs to bad page - * - * Note: this check is only for umc block - */ -static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, +static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, uint64_t addr) { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data *data; + struct ras_err_handler_data *data = con->eh_data; int i; - bool ret = false; - - if (!con || !con->eh_data) - return ret; - - mutex_lock(&con->recovery_lock); - data = con->eh_data; - if (!data) - goto out; addr >>= AMDGPU_GPU_PAGE_SHIFT; for (i = 0; i < data->count; i++) - if (addr == data->bps[i].retired_page) { - ret = true; - goto out; - } + if (addr == data->bps[i].retired_page) + return true; -out: - mutex_unlock(&con->recovery_lock); - return ret; + return false; } -/* called in gpu recovery/init */ -int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) +/* + * check if an address belongs to bad page + * + * Note: this check is only for umc block + */ +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data *data; - uint64_t bp; - struct amdgpu_bo *bo = NULL; - int i, ret = 0; + bool ret = false; if (!con || !con->eh_data) - return 0; + return ret; mutex_lock(&con->recovery_lock); - data = con->eh_data; - if (!data) - goto out; - /* reserve vram at driver post stage. */ - for (i = data->last_reserved; i < data->count; i++) { - bp = data->bps[i].retired_page; - - /* There are two cases of reserve error should be ignored: - * 1) a ras bad page has been allocated (used by someone); - * 2) a ras bad page has been reserved (duplicate error injection - * for one page); - */ - if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, - AMDGPU_GPU_PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL)) - DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp); - - data->bps_bo[i] = bo; - data->last_reserved = i + 1; - bo = NULL; - } - - /* continue to save bad pages to eeprom even reesrve_vram fails */ - ret = amdgpu_ras_save_bad_pages(adev); -out: + ret = amdgpu_ras_check_bad_page_unlock(con, addr); mutex_unlock(&con->recovery_lock); return ret; } -/* called when driver unload */ -static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) +static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, + uint32_t max_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data *data; - struct amdgpu_bo *bo; - int i; - - if (!con || !con->eh_data) - return 0; - mutex_lock(&con->recovery_lock); - data = con->eh_data; - if (!data) - goto out; - - for (i = data->last_reserved - 1; i >= 0; i--) { - bo = data->bps_bo[i]; + /* + * Justification of value bad_page_cnt_threshold in ras structure + * + * Generally, -1 <= amdgpu_bad_page_threshold <= max record length + * in eeprom, and introduce two scenarios accordingly. + * + * Bad page retirement enablement: + * - If amdgpu_bad_page_threshold = -1, + * bad_page_cnt_threshold = typical value by formula. 
+ * + * - When the value from user is 0 < amdgpu_bad_page_threshold < + * max record length in eeprom, use it directly. + * + * Bad page retirement disablement: + * - If amdgpu_bad_page_threshold = 0, bad page retirement + * functionality is disabled, and bad_page_cnt_threshold will + * take no effect. + */ - amdgpu_bo_free_kernel(&bo, NULL, NULL); + if (amdgpu_bad_page_threshold < 0) { + u64 val = adev->gmc.mc_vram_size; - data->bps_bo[i] = bo; - data->last_reserved = i; + do_div(val, RAS_BAD_PAGE_COVER); + con->bad_page_cnt_threshold = min(lower_32_bits(val), + max_count); + } else { + con->bad_page_cnt_threshold = min_t(int, max_count, + amdgpu_bad_page_threshold); } -out: - mutex_unlock(&con->recovery_lock); - return 0; } int amdgpu_ras_recovery_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data **data; + u32 max_eeprom_records_count = 0; + bool exc_err_limit = false; int ret; - if (con) - data = &con->eh_data; - else + if (!con) + return 0; + + /* Allow access to RAS EEPROM via debugfs, when the ASIC + * supports RAS and debugfs is enabled, but when + * adev->ras_enabled is unset, i.e. when "ras_enable" + * module parameter is set to 0. + */ + con->adev = adev; + + if (!adev->ras_enabled) return 0; + data = &con->eh_data; *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); if (!*data) { ret = -ENOMEM; @@ -1605,32 +2083,55 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) mutex_init(&con->recovery_lock); INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); atomic_set(&con->in_recovery, 0); - con->adev = adev; - ret = amdgpu_ras_eeprom_init(&con->eeprom_control); - if (ret) + max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(); + amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); + + /* Todo: During test the SMU might fail to read the eeprom through I2C + * when the GPU is pending on XGMI reset during probe time + * (Mostly after second bus reset), skip it now + */ + if (adev->gmc.xgmi.pending_reset) + return 0; + ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); + /* + * This calling fails when exc_err_limit is true or + * ret != 0. + */ + if (exc_err_limit || ret) goto free; - if (con->eeprom_control.num_recs) { + if (con->eeprom_control.ras_num_recs) { ret = amdgpu_ras_load_bad_pages(adev); if (ret) goto free; - ret = amdgpu_ras_reserve_bad_pages(adev); - if (ret) - goto release; + + if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num) + adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); } +#ifdef CONFIG_X86_MCE_AMD + if ((adev->asic_type == CHIP_ALDEBARAN) && + (adev->gmc.xgmi.connected_to_cpu)) + amdgpu_register_bad_pages_mca_notifier(adev); +#endif return 0; -release: - amdgpu_ras_release_bad_pages(adev); free: kfree((*data)->bps); - kfree((*data)->bps_bo); kfree(*data); con->eh_data = NULL; out: - DRM_WARN("Failed to initialize ras recovery!\n"); + dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret); + + /* + * Except error threshold exceeding case, other failure cases in this + * function would not fail amdgpu driver init. 
+ */ + if (!exc_err_limit) + ret = 0; + else + ret = -EINVAL; return ret; } @@ -1645,12 +2146,10 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) return 0; cancel_work_sync(&con->recovery_work); - amdgpu_ras_release_bad_pages(adev); mutex_lock(&con->recovery_lock); con->eh_data = NULL; kfree(data->bps); - kfree(data->bps_bo); kfree(data); mutex_unlock(&con->recovery_lock); return 0; } /* recovery end */ -/* return 0 if ras will reset gpu and repost.*/ -int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, - unsigned int block) +static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + return adev->asic_type == CHIP_VEGA10 || + adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS || + adev->asic_type == CHIP_ALDEBARAN || + adev->asic_type == CHIP_SIENNA_CICHLID; +} - if (!ras) - return -EINVAL; +/* + * this is a workaround for the vega20 workstation sku; + * force enable gfx ras and ignore the vbios gfx ras flag, + * since GC EDC can not be written + */ +static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) +{ + struct atom_context *ctx = adev->mode_info.atom_context; - ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET; - return 0; + if (!ctx) + return; + + if (strnstr(ctx->vbios_version, "D16406", + sizeof(ctx->vbios_version)) || + strnstr(ctx->vbios_version, "D36002", + sizeof(ctx->vbios_version))) + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); } /* @@ -1680,49 +2194,111 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, * we have to initialize ras as normal. but need check if operation is * allowed or not in each function. */ -static void amdgpu_ras_check_supported(struct amdgpu_device *adev, - uint32_t *hw_supported, uint32_t *supported) +static void amdgpu_ras_check_supported(struct amdgpu_device *adev) { - *hw_supported = 0; - *supported = 0; + adev->ras_hw_enabled = adev->ras_enabled = 0; - if (amdgpu_sriov_vf(adev) || - adev->asic_type != CHIP_VEGA20) + if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || + !amdgpu_ras_asic_supported(adev)) return; - if (adev->is_atom_fw && - (amdgpu_atomfirmware_mem_ecc_supported(adev) || - amdgpu_atomfirmware_sram_ecc_supported(adev))) - *hw_supported = AMDGPU_RAS_BLOCK_MASK; + if (!adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { + dev_info(adev->dev, "MEM ECC is active.\n"); + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else { + dev_info(adev->dev, "MEM ECC is not present.\n"); + } - *supported = amdgpu_ras_enable == 0 ? - 0 : *hw_supported & amdgpu_ras_mask; + if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { + dev_info(adev->dev, "SRAM ECC is active.\n"); + adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); + } else { + dev_info(adev->dev, "SRAM ECC is not present.\n"); + } + } else { + /* the driver only manages the RAS feature of a few IP blocks + * when the GPU is connected to the CPU through XGMI */ + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | + 1 << AMDGPU_RAS_BLOCK__SDMA | + 1 << AMDGPU_RAS_BLOCK__MMHUB); + } + + amdgpu_ras_get_quirks(adev); + + /* hw_supported needs to be aligned with RAS block mask. */ + adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; + + adev->ras_enabled = amdgpu_ras_enable == 0 ? 
+static void amdgpu_ras_counte_dw(struct work_struct *work)
+{
+    struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
+                                          ras_counte_delay_work.work);
+    struct amdgpu_device *adev = con->adev;
+    struct drm_device *dev = adev_to_drm(adev);
+    unsigned long ce_count, ue_count;
+    int res;
+
+    res = pm_runtime_get_sync(dev->dev);
+    if (res < 0)
+        goto Out;
+
+    /* Cache the new values.
+     */
+    if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+        atomic_set(&con->ras_ce_count, ce_count);
+        atomic_set(&con->ras_ue_count, ue_count);
+    }
+
+    pm_runtime_mark_last_busy(dev->dev);
+Out:
+    pm_runtime_put_autosuspend(dev->dev);
+}
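
One subtlety in amdgpu_ras_counte_dw() above is that pm_runtime_get_sync() raises the device usage count even when it fails, so the error path must still drop the reference. A generic sketch of that pattern follows; it is hypothetical driver code, with `dev` standing in for any valid struct device.

#include <linux/pm_runtime.h>

/* Sketch: a failed pm_runtime_get_sync() has already bumped the
 * usage count, so both success and failure paths funnel into
 * pm_runtime_put_autosuspend(). */
static int do_work_powered(struct device *dev)
{
    int ret = pm_runtime_get_sync(dev);

    if (ret < 0)
        goto out;

    /* ... touch the hardware while it is awake ... */

    pm_runtime_mark_last_busy(dev);
out:
    pm_runtime_put_autosuspend(dev);
    return ret < 0 ? ret : 0;
}
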
 int amdgpu_ras_init(struct amdgpu_device *adev)
 {
     struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
     int r;
+    bool df_poison, umc_poison;
 
     if (con)
         return 0;
 
     con = kmalloc(sizeof(struct amdgpu_ras) +
-            sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
+            sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
+            sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
             GFP_KERNEL|__GFP_ZERO);
     if (!con)
         return -ENOMEM;
 
+    con->adev = adev;
+    INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
+    atomic_set(&con->ras_ce_count, 0);
+    atomic_set(&con->ras_ue_count, 0);
+
     con->objs = (struct ras_manager *)(con + 1);
 
     amdgpu_ras_set_context(adev, con);
 
-    amdgpu_ras_check_supported(adev, &con->hw_supported,
-            &con->supported);
-    if (!con->hw_supported) {
-        amdgpu_ras_set_context(adev, NULL);
-        kfree(con);
-        return 0;
+    amdgpu_ras_check_supported(adev);
+
+    if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
+        /* set the GFX block RAS context feature for VEGA20 Gaming;
+         * send a RAS disable cmd to the RAS TA during RAS late init.
+         */
+        if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
+            con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
+
+            return 0;
+        }
+
+        r = 0;
+        goto release_con;
     }
 
     con->features = 0;
@@ -1730,32 +2306,103 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
     /* Might need to get this flag from vbios.
      */
     con->flags = RAS_DEFAULT_FLAGS;
 
-    if (adev->nbio.funcs->init_ras_controller_interrupt) {
-        r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+    /* initialize the nbio ras functions ahead of any other
+     * ras functions so the hardware fatal error interrupt
+     * can be enabled as early as possible */
+    switch (adev->asic_type) {
+    case CHIP_VEGA20:
+    case CHIP_ARCTURUS:
+    case CHIP_ALDEBARAN:
+        if (!adev->gmc.xgmi.connected_to_cpu)
+            adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+        break;
+    default:
+        /* nbio ras is not available */
+        break;
+    }
+
+    if (adev->nbio.ras_funcs &&
+        adev->nbio.ras_funcs->init_ras_controller_interrupt) {
+        r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
         if (r)
-            return r;
+            goto release_con;
     }
 
-    if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
-        r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+    if (adev->nbio.ras_funcs &&
+        adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
+        r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
         if (r)
-            return r;
+            goto release_con;
     }
 
-    amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
+    /* Initialize the poison-supported flag; the default is false. */
+    if (adev->df.funcs &&
+        adev->df.funcs->query_ras_poison_mode &&
+        adev->umc.ras_funcs &&
+        adev->umc.ras_funcs->query_ras_poison_mode) {
+        df_poison =
+            adev->df.funcs->query_ras_poison_mode(adev);
+        umc_poison =
+            adev->umc.ras_funcs->query_ras_poison_mode(adev);
+        /* Only if poison mode is set in both DF and UMC can we support it. */
+        if (df_poison && umc_poison)
+            con->poison_supported = true;
+        else if (df_poison != umc_poison)
+            dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+                     df_poison, umc_poison);
+    }
 
-    if (amdgpu_ras_fs_init(adev))
-        goto fs_out;
+    if (amdgpu_ras_fs_init(adev)) {
+        r = -EINVAL;
+        goto release_con;
+    }
+
+    dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
+             "hardware ability[%x] ras_mask[%x]\n",
+             adev->ras_hw_enabled, adev->ras_enabled);
 
-    DRM_INFO("RAS INFO: ras initialized successfully, "
-            "hardware ability[%x] ras_mask[%x]\n",
-            con->hw_supported, con->supported);
     return 0;
-fs_out:
+release_con:
     amdgpu_ras_set_context(adev, NULL);
     kfree(con);
 
-    return -EINVAL;
+    return r;
+}
+
+int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
+{
+    if (adev->gmc.xgmi.connected_to_cpu)
+        return 1;
+    return 0;
+}
+
+static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
+                                            struct ras_common_if *ras_block)
+{
+    struct ras_query_if info = {
+        .head = *ras_block,
+    };
+
+    if (!amdgpu_persistent_edc_harvesting_supported(adev))
+        return 0;
+
+    if (amdgpu_ras_query_error_status(adev, &info) != 0)
+        DRM_WARN("RAS init harvest failure");
+
+    if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
+        DRM_WARN("RAS init harvest reset failure");
+
+    return 0;
+}
+
+bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
+{
+    struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+    if (!con)
+        return false;
+
+    return con->poison_supported;
}
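
amdgpu_ras_is_poison_mode_supported() is the consumer-facing side of the DF/UMC agreement checked in amdgpu_ras_init(). A hypothetical call site might branch on it as below; the handler itself is invented for illustration and is not part of this patch.

/* Hypothetical consumer: pick a recovery strategy based on whether
 * both DF and UMC agreed on poison mode at init time. */
static void example_handle_umc_ue(struct amdgpu_device *adev)
{
    if (amdgpu_ras_is_poison_mode_supported(adev))
        return; /* poisoned data only faults its consumer;
                 * retiring the bad page elsewhere is enough */

    /* without poison support the error is unrecoverable in place,
     * so fall back to a full GPU reset */
    amdgpu_ras_reset_gpu(adev);
}
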
 /* helper function to handle common stuff in ip late init phase */
@@ -1764,6 +2411,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
             struct ras_fs_if *fs_info,
             struct ras_ih_if *ih_info)
 {
+    struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+    unsigned long ue_count, ce_count;
     int r;
 
     /* disable RAS feature per IP block if it is not supported */
@@ -1774,12 +2423,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
     r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
     if (r) {
-        if (r == -EAGAIN) {
-            /* request gpu reset. will run again */
-            amdgpu_ras_request_reset_on_boot(adev,
-                    ras_block->block);
-            return 0;
-        } else if (adev->in_suspend || adev->in_gpu_reset) {
+        if (adev->in_suspend || amdgpu_in_reset(adev)) {
             /* in the resume phase, if we fail to enable ras,
              * clean up all ras fs nodes and disable ras */
             goto cleanup;
@@ -1787,8 +2431,11 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
         return r;
     }
 
+    /* check for errors on ASICs that support persistent EDC
+     * harvesting across a warm reset */
+    amdgpu_persistent_edc_harvesting(adev, ras_block);
+
     /* in the resume phase, there is no need to create ras fs nodes */
-    if (adev->in_suspend || adev->in_gpu_reset)
+    if (adev->in_suspend || amdgpu_in_reset(adev))
         return 0;
 
     if (ih_info->cb) {
@@ -1797,17 +2444,21 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
             goto interrupt;
     }
 
-    amdgpu_ras_debugfs_create(adev, fs_info);
-
     r = amdgpu_ras_sysfs_create(adev, fs_info);
     if (r)
         goto sysfs;
 
+    /* These are the error counts cached at init.
+     */
+    if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+        atomic_set(&con->ras_ce_count, ce_count);
+        atomic_set(&con->ras_ue_count, ue_count);
+    }
+
     return 0;
 cleanup:
     amdgpu_ras_sysfs_remove(adev, ras_block);
 sysfs:
-    amdgpu_ras_debugfs_remove(adev, ras_block);
     if (ih_info->cb)
         amdgpu_ras_interrupt_remove_handler(adev, ih_info);
 interrupt:
@@ -1824,9 +2475,8 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
         return;
 
     amdgpu_ras_sysfs_remove(adev, ras_block);
-    amdgpu_ras_debugfs_remove(adev, ras_block);
     if (ih_info->cb)
-        amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+        amdgpu_ras_interrupt_remove_handler(adev, ih_info);
     amdgpu_ras_feature_enable(adev, ras_block, 0);
 }
 
@@ -1838,8 +2488,12 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
     struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
     struct ras_manager *obj, *tmp;
 
-    if (!con)
+    if (!adev->ras_enabled || !con) {
+        /* clean the ras context for VEGA20 Gaming after sending
+         * the ras disable cmd */
+        amdgpu_release_ras_context(adev);
+
         return;
+    }
 
     if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
         /* Set up all other IPs which are not implemented. There is a
@@ -1861,26 +2515,13 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
             }
         }
     }
-
-    if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
-        con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
-        /* setup ras obj state as disabled.
-         * for init_by_vbios case.
-         * if we want to enable ras, just enable it in a normal way.
-         * If we want do disable it, need setup ras obj as enabled,
-         * then issue another TA disable cmd.
-         * See feature_enable_on_boot
-         */
-        amdgpu_ras_disable_all_features(adev, 1);
-        amdgpu_ras_reset_gpu(adev, 0);
-    }
 }
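
For orientation, an IP block would drive amdgpu_ras_late_init()/amdgpu_ras_late_fini() roughly as below. This is a sketch only: it assumes the helper also takes the ras_common_if head that its cleanup path references, and the error type, field names, and sysfs name are assumptions rather than values taken from this patch.

/* Hypothetical IP-block glue around the common late-init helper. */
static int example_gfx_ras_late_init(struct amdgpu_device *adev)
{
    struct ras_common_if head = {
        .block = AMDGPU_RAS_BLOCK__GFX,
        .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, /* assumed */
    };
    struct ras_fs_if fs_info = {
        .head = head,
        .sysfs_name = "gfx_err_count", /* assumed name */
    };
    struct ras_ih_if ih_info = {
        .head = head,
        .cb = NULL, /* no dedicated IH callback in this sketch */
    };

    return amdgpu_ras_late_init(adev, &head, &fs_info, &ih_info);
}
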
 void amdgpu_ras_suspend(struct amdgpu_device *adev)
 {
     struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-    if (!con)
+    if (!adev->ras_enabled || !con)
         return;
 
     amdgpu_ras_disable_all_features(adev, 0);
@@ -1894,9 +2535,10 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
 {
     struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-    if (!con)
+    if (!adev->ras_enabled || !con)
         return 0;
 
+    /* Need to disable ras on all IPs here before ip [hw/sw]fini */
     amdgpu_ras_disable_all_features(adev, 0);
     amdgpu_ras_recovery_fini(adev);
 
@@ -1907,7 +2549,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
 {
     struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-    if (!con)
+    if (!adev->ras_enabled || !con)
         return 0;
 
     amdgpu_ras_fs_fini(adev);
@@ -1918,6 +2560,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
     if (con->features)
         amdgpu_ras_disable_all_features(adev, 1);
 
+    cancel_delayed_work_sync(&con->ras_counte_delay_work);
+
     amdgpu_ras_set_context(adev, NULL);
     kfree(con);
 
@@ -1926,15 +2570,172 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
 
 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
 {
-    uint32_t hw_supported, supported;
-
-    amdgpu_ras_check_supported(adev, &hw_supported, &supported);
-    if (!hw_supported)
+    amdgpu_ras_check_supported(adev);
+    if (!adev->ras_hw_enabled)
         return;
 
     if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
-        DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+        dev_info(adev->dev, "uncorrectable hardware error "
+                 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
 
-        amdgpu_ras_reset_gpu(adev, false);
+        amdgpu_ras_reset_gpu(adev);
     }
 }
 
+bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
+{
+    if (adev->asic_type == CHIP_VEGA20 &&
+        adev->pm.fw_version <= 0x283400) {
+        return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
+            amdgpu_ras_intr_triggered();
+    }
+
+    return false;
+}
+
+void amdgpu_release_ras_context(struct amdgpu_device *adev)
+{
+    struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+    if (!con)
+        return;
+
+    if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
+        con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
+        amdgpu_ras_set_context(adev, NULL);
+        kfree(con);
+    }
+}
+
+#ifdef CONFIG_X86_MCE_AMD
+static struct amdgpu_device *find_adev(uint32_t node_id)
+{
+    int i;
+    struct amdgpu_device *adev = NULL;
+
+    for (i = 0; i < mce_adev_list.num_gpu; i++) {
+        adev = mce_adev_list.devs[i];
+
+        if (adev && adev->gmc.xgmi.connected_to_cpu &&
+            adev->gmc.xgmi.physical_node_id == node_id)
+            break;
+        adev = NULL;
+    }
+
+    return adev;
+}
+
+#define GET_MCA_IPID_GPUID(m)    (((m) >> 44) & 0xF)
+#define GET_UMC_INST(m)          (((m) >> 21) & 0x7)
+#define GET_CHAN_INDEX(m)        ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
+#define GPU_ID_OFFSET            8
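
To make the field layout encoded by GET_MCA_IPID_GPUID()/GET_UMC_INST()/GET_CHAN_INDEX() concrete, here is a self-contained decode of an invented IPID value (user-space C; the value is chosen purely for illustration, the bit positions come from the macros above).

#include <stdint.h>
#include <stdio.h>

/* Same extraction as the macros above: GPU id in bits [47:44],
 * UMC instance in bits [23:21], channel index assembled from
 * bits [13:12] plus bit 20. */
int main(void)
{
    uint64_t ipid = (9ULL << 44) | (3ULL << 21) | (1ULL << 20) | (2ULL << 12);

    uint32_t gpu_id  = ((ipid >> 44) & 0xF) - 8; /* minus GPU_ID_OFFSET */
    uint32_t umc     = (ipid >> 21) & 0x7;
    uint32_t channel = ((ipid >> 12) & 0x3) | ((ipid >> 18) & 0x4);

    /* prints: gpu 1, umc 3, chan 6 */
    printf("gpu %u, umc %u, chan %u\n", gpu_id, umc, channel);
    return 0;
}
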
+
+static int amdgpu_bad_page_notifier(struct notifier_block *nb,
+                                    unsigned long val, void *data)
+{
+    struct mce *m = (struct mce *)data;
+    struct amdgpu_device *adev = NULL;
+    uint32_t gpu_id = 0;
+    uint32_t umc_inst = 0;
+    uint32_t ch_inst, channel_index = 0;
+    struct ras_err_data err_data = {0, 0, 0, NULL};
+    struct eeprom_table_record err_rec;
+    uint64_t retired_page;
+
+    /*
+     * If the error was generated in UMC_V2, which belongs to GPU UMCs,
+     * and the error occurred in DramECC (extended error code = 0), only
+     * then process the error; otherwise bail out.
+     */
+    if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) &&
+                (XEC(m->status, 0x3f) == 0x0)))
+        return NOTIFY_DONE;
+
+    /*
+     * If it is a correctable error, return.
+     */
+    if (mce_is_correctable(m))
+        return NOTIFY_OK;
+
+    /*
+     * The GPU id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
+     */
+    gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
+
+    adev = find_adev(gpu_id);
+    if (!adev) {
+        DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
+                 gpu_id);
+        return NOTIFY_DONE;
+    }
+
+    /*
+     * For an uncorrectable error, find out the UMC instance and
+     * channel index.
+     */
+    umc_inst = GET_UMC_INST(m->ipid);
+    ch_inst = GET_CHAN_INDEX(m->ipid);
+
+    dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
+             umc_inst, ch_inst);
+
+    memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+
+    /*
+     * Translate the UMC channel address to a physical address.
+     */
+    channel_index =
+        adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num +
+                                  ch_inst];
+
+    retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
+                   ADDR_OF_256B_BLOCK(channel_index) |
+                   OFFSET_IN_256B_BLOCK(m->addr);
+
+    err_rec.address = m->addr;
+    err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+    err_rec.ts = (uint64_t)ktime_get_real_seconds();
+    err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+    err_rec.cu = 0;
+    err_rec.mem_channel = channel_index;
+    err_rec.mcumc_id = umc_inst;
+
+    err_data.err_addr = &err_rec;
+    err_data.err_addr_cnt = 1;
+
+    if (amdgpu_bad_page_threshold != 0) {
+        amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+                                 err_data.err_addr_cnt);
+        amdgpu_ras_save_bad_pages(adev);
+    }
+
+    return NOTIFY_OK;
+}
+
+static struct notifier_block amdgpu_bad_page_nb = {
+    .notifier_call = amdgpu_bad_page_notifier,
+    .priority = MCE_PRIO_UC,
+};
+
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+    /*
+     * Add the adev to the mce_adev_list.
+     * During a mode2 reset, the amdgpu device is temporarily
+     * removed from the mgpu_info list, which can cause
+     * page retirement to fail.
+     * Use this list instead of mgpu_info to find the amdgpu
+     * device on which the UMC error was reported.
+     */
+    mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
+
+    /*
+     * Register the x86 notifier only once
+     * with the MCE subsystem.
+     */
+    if (notifier_registered == false) {
+        mce_register_decode_chain(&amdgpu_bad_page_nb);
+        notifier_registered = true;
    }
}
+#endif
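
Finally, the retired-page computation in the notifier stitches a per-channel 256-byte slot into the normalized MCA address. The macro bodies below are assumptions made so the example is self-contained (the real definitions live in amdgpu_umc.h); only the OR-composition mirrors the code above.

#include <stdint.h>
#include <stdio.h>

/* Assumed, simplified layouts: an 8KB block base derived from the
 * normalized address, a 256-byte slot selected by channel index,
 * and the byte offset within that slot. */
#define ADDR_OF_8KB_BLOCK(addr)     (((addr) & ~0xffULL) << 5)
#define ADDR_OF_256B_BLOCK(chan)    ((uint64_t)(chan) << 8)
#define OFFSET_IN_256B_BLOCK(addr)  ((addr) & 0xffULL)

int main(void)
{
    uint64_t mca_addr = 0x12345; /* invented normalized address */
    uint32_t channel_index = 6;  /* from the IPID decode sketch above */

    uint64_t retired_page = ADDR_OF_8KB_BLOCK(mca_addr) |
                            ADDR_OF_256B_BLOCK(channel_index) |
                            OFFSET_IN_256B_BLOCK(mca_addr);

    /* prints: retired_page = 0x246645 */
    printf("retired_page = 0x%llx\n", (unsigned long long)retired_page);
    return 0;
}
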