/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"
#include "amdgpu_ras_mgr.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
        "none",
        "parity",
        "single_correctable",
        "multi_uncorrectable",
        "poison",
};

const char *ras_block_string[] = {
        "umc",
        "sdma",
        "gfx",
        "mmhub",
        "athub",
        "pcie_bif",
        "hdp",
        "xgmi_wafl",
        "df",
        "smn",
        "sem",
        "mp0",
        "mp1",
        "fuse",
        "mca",
        "vcn",
        "jpeg",
        "ih",
        "mpio",
        "mmsch",
};

const char *ras_mca_block_string[] = {
        "mca_mp0",
        "mca_mp1",
        "mca_mpio",
        "mca_iohc",
};

struct amdgpu_ras_block_list {
        /* ras block link */
        struct list_head node;

        struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
        if (!ras_block)
                return "NULL";

        if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
            ras_block->block >= ARRAY_SIZE(ras_block_string))
                return "OUT OF RANGE";

        if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
                return ras_mca_block_string[ras_block->sub_block_index];

        return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
        (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)

#define MAX_UMC_POISON_POLLING_TIME_ASYNC  10

#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms

#define MAX_FLUSH_RETIRE_DWORK_TIMES  100

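/* Address-check modes used by the "check_address" debugfs command:
 * BYPASS_ALLOCATED_ADDRESS only passes pages that are allocated by the
 * calling process, while BYPASS_INITIALIZATION_ADDRESS rejects pages that
 * were allocated by the task recorded at ras context initialization.
 */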
#define BYPASS_ALLOCATED_ADDRESS        0x0
#define BYPASS_INITIALIZATION_ADDRESS   0x1

enum amdgpu_ras_retire_page_reservation {
        AMDGPU_RAS_RETIRE_PAGE_RESERVED,
        AMDGPU_RAS_RETIRE_PAGE_PENDING,
        AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                uint64_t addr);
static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);

static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);

#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
static void
amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
        struct amdgpu_device *devs[MAX_GPU_INSTANCE];
        int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
        if (adev && amdgpu_ras_get_context(adev))
                amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
        if (adev && amdgpu_ras_get_context(adev))
                return amdgpu_ras_get_context(adev)->error_query_ready;

        return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
        struct ras_err_data err_data;
        struct eeprom_table_record err_rec;
        int ret;

        ret = amdgpu_ras_check_bad_page(adev, address);
        if (ret == -EINVAL) {
                dev_warn(adev->dev,
                        "RAS WARN: input address 0x%llx is invalid.\n",
                        address);
                return -EINVAL;
        } else if (ret == 1) {
                dev_warn(adev->dev,
                        "RAS WARN: 0x%llx has already been marked as bad page!\n",
                        address);
                return 0;
        }

        ret = amdgpu_ras_error_data_init(&err_data);
        if (ret)
                return ret;

        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        err_data.err_addr = &err_rec;
        amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
                                         err_data.err_addr_cnt, false);
                amdgpu_ras_save_bad_pages(adev, NULL);
        }

        amdgpu_ras_error_data_fini(&err_data);

        dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
        dev_warn(adev->dev, "Clear EEPROM:\n");
        dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

        return 0;
}

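/*
 * Used by the "check_address" (op 4) debugfs command: returns 0 when the
 * address passes the ownership checks selected by @flags, or a negative
 * errno otherwise.
 */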
static int amdgpu_check_address_validity(struct amdgpu_device *adev,
                        uint64_t address, uint64_t flags)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct amdgpu_vram_block_info blk_info;
        uint64_t page_pfns[32] = {0};
        int i, ret, count;
        bool hit = false;

        if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
                return 0;

        if (amdgpu_sriov_vf(adev)) {
                if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
                        return -EPERM;
                return hit ? -EACCES : 0;
        }

        if ((address >= adev->gmc.mc_vram_size) ||
            (address >= RAS_UMC_INJECT_ADDR_LIMIT))
                return -EFAULT;

        if (amdgpu_uniras_enabled(adev))
                count = amdgpu_ras_mgr_lookup_bad_pages_in_a_row(adev, address,
                        page_pfns, ARRAY_SIZE(page_pfns));
        else
                count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
                                address, page_pfns, ARRAY_SIZE(page_pfns));

        if (count <= 0)
                return -EPERM;

        for (i = 0; i < count; i++) {
                memset(&blk_info, 0, sizeof(blk_info));
                ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
                                        page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
                if (!ret) {
                        /* The address being checked is expected to have been
                         * allocated by the calling process, so skip blocks
                         * owned by the calling process and only reject those
                         * owned by other tasks.
                         */
                        if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
                            ((blk_info.task.pid != task_pid_nr(current)) ||
                                strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
                                return -EACCES;
                        else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
                                (blk_info.task.pid == con->init_task_pid) &&
                                !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
                                return -EACCES;
                }
        }

        return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
        struct ras_query_if info = {
                .head = obj->head,
        };
        ssize_t s;
        char val[128];

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
        if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
            amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
                        "ue", info.ue_count,
                        "ce", info.ce_count);
        if (*pos >= s)
                return 0;

        s -= *pos;
        s = min_t(u64, s, size);


        if (copy_to_user(buf, &val[*pos], s))
                return -EINVAL;

        *pos += s;

        return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ras_debugfs_read,
        .write = NULL,
        .llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
                *block_id = i;
                if (strcmp(name, ras_block_string[i]) == 0)
                        return 0;
        }
        return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                const char __user *buf, size_t size,
                loff_t *pos, struct ras_debug_if *data)
{
        ssize_t s = min_t(u64, 64, size);
        char str[65];
        char block_name[33];
        char err[9] = "ue";
        int op = -1;
        int block_id;
        uint32_t sub_block;
        u64 address, value;
        /* default value is 0 if the mask is not set by user */
        u32 instance_mask = 0;

        if (*pos)
                return -EINVAL;
        *pos = size;

        memset(str, 0, sizeof(str));
        memset(data, 0, sizeof(*data));

        if (copy_from_user(str, buf, s))
                return -EINVAL;

        if (sscanf(str, "disable %32s", block_name) == 1)
                op = 0;
        else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
        else if (strstr(str, "retire_page") != NULL)
                op = 3;
        else if (strstr(str, "check_address") != NULL)
                op = 4;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;

        if (op != -1) {
                if (op == 3) {
                        if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
                            sscanf(str, "%*s %llu", &address) != 1)
                                return -EINVAL;

                        data->op = op;
                        data->inject.address = address;

                        return 0;
                } else if (op == 4) {
                        if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
                            sscanf(str, "%*s %llu %llu", &address, &value) != 2)
                                return -EINVAL;

                        data->op = op;
                        data->inject.address = address;
                        data->inject.value = value;
                        return 0;
                }

                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;

                data->head.block = block_id;
                /* only ue, ce and poison errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
                else if (!memcmp("poison", err, 6))
                        data->head.type = AMDGPU_RAS_ERROR__POISON;
                else
                        return -EINVAL;

                data->op = op;

                if (op == 2) {
                        if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
                                   &sub_block, &address, &value, &instance_mask) != 4 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu %u",
                                   &sub_block, &address, &value, &instance_mask) != 4 &&
                                sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
                                   &sub_block, &address, &value) != 3 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu",
                                   &sub_block, &address, &value) != 3)
                                return -EINVAL;
                        data->head.sub_block_index = sub_block;
                        data->inject.address = address;
                        data->inject.value = value;
                        data->inject.instance_mask = instance_mask;
                }
        } else {
                if (size < sizeof(*data))
                        return -EINVAL;

                if (copy_from_user(data, buf, sizeof(*data)))
                        return -EINVAL;
        }

        return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
                                struct ras_debug_if *data)
{
        int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
        uint32_t mask, inst_mask = data->inject.instance_mask;

        /* no need to set instance mask if there is only one instance */
        if (num_xcc <= 1 && inst_mask) {
                data->inject.instance_mask = 0;
                dev_dbg(adev->dev,
                        "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
                        inst_mask);

                return;
        }

        switch (data->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
                mask = GENMASK(num_xcc - 1, 0);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                mask = GENMASK(adev->sdma.num_instances - 1, 0);
                break;
        case AMDGPU_RAS_BLOCK__VCN:
        case AMDGPU_RAS_BLOCK__JPEG:
                mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
                break;
        default:
                mask = inst_mask;
                break;
        }

        /* remove invalid bits in instance mask */
        data->inject.instance_mask &= mask;
        if (inst_mask != data->inject.instance_mask)
                dev_dbg(adev->dev,
                        "Adjust RAS inject mask 0x%x to 0x%x\n",
                        inst_mask, data->inject.instance_mask);
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head: address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
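 *
 * A minimal sketch of that binary path (it assumes struct ras_debug_if and
 * the block/type enums have been copied from amdgpu_ras.h as suggested
 * above, and that fd is an open descriptor for the ras_ctrl node):
 *
 * .. code-block:: c
 *
 *      struct ras_debug_if data = { 0 };
 *
 *      data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *      data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *      data.op = 1;    // 0 = disable, 1 = enable, 2 = inject
 *
 *      if (write(fd, &data, sizeof(data)) != sizeof(data))
 *              perror("ras_ctrl");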
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, sub-block index, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *      see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison where,
 *      ue is multi-uncorrectable
 *      ce is single-correctable
 *      poison is poison
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers; the leading 0x is optional.
 * The mask is the instance mask; it is optional and defaults to 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *      Operations are only allowed on blocks which are supported.
 *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *      to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
                                             const char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ras_debug_if data;
        int ret = 0;

        if (!amdgpu_ras_get_error_query_ready(adev)) {
                dev_warn(adev->dev, "RAS WARN: error injection "
                                "currently inaccessible\n");
                return size;
        }

        ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
        if (ret)
                return ret;

        if (data.op == 3) {
                ret = amdgpu_reserve_page_direct(adev, data.inject.address);
                if (!ret)
                        return size;
                else
                        return ret;
        } else if (data.op == 4) {
                ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
                return ret ? ret : size;
        }

        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;

        switch (data.op) {
        case 0:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
                break;
        case 1:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
                break;
        case 2:
                /* umc ce/ue error injection for a bad page is not allowed */
                if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
                        ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
                if (ret == -EINVAL) {
                        dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
                                        data.inject.address);
                        break;
                } else if (ret == 1) {
                        dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
                                        data.inject.address);
                        break;
                }

                amdgpu_ras_instance_mask_check(adev, &data);

                /* data.inject.address is offset instead of absolute gpu address */
                ret = amdgpu_ras_error_inject(adev, &data.inject);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        return size;
}

static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which have experienced ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
                                               const char __user *buf,
                                               size_t size, loff_t *pos)
{
        struct amdgpu_device *adev =
                (struct amdgpu_device *)file_inode(f)->i_private;
        int ret;

        if (amdgpu_uniras_enabled(adev)) {
                ret = amdgpu_uniras_clear_badpages_info(adev);
                return ret ? ret : size;
        }

        ret = amdgpu_ras_eeprom_reset_table(
                &(amdgpu_ras_get_context(adev)->eeprom_control));

        if (!ret) {
                /* Something was written to EEPROM.
                 */
                amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
                return size;
        } else {
                return ret;
        }
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_ctrl_write,
        .llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_eeprom_write,
        .llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *      ue: 0
 *      ce: 1
 *
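 * For example, the umc counters on card0 can be read with:
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/umc_err_count
 *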
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
            amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
                return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                                "ce", info.ce_count, "de", info.de_count);
        else
                return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                                "ce", info.ce_count);
}

/* obj begin */

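/* Reference-count helpers for ras_manager objects; the object is unlinked
 * from the ras context and its error data released when the last user puts it.
 */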
#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
        if (obj && (--obj->use == 0)) {
                list_del(&obj->node);
                amdgpu_ras_error_data_fini(&obj->err_data);
        }

        if (obj && (obj->use < 0))
                DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                return NULL;

        if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                        return NULL;

                obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
        } else
                obj = &con->objs[head->block];

        /* already exist. return obj? */
        if (alive_obj(obj))
                return NULL;

        if (amdgpu_ras_error_data_init(&obj->err_data))
                return NULL;

        obj->head = *head;
        obj->adev = adev;
        list_add(&obj->node, &con->head);
        get_obj(obj);

        return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        int i;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head) {
                if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                        return NULL;

                if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                        if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                                return NULL;

                        obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
                } else
                        obj = &con->objs[head->block];

                if (alive_obj(obj))
                        return obj;
        } else {
                for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                        obj = &con->objs[i];
                        if (alive_obj(obj))
                                return obj;
                }
        }

        return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                                         struct ras_common_if *head)
{
        return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, int enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        /* If hardware does not support ras, then do not create the obj.
         * But if hardware supports ras, we can create the obj.
         * The ras framework checks con->hw_supported to see if it needs to do
         * the corresponding initialization.
         * The IP checks con->support to see if it needs to disable ras.
         */
        if (!amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        if (enable) {
                if (!obj) {
                        obj = amdgpu_ras_create_obj(adev, head);
                        if (!obj)
                                return -EINVAL;
                } else {
                        /* In case we create obj somewhere else */
                        get_obj(obj);
                }
                con->features |= BIT(head->block);
        } else {
                if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
                        con->features &= ~BIT(head->block);
                        put_obj(obj);
                }
        }

        return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        union ta_ras_cmd_input *info;
        int ret;

        if (!con)
                return -EINVAL;

        /* For non-gfx ip, do not enable the ras feature if it is not allowed.
         * For gfx ip, regardless of feature support status, always issue
         * the enable or disable ras feature commands.
         */
        if (head->block != AMDGPU_RAS_BLOCK__GFX &&
            !amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        /* Only enable gfx ras feature from host side */
        if (head->block == AMDGPU_RAS_BLOCK__GFX &&
            !amdgpu_sriov_vf(adev) &&
            !amdgpu_ras_intr_triggered()) {
                info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                if (!enable) {
                        info->disable_features = (struct ta_ras_disable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                } else {
                        info->enable_features = (struct ta_ras_enable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                }

                ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
                        dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
                                enable ? "enable":"disable",
                                get_ras_block_str(head),
                                amdgpu_ras_is_poison_mode_supported(adev), ret);
                        kfree(info);
                        return ret;
                }

                kfree(info);
        }

        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);

        return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret;

        if (!con)
                return -EINVAL;

        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                if (enable) {
                        /* There is no harm in issuing a ras TA cmd regardless of
                         * the current ras state.
                         * If current state == target state, it will do nothing,
                         * but sometimes it requests the driver to reset and repost
                         * with error code -EAGAIN.
                         */
                        ret = amdgpu_ras_feature_enable(adev, head, 1);
                        /* With an old ras TA, we might fail to enable ras.
                         * Log it and just set up the object.
                         * TODO: remove this workaround in the future.
                         */
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
                                        dev_info(adev->dev,
                                                "RAS INFO: %s setup object\n",
                                                get_ras_block_str(head));
                        }
                } else {
                        /* setup the object then issue a ras TA disable cmd.*/
                        ret = __amdgpu_ras_feature_enable(adev, head, 1);
                        if (ret)
                                return ret;

                        /* gfx block ras disable cmd must be sent to ras-ta */
                        if (head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features |= BIT(head->block);

                        ret = amdgpu_ras_feature_enable(adev, head, 0);

                        /* clean gfx block ras features flag */
                        if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features &= ~BIT(head->block);
                }
        } else
                ret = amdgpu_ras_feature_enable(adev, head, enable);

        return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                /* bypass psp.
                 * aka just release the obj and corresponding flags
                 */
                if (bypass) {
                        if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                }
        }

        return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int i;
        const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

        for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = i,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };

                if (i == AMDGPU_RAS_BLOCK__MCA)
                        continue;

                if (bypass) {
                        /*
                         * bypass psp. vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = AMDGPU_RAS_BLOCK__MCA,
                        .type = default_ras_type,
                        .sub_block_index = i,
                };

                if (bypass) {
                        /*
                         * bypass psp. vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
                enum amdgpu_ras_block block)
{
        if (!block_obj)
                return -EINVAL;

        if (block_obj->ras_comm.block == block)
                return 0;

        return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
                                        enum amdgpu_ras_block block, uint32_t sub_block_index)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;

        if (block >= AMDGPU_RAS_BLOCK__LAST)
                return NULL;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }

                obj = node->ras_obj;
                if (obj->ras_block_match) {
                        if (obj->ras_block_match(obj, block, sub_block_index) == 0)
                                return obj;
                } else {
                        if (amdgpu_ras_block_match_default(obj, block) == 0)
                                return obj;
                }
        }

        return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        int ret = 0;

        /*
         * choose the right query method according to
         * whether the smu supports querying error information
         */
        ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
        if (ret == -EOPNOTSUPP) {
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
        } else if (!ret) {
                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_count)
                        adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_address)
                        adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
        }
}

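/* Log per socket/die error counts for one block: first the errors newly
 * found by this query, then the running totals kept in the ras manager.
 */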
static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                                              struct ras_manager *ras_mgr,
                                              struct ras_err_data *err_data,
                                              struct ras_query_context *qctx,
                                              const char *blk_name,
                                              bool is_ue,
                                              bool is_de)
{
        struct amdgpu_smuio_mcm_config_info *mcm_info;
        struct ras_err_node *err_node;
        struct ras_err_info *err_info;
        u64 event_id = qctx->evid.event_id;

        if (is_ue) {
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        if (err_info->ue_count) {
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld new uncorrectable hardware errors detected in %s block\n",
                                              mcm_info->socket_id,
                                              mcm_info->die_id,
                                              err_info->ue_count,
                                              blk_name);
                        }
                }

                for_each_ras_error(err_node, &ras_mgr->err_data) {
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                      "%lld uncorrectable hardware errors detected in total in %s block\n",
                                      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
                }

        } else {
                if (is_de) {
                        for_each_ras_error(err_node, err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                if (err_info->de_count) {
                                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                                      "%lld new deferred hardware errors detected in %s block\n",
                                                      mcm_info->socket_id,
                                                      mcm_info->die_id,
                                                      err_info->de_count,
                                                      blk_name);
                                }
                        }

                        for_each_ras_error(err_node, &ras_mgr->err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld deferred hardware errors detected in total in %s block\n",
                                              mcm_info->socket_id, mcm_info->die_id,
                                              err_info->de_count, blk_name);
                        }
                } else {
                        if (adev->debug_disable_ce_logs)
                                return;

                        for_each_ras_error(err_node, err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                if (err_info->ce_count) {
                                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                                      "%lld new correctable hardware errors detected in %s block\n",
                                                      mcm_info->socket_id,
                                                      mcm_info->die_id,
                                                      err_info->ce_count,
                                                      blk_name);
                                }
                        }

                        for_each_ras_error(err_node, &ras_mgr->err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld correctable hardware errors detected in total in %s block\n",
                                              mcm_info->socket_id, mcm_info->die_id,
                                              err_info->ce_count, blk_name);
                        }
                }
        }
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
        return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
                                             struct ras_query_if *query_if,
                                             struct ras_err_data *err_data,
                                             struct ras_query_context *qctx)
{
        struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
        const char *blk_name = get_ras_block_str(&query_if->head);
        u64 event_id = qctx->evid.event_id;

        if (err_data->ce_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, false, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld correctable hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.ce_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.ce_count,
                                      blk_name);
                }
        }

        if (err_data->ue_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, true, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld uncorrectable hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.ue_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.ue_count,
                                      blk_name);
                }
        }

        if (err_data->de_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, false, true);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld deferred hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.de_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.de_count,
                                      blk_name);
                }
        }
}

static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
                                                  struct ras_query_if *query_if,
                                                  struct ras_err_data *err_data,
                                                  struct ras_query_context *qctx)
{
        unsigned long new_ue, new_ce, new_de;
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
        const char *blk_name = get_ras_block_str(&query_if->head);
        u64 event_id = qctx->evid.event_id;

        new_ce = err_data->ce_count - obj->err_data.ce_count;
        new_ue = err_data->ue_count - obj->err_data.ue_count;
        new_de = err_data->de_count - obj->err_data.de_count;

        if (new_ce) {
                RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
                              "detected in %s block\n",
                              new_ce,
                              blk_name);
        }

        if (new_ue) {
                RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
                              "detected in %s block\n",
                              new_ue,
                              blk_name);
        }

        if (new_de) {
                RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
                              "detected in %s block\n",
                              new_de,
                              blk_name);
        }
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
        struct ras_err_node *err_node;
        struct ras_err_info *err_info;

        if (err_data_has_source_info(err_data)) {
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
                        amdgpu_ras_error_statistic_de_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->de_count);
                        amdgpu_ras_error_statistic_ce_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->ce_count);
                        amdgpu_ras_error_statistic_ue_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->ue_count);
                }
        } else {
                /* for legacy asic path which doesn't have error source info */
                obj->err_data.ue_count += err_data->ue_count;
                obj->err_data.ce_count += err_data->ce_count;
                obj->err_data.de_count += err_data->de_count;
        }
}

static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
                                                             struct ras_err_data *err_data)
{
        /* Host reports absolute counts */
        obj->err_data.ue_count = err_data->ue_count;
        obj->err_data.ce_count = err_data->ce_count;
        obj->err_data.de_count = err_data->de_count;
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
        struct ras_common_if head;

        memset(&head, 0, sizeof(head));
        head.block = blk;

        return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                        const struct aca_info *aca_info, void *data)
{
        struct ras_manager *obj;

        /* in resume phase, no need to create aca fs node */
        if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
                return 0;

        obj = get_ras_manager(adev, blk);
        if (!obj)
                return -EINVAL;

        return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
        struct ras_manager *obj;

        obj = get_ras_manager(adev, blk);
        if (!obj)
                return -EINVAL;

        amdgpu_aca_remove_handle(&obj->aca_handle);

        return 0;
}

static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                                         enum aca_error_type type, struct ras_err_data *err_data,
                                         struct ras_query_context *qctx)
{
        struct ras_manager *obj;

        obj = get_ras_manager(adev, blk);
        if (!obj)
                return -EINVAL;

        return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
                                  struct aca_handle *handle, char *buf, void *data)
{
        struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                          "ce", info.ce_count, "de", info.de_count);
}

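/* Dispatch the error count query according to the query mode: forward it to
 * the host for VFs, read the counts directly from the IP block, or collect
 * them from the ACA/MCA error banks.
 */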
static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
                                                struct ras_query_if *info,
                                                struct ras_err_data *err_data,
                                                struct ras_query_context *qctx,
                                                unsigned int error_query_mode)
{
        enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
        struct amdgpu_ras_block_object *block_obj = NULL;
        int ret;

        if (blk == AMDGPU_RAS_BLOCK_COUNT)
                return -EINVAL;

        if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
                return -EINVAL;

        if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
                return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
        } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
                if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
                        amdgpu_ras_get_ecc_info(adev, err_data);
                } else {
                        block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
                        if (!block_obj || !block_obj->hw_ops) {
                                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                                             get_ras_block_str(&info->head));
                                return -EINVAL;
                        }

                        if (block_obj->hw_ops->query_ras_error_count)
                                block_obj->hw_ops->query_ras_error_count(adev, err_data);

                        if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
                            (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
                            (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
                                if (block_obj->hw_ops->query_ras_error_status)
                                        block_obj->hw_ops->query_ras_error_status(adev);
                        }
                }
        } else {
                if (amdgpu_aca_is_enabled(adev)) {
                        ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
                        if (ret)
                                return ret;

                        ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
                        if (ret)
                                return ret;

                        ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
                        if (ret)
                                return ret;
                } else {
                        /* FIXME: add code to check return value later */
                        amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
                        amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
                }
        }

        return 0;
}

/* query/inject/cure begin */
static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
                                                    struct ras_query_if *info,
                                                    enum ras_event_type type)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data;
        struct ras_query_context qctx;
        unsigned int error_query_mode;
        int ret;

        if (!obj)
                return -EINVAL;

        ret = amdgpu_ras_error_data_init(&err_data);
        if (ret)
                return ret;

        if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
                ret = -EINVAL;
                goto out_fini_err_data;
        }

        memset(&qctx, 0, sizeof(qctx));
        qctx.evid.type = type;
        qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);

        if (!down_read_trylock(&adev->reset_domain->sem)) {
                ret = -EIO;
                goto out_fini_err_data;
        }

        ret = amdgpu_ras_query_error_status_helper(adev, info,
                                                   &err_data,
                                                   &qctx,
                                                   error_query_mode);
        up_read(&adev->reset_domain->sem);
        if (ret)
                goto out_fini_err_data;

        if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
                amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
                amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
        } else {
                /* Host provides absolute error counts. First generate the report
                 * using the previous VF internal count against the new host count,
                 * then update the VF internal count.
                 */
                amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
                amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
        }

        info->ue_count = obj->err_data.ue_count;
        info->ce_count = obj->err_data.ce_count;
        info->de_count = obj->err_data.de_count;

out_fini_err_data:
        amdgpu_ras_error_data_fini(&err_data);

        return ret;
}

static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
{
        struct ras_cmd_dev_handle req = {0};
        int ret;

        ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
                                &req, sizeof(req), NULL, 0);
        if (ret) {
                dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
                return ret;
        }

        return 0;
}

static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
                        struct ras_query_if *info)
{
        struct ras_cmd_block_ecc_info_req req = {0};
        struct ras_cmd_block_ecc_info_rsp rsp = {0};
        int ret;

        if (!info)
                return -EINVAL;

        req.block_id = info->head.block;
        req.subblock_id = info->head.sub_block_index;

        ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
                                &req, sizeof(req), &rsp, sizeof(rsp));
        if (!ret) {
                info->ce_count = rsp.ce_count;
                info->ue_count = rsp.ue_count;
                info->de_count = rsp.de_count;
        }

        return ret;
}

int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
        if (amdgpu_uniras_enabled(adev))
                return amdgpu_uniras_query_block_ecc(adev, info);
        else
                return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
{
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
        const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;

        if (!block_obj || !block_obj->hw_ops) {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                                ras_block_str(block));
                return -EOPNOTSUPP;
        }

        if (!amdgpu_ras_is_supported(adev, block) ||
            !amdgpu_ras_get_aca_debug_mode(adev))
                return -EOPNOTSUPP;

        if (amdgpu_sriov_vf(adev))
                return -EOPNOTSUPP;

        /* skip ras error reset in gpu reset */
        if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
            ((smu_funcs && smu_funcs->set_debug_mode) ||
             (mca_funcs && mca_funcs->mca_set_debug_mode)))
                return -EOPNOTSUPP;

        if (block_obj->hw_ops->reset_ras_error_count)
                block_obj->hw_ops->reset_ras_error_count(adev);

        return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
{
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

        if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
                return 0;

        if ((block == AMDGPU_RAS_BLOCK__GFX) ||
            (block == AMDGPU_RAS_BLOCK__MMHUB)) {
                if (block_obj->hw_ops->reset_ras_error_status)
                        block_obj->hw_ops->reset_ras_error_status(adev);
        }

        return 0;
}

static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
                struct ras_inject_if *info)
{
        struct ras_cmd_inject_error_req inject_req;
        struct ras_cmd_inject_error_rsp rsp;

        if (!info)
                return -EINVAL;

        memset(&inject_req, 0, sizeof(inject_req));
        inject_req.block_id = info->head.block;
        inject_req.subblock_id = info->head.sub_block_index;
        inject_req.address = info->address;
        inject_req.error_type = info->head.type;
        inject_req.instance_mask = info->instance_mask;
        inject_req.method = info->value;

        return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
                        &inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                struct ras_inject_if *info)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ta_ras_trigger_error_input block_info = {
                .block_id =  amdgpu_ras_block_to_ta(info->head.block),
                .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
                .sub_block_index = info->head.sub_block_index,
                .address = info->address,
                .value = info->value,
        };
        int ret = -EINVAL;
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
                                                        info->head.block,
                                                        info->head.sub_block_index);

        if (amdgpu_uniras_enabled(adev))
                return amdgpu_uniras_error_inject(adev, info);

        /* inject on guest isn't allowed, return success directly */
        if (amdgpu_sriov_vf(adev))
                return 0;

        if (!obj)
                return -EINVAL;

        if (!block_obj || !block_obj->hw_ops)   {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                             get_ras_block_str(&info->head));
                return -EINVAL;
        }

        /* Calculate XGMI relative offset */
        if (adev->gmc.xgmi.num_physical_nodes > 1 &&
            info->head.block != AMDGPU_RAS_BLOCK__GFX) {
                block_info.address =
                        amdgpu_xgmi_get_relative_phy_addr(adev,
                                                          block_info.address);
        }

        if (block_obj->hw_ops->ras_error_inject) {
                if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
                        ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
                else /* Special ras_error_inject is defined (e.g: xgmi) */
                        ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
                                                info->instance_mask);
        } else {
                /* default path */
                ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
        }

        if (ret)
                dev_err(adev->dev, "ras inject %s failed %d\n",
                        get_ras_block_str(&info->head), ret);

        return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 if the query succeeds or there is nothing to do, otherwise return
 * an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
                                               unsigned long *ce_count,
                                               unsigned long *ue_count,
                                               struct ras_query_if *query_info)
{
        int ret;

        if (!query_info)
                /* do nothing if query_info is not specified */
                return 0;

        ret = amdgpu_ras_query_error_status(adev, query_info);
        if (ret)
                return ret;

        *ce_count += query_info->ce_count;
        *ue_count += query_info->ue_count;

        /* some hardware/IP supports read to clear, so there is no need to
         * explicitly reset the err status after the query call */
        if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
            amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
                        dev_warn(adev->dev,
                                 "Failed to reset error counter and error status\n");
        }

        return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for a
 * specific ip block; if it is NULL, then the query request is for
 * all the ip blocks that support querying ras error counters/status
 *
 * If @ce_count or @ue_count is set, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
                                 unsigned long *ce_count,
                                 unsigned long *ue_count,
                                 struct ras_query_if *query_info)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        unsigned long ce, ue;
        int ret = 0;

        if (!adev->ras_enabled || !con)
                return -EOPNOTSUPP;

        /* Don't count since there is nothing to report. */
        if (!ce_count && !ue_count)
                return 0;

        ce = 0;
        ue = 0;
        if (!query_info) {
                /* query all the ip blocks that support ras query interface */
                list_for_each_entry(obj, &con->head, node) {
                        struct ras_query_if info = {
                                .head = obj->head,
                        };

                        ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
                }
        } else {
                /* query specific ip block */
                ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
        }

        if (ret)
                return ret;

        if (ce_count)
                *ce_count = ce;

        if (ue_count)
                *ue_count = ue;

        return 0;
}
/* query/inject/cure end */


/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
                struct ras_badpage *bps, uint32_t count, uint32_t start);
static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
                struct ras_badpage *bps, uint32_t count, uint32_t start);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
        switch (flags) {
        case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
                return "R";
        case AMDGPU_RAS_RETIRE_PAGE_PENDING:
                return "P";
        case AMDGPU_RAS_RETIRE_PAGE_FAULT:
        default:
                return "F";
        }
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of VRAM on the GPU through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one GPU page.
 *
 * The format of each line is as follows:
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below:
 *
 * R: reserved, this gpu page is reserved and cannot be used.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve, this gpu page cannot be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *      0x00000001 : 0x00001000 : R
 *      0x00000002 : 0x00001000 : P
 *
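 * A typical way to read this interface from user space is shown below; the
 * card index here is only an example and depends on the system:
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *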
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
                struct kobject *kobj, const struct bin_attribute *attr,
                char *buf, loff_t ppos, size_t count)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, badpages_attr);
        struct amdgpu_device *adev = con->adev;
        const unsigned int element_size =
                sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
        unsigned int start = div64_ul(ppos + element_size - 1, element_size);
        unsigned int end = div64_ul(ppos + count - 1, element_size);
        ssize_t s = 0;
        struct ras_badpage *bps = NULL;
        int bps_count = 0, i, status;
        uint64_t address;

        memset(buf, 0, count);

        bps_count = end - start;
        bps = kmalloc_objs(*bps, bps_count);
        if (!bps)
                return 0;

        memset(bps, 0, sizeof(*bps) * bps_count);

        if (amdgpu_uniras_enabled(adev))
                bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
        else
                bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);

        if (bps_count <= 0) {
                kfree(bps);
                return 0;
        }

        for (i = 0; i < bps_count; i++) {
                address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;

                bps[i].size = AMDGPU_GPU_PAGE_SIZE;

                status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
                                        address);
                if (status == -EBUSY)
                        bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
                else if (status == -ENOENT)
                        bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
                else
                        bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;

                if ((bps[i].flags != AMDGPU_RAS_RETIRE_PAGE_RESERVED) &&
                    amdgpu_ras_check_critical_address(adev, address))
                        bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;

                s += scnprintf(&buf[s], element_size + 1,
                                "0x%08x : 0x%08x : %1s\n",
                                bps[i].bp,
                                bps[i].size,
                                amdgpu_ras_badpage_flags_str(bps[i].flags));
        }

        kfree(bps);

        return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, features_attr);

        return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
                        u32 *minor, u32 *rev)
{
        int i;

        if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
                return false;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
                        *major = adev->ip_blocks[i].version->major;
                        *minor = adev->ip_blocks[i].version->minor;
                        *rev = adev->ip_blocks[i].version->rev;
                        return true;
                }
        }

        return false;
}

static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, version_attr);
        u32 major, minor, rev;
        ssize_t size = 0;

        size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
                        con->eeprom_control.tbl_hdr.version);

        if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
                size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
                        major, minor, rev);

        return size;
}

static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, schema_attr);
        return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
}

static struct {
        enum ras_event_type type;
        const char *name;
} dump_event[] = {
        {RAS_EVENT_TYPE_FATAL, "Fatal Error"},
        {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
        {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
};

static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
                                                 struct device_attribute *attr, char *buf)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, event_state_attr);
        struct ras_event_manager *event_mgr = con->event_mgr;
        struct ras_event_state *event_state;
        int i, size = 0;

        if (!event_mgr)
                return -EINVAL;

        size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
        for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
                event_state = &event_mgr->event_state[dump_event[i].type];
                size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
                                      dump_event[i].name,
                                      atomic64_read(&event_state->count),
                                      event_state->last_seqno);
        }

        return (ssize_t)size;
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (adev->dev->kobj.sd)
                sysfs_remove_file_from_group(&adev->dev->kobj,
                                &con->badpages_attr.attr,
                                RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct attribute *attrs[] = {
                &con->features_attr.attr,
                &con->version_attr.attr,
                &con->schema_attr.attr,
                &con->event_state_attr.attr,
                NULL
        };
        struct attribute_group group = {
                .name = RAS_FS_NAME,
                .attrs = attrs,
        };

        if (adev->dev->kobj.sd)
                sysfs_remove_group(&adev->dev->kobj, &group);

        return 0;
}
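
/*
 * amdgpu_ras_sysfs_create() below adds a per-block "<block>_err_count" node
 * to the device's "ras" sysfs group, which typically shows up as e.g.
 * /sys/class/drm/card0/device/ras/umc_err_count (the card index and block
 * name depend on the system).
 */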

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        if (amdgpu_aca_is_enabled(adev))
                return 0;

        if (!obj || obj->attr_inuse)
                return -EINVAL;

        if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
                return 0;

        get_obj(obj);

        snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
                "%s_err_count", head->name);

        obj->sysfs_attr = (struct device_attribute){
                .attr = {
                        .name = obj->fs_data.sysfs_name,
                        .mode = S_IRUGO,
                },
                .show = amdgpu_ras_sysfs_read,
        };
        sysfs_attr_init(&obj->sysfs_attr.attr);

        if (sysfs_add_file_to_group(&adev->dev->kobj,
                                &obj->sysfs_attr.attr,
                                RAS_FS_NAME)) {
                put_obj(obj);
                return -EINVAL;
        }

        obj->attr_inuse = 1;

        return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        if (amdgpu_aca_is_enabled(adev))
                return 0;

        if (!obj || !obj->attr_inuse)
                return -EINVAL;

        if (adev->dev->kobj.sd)
                sysfs_remove_file_from_group(&adev->dev->kobj,
                                &obj->sysfs_attr.attr,
                                RAS_FS_NAME);
        obj->attr_inuse = 0;
        put_obj(obj);

        return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                amdgpu_ras_sysfs_remove(adev, &obj->head);
        }

        if (amdgpu_bad_page_threshold != 0)
                amdgpu_ras_sysfs_remove_bad_page_node(adev);

        amdgpu_ras_sysfs_remove_dev_attr_node(adev);

        return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover.  However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo true > .../ras/auto_reboot
 *
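 * The current setting can be read back as shown below; the DRI index is
 * only an example and depends on the system, and debugfs reports the
 * boolean as Y or N:
 *
 * .. code-block:: bash
 *
 *      cat /sys/kernel/debug/dri/0/ras/auto_reboot
 *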
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
        struct drm_minor  *minor = adev_to_drm(adev)->primary;
        struct dentry     *dir;

        dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
        debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
                            &amdgpu_ras_debugfs_ctrl_ops);
        debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
                            &amdgpu_ras_debugfs_eeprom_ops);
        debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
                           &con->bad_page_cnt_threshold);
        debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
        debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
        debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
        debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
                            &amdgpu_ras_debugfs_eeprom_size_ops);
        con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
                                                       S_IRUGO, dir, adev,
                                                       &amdgpu_ras_debugfs_eeprom_table_ops);
        amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

        /*
         * After an uncorrectable error happens, GPU recovery is usually
         * scheduled. But due to the known problem of GPU recovery failing
         * to bring the GPU back, the interface below provides a direct way
         * for the user to reboot the system automatically when an
         * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
         * GPU recovery routine will never be called.
         */
        debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

        /*
         * The user can set this to skip cleaning up the hardware error count
         * registers of RAS IPs during ras recovery.
         */
        debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
                            &con->disable_ras_err_cnt_harvest);
        return dir;
}

static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
                                      struct ras_fs_if *head,
                                      struct dentry *dir)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

        if (!obj || !dir)
                return;

        get_obj(obj);

        memcpy(obj->fs_data.debugfs_name,
                        head->debugfs_name,
                        sizeof(obj->fs_data.debugfs_name));

        debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
                            obj, &amdgpu_ras_debugfs_ops);
}

static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
{
        bool ret;

        switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
        case IP_VERSION(13, 0, 6):
        case IP_VERSION(13, 0, 12):
        case IP_VERSION(13, 0, 14):
                ret = true;
                break;
        default:
                ret = false;
                break;
        }

        return ret;
}

void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct dentry *dir;
        struct ras_manager *obj;
        struct ras_fs_if fs_info;

        /*
         * This won't be called in the resume path, so there is no need to
         * check suspend and gpu reset status.
         */
        if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
                return;

        dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

        list_for_each_entry(obj, &con->head, node) {
                if (amdgpu_ras_is_supported(adev, obj->head.block) &&
                        (obj->attr_inuse == 1)) {
                        sprintf(fs_info.debugfs_name, "%s_err_inject",
                                        get_ras_block_str(&obj->head));
                        fs_info.head = obj->head;
                        amdgpu_ras_debugfs_create(adev, &fs_info, dir);
                }
        }

        if (amdgpu_ras_aca_is_supported(adev)) {
                if (amdgpu_aca_is_enabled(adev))
                        amdgpu_aca_smu_debugfs_init(adev, dir);
                else
                        amdgpu_mca_smu_debugfs_init(adev, dir);
        }
}

/* debugfs end */

/* ras fs */
static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
                      amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
                amdgpu_ras_sysfs_features_read, NULL);
static DEVICE_ATTR(version, 0444,
                amdgpu_ras_sysfs_version_show, NULL);
static DEVICE_ATTR(schema, 0444,
                amdgpu_ras_sysfs_schema_show, NULL);
static DEVICE_ATTR(event_state, 0444,
                   amdgpu_ras_sysfs_event_state_show, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct attribute_group group = {
                .name = RAS_FS_NAME,
        };
        struct attribute *attrs[] = {
                &con->features_attr.attr,
                &con->version_attr.attr,
                &con->schema_attr.attr,
                &con->event_state_attr.attr,
                NULL
        };
        const struct bin_attribute *bin_attrs[] = {
                NULL,
                NULL,
        };
        int r;

        group.attrs = attrs;

        /* add features entry */
        con->features_attr = dev_attr_features;
        sysfs_attr_init(attrs[0]);

        /* add version entry */
        con->version_attr = dev_attr_version;
        sysfs_attr_init(attrs[1]);

        /* add schema entry */
        con->schema_attr = dev_attr_schema;
        sysfs_attr_init(attrs[2]);

        /* add event_state entry */
        con->event_state_attr = dev_attr_event_state;
        sysfs_attr_init(attrs[3]);

        if (amdgpu_bad_page_threshold != 0) {
                /* add bad_page_features entry */
                con->badpages_attr = bin_attr_gpu_vram_bad_pages;
                sysfs_bin_attr_init(&con->badpages_attr);
                bin_attrs[0] = &con->badpages_attr;
                group.bin_attrs = bin_attrs;
        }

        r = sysfs_create_group(&adev->dev->kobj, &group);
        if (r)
                dev_err(adev->dev, "Failed to create RAS sysfs group!");

        return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *con_obj, *ip_obj, *tmp;

        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
                list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
                        ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
                        if (ip_obj)
                                put_obj(ip_obj);
                }
        }

        amdgpu_ras_sysfs_remove_all(adev);
        return 0;
}
/* ras fs end */

/* ih begin */

/* For hardware that cannot enable the bif ring for both the ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt is triggered or not, and properly
 * ack the interrupt if it is there.
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
        /* Fatal error events are handled on host side */
        if (amdgpu_sriov_vf(adev))
                return;
        /*
         * If the current interrupt is caused by a non-fatal RAS error, skip
         * the check for a fatal error. For fatal errors, the FED status of
         * all devices in the XGMI hive gets set when the first device gets
         * the fatal error interrupt. The error gets propagated to the other
         * devices as well, so make sure to ack the interrupt regardless of
         * the FED status.
         */
        if (!amdgpu_ras_get_fed_status(adev) &&
            amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
                return;

        if (amdgpu_uniras_enabled(adev)) {
                amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
                return;
        }

        if (adev->nbio.ras &&
            adev->nbio.ras->handle_ras_controller_intr_no_bifring)
                adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

        if (adev->nbio.ras &&
            adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
                adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}

static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
                                struct amdgpu_iv_entry *entry)
{
        bool poison_stat = false;
        struct amdgpu_device *adev = obj->adev;
        struct amdgpu_ras_block_object *block_obj =
                amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
        u64 event_id;
        int ret;

        if (!block_obj || !con)
                return;

        ret = amdgpu_ras_mark_ras_event(adev, type);
        if (ret)
                return;

        amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
        /* both query_poison_status and handle_poison_consumption are optional,
         * but at least one of them should be implemented if we need a poison
         * consumption handler
         */
        if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
                poison_stat = block_obj->hw_ops->query_poison_status(adev);
                if (!poison_stat) {
                        /* Not poison consumption interrupt, no need to handle it */
                        dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
                                        block_obj->ras_comm.name);

                        return;
                }
        }

        amdgpu_umc_poison_handler(adev, obj->head.block, 0);

        if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
                poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

        /* gpu reset is the fallback for failed and default cases.
         * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
         */
        if (poison_stat && !amdgpu_ras_is_rma(adev)) {
                event_id = amdgpu_ras_acquire_event_id(adev, type);
                RAS_EVENT_LOG(adev, event_id,
                              "GPU reset for %s RAS poison consumption is issued!\n",
                              block_obj->ras_comm.name);
                amdgpu_ras_reset_gpu(adev);
        }

        if (!poison_stat)
                amdgpu_gfx_poison_consumption_handler(adev, entry);
}

static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
                                struct amdgpu_iv_entry *entry)
{
        struct amdgpu_device *adev = obj->adev;
        enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
        u64 event_id;
        int ret;

        ret = amdgpu_ras_mark_ras_event(adev, type);
        if (ret)
                return;

        event_id = amdgpu_ras_acquire_event_id(adev, type);
        RAS_EVENT_LOG(adev, event_id, "Poison is created\n");

        if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
                struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);

                atomic_inc(&con->page_retirement_req_cnt);
                atomic_inc(&con->poison_creation_count);

                wake_up(&con->page_retirement_wq);
        }
}

static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
                                struct amdgpu_iv_entry *entry)
{
        struct ras_ih_data *data = &obj->ih_data;
        struct ras_err_data err_data;
        int ret;

        if (!data->cb)
                return;

        ret = amdgpu_ras_error_data_init(&err_data);
        if (ret)
                return;

        /* Let the IP handle its data; maybe we need to get the output
         * from the callback to update the error type/count, etc.
         */
        amdgpu_ras_set_fed(obj->adev, true);
        ret = data->cb(obj->adev, &err_data, entry);
        /* A ue will trigger an interrupt, and in that case
         * we need to do a reset to recover the whole system.
         * But leave that recovery to the IP; here we just dispatch
         * the error.
         */
        if (ret == AMDGPU_RAS_SUCCESS) {
                /* these counts could be left as 0 if
                 * some blocks do not count error numbers
                 */
                obj->err_data.ue_count += err_data.ue_count;
                obj->err_data.ce_count += err_data.ce_count;
                obj->err_data.de_count += err_data.de_count;
        }

        amdgpu_ras_error_data_fini(&err_data);
}

static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
        struct ras_ih_data *data = &obj->ih_data;
        struct amdgpu_iv_entry entry;

        while (data->rptr != data->wptr) {
                rmb();
                memcpy(&entry, &data->ring[data->rptr],
                                data->element_size);

                wmb();
                data->rptr = (data->aligned_element_size +
                                data->rptr) % data->ring_size;

                if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
                        if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
                                amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
                        else
                                amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
                } else {
                        if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
                                amdgpu_ras_interrupt_umc_handler(obj, &entry);
                        else
                                dev_warn(obj->adev->dev,
                                        "No RAS interrupt handler for non-UMC block with poison disabled.\n");
                }
        }
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
        struct ras_ih_data *data =
                container_of(work, struct ras_ih_data, ih_work);
        struct ras_manager *obj =
                container_of(data, struct ras_manager, ih_data);

        amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
                struct ras_dispatch_if *info)
{
        struct ras_manager *obj;
        struct ras_ih_data *data;

        if (amdgpu_uniras_enabled(adev)) {
                struct ras_ih_info ih_info;

                memset(&ih_info, 0, sizeof(ih_info));
                ih_info.block = info->head.block;
                memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));

                return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
        }

        obj = amdgpu_ras_find_obj(adev, &info->head);
        if (!obj)
                return -EINVAL;

        data = &obj->ih_data;

        if (data->inuse == 0)
                return 0;

        /* Might overflow if the worker falls behind... */
        memcpy(&data->ring[data->wptr], info->entry,
                        data->element_size);

        wmb();
        data->wptr = (data->aligned_element_size +
                        data->wptr) % data->ring_size;

        schedule_work(&data->ih_work);

        return 0;
}

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
        struct ras_ih_data *data;

        if (!obj)
                return -EINVAL;

        data = &obj->ih_data;
        if (data->inuse == 0)
                return 0;

        cancel_work_sync(&data->ih_work);

        kfree(data->ring);
        memset(data, 0, sizeof(*data));
        put_obj(obj);

        return 0;
}

int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
        struct ras_ih_data *data;
        struct amdgpu_ras_block_object *ras_obj;

        if (!obj) {
                /* in case we register the IH before enabling the ras feature */
                obj = amdgpu_ras_create_obj(adev, head);
                if (!obj)
                        return -EINVAL;
        } else
                get_obj(obj);

        ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);

        data = &obj->ih_data;
        /* add the callback, etc. */
        *data = (struct ras_ih_data) {
                .inuse = 0,
                .cb = ras_obj->ras_cb,
                .element_size = sizeof(struct amdgpu_iv_entry),
                .rptr = 0,
                .wptr = 0,
        };

        INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

        data->aligned_element_size = ALIGN(data->element_size, 8);
        /* the ring can store 64 iv entries. */
        data->ring_size = 64 * data->aligned_element_size;
        data->ring = kmalloc(data->ring_size, GFP_KERNEL);
        if (!data->ring) {
                put_obj(obj);
                return -ENOMEM;
        }

        /* IH is ready */
        data->inuse = 1;

        return 0;
}

static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
        }

        return 0;
}
/* ih end */

/* traverse all IPs except NBIO to query error counters */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return;

        list_for_each_entry(obj, &con->head, node) {
                struct ras_query_if info = {
                        .head = obj->head,
                };

                /*
                 * The PCIE_BIF IP has a separate isr for the ras controller
                 * interrupt, and the specific ras counter query will be
                 * done in that isr. So skip such blocks in the common
                 * sync flood interrupt isr path.
                 */
                if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
                        continue;

                /*
                 * This is a workaround for aldebaran: skip sending the
                 * message to smu to get the ecc_info table, because smu
                 * currently fails to handle it.
                 * It should be removed once smu handles the ecc_info table
                 * correctly.
                 */
                if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
                     IP_VERSION(13, 0, 2)))
                        continue;

                amdgpu_ras_query_error_status_with_event(adev, &info, type);

                if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
                            IP_VERSION(11, 0, 2) &&
                    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
                            IP_VERSION(11, 0, 4) &&
                    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
                            IP_VERSION(13, 0, 0)) {
                        if (amdgpu_ras_reset_error_status(adev, info.head.block))
                                dev_warn(adev->dev, "Failed to reset error counter and error status");
                }
        }
}

/* Parse RdRspStatus and WrRspStatus */
static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
                                          struct ras_query_if *info)
{
        struct amdgpu_ras_block_object *block_obj;
        /*
         * Only two blocks need to query the read/write
         * RspStatus at the current state
         */
        if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
                (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
                return;

        block_obj = amdgpu_ras_get_ras_block(adev,
                                        info->head.block,
                                        info->head.sub_block_index);

        if (!block_obj || !block_obj->hw_ops) {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                             get_ras_block_str(&info->head));
                return;
        }

        if (block_obj->hw_ops->query_ras_error_status)
                block_obj->hw_ops->query_ras_error_status(adev);

}

static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return;

        list_for_each_entry(obj, &con->head, node) {
                struct ras_query_if info = {
                        .head = obj->head,
                };

                amdgpu_ras_error_status_query(adev, &info);
        }
}

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
                struct ras_badpage *bps, uint32_t count, uint32_t start)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_handler_data *data;
        int r = 0;
        uint32_t i;

        if (!con || !con->eh_data || !bps || !count)
                return -EINVAL;

        mutex_lock(&con->recovery_lock);
        data = con->eh_data;
        if (start < data->count) {
                for (i = start; i < data->count; i++) {
                        if (!data->bps[i].ts)
                                continue;

                        /* U64_MAX is used to mark the record as invalid */
                        if (data->bps[i].retired_page == U64_MAX)
                                continue;

                        bps[r].bp = data->bps[i].retired_page;
                        r++;
                        if (r >= count)
                                break;
                }
        }
        mutex_unlock(&con->recovery_lock);

        return r;
}

static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
                struct ras_badpage *bps, uint32_t count, uint32_t start)
{
        struct ras_cmd_bad_pages_info_req cmd_input;
        struct ras_cmd_bad_pages_info_rsp *output;
        uint32_t group, start_group, end_group;
        uint32_t pos, pos_in_group;
        int r = 0, i;

        if (!bps || !count)
                return -EINVAL;

        output = kmalloc_obj(*output);
        if (!output)
                return -ENOMEM;

        memset(&cmd_input, 0, sizeof(cmd_input));

        start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
        end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
                                RAS_CMD_MAX_BAD_PAGES_PER_GROUP;

        pos = start;
        for (group = start_group; group < end_group; group++) {
                memset(output, 0, sizeof(*output));
                cmd_input.group_index = group;
                if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
                        &cmd_input, sizeof(cmd_input), output, sizeof(*output)))
                        goto out;

                if (pos >= output->bp_total_cnt)
                        goto out;

                pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
                for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
                        if (!output->records[i].ts)
                                continue;

                        bps[r].bp = output->records[i].retired_page;
                        r++;
                        if (r >= count)
                                goto out;
                }
        }

out:
        kfree(output);
        return r;
}

static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
                                   struct amdgpu_hive_info *hive, bool status)
{
        struct amdgpu_device *tmp_adev;

        if (hive) {
                list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
                        amdgpu_ras_set_fed(tmp_adev, status);
        } else {
                amdgpu_ras_set_fed(adev, status);
        }
}

bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
{
        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        int hive_ras_recovery = 0;

        if (hive) {
                hive_ras_recovery = atomic_read(&hive->ras_recovery);
                amdgpu_put_xgmi_hive(hive);
        }

        if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
                return true;

        return false;
}

static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
{
        if (amdgpu_ras_intr_triggered())
                return RAS_EVENT_TYPE_FATAL;
        else
                return RAS_EVENT_TYPE_POISON_CONSUMPTION;
}

static void amdgpu_ras_do_recovery(struct work_struct *work)
{
        struct amdgpu_ras *ras =
                container_of(work, struct amdgpu_ras, recovery_work);
        struct amdgpu_device *remote_adev = NULL;
        struct amdgpu_device *adev = ras->adev;
        struct list_head device_list, *device_list_handle =  NULL;
        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
        unsigned int error_query_mode;
        enum ras_event_type type;

        if (hive) {
                atomic_set(&hive->ras_recovery, 1);

                /* If any device which is part of the hive received a RAS fatal
                 * error interrupt, set the fatal error status on all. This
                 * condition will need a recovery, and the flag will be cleared
                 * as part of the recovery.
                 */
                list_for_each_entry(remote_adev, &hive->device_list,
                                    gmc.xgmi.head)
                        if (amdgpu_ras_get_fed_status(remote_adev)) {
                                amdgpu_ras_set_fed_all(adev, hive, true);
                                break;
                        }
        }
        if (!ras->disable_ras_err_cnt_harvest) {

                /* Build list of devices to query RAS related errors */
                if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
                        device_list_handle = &hive->device_list;
                } else {
                        INIT_LIST_HEAD(&device_list);
                        list_add_tail(&adev->gmc.xgmi.head, &device_list);
                        device_list_handle = &device_list;
                }

                if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
                        if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
                                /* wait 500ms to ensure pmfw has finished polling mca bank info */
                                msleep(500);
                        }
                }

                type = amdgpu_ras_get_fatal_error_event(adev);
                list_for_each_entry(remote_adev,
                                device_list_handle, gmc.xgmi.head) {
                        if (amdgpu_uniras_enabled(remote_adev)) {
                                amdgpu_ras_mgr_update_ras_ecc(remote_adev);
                        } else {
                                amdgpu_ras_query_err_status(remote_adev);
                                amdgpu_ras_log_on_err_counter(remote_adev, type);
                        }
                }

        }

        if (amdgpu_device_should_recover_gpu(ras->adev)) {
                struct amdgpu_reset_context reset_context;
                memset(&reset_context, 0, sizeof(reset_context));

                reset_context.method = AMD_RESET_METHOD_NONE;
                reset_context.reset_req_dev = adev;
                reset_context.src = AMDGPU_RESET_SRC_RAS;
                set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

                /* Perform full reset in fatal error mode */
                if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
                        set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
                else {
                        clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

                        if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
                                ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
                                reset_context.method = AMD_RESET_METHOD_MODE2;
                        }

                        /* A fatal error occurred in poison mode; mode1 reset is used to
                         * recover the gpu.
                         */
                        if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
                                ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
                                set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

                                psp_fatal_error_recovery_quirk(&adev->psp);
                        }
                }

                amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
        }
        atomic_set(&ras->in_recovery, 0);
        if (hive) {
                atomic_set(&hive->ras_recovery, 0);
                amdgpu_put_xgmi_hive(hive);
        }
}

/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
                struct ras_err_handler_data *data, int pages)
{
        unsigned int old_space = data->count + data->space_left;
        unsigned int new_space = old_space + pages;
        unsigned int align_space = ALIGN(new_space, 512);
        void *bps = kmalloc_objs(*data->bps, align_space);

        if (!bps) {
                return -ENOMEM;
        }

        if (data->bps) {
                memcpy(bps, data->bps,
                                data->count * sizeof(*data->bps));
                kfree(data->bps);
        }

        data->bps = bps;
        data->space_left += align_space - old_space;
        return 0;
}

static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
                        struct eeprom_table_record *bps,
                        struct ras_err_data *err_data)
{
        struct ta_ras_query_address_input addr_in;
        uint32_t socket = 0;
        int ret = 0;

        if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
                socket = adev->smuio.funcs->get_socket_id(adev);

        /* reinit err_data */
        err_data->err_addr_cnt = 0;
        err_data->err_addr_len = adev->umc.retire_unit;

        memset(&addr_in, 0, sizeof(addr_in));
        addr_in.ma.err_addr = bps->address;
        addr_in.ma.socket_id = socket;
        addr_in.ma.ch_inst = bps->mem_channel;
        if (!amdgpu_ras_smu_eeprom_supported(adev)) {
                /* tell RAS TA the node instance is not used */
                addr_in.ma.node_inst = TA_RAS_INV_NODE;
        } else {
                addr_in.ma.umc_inst = bps->mcumc_id;
                addr_in.ma.node_inst = bps->cu;
        }

        if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
                ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
                                &addr_in, NULL, false);

        return ret;
}

static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
                        struct eeprom_table_record *bps,
                        struct ras_err_data *err_data)
{
        struct ta_ras_query_address_input addr_in;
        uint32_t die_id, socket = 0;

        if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
                socket = adev->smuio.funcs->get_socket_id(adev);

        /* although the die id is obtained from the PA in nps1 mode, the id
         * is valid for any nps mode
         */
        if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
                die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
                                        bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
        else
                return -EINVAL;

        /* reinit err_data */
        err_data->err_addr_cnt = 0;
        err_data->err_addr_len = adev->umc.retire_unit;

        memset(&addr_in, 0, sizeof(addr_in));
        addr_in.ma.err_addr = bps->address;
        addr_in.ma.ch_inst = bps->mem_channel;
        addr_in.ma.umc_inst = bps->mcumc_id;
        addr_in.ma.node_inst = die_id;
        addr_in.ma.socket_id = socket;

        if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
                return adev->umc.ras->convert_ras_err_addr(adev, err_data,
                                        &addr_in, NULL, false);
        else
                return  -EINVAL;
}

static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
                                        struct eeprom_table_record *bps, int count)
{
        int j;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_handler_data *data = con->eh_data;

        for (j = 0; j < count; j++) {
                if (!data->space_left &&
                    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
                        return -ENOMEM;
                }

                if (amdgpu_ras_check_bad_page_unlock(con,
                        bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
                        /* set to U64_MAX to mark it as invalid */
                        data->bps[data->count].retired_page = U64_MAX;
                        data->count++;
                        data->space_left--;
                        continue;
                }

                amdgpu_ras_reserve_page(adev, bps[j].retired_page);

                memcpy(&data->bps[data->count], &(bps[j]),
                                sizeof(struct eeprom_table_record));
                data->count++;
                data->space_left--;
                con->bad_page_num++;
        }

        return 0;
}

static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
                                struct eeprom_table_record *bps, struct ras_err_data *err_data,
                                enum amdgpu_memory_partition nps)
{
        int i = 0;
        enum amdgpu_memory_partition save_nps;

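        /* the nps mode at record time is encoded in the high bits of
         * retired_page; extract it before those bits are stripped below
         */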
        save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;

        /* old asics only store the pa in eeprom */
        if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
                memcpy(err_data->err_addr, bps,
                        sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
                goto out;
        }

        for (i = 0; i < adev->umc.retire_unit; i++)
                bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);

        if (save_nps) {
                if (save_nps == nps) {
                        if (amdgpu_umc_pages_in_a_row(adev, err_data,
                                        bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
                                return -EINVAL;
                        for (i = 0; i < adev->umc.retire_unit; i++) {
                                err_data->err_addr[i].address = bps[0].address;
                                err_data->err_addr[i].mem_channel = bps[0].mem_channel;
                                err_data->err_addr[i].bank = bps[0].bank;
                                err_data->err_addr[i].err_type = bps[0].err_type;
                                err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
                        }
                } else {
                        if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
                                return -EINVAL;
                }
        } else {
                if (bps[0].address == 0) {
                        /* for specific old eeprom data, mca address is not stored,
                         * calc it from pa
                         */
                        if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
                                &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
                                return -EINVAL;
                }

                if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
                        if (nps == AMDGPU_NPS1_PARTITION_MODE)
                                memcpy(err_data->err_addr, bps,
                                        sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
                        else
                                return -EOPNOTSUPP;
                }
        }

out:
        return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
}

static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
                                struct eeprom_table_record *bps, struct ras_err_data *err_data,
                                enum amdgpu_memory_partition nps)
{
        int i = 0;
        enum amdgpu_memory_partition save_nps;

        if (!amdgpu_ras_smu_eeprom_supported(adev)) {
                save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
                bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
        } else {
                /* if pmfw manages the eeprom, save_nps is not stored there;
                 * we should always convert the mca address into a physical
                 * address, so make save_nps differ from nps to force that
                 */
                save_nps = nps + 1;
        }

        if (save_nps == nps) {
                if (amdgpu_umc_pages_in_a_row(adev, err_data,
                                bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
                        return -EINVAL;
                for (i = 0; i < adev->umc.retire_unit; i++) {
                        err_data->err_addr[i].address = bps->address;
                        err_data->err_addr[i].mem_channel = bps->mem_channel;
                        err_data->err_addr[i].bank = bps->bank;
                        err_data->err_addr[i].err_type = bps->err_type;
                        err_data->err_addr[i].mcumc_id = bps->mcumc_id;
                }
        } else {
                if (bps->address) {
                        if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
                                return -EINVAL;
                } else {
                        /* for specific old eeprom data, mca address is not stored,
                         * calc it from pa
                         */
                        if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
                                &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
                                return -EINVAL;

                        if (amdgpu_ras_mca2pa(adev, bps, err_data))
                                return -EOPNOTSUPP;
                }
        }

        return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
                                                                        adev->umc.retire_unit);
}

/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
                struct eeprom_table_record *bps, int pages, bool from_rom)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_data err_data;
        struct amdgpu_ras_eeprom_control *control =
                        &adev->psp.ras_context.ras->eeprom_control;
        enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
        int ret = 0;
        uint32_t i = 0;

        if (!con || !con->eh_data || !bps || pages <= 0)
                return 0;

        if (from_rom) {
                err_data.err_addr =
                        kzalloc_objs(struct eeprom_table_record,
                                     adev->umc.retire_unit);
                if (!err_data.err_addr) {
                        dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
                        return -ENOMEM;
                }

                if (adev->gmc.gmc_funcs->query_mem_partition_mode)
                        nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
        }

        mutex_lock(&con->recovery_lock);

        if (from_rom) {
                /* there are no pa records in V3, so skip pa record processing */
                if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
                    !amdgpu_ras_smu_eeprom_supported(adev)) {
                        for (i = 0; i < pages; i++) {
                                if (control->ras_num_recs - i >= adev->umc.retire_unit) {
                                        if ((bps[i].address == bps[i + 1].address) &&
                                                (bps[i].mem_channel == bps[i + 1].mem_channel)) {
                                                /* deal with retire_unit records at a time */
                                                ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
                                                                                &bps[i], &err_data, nps);
                                                i += (adev->umc.retire_unit - 1);
                                        } else {
                                                break;
                                        }
                                } else {
                                        break;
                                }
                        }
                }
                for (; i < pages; i++) {
                        ret = __amdgpu_ras_convert_rec_from_rom(adev,
                                &bps[i], &err_data, nps);
                }

                con->eh_data->count_saved = con->eh_data->count;
        } else {
                ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
        }

        if (from_rom)
                kfree(err_data.err_addr);
        mutex_unlock(&con->recovery_lock);

        return ret;
}

/*
 * Write the error record array to eeprom; the function should be
 * protected by recovery_lock.
 * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
 */
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
                unsigned long *new_cnt)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_handler_data *data;
        struct amdgpu_ras_eeprom_control *control;
        int save_count, unit_num, i;

        if (!con || !con->eh_data) {
                if (new_cnt)
                        *new_cnt = 0;

                return 0;
        }

        if (!con->eeprom_control.is_eeprom_valid) {
                dev_warn(adev->dev,
                        "Failed to save EEPROM table data because of EEPROM data corruption!");
                if (new_cnt)
                        *new_cnt = 0;

                return 0;
        }

        mutex_lock(&con->recovery_lock);
        control = &con->eeprom_control;
        data = con->eh_data;
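        /* with pmfw managed eeprom, the controller tracks the number of new
         * records directly; otherwise derive the number of newly added retire
         * units from the cached bad page array
         */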
        if (amdgpu_ras_smu_eeprom_supported(adev))
                unit_num = control->ras_num_recs -
                        control->ras_num_recs_old;
        else
                unit_num = data->count / adev->umc.retire_unit -
                        control->ras_num_recs;

        save_count = con->bad_page_num - control->ras_num_bad_pages;
        mutex_unlock(&con->recovery_lock);

        if (new_cnt)
                *new_cnt = unit_num;

        /* only new entries are saved */
        if (unit_num && save_count) {
                /* old asics only save the pa to eeprom, as before */
                if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
                        if (amdgpu_ras_eeprom_append(control,
                                        &data->bps[data->count_saved], unit_num)) {
                                dev_err(adev->dev, "Failed to save EEPROM table data!");
                                return -EIO;
                        }
                } else {
                        for (i = 0; i < unit_num; i++) {
                                if (amdgpu_ras_eeprom_append(control,
                                                &data->bps[data->count_saved +
                                                i * adev->umc.retire_unit], 1)) {
                                        dev_err(adev->dev, "Failed to save EEPROM table data!");
                                        return -EIO;
                                }
                        }
                }

                dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
                data->count_saved = data->count;
        }

        return 0;
}

/*
 * Read the error record array from eeprom and reserve enough space
 * to store new bad pages.
 */
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
        struct amdgpu_ras_eeprom_control *control =
                &adev->psp.ras_context.ras->eeprom_control;
        struct eeprom_table_record *bps;
        int ret, i = 0;

        /* no bad page record, skip eeprom access */
        if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
                return 0;

        bps = kzalloc_objs(*bps, control->ras_num_recs);
        if (!bps)
                return -ENOMEM;

        ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
        if (ret) {
                dev_err(adev->dev, "Failed to load EEPROM table records!");
        } else {
                if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
                        /* In V3 there are no pa records, but some records
                         * (when address == 0) may be misparsed as pa records,
                         * so add a version check to avoid that.
                         */
                        if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
                            !amdgpu_ras_smu_eeprom_supported(adev)) {
                                for (i = 0; i < control->ras_num_recs; i++) {
                                        if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
                                                if ((bps[i].address == bps[i + 1].address) &&
                                                        (bps[i].mem_channel == bps[i + 1].mem_channel)) {
                                                        control->ras_num_pa_recs += adev->umc.retire_unit;
                                                        i += (adev->umc.retire_unit - 1);
                                                } else {
                                                        control->ras_num_mca_recs +=
                                                                                (control->ras_num_recs - i);
                                                        break;
                                                }
                                        } else {
                                                control->ras_num_mca_recs += (control->ras_num_recs - i);
                                                break;
                                        }
                                }
                        } else {
                                control->ras_num_mca_recs = control->ras_num_recs;
                        }
                }

                ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
                if (ret)
                        goto out;

                ret = amdgpu_ras_eeprom_check(control);
                if (ret)
                        goto out;

                /* HW not usable */
                if (amdgpu_ras_is_rma(adev))
                        ret = -EHWPOISON;
        }

out:
        kfree(bps);
        return ret;
}

static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                uint64_t addr)
{
        struct ras_err_handler_data *data = con->eh_data;
        struct amdgpu_device *adev = con->adev;
        int i;

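        /* reject addresses beyond the VRAM size (when known) or beyond the
         * 52-bit error injection address limit
         */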
        if ((addr >= adev->gmc.mc_vram_size &&
            adev->gmc.mc_vram_size) ||
            (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
                return -EINVAL;

        addr >>= AMDGPU_GPU_PAGE_SHIFT;
        for (i = 0; i < data->count; i++)
                if (addr == data->bps[i].retired_page)
                        return 1;

        return 0;
}

/*
 * check whether an address belongs to a bad page
 *
 * Note: this check is only for umc block
 */
static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret = 0;

        if (!con || !con->eh_data)
                return ret;

        mutex_lock(&con->recovery_lock);
        ret = amdgpu_ras_check_bad_page_unlock(con, addr);
        mutex_unlock(&con->recovery_lock);
        return ret;
}

static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
                                          uint32_t max_count)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        /*
         * amdgpu_bad_page_threshold is used to config
         * the threshold for the number of bad pages.
         * -1:  Threshold is set to default value
         *      Driver will issue a warning message when threshold is reached
         *      and continue runtime services.
         * 0:   Disable bad page retirement
         *      Driver will not retire bad pages
         *      which is intended for debugging purpose.
         * -2:  Threshold is determined by a formula
         *      that assumes 1 bad page per 100M of local memory.
         *      Driver will continue runtime services when the threshold is reached.
         * 0 < threshold < max number of bad page records in EEPROM,
         *      A user-defined threshold is set
         *      Driver will halt runtime services when this custom threshold is reached.
         */
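        /*
         * For example (illustration only): with amdgpu_bad_page_threshold
         * set to -2 and 16 GiB of VRAM, the formula below yields
         * 16 GiB / 100 MiB = 163 bad pages, further clamped to max_count.
         */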
        if (amdgpu_bad_page_threshold == -2) {
                u64 val = adev->gmc.mc_vram_size;

                do_div(val, RAS_BAD_PAGE_COVER);
                con->bad_page_cnt_threshold = min(lower_32_bits(val),
                                                  max_count);
        } else if (amdgpu_bad_page_threshold == -1) {
                con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
        } else {
                con->bad_page_cnt_threshold = min_t(int, max_count,
                                                    amdgpu_bad_page_threshold);
        }
}

int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
                enum amdgpu_ras_block block, uint16_t pasid,
                pasid_notify pasid_fn, void *data, uint32_t reset)
{
        int ret = 0;
        struct ras_poison_msg poison_msg;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        memset(&poison_msg, 0, sizeof(poison_msg));
        poison_msg.block = block;
        poison_msg.pasid = pasid;
        poison_msg.reset = reset;
        poison_msg.pasid_fn = pasid_fn;
        poison_msg.data = data;

        ret = kfifo_put(&con->poison_fifo, poison_msg);
        if (!ret) {
                dev_err(adev->dev, "Poison message fifo is full!\n");
                return -ENOSPC;
        }

        return 0;
}

static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
                struct ras_poison_msg *poison_msg)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        return kfifo_get(&con->poison_fifo, poison_msg);
}

static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
{
        mutex_init(&ecc_log->lock);

        INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
        ecc_log->de_queried_count = 0;
        ecc_log->consumption_q_count = 0;
}

static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        struct ras_ecc_err *ecc_err;

        mutex_lock(&ecc_log->lock);
        radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
                ecc_err = radix_tree_deref_slot(slot);
                kfree(ecc_err->err_pages.pfn);
                kfree(ecc_err);
                radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
        }
        mutex_unlock(&ecc_log->lock);

        mutex_destroy(&ecc_log->lock);
        ecc_log->de_queried_count = 0;
        ecc_log->consumption_q_count = 0;
}

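/* Reschedule the page retirement work if newly detected ECC pages are still
 * tagged in the de_page_tree; returns true if the work was rescheduled.
 */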
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
                                uint32_t delayed_ms)
{
        int ret;

        mutex_lock(&con->umc_ecc_log.lock);
        ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
                        UMC_ECC_NEW_DETECTED_TAG);
        mutex_unlock(&con->umc_ecc_log.lock);

        if (ret)
                schedule_delayed_work(&con->page_retirement_dwork,
                        msecs_to_jiffies(delayed_ms));

        return ret ? true : false;
}

static void amdgpu_ras_do_page_retirement(struct work_struct *work)
{
        struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
                                              page_retirement_dwork.work);
        struct amdgpu_device *adev = con->adev;
        struct ras_err_data err_data;

        /* If gpu reset is ongoing, delay retiring the bad pages */
        if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
                amdgpu_ras_schedule_retirement_dwork(con,
                                AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
                return;
        }

        amdgpu_ras_error_data_init(&err_data);

        amdgpu_umc_handle_bad_pages(adev, &err_data);

        amdgpu_ras_error_data_fini(&err_data);

        amdgpu_ras_schedule_retirement_dwork(con,
                        AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}

static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
                                uint32_t poison_creation_count)
{
        int ret = 0;
        struct ras_ecc_log_info *ecc_log;
        struct ras_query_if info;
        u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        u64 de_queried_count;
        u64 consumption_q_count;
        enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;

        memset(&info, 0, sizeof(info));
        info.head.block = AMDGPU_RAS_BLOCK__UMC;

        ecc_log = &ras->umc_ecc_log;
        ecc_log->de_queried_count = 0;
        ecc_log->consumption_q_count = 0;

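        /* poll until both newly queried DE pages and consumption queue
         * entries are seen, or the polling budget (timeout * 100 ms) expires
         */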
        do {
                ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
                if (ret)
                        return ret;

                de_queried_count = ecc_log->de_queried_count;
                consumption_q_count = ecc_log->consumption_q_count;

                if (de_queried_count && consumption_q_count)
                        break;

                msleep(100);
        } while (--timeout);

        if (de_queried_count)
                schedule_delayed_work(&ras->page_retirement_dwork, 0);

        if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
                amdgpu_ras_reset_gpu(adev);

        return 0;
}

static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_poison_msg msg;
        int ret;

        do {
                ret = kfifo_get(&con->poison_fifo, &msg);
        } while (ret);
}

static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
                        uint32_t msg_count, uint32_t *gpu_reset)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        uint32_t reset_flags = 0, reset = 0;
        struct ras_poison_msg msg;
        int ret, i;

        kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

        for (i = 0; i < msg_count; i++) {
                ret = amdgpu_ras_get_poison_req(adev, &msg);
                if (!ret)
                        continue;

                if (msg.pasid_fn)
                        msg.pasid_fn(adev, msg.pasid, msg.data);

                reset_flags |= msg.reset;
        }

        /*
         * Try to ensure the poison creation handler completes first,
         * so that rma is set if the bad page count exceeds the threshold.
         */
        flush_delayed_work(&con->page_retirement_dwork);

        /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
        if (reset_flags && !amdgpu_ras_is_rma(adev)) {
                if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
                        reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
                else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
                        reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
                else
                        reset = reset_flags;

                con->gpu_reset_flags |= reset;
                amdgpu_ras_reset_gpu(adev);

                *gpu_reset = reset;

                /* Wait for gpu recovery to complete */
                flush_work(&con->recovery_work);
        }

        return 0;
}

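/* Kernel thread that drains RAS poison work: poison creation requests are
 * handled first (page retirement), then poison consumption messages from the
 * fifo, which may trigger a GPU reset.
 */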
static int amdgpu_ras_page_retirement_thread(void *param)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)param;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        uint32_t poison_creation_count, msg_count;
        uint32_t gpu_reset;
        int ret;

        while (!kthread_should_stop()) {

                wait_event_interruptible(con->page_retirement_wq,
                                kthread_should_stop() ||
                                atomic_read(&con->page_retirement_req_cnt));

                if (kthread_should_stop())
                        break;

                mutex_lock(&con->poison_lock);
                gpu_reset = 0;

                do {
                        poison_creation_count = atomic_read(&con->poison_creation_count);
                        ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
                        if (ret == -EIO)
                                break;

                        if (poison_creation_count) {
                                atomic_sub(poison_creation_count, &con->poison_creation_count);
                                atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
                        }
                } while (atomic_read(&con->poison_creation_count) &&
                        !atomic_read(&con->poison_consumption_count));

                if (ret != -EIO) {
                        msg_count = kfifo_len(&con->poison_fifo);
                        if (msg_count) {
                                ret = amdgpu_ras_poison_consumption_handler(adev,
                                                msg_count, &gpu_reset);
                                if ((ret != -EIO) &&
                                    (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
                                        atomic_sub(msg_count, &con->page_retirement_req_cnt);
                        }
                }

                if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
                        /* a gpu mode-1 reset is ongoing or a ras mode-1 reset has just completed */
                        /* Clear poison creation request */
                        atomic_set(&con->poison_creation_count, 0);
                        atomic_set(&con->poison_consumption_count, 0);

                        /* Clear poison fifo */
                        amdgpu_ras_clear_poison_fifo(adev);

                        /* Clear all poison requests */
                        atomic_set(&con->page_retirement_req_cnt, 0);

                        if (ret == -EIO) {
                                /* Wait for mode-1 reset to complete */
                                down_read(&adev->reset_domain->sem);
                                up_read(&adev->reset_domain->sem);
                        }

                        /* Wake up work to save bad pages to eeprom */
                        schedule_delayed_work(&con->page_retirement_dwork, 0);
                } else if (gpu_reset) {
                        /* gpu just completed mode-2 reset or other reset */
                        /* Clear poison consumption messages cached in fifo */
                        msg_count = kfifo_len(&con->poison_fifo);
                        if (msg_count) {
                                amdgpu_ras_clear_poison_fifo(adev);
                                atomic_sub(msg_count, &con->page_retirement_req_cnt);
                        }

                        atomic_set(&con->poison_consumption_count, 0);

                        /* Wake up work to save bad pages to eeprom */
                        schedule_delayed_work(&con->page_retirement_dwork, 0);
                }
                mutex_unlock(&con->poison_lock);
        }

        return 0;
}

int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct amdgpu_ras_eeprom_control *control;
        int ret;

        if (!con || amdgpu_sriov_vf(adev))
                return 0;

        if (amdgpu_uniras_enabled(adev))
                return 0;

        control = &con->eeprom_control;
        con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);

        ret = amdgpu_ras_eeprom_init(control);
        control->is_eeprom_valid = !ret;

        if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
                control->ras_num_pa_recs = control->ras_num_recs;

        if (adev->umc.ras &&
            adev->umc.ras->get_retire_flip_bits)
                adev->umc.ras->get_retire_flip_bits(adev);

        if (control->ras_num_recs && control->is_eeprom_valid) {
                ret = amdgpu_ras_load_bad_pages(adev);
                if (ret) {
                        control->is_eeprom_valid = false;
                        return 0;
                }

                amdgpu_dpm_send_hbm_bad_pages_num(
                        adev, control->ras_num_bad_pages);

                if (con->update_channel_flag == true) {
                        amdgpu_dpm_send_hbm_bad_channel_flag(
                                adev, control->bad_channel_bitmap);
                        con->update_channel_flag = false;
                }

                /* The format action is only applied to new ASICs */
                if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
                    control->tbl_hdr.version < RAS_TABLE_VER_V3)
                        if (!amdgpu_ras_eeprom_reset_table(control))
                                if (amdgpu_ras_save_bad_pages(adev, NULL))
                                        dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
        }

        return 0;
}

int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_handler_data **data;
        u32  max_eeprom_records_count = 0;
        int ret;

        if (!con || amdgpu_sriov_vf(adev))
                return 0;

        /* Allow access to RAS EEPROM via debugfs, when the ASIC
         * supports RAS and debugfs is enabled, but when
         * adev->ras_enabled is unset, i.e. when "ras_enable"
         * module parameter is set to 0.
         */
        con->adev = adev;

        if (!adev->ras_enabled)
                return 0;

        data = &con->eh_data;
        *data = kzalloc_obj(**data);
        if (!*data) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_init(&con->recovery_lock);
        mutex_init(&con->poison_lock);
        INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
        atomic_set(&con->in_recovery, 0);
        atomic_set(&con->rma_in_recovery, 0);
        con->eeprom_control.bad_channel_bitmap = 0;

        max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
        amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);

        if (init_bp_info) {
                ret = amdgpu_ras_init_badpage_info(adev);
                if (ret)
                        goto free;
        }

        mutex_init(&con->page_rsv_lock);
        INIT_KFIFO(con->poison_fifo);
        mutex_init(&con->page_retirement_lock);
        init_waitqueue_head(&con->page_retirement_wq);
        atomic_set(&con->page_retirement_req_cnt, 0);
        atomic_set(&con->poison_creation_count, 0);
        atomic_set(&con->poison_consumption_count, 0);
        con->page_retirement_thread =
                kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
        if (IS_ERR(con->page_retirement_thread)) {
                con->page_retirement_thread = NULL;
                dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
        }

        INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
        amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
#ifdef CONFIG_X86_MCE_AMD
        if ((adev->asic_type == CHIP_ALDEBARAN) &&
            (adev->gmc.xgmi.connected_to_cpu))
                amdgpu_register_bad_pages_mca_notifier(adev);
#endif
        return 0;

free:
        kfree((*data)->bps);
        kfree(*data);
        con->eh_data = NULL;
out:
        dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);

        /*
         * Except for the case where the error threshold is exceeded, failures
         * in this function do not fail amdgpu driver init.
         */
        if (!amdgpu_ras_is_rma(adev))
                ret = 0;
        else
                ret = -EINVAL;

        return ret;
}

static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_handler_data *data = con->eh_data;
        int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
        bool ret;

        /* recovery_init failed to initialize it, so there is nothing to finalize */
        if (!data)
                return 0;

        /* Save all cached bad pages to eeprom */
        do {
                flush_delayed_work(&con->page_retirement_dwork);
                ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
        } while (ret && max_flush_timeout--);

        if (con->page_retirement_thread)
                kthread_stop(con->page_retirement_thread);

        atomic_set(&con->page_retirement_req_cnt, 0);
        atomic_set(&con->poison_creation_count, 0);

        mutex_destroy(&con->page_rsv_lock);

        cancel_work_sync(&con->recovery_work);

        cancel_delayed_work_sync(&con->page_retirement_dwork);

        amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);

        mutex_lock(&con->recovery_lock);
        con->eh_data = NULL;
        kfree(data->bps);
        kfree(data);
        mutex_unlock(&con->recovery_lock);

        amdgpu_ras_critical_region_init(adev);
#ifdef CONFIG_X86_MCE_AMD
        amdgpu_unregister_bad_pages_mca_notifier(adev);
#endif
        return 0;
}
/* recovery end */

static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev)) {
                switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
                case IP_VERSION(13, 0, 2):
                case IP_VERSION(13, 0, 6):
                case IP_VERSION(13, 0, 12):
                case IP_VERSION(13, 0, 14):
                        return true;
                default:
                        return false;
                }
        }

        if (adev->asic_type == CHIP_IP_DISCOVERY) {
                switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
                case IP_VERSION(13, 0, 0):
                case IP_VERSION(13, 0, 6):
                case IP_VERSION(13, 0, 10):
                case IP_VERSION(13, 0, 12):
                case IP_VERSION(13, 0, 14):
                case IP_VERSION(14, 0, 3):
                        return true;
                default:
                        return false;
                }
        }

        return adev->asic_type == CHIP_VEGA10 ||
                adev->asic_type == CHIP_VEGA20 ||
                adev->asic_type == CHIP_ARCTURUS ||
                adev->asic_type == CHIP_ALDEBARAN ||
                adev->asic_type == CHIP_SIENNA_CICHLID;
}

/*
 * This is a workaround for the vega20 workstation sku:
 * force enable gfx ras and ignore the vbios gfx ras flag,
 * because GC EDC cannot be written.
 */
static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
{
        struct atom_context *ctx = adev->mode_info.atom_context;

        if (!ctx)
                return;

        if (strnstr(ctx->vbios_pn, "D16406",
                    sizeof(ctx->vbios_pn)) ||
                strnstr(ctx->vbios_pn, "D36002",
                        sizeof(ctx->vbios_pn)))
                adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}

/* Query ras capability via the atomfirmware interface */
static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
{
        /* mem_ecc cap */
        if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
                dev_info(adev->dev, "MEM ECC is active.\n");
                adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
                                         1 << AMDGPU_RAS_BLOCK__DF);
        } else {
                dev_info(adev->dev, "MEM ECC is not present.\n");
        }

        /* sram_ecc cap */
        if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
                dev_info(adev->dev, "SRAM ECC is active.\n");
                if (!amdgpu_sriov_vf(adev))
                        adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
                                                  1 << AMDGPU_RAS_BLOCK__DF);
                else
                        adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
                                                 1 << AMDGPU_RAS_BLOCK__SDMA |
                                                 1 << AMDGPU_RAS_BLOCK__GFX);

                /*
                 * VCN/JPEG RAS can be supported on both bare metal and
                 * SRIOV environment
                 */
                if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
                    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
                    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
                    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
                        adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
                                                 1 << AMDGPU_RAS_BLOCK__JPEG);
                else
                        adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
                                                  1 << AMDGPU_RAS_BLOCK__JPEG);

                /*
                 * XGMI RAS is not supported if xgmi num physical nodes
                 * is zero
                 */
                if (!adev->gmc.xgmi.num_physical_nodes)
                        adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
        } else {
                dev_info(adev->dev, "SRAM ECC is not present.\n");
        }
}

/* Query poison mode from umc/df IP callbacks */
static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        bool df_poison, umc_poison;

        /* poison setting is useless on SRIOV guest */
        if (amdgpu_sriov_vf(adev) || !con)
                return;

        /* Init poison supported flag, the default value is false */
        if (adev->gmc.xgmi.connected_to_cpu ||
            adev->gmc.is_app_apu) {
                /* enabled by default when GPU is connected to CPU */
                con->poison_supported = true;
        } else if (adev->df.funcs &&
            adev->df.funcs->query_ras_poison_mode &&
            adev->umc.ras &&
            adev->umc.ras->query_ras_poison_mode) {
                df_poison =
                        adev->df.funcs->query_ras_poison_mode(adev);
                umc_poison =
                        adev->umc.ras->query_ras_poison_mode(adev);

                /* Only if poison is set in both DF and UMC can we support it */
                if (df_poison && umc_poison)
                        con->poison_supported = true;
                else if (df_poison != umc_poison)
                        dev_warn(adev->dev,
                                "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
                                df_poison, umc_poison);
        }
}

/*
 * Check the hardware's ras ability, which will be saved in hw_supported.
 * If the hardware does not support ras, we can skip some ras initialization
 * and forbid some ras operations from the IPs.
 * If software itself (say, a boot parameter) limits the ras ability, we still
 * need to allow the IPs to do some limited operations, like disable. In such
 * a case we have to initialize ras as normal, but need to check in each
 * function whether the operation is allowed or not.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
        adev->ras_hw_enabled = adev->ras_enabled = 0;

        if (!amdgpu_ras_asic_supported(adev))
                return;

        if (amdgpu_sriov_vf(adev)) {
                if (amdgpu_virt_get_ras_capability(adev))
                        goto init_ras_enabled_flag;
        }

        /* query ras capability from psp */
        if (amdgpu_psp_get_ras_capability(&adev->psp))
                goto init_ras_enabled_flag;

        /* query ras capability from the vbios */
        if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
                amdgpu_ras_query_ras_capablity_from_vbios(adev);
        } else {
                /* the driver only manages the RAS feature of a few IP blocks
                 * when the GPU is connected to the CPU through XGMI */
                adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
                                           1 << AMDGPU_RAS_BLOCK__SDMA |
                                           1 << AMDGPU_RAS_BLOCK__MMHUB);
        }

        /* apply asic specific settings (vega20 only for now) */
        amdgpu_ras_get_quirks(adev);

        /* query poison mode from umc/df ip callback */
        amdgpu_ras_query_poison_mode(adev);

init_ras_enabled_flag:
        /* hw_supported needs to be aligned with RAS block mask. */
        adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;

        adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
                adev->ras_hw_enabled & amdgpu_ras_mask;

        /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
        if (!amdgpu_sriov_vf(adev)) {
                adev->aca.is_enabled =
                        (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
                        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
                        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
        }

        /* bad page feature is not applicable to specific app platform */
        if (adev->gmc.is_app_apu &&
            amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
                amdgpu_bad_page_threshold = 0;
}

static void amdgpu_ras_counte_dw(struct work_struct *work)
{
        struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
                                              ras_counte_delay_work.work);
        struct amdgpu_device *adev = con->adev;
        struct drm_device *dev = adev_to_drm(adev);
        unsigned long ce_count, ue_count;
        int res;

        res = pm_runtime_get_sync(dev->dev);
        if (res < 0)
                goto Out;

        /* Cache new values.
         */
        if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
                atomic_set(&con->ras_ce_count, ce_count);
                atomic_set(&con->ras_ue_count, ue_count);
        }

Out:
        pm_runtime_put_autosuspend(dev->dev);
}

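/* Build the supported RAS error type schema; the poison bit is included only
 * when poison mode is supported.
 */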
static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
{
        return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
                        AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
                        AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
                        AMDGPU_RAS_ERROR__PARITY;
}

static void ras_event_mgr_init(struct ras_event_manager *mgr)
{
        struct ras_event_state *event_state;
        int i;

        memset(mgr, 0, sizeof(*mgr));
        atomic64_set(&mgr->seqno, 0);

        for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
                event_state = &mgr->event_state[i];
                event_state->last_seqno = RAS_EVENT_INVALID_ID;
                atomic64_set(&event_state->count, 0);
        }
}

static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        struct amdgpu_hive_info *hive;

        if (!ras)
                return;

        hive = amdgpu_get_xgmi_hive(adev);
        ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;

        /* init event manager with node 0 on xgmi system */
        if (!amdgpu_reset_in_recovery(adev)) {
                if (!hive || adev->gmc.xgmi.node_id == 0)
                        ras_event_mgr_init(ras->event_mgr);
        }

        if (hive)
                amdgpu_put_xgmi_hive(hive);
}

static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!con || (adev->flags & AMD_IS_APU))
                return;

        switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
        case IP_VERSION(13, 0, 2):
        case IP_VERSION(13, 0, 6):
        case IP_VERSION(13, 0, 12):
                con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
                break;
        case IP_VERSION(13, 0, 14):
                con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
                break;
        default:
                break;
        }
}

int amdgpu_ras_init(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int r;

        if (con)
                return 0;

        con = kzalloc(sizeof(*con) +
                        sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
                        sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
                        GFP_KERNEL);
        if (!con)
                return -ENOMEM;

        con->adev = adev;
        INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
        atomic_set(&con->ras_ce_count, 0);
        atomic_set(&con->ras_ue_count, 0);

        con->objs = (struct ras_manager *)(con + 1);

        amdgpu_ras_set_context(adev, con);

        amdgpu_ras_check_supported(adev);

        if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
                /* set the gfx block ras context feature for VEGA20 Gaming,
                 * so that a ras disable cmd is sent to the ras ta during
                 * ras late init.
                 */
                if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
                        con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);

                        return 0;
                }

                r = 0;
                goto release_con;
        }

        con->update_channel_flag = false;
        con->features = 0;
        con->schema = 0;
        INIT_LIST_HEAD(&con->head);
        /* We might need to get this flag from the vbios. */
        con->flags = RAS_DEFAULT_FLAGS;

        /* initialize the nbio ras function ahead of any other
         * ras functions so the hardware fatal error interrupt
         * can be enabled as early as possible */
        switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
        case IP_VERSION(7, 4, 0):
        case IP_VERSION(7, 4, 1):
        case IP_VERSION(7, 4, 4):
                if (!adev->gmc.xgmi.connected_to_cpu)
                        adev->nbio.ras = &nbio_v7_4_ras;
                break;
        case IP_VERSION(4, 3, 0):
                if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
                        /* unlike other generations of nbio ras,
                         * nbio v4_3 only supports the fatal error interrupt,
                         * which informs software that DF is frozen due to a
                         * system fatal error event. The driver should not
                         * enable nbio ras in such a case. Instead,
                         * check DF RAS */
                        adev->nbio.ras = &nbio_v4_3_ras;
                break;
        case IP_VERSION(6, 3, 1):
                if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
                        /* unlike other generations of nbio ras,
                         * nbif v6_3_1 only supports the fatal error interrupt,
                         * which informs software that DF is frozen due to a
                         * system fatal error event. The driver should not
                         * enable nbio ras in such a case. Instead,
                         * check DF RAS
                         */
                        adev->nbio.ras = &nbif_v6_3_1_ras;
                break;
        case IP_VERSION(7, 9, 0):
        case IP_VERSION(7, 9, 1):
                if (!adev->gmc.is_app_apu)
                        adev->nbio.ras = &nbio_v7_9_ras;
                break;
        default:
                /* nbio ras is not available */
                break;
        }

        /* nbio ras block needs to be enabled ahead of other ras blocks
         * to handle fatal error */
        r = amdgpu_nbio_ras_sw_init(adev);
        if (r)
                goto release_con;

        if (adev->nbio.ras &&
            adev->nbio.ras->init_ras_controller_interrupt) {
                r = adev->nbio.ras->init_ras_controller_interrupt(adev);
                if (r)
                        goto release_con;
        }

        if (adev->nbio.ras &&
            adev->nbio.ras->init_ras_err_event_athub_interrupt) {
                r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
                if (r)
                        goto release_con;
        }

        /* Pack the socket_id into ras feature mask bits [31:29] */
        if (adev->smuio.funcs &&
            adev->smuio.funcs->get_socket_id)
                con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
                                        AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);

        /* Get RAS schema for particular SOC */
        con->schema = amdgpu_get_ras_schema(adev);

        amdgpu_ras_init_reserved_vram_size(adev);

        if (amdgpu_ras_fs_init(adev)) {
                r = -EINVAL;
                goto release_con;
        }

        if (amdgpu_ras_aca_is_supported(adev)) {
                if (amdgpu_aca_is_enabled(adev))
                        r = amdgpu_aca_init(adev);
                else
                        r = amdgpu_mca_init(adev);
                if (r)
                        goto release_con;
        }

        con->init_task_pid = task_pid_nr(current);
        get_task_comm(con->init_task_comm, current);

        mutex_init(&con->critical_region_lock);
        INIT_LIST_HEAD(&con->critical_region_head);

        dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
                 "hardware ability[%x] ras_mask[%x]\n",
                 adev->ras_hw_enabled, adev->ras_enabled);

        return 0;
release_con:
        amdgpu_ras_set_context(adev, NULL);
        kfree(con);

        return r;
}

int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
{
        if (adev->gmc.xgmi.connected_to_cpu ||
            adev->gmc.is_app_apu)
                return 1;
        return 0;
}

static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
                                        struct ras_common_if *ras_block)
{
        struct ras_query_if info = {
                .head = *ras_block,
        };

        if (!amdgpu_persistent_edc_harvesting_supported(adev))
                return 0;

        if (amdgpu_ras_query_error_status(adev, &info) != 0)
                drm_warn(adev_to_drm(adev), "RAS init query failure");

        if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
                drm_warn(adev_to_drm(adev), "RAS init harvest reset failure");

        return 0;
}

bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
{
       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

       if (!con)
               return false;

       return con->poison_supported;
}

/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
                         struct ras_common_if *ras_block)
{
        struct amdgpu_ras_block_object *ras_obj = NULL;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_query_if *query_info;
        unsigned long ue_count, ce_count;
        int r;

        /* disable RAS feature per IP block if it is not supported */
        if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
                amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
                return 0;
        }

        r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
        if (r) {
                if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
                        /* in resume phase, if fail to enable ras,
                         * clean up all ras fs nodes, and disable ras */
                        goto cleanup;
                } else
                        return r;
        }

        /* check for errors on warm reset for ASICs that support persistent EDC harvesting */
        amdgpu_persistent_edc_harvesting(adev, ras_block);

        /* in resume phase, no need to create ras fs node */
        if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
                return 0;

        ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
        if (ras_obj->ras_cb || (ras_obj->hw_ops &&
            (ras_obj->hw_ops->query_poison_status ||
            ras_obj->hw_ops->handle_poison_consumption))) {
                r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
                if (r)
                        goto cleanup;
        }

        if (ras_obj->hw_ops &&
            (ras_obj->hw_ops->query_ras_error_count ||
             ras_obj->hw_ops->query_ras_error_status)) {
                r = amdgpu_ras_sysfs_create(adev, ras_block);
                if (r)
                        goto interrupt;

                /* Those are the cached values at init.
                 */
                query_info = kzalloc_obj(*query_info);
                if (!query_info)
                        return -ENOMEM;
                memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));

                if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
                        atomic_set(&con->ras_ce_count, ce_count);
                        atomic_set(&con->ras_ue_count, ue_count);
                }

                kfree(query_info);
        }

        return 0;

interrupt:
        if (ras_obj->ras_cb)
                amdgpu_ras_interrupt_remove_handler(adev, ras_block);
cleanup:
        amdgpu_ras_feature_enable(adev, ras_block, 0);
        return r;
}

static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
                         struct ras_common_if *ras_block)
{
        return amdgpu_ras_block_late_init(adev, ras_block);
}

/* helper function to remove ras fs node and interrupt handler */
void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
                          struct ras_common_if *ras_block)
{
        struct amdgpu_ras_block_object *ras_obj;
        if (!ras_block)
                return;

        amdgpu_ras_sysfs_remove(adev, ras_block);

        ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
        if (ras_obj->ras_cb)
                amdgpu_ras_interrupt_remove_handler(adev, ras_block);
}

static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
                          struct ras_common_if *ras_block)
{
        return amdgpu_ras_block_late_fini(adev, ras_block);
}

/* Do some init work after IP late init, as a dependency.
 * It runs in the resume, gpu reset and boot-up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        if (!adev->ras_enabled || !con) {
                /* clean the ras context for VEGA20 Gaming after sending the ras disable cmd */
                amdgpu_release_ras_context(adev);

                return;
        }

        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                /* Set up all other IPs which are not implemented. One tricky
                 * point is that an IP's actual ras error type should be
                 * MULTI_UNCORRECTABLE, but since the driver does not handle
                 * it, ERROR_NONE makes sense anyway.
                 */
                amdgpu_ras_enable_all_features(adev, 1);

                /* We enable ras on all hw_supported blocks, but a boot
                 * parameter might disable some of them and one or more IPs
                 * may not be implemented yet. So we disable them on their
                 * behalf.
                 */
                list_for_each_entry_safe(obj, tmp, &con->head, node) {
                        if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
                                amdgpu_ras_feature_enable(adev, &obj->head, 0);
                                /* there should not be any reference. */
                                WARN_ON(alive_obj(obj));
                        }
                }
        }
}

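/* disable all enabled ras features when the device is suspended */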
void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!adev->ras_enabled || !con)
                return;

        amdgpu_ras_disable_all_features(adev, 0);
        /* Make sure all ras objects are disabled. */
        if (AMDGPU_RAS_GET_FEATURES(con->features))
                amdgpu_ras_disable_all_features(adev, 1);
}

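/* Late init for ras: set up the event manager, reset ACA/MCA banks when
 * recovering from a reset, and run per-block late init for every registered
 * and supported ras block.
 */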
int amdgpu_ras_late_init(struct amdgpu_device *adev)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;
        int r;

        amdgpu_ras_event_mgr_init(adev);

        if (amdgpu_ras_aca_is_supported(adev)) {
                if (amdgpu_reset_in_recovery(adev)) {
                        if (amdgpu_aca_is_enabled(adev))
                                r = amdgpu_aca_reset(adev);
                        else
                                r = amdgpu_mca_reset(adev);
                        if (r)
                                return r;
                }

                if (!amdgpu_sriov_vf(adev)) {
                        if (amdgpu_aca_is_enabled(adev))
                                amdgpu_ras_set_aca_debug_mode(adev, false);
                        else
                                amdgpu_ras_set_mca_debug_mode(adev, false);
                }
        }

        /* Guest side doesn't need to init ras features */
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
                return 0;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                obj = node->ras_obj;
                if (!obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }

                if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
                        continue;

                if (obj->ras_late_init) {
                        r = obj->ras_late_init(adev, &obj->ras_comm);
                        if (r) {
                                dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
                                        obj->ras_comm.name, r);
                                return r;
                        }
                } else
                        amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
        }

        amdgpu_ras_check_bad_page_status(adev);

        return 0;
}

/* do some fini work before IP fini as a dependency */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!adev->ras_enabled || !con)
                return 0;

        /* Need to disable ras on all IPs here before ip [hw/sw]fini */
        if (AMDGPU_RAS_GET_FEATURES(con->features))
                amdgpu_ras_disable_all_features(adev, 0);
        amdgpu_ras_recovery_fini(adev);
        return 0;
}

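/* Final ras teardown: release all registered ras blocks, remove fs nodes and
 * interrupt handlers, and free the ras context.
 */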
int amdgpu_ras_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras_block_list *ras_node, *tmp;
        struct amdgpu_ras_block_object *obj = NULL;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!adev->ras_enabled || !con)
                return 0;

        amdgpu_ras_critical_region_fini(adev);
        mutex_destroy(&con->critical_region_lock);

        list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
                if (ras_node->ras_obj) {
                        obj = ras_node->ras_obj;
                        if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
                            obj->ras_fini)
                                obj->ras_fini(adev, &obj->ras_comm);
                        else
                                amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
                }

                /* Clear ras blocks from ras_list and free ras block list node */
                list_del(&ras_node->node);
                kfree(ras_node);
        }

        amdgpu_ras_fs_fini(adev);
        amdgpu_ras_interrupt_remove_all(adev);

        if (amdgpu_ras_aca_is_supported(adev)) {
                if (amdgpu_aca_is_enabled(adev))
                        amdgpu_aca_fini(adev);
                else
                        amdgpu_mca_fini(adev);
        }

        WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");

        if (AMDGPU_RAS_GET_FEATURES(con->features))
                amdgpu_ras_disable_all_features(adev, 0);

        cancel_delayed_work_sync(&con->ras_counte_delay_work);

        amdgpu_ras_set_context(adev, NULL);
        kfree(con);

        return 0;
}

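/* fatal error detected (FED) status is tracked via the AMDGPU_RAS_BLOCK__LAST
 * bit of ras_err_state
 */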
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
{
        struct amdgpu_ras *ras;

        ras = amdgpu_ras_get_context(adev);
        if (!ras)
                return false;

        return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
}

void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
{
        struct amdgpu_ras *ras;

        ras = amdgpu_ras_get_context(adev);
        if (ras) {
                if (status)
                        set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
                else
                        clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
        }
}

void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
{
        struct amdgpu_ras *ras;

        ras = amdgpu_ras_get_context(adev);
        if (ras) {
                ras->ras_err_state = 0;
                ras->gpu_reset_flags = 0;
        }
}

void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
                               enum amdgpu_ras_block block)
{
        struct amdgpu_ras *ras;

        ras = amdgpu_ras_get_context(adev);
        if (ras)
                set_bit(block, &ras->ras_err_state);
}

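/* check whether the given block (or any block, for AMDGPU_RAS_BLOCK__ANY) is
 * currently marked as being in an error state
 */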
bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
{
        struct amdgpu_ras *ras;

        ras = amdgpu_ras_get_context(adev);
        if (ras) {
                if (block == AMDGPU_RAS_BLOCK__ANY)
                        return (ras->ras_err_state != 0);
                else
                        return test_bit(block, &ras->ras_err_state) ||
                               test_bit(AMDGPU_RAS_BLOCK__LAST,
                                        &ras->ras_err_state);
        }

        return false;
}

static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
{
        struct amdgpu_ras *ras;

        ras = amdgpu_ras_get_context(adev);
        if (!ras)
                return NULL;

        return ras->event_mgr;
}

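/* advance the global event sequence number and record it as the last seqno
 * for the given event type; the caller pointer is only used for warning logs
 */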
int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
                                     const void *caller)
{
        struct ras_event_manager *event_mgr;
        struct ras_event_state *event_state;
        int ret = 0;

        if (amdgpu_uniras_enabled(adev))
                return 0;

        if (type >= RAS_EVENT_TYPE_COUNT) {
                ret = -EINVAL;
                goto out;
        }

        event_mgr = __get_ras_event_mgr(adev);
        if (!event_mgr) {
                ret = -EINVAL;
                goto out;
        }

        event_state = &event_mgr->event_state[type];
        event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
        atomic64_inc(&event_state->count);

out:
        if (ret && caller)
                dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
                         (int)type, caller, ret);

        return ret;
}

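/* return the last sequence number recorded for fatal/poison events, or
 * RAS_EVENT_INVALID_ID for any other event type
 */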
u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
{
        struct ras_event_manager *event_mgr;
        u64 id;

        if (type >= RAS_EVENT_TYPE_COUNT)
                return RAS_EVENT_INVALID_ID;

        switch (type) {
        case RAS_EVENT_TYPE_FATAL:
        case RAS_EVENT_TYPE_POISON_CREATION:
        case RAS_EVENT_TYPE_POISON_CONSUMPTION:
                event_mgr = __get_ras_event_mgr(adev);
                if (!event_mgr)
                        return RAS_EVENT_INVALID_ID;

                id = event_mgr->event_state[type].last_seqno;
                break;
        case RAS_EVENT_TYPE_INVALID:
        default:
                id = RAS_EVENT_INVALID_ID;
                break;
        }

        return id;
}

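/* global ras interrupt entry: log the fatal event, set the FED status and
 * schedule a mode1 gpu reset; only the first caller per interrupt triggers
 * the reset
 */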
int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
        if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
                struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
                enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
                u64 event_id = RAS_EVENT_INVALID_ID;

                if (amdgpu_uniras_enabled(adev))
                        return 0;

                if (!amdgpu_ras_mark_ras_event(adev, type))
                        event_id = amdgpu_ras_acquire_event_id(adev, type);

                RAS_EVENT_LOG(adev, event_id,
                              "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");

                amdgpu_ras_set_fed(adev, true);
                ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
                amdgpu_ras_reset_gpu(adev);
        }

        return -EBUSY;
}

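/* on VEGA20 with old SMU firmware, an emergency restart is needed after a
 * fatal ras interrupt unless BACO is the selected reset method
 */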
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_VEGA20 &&
            adev->pm.fw_version <= 0x283400) {
                return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
                                amdgpu_ras_intr_triggered();
        }

        return false;
}

void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!con)
                return;

        if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
                con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
                amdgpu_ras_set_context(adev, NULL);
                kfree(con);
        }
}

#ifdef CONFIG_X86_MCE_AMD
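/* find the cpu-connected amdgpu device whose xgmi physical node id matches
 * the given node_id
 */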
static struct amdgpu_device *find_adev(uint32_t node_id)
{
        int i;
        struct amdgpu_device *adev = NULL;

        for (i = 0; i < mce_adev_list.num_gpu; i++) {
                adev = mce_adev_list.devs[i];

                if (adev && adev->gmc.xgmi.connected_to_cpu &&
                    adev->gmc.xgmi.physical_node_id == node_id)
                        break;
                adev = NULL;
        }

        return adev;
}

#define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
#define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET           8

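/* MCE decode-chain callback: pick out uncorrectable GPU UMC errors reported
 * through the x86 MCA banks and retire the affected page on the matching
 * device
 */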
static int amdgpu_bad_page_notifier(struct notifier_block *nb,
                                    unsigned long val, void *data)
{
        struct mce *m = (struct mce *)data;
        struct amdgpu_device *adev = NULL;
        uint32_t gpu_id = 0;
        uint32_t umc_inst = 0, ch_inst = 0;

        /*
         * Only process the error if it was generated in UMC_V2, which belongs
         * to the GPU UMCs, and it occurred in DramECC (extended error code = 0);
         * otherwise bail out.
         */
        if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
                    (XEC(m->status, 0x3f) == 0x0)))
                return NOTIFY_DONE;

        /*
         * If it is a correctable error, return.
         */
        if (mce_is_correctable(m))
                return NOTIFY_OK;

        /*
         * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
         */
        gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;

        adev = find_adev(gpu_id);
        if (!adev) {
                DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
                                                                gpu_id);
                return NOTIFY_DONE;
        }

        /*
         * If it is an uncorrectable error, find out the UMC instance and
         * channel index.
         */
        umc_inst = GET_UMC_INST(m->ipid);
        ch_inst = GET_CHAN_INDEX(m->ipid);

        dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
                             umc_inst, ch_inst);

        if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
                return NOTIFY_OK;
        else
                return NOTIFY_DONE;
}

static struct notifier_block amdgpu_bad_page_nb = {
        .notifier_call  = amdgpu_bad_page_notifier,
        .priority       = MCE_PRIO_UC,
};

static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
        /*
         * Add the adev to the mce_adev_list.
         * During mode2 reset, amdgpu device is temporarily
         * removed from the mgpu_info list which can cause
         * page retirement to fail.
         * Use this list instead of mgpu_info to find the amdgpu
         * device on which the UMC error was reported.
         */
        mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;

        /*
         * Register the x86 notifier only once
         * with MCE subsystem.
         */
        if (notifier_registered == false) {
                mce_register_decode_chain(&amdgpu_bad_page_nb);
                notifier_registered = true;
        }
}

static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
        int i, j;

        if (!notifier_registered && !mce_adev_list.num_gpu)
                return;
        for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
                if (mce_adev_list.devs[i] == adev)
                        mce_adev_list.devs[i] = NULL;
                if (!mce_adev_list.devs[i])
                        ++j;
        }

        if (j == mce_adev_list.num_gpu) {
                mce_adev_list.num_gpu = 0;
                /* Unregister x86 notifier with MCE subsystem. */
                if (notifier_registered) {
                        mce_unregister_decode_chain(&amdgpu_bad_page_nb);
                        notifier_registered = false;
                }
        }
}
#endif

struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
{
        if (!adev)
                return NULL;

        return adev->psp.ras_context.ras;
}

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
{
        if (!adev)
                return -EINVAL;

        adev->psp.ras_context.ras = ras_con;
        return 0;
}

/* check if ras is supported on block, say, sdma, gfx */
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
                unsigned int block)
{
        int ret = 0;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        if (block >= AMDGPU_RAS_BLOCK_COUNT)
                return 0;

        ret = ras && (adev->ras_enabled & (1 << block));

        /* For the special asic with mem ecc enabled but sram ecc
         * not enabled: even if the ras block is not reported in
         * .ras_enabled, the block can still be considered to support
         * ras as long as the asic supports poison mode and the block
         * has a ras configuration registered.
         */
        if (!ret &&
            (block == AMDGPU_RAS_BLOCK__GFX ||
             block == AMDGPU_RAS_BLOCK__SDMA ||
             block == AMDGPU_RAS_BLOCK__VCN ||
             block == AMDGPU_RAS_BLOCK__JPEG) &&
            (amdgpu_ras_mask & (1 << block)) &&
            amdgpu_ras_is_poison_mode_supported(adev) &&
            amdgpu_ras_get_ras_block(adev, block, 0))
                ret = 1;

        return ret;
}

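/* schedule the ras recovery work on the reset domain; skip scheduling another
 * reset if the xgmi hive is already in recovery
 */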
int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        /* mode1 reset is the only option when the device is in RMA status */
        if (amdgpu_ras_is_rma(adev)) {
                ras->gpu_reset_flags = 0;
                ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
        }

        if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
                struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
                int hive_ras_recovery = 0;

                if (hive) {
                        hive_ras_recovery = atomic_read(&hive->ras_recovery);
                        amdgpu_put_xgmi_hive(hive);
                }
                /* In the case of multiple GPUs, after a GPU has started
                 * resetting all GPUs on hive, other GPUs do not need to
                 * trigger GPU reset again.
                 */
                if (!hive_ras_recovery)
                        amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
                else
                        atomic_set(&ras->in_recovery, 0);
        } else {
                flush_work(&ras->recovery_work);
                amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
        }

        return 0;
}

int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret = 0;

        if (con) {
                ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
                if (!ret)
                        con->is_aca_debug_mode = enable;
        }

        return ret;
}

int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret = 0;

        if (con) {
                if (amdgpu_aca_is_enabled(adev))
                        ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
                else
                        ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
                if (!ret)
                        con->is_aca_debug_mode = enable;
        }

        return ret;
}

bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (!con)
                return false;

        if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
            (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
                return con->is_aca_debug_mode;
        else
                return true;
}

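/* select the error query mode: SRIOV VFs query through host telemetry;
 * otherwise query through firmware, unless ACA/MCA debug mode is enabled or
 * smu support is missing, in which case query the registers directly
 */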
bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
                                     unsigned int *error_query_mode)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
        const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;

        if (!con) {
                *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
                return false;
        }

        if (amdgpu_sriov_vf(adev)) {
                *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
        } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
                *error_query_mode =
                        (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
        } else {
                *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
        }

        return true;
}

/* Register each ip ras block into amdgpu ras */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
                struct amdgpu_ras_block_object *ras_block_obj)
{
        struct amdgpu_ras_block_list *ras_node;

        if (!adev || !ras_block_obj)
                return -EINVAL;

        ras_node = kzalloc_obj(*ras_node);
        if (!ras_node)
                return -ENOMEM;

        INIT_LIST_HEAD(&ras_node->node);
        ras_node->ras_obj = ras_block_obj;
        list_add_tail(&ras_node->node, &adev->ras_list);

        return 0;
}

void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
{
        if (!err_type_name)
                return;

        switch (err_type) {
        case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
                sprintf(err_type_name, "correctable");
                break;
        case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
                sprintf(err_type_name, "uncorrectable");
                break;
        default:
                sprintf(err_type_name, "unknown");
                break;
        }
}

bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
                                         const struct amdgpu_ras_err_status_reg_entry *reg_entry,
                                         uint32_t instance,
                                         uint32_t *memory_id)
{
        uint32_t err_status_lo_data, err_status_lo_offset;

        if (!reg_entry)
                return false;

        err_status_lo_offset =
                AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
                                            reg_entry->seg_lo, reg_entry->reg_lo);
        err_status_lo_data = RREG32(err_status_lo_offset);

        if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
            !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
                return false;

        *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);

        return true;
}

bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
                                       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
                                       uint32_t instance,
                                       unsigned long *err_cnt)
{
        uint32_t err_status_hi_data, err_status_hi_offset;

        if (!reg_entry)
                return false;

        err_status_hi_offset =
                AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
                                            reg_entry->seg_hi, reg_entry->reg_hi);
        err_status_hi_data = RREG32(err_status_hi_offset);

        if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
            !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
                /* keep the check here in case we need to refer to the result later */
                dev_dbg(adev->dev, "Invalid err_info field\n");

        /* read err count */
        *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);

        return true;
}

void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
                                           const struct amdgpu_ras_err_status_reg_entry *reg_list,
                                           uint32_t reg_list_size,
                                           const struct amdgpu_ras_memory_id_entry *mem_list,
                                           uint32_t mem_list_size,
                                           uint32_t instance,
                                           uint32_t err_type,
                                           unsigned long *err_count)
{
        uint32_t memory_id;
        unsigned long err_cnt;
        char err_type_name[16];
        uint32_t i, j;

        for (i = 0; i < reg_list_size; i++) {
                /* query memory_id from err_status_lo */
                if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
                                                         instance, &memory_id))
                        continue;

                /* query err_cnt from err_status_hi */
                if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
                                                       instance, &err_cnt) ||
                    !err_cnt)
                        continue;

                *err_count += err_cnt;

                /* log the errors */
                amdgpu_ras_get_error_type_name(err_type, err_type_name);
                if (!mem_list) {
                        /* memory id list is not provided, print the raw memory_id */
                        dev_info(adev->dev,
                                 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
                                 err_cnt, err_type_name,
                                 reg_list[i].block_name,
                                 instance, memory_id);
                } else {
                        for (j = 0; j < mem_list_size; j++) {
                                if (memory_id == mem_list[j].memory_id) {
                                        dev_info(adev->dev,
                                                 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
                                                 err_cnt, err_type_name,
                                                 reg_list[i].block_name,
                                                 instance, mem_list[j].name);
                                        break;
                                }
                        }
                }
        }
}

void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
                                           const struct amdgpu_ras_err_status_reg_entry *reg_list,
                                           uint32_t reg_list_size,
                                           uint32_t instance)
{
        uint32_t err_status_lo_offset, err_status_hi_offset;
        uint32_t i;

        for (i = 0; i < reg_list_size; i++) {
                err_status_lo_offset =
                        AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
                                                    reg_list[i].seg_lo, reg_list[i].reg_lo);
                err_status_hi_offset =
                        AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
                                                    reg_list[i].seg_hi, reg_list[i].reg_hi);
                WREG32(err_status_lo_offset, 0);
                WREG32(err_status_hi_offset, 0);
        }
}

int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
{
        memset(err_data, 0, sizeof(*err_data));

        INIT_LIST_HEAD(&err_data->err_node_list);

        return 0;
}

static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
{
        if (!err_node)
                return;

        list_del(&err_node->node);
        kvfree(err_node);
}

void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
{
        struct ras_err_node *err_node, *tmp;

        list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
                amdgpu_ras_error_node_release(err_node);
}

static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
                                                             struct amdgpu_smuio_mcm_config_info *mcm_info)
{
        struct ras_err_node *err_node;
        struct amdgpu_smuio_mcm_config_info *ref_id;

        if (!err_data || !mcm_info)
                return NULL;

        for_each_ras_error(err_node, err_data) {
                ref_id = &err_node->err_info.mcm_info;

                if (mcm_info->socket_id == ref_id->socket_id &&
                    mcm_info->die_id == ref_id->die_id)
                        return err_node;
        }

        return NULL;
}

static struct ras_err_node *amdgpu_ras_error_node_new(void)
{
        struct ras_err_node *err_node;

        err_node = kvzalloc_obj(*err_node);
        if (!err_node)
                return NULL;

        INIT_LIST_HEAD(&err_node->node);

        return err_node;
}

static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
        struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
        struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
        struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
        struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;

        if (unlikely(infoa->socket_id != infob->socket_id))
                return infoa->socket_id - infob->socket_id;

        return infoa->die_id - infob->die_id;
}

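/* look up the per-(socket, die) error info entry, allocating a new node and
 * keeping the list sorted when the entry does not exist yet
 */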
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
                                struct amdgpu_smuio_mcm_config_info *mcm_info)
{
        struct ras_err_node *err_node;

        err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
        if (err_node)
                return &err_node->err_info;

        err_node = amdgpu_ras_error_node_new();
        if (!err_node)
                return NULL;

        memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));

        err_data->err_list_count++;
        list_add_tail(&err_node->node, &err_data->err_node_list);
        list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);

        return &err_node->err_info;
}

int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
                                        struct amdgpu_smuio_mcm_config_info *mcm_info,
                                        u64 count)
{
        struct ras_err_info *err_info;

        if (!err_data || !mcm_info)
                return -EINVAL;

        if (!count)
                return 0;

        err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
        if (!err_info)
                return -EINVAL;

        err_info->ue_count += count;
        err_data->ue_count += count;

        return 0;
}

int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
                                        struct amdgpu_smuio_mcm_config_info *mcm_info,
                                        u64 count)
{
        struct ras_err_info *err_info;

        if (!err_data || !mcm_info)
                return -EINVAL;

        if (!count)
                return 0;

        err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
        if (!err_info)
                return -EINVAL;

        err_info->ce_count += count;
        err_data->ce_count += count;

        return 0;
}

int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
                                        struct amdgpu_smuio_mcm_config_info *mcm_info,
                                        u64 count)
{
        struct ras_err_info *err_info;

        if (!err_data || !mcm_info)
                return -EINVAL;

        if (!count)
                return 0;

        err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
        if (!err_info)
                return -EINVAL;

        err_info->de_count += count;
        err_data->de_count += count;

        return 0;
}

#define mmMP0_SMN_C2PMSG_92     0x1609C
#define mmMP0_SMN_C2PMSG_126    0x160BE
static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
                                                 u32 instance)
{
        u32 socket_id, aid_id, hbm_id;
        u32 fw_status;
        u32 boot_error;
        u64 reg_addr;

        /* The pattern for smn addressing in other SOCs could differ from the
         * one used for aqua_vanjaram. Revisit this code if the pattern
         * changes; in that case, replace the aqua_vanjaram implementation
         * with a more generic helper.
         */
        reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
                   aqua_vanjaram_encode_ext_smn_addressing(instance);
        fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

        reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
                   aqua_vanjaram_encode_ext_smn_addressing(instance);
        boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);

        socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
        aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
        hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);

        if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
                         socket_id, aid_id, hbm_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
                         socket_id, aid_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
                         socket_id, aid_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
                         socket_id, aid_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
                         socket_id, aid_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
                         socket_id, aid_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
                         socket_id, aid_id, hbm_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
                         socket_id, aid_id, hbm_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
                         socket_id, aid_id, fw_status);

        if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
                dev_info(adev->dev,
                         "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
                         socket_id, aid_id, fw_status);
}

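/* poll the boot status register; return true if it does not reach the steady
 * state within the polling limit, i.e. a boot time error was detected
 */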
static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
                                           u32 instance)
{
        u64 reg_addr;
        u32 reg_data;
        int retry_loop;

        reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
                   aqua_vanjaram_encode_ext_smn_addressing(instance);

        for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
                reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
                if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
                        return false;
                else
                        msleep(1);
        }

        return true;
}

void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
{
        u32 i;

        for (i = 0; i < num_instances; i++) {
                if (amdgpu_ras_boot_error_detected(adev, i))
                        amdgpu_ras_boot_time_error_reporting(adev, i);
        }
}

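/* reserve a single vram page reported as bad, unless it falls inside a
 * recorded critical region or has already been reserved
 */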
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
        int ret = 0;

        if (amdgpu_ras_check_critical_address(adev, start))
                return 0;

        mutex_lock(&con->page_rsv_lock);
        ret = amdgpu_vram_mgr_query_page_status(mgr, start);
        if (ret == -ENOENT)
                ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
        mutex_unlock(&con->page_rsv_lock);

        return ret;
}

void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
                                const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (RAS_EVENT_ID_IS_VALID(event_id))
                dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
        else
                dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);

        va_end(args);
}

bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (amdgpu_uniras_enabled(adev))
                return amdgpu_ras_mgr_is_rma(adev);

        if (!con)
                return false;

        return con->is_rma;
}

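/* record the vram ranges backing a critical bo (e.g. the firmware reserved
 * memory) so that bad page handling leaves them alone
 */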
int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
                        struct amdgpu_bo *bo)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct amdgpu_vram_mgr_resource *vres;
        struct ras_critical_region *region;
        struct drm_buddy_block *block;
        int ret = 0;

        if (!bo || !bo->tbo.resource)
                return -EINVAL;

        vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);

        mutex_lock(&con->critical_region_lock);

        /* Check if the bo had been recorded */
        list_for_each_entry(region, &con->critical_region_head, node)
                if (region->bo == bo)
                        goto out;

        /* Record new critical amdgpu bo */
        list_for_each_entry(block, &vres->blocks, link) {
                region = kzalloc_obj(*region);
                if (!region) {
                        ret = -ENOMEM;
                        goto out;
                }
                region->bo = bo;
                region->start = amdgpu_vram_mgr_block_start(block);
                region->size = amdgpu_vram_mgr_block_size(block);
                list_add_tail(&region->node, &con->critical_region_head);
        }

out:
        mutex_unlock(&con->critical_region_lock);

        return ret;
}

static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
{
        amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
}

static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_critical_region *region, *tmp;

        mutex_lock(&con->critical_region_lock);
        list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
                list_del(&region->node);
                kfree(region);
        }
        mutex_unlock(&con->critical_region_lock);
}

bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_critical_region *region;
        bool ret = false;

        mutex_lock(&con->critical_region_lock);
        list_for_each_entry(region, &con->critical_region_head, node) {
                if ((region->start <= addr) &&
                    (addr < (region->start + region->size))) {
                        ret = true;
                        break;
                }
        }
        mutex_unlock(&con->critical_region_lock);

        return ret;
}

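/* run the unified ras manager's pre-reset handling on every device in the
 * reset list
 */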
void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
                                          struct list_head *device_list)
{
        struct amdgpu_device *tmp_adev = NULL;

        list_for_each_entry(tmp_adev, device_list, reset_list) {
                if (amdgpu_uniras_enabled(tmp_adev))
                        amdgpu_ras_mgr_pre_reset(tmp_adev);
        }
}

void amdgpu_ras_post_reset(struct amdgpu_device *adev,
                                          struct list_head *device_list)
{
        struct amdgpu_device *tmp_adev = NULL;

        list_for_each_entry(tmp_adev, device_list, reset_list) {
                if (amdgpu_uniras_enabled(tmp_adev))
                        amdgpu_ras_mgr_post_reset(tmp_adev);
        }
}