// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *      Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of the I/O mapping
 * reported to the OS by firmware via the I/O Remapping Table (IORT).
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)     "ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include "init.h"

#define IORT_TYPE_MASK(type)    (1 << (type))
#define IORT_MSI_TYPE           (1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE         ((1 << ACPI_IORT_NODE_SMMU) |   \
                                (1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
        struct list_head        list;
        struct fwnode_handle    *fw_node;
        phys_addr_t             base_addr;
        u32                     translation_id;
};

struct iort_fwnode {
        struct list_head list;
        struct acpi_iort_node *iort_node;
        struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *                     iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
                                  struct fwnode_handle *fwnode)
{
        struct iort_fwnode *np;

        np = kzalloc(sizeof(*np), GFP_ATOMIC);

        if (WARN_ON(!np))
                return -ENOMEM;

        INIT_LIST_HEAD(&np->list);
        np->iort_node = iort_node;
        np->fwnode = fwnode;

        spin_lock(&iort_fwnode_lock);
        list_add_tail(&np->list, &iort_fwnode_list);
        spin_unlock(&iort_fwnode_lock);

        return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
                        struct acpi_iort_node *node)
{
        struct iort_fwnode *curr;
        struct fwnode_handle *fwnode = NULL;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry(curr, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        fwnode = curr->fwnode;
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);

        return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
        struct iort_fwnode *curr, *tmp;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        list_del(&curr->list);
                        kfree(curr);
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
                        struct fwnode_handle *fwnode)
{
        struct iort_fwnode *curr;
        struct acpi_iort_node *iort_node = NULL;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry(curr, &iort_fwnode_list, list) {
                if (curr->fwnode == fwnode) {
                        iort_node = curr->iort_node;
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);

        return iort_node;
}
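
/*
 * Illustrative lifecycle sketch (not part of the original file): the
 * helpers above bind an IORT node to the fwnode its platform device is
 * created with and resolve the binding in either direction. The flow is
 * roughly:
 *
 *      fwnode = acpi_alloc_fwnode_static();    // allocator assumed here
 *      iort_set_fwnode(node, fwnode);          // cache the pair
 *      ...
 *      iort_get_fwnode(node);                  // node -> fwnode
 *      iort_get_iort_node(fwnode);             // fwnode -> node
 *      ...
 *      iort_delete_fwnode(node);               // on tear-down or error
 */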

typedef acpi_status (*iort_find_node_callback)
        (struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - Register a domain token, along with the
 * related ITS ID and base address, on the list from which it can be
 * retrieved later.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if the list element allocation fails
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
                               struct fwnode_handle *fw_node)
{
        struct iort_its_msi_chip *its_msi_chip;

        its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
        if (!its_msi_chip)
                return -ENOMEM;

        its_msi_chip->fw_node = fw_node;
        its_msi_chip->translation_id = trans_id;
        its_msi_chip->base_addr = base;

        spin_lock(&iort_msi_chip_lock);
        list_add(&its_msi_chip->list, &iort_msi_chip_list);
        spin_unlock(&iort_msi_chip_lock);

        return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
        struct iort_its_msi_chip *its_msi_chip, *t;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        list_del(&its_msi_chip->list);
                        kfree(its_msi_chip);
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
        struct fwnode_handle *fw_node = NULL;
        struct iort_its_msi_chip *its_msi_chip;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        fw_node = its_msi_chip->fw_node;
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);

        return fw_node;
}
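
/*
 * Usage sketch (illustrative, error handling elided): an ITS driver is
 * expected to register one token per ITS so that iort_find_domain_token()
 * can later turn an ITS ID found in an IORT ITS group node into the
 * fwnode the MSI irqdomain was created with:
 *
 *      iort_register_domain_token(translation_id, its_base_pa, fwnode);
 *      ...
 *      fwnode = iort_find_domain_token(translation_id);
 *      ...
 *      iort_deregister_domain_token(translation_id);   // on tear-down
 */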

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
                                             iort_find_node_callback callback,
                                             void *context)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        int i;

        if (!iort_table)
                return NULL;

        /* Get the first IORT node */
        iort = (struct acpi_table_iort *)iort_table;
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
                               "IORT node pointer overflows, bad table!\n"))
                        return NULL;

                if (iort_node->type == type &&
                    ACPI_SUCCESS(callback(iort_node, context)))
                        return iort_node;

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }

        return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
                                            void *context)
{
        struct device *dev = context;
        acpi_status status = AE_NOT_FOUND;

        if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
            node->type == ACPI_IORT_NODE_IWB) {
                struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_iort_named_component *ncomp;
                struct acpi_iort_iwb *iwb;
                struct device *cdev = dev;
                struct acpi_device *adev;
                const char *device_name;

                /*
                 * Walk the device tree to find a device with an
                 * ACPI companion; there is no point in scanning
                 * IORT for a device matching a named component or IWB if
                 * the device does not have an ACPI companion to
                 * start with.
                 */
                do {
                        adev = ACPI_COMPANION(cdev);
                        if (adev)
                                break;

                        cdev = cdev->parent;
                } while (cdev);

                if (!adev)
                        goto out;

                status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
                if (ACPI_FAILURE(status)) {
                        dev_warn(cdev, "Can't get device full path name\n");
                        goto out;
                }

                if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
                        ncomp = (struct acpi_iort_named_component *)node->node_data;
                        device_name = ncomp->device_name;
                } else {
                        iwb = (struct acpi_iort_iwb *)node->node_data;
                        device_name = iwb->device_name;
                }
                status = !strcmp(device_name, buf.pointer) ? AE_OK : AE_NOT_FOUND;
                acpi_os_free(buf.pointer);
        } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_root_complex *pci_rc;
                struct pci_bus *bus;

                bus = to_pci_bus(dev);
                pci_rc = (struct acpi_iort_root_complex *)node->node_data;

                /*
                 * It is assumed that PCI segment numbers map one-to-one
                 * to root complexes: each segment number can represent
                 * only one root complex.
                 */
                status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
                                                        AE_OK : AE_NOT_FOUND;
        }
out:
        return status;
}

static acpi_status iort_match_iwb_callback(struct acpi_iort_node *node, void *context)
{
        struct acpi_iort_iwb *iwb;
        u32 *id = context;

        if (node->type != ACPI_IORT_NODE_IWB)
                return AE_NOT_FOUND;

        iwb = (struct acpi_iort_iwb *)node->node_data;
        if (iwb->iwb_index != *id)
                return AE_NOT_FOUND;

        return AE_OK;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                       u32 *rid_out, bool check_overlap)
{
        /* Single mapping does not care for input id */
        if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    type == ACPI_IORT_NODE_IWB             ||
                    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                        *rid_out = map->output_base;
                        return 0;
                }

                pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
                        map, type);
                return -ENXIO;
        }

        if (rid_in < map->input_base ||
            (rid_in > map->input_base + map->id_count))
                return -ENXIO;

        if (check_overlap) {
                /*
                 * We already found a mapping for this input ID at the end of
                 * another region. If it coincides with the start of this
                 * region, we assume the prior match was due to the off-by-1
                 * issue mentioned below, and allow it to be superseded.
                 * Otherwise, things are *really* broken, and we just disregard
                 * duplicate matches entirely to retain compatibility.
                 */
                pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
                       map, rid_in);
                if (rid_in != map->input_base)
                        return -ENXIO;

                pr_err(FW_BUG "applying workaround.\n");
        }

        *rid_out = map->output_base + (rid_in - map->input_base);

        /*
         * Due to confusion regarding the meaning of the id_count field (which
         * carries the number of IDs *minus 1*), we may have to disregard this
         * match if it is at the end of the range, and overlaps with the start
         * of another one.
         */
        if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
                return -EAGAIN;
        return 0;
}
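
/*
 * Worked example (illustrative values): for a mapping with
 * input_base = 0x100, id_count = 0xff and output_base = 0x1000, input
 * ID 0x120 translates to 0x1000 + (0x120 - 0x100) = 0x1020. Input ID
 * 0x1ff (input_base + id_count) also matches, but returns -EAGAIN:
 * firmware that wrongly treated id_count as the number of IDs may
 * declare an overlapping region starting at 0x1ff, and the
 * check_overlap path above lets such a region supersede this match.
 */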

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
                                               u32 *id_out, int index)
{
        struct acpi_iort_node *parent;
        struct acpi_iort_id_mapping *map;

        if (!node->mapping_offset || !node->mapping_count ||
                                     index >= node->mapping_count)
                return NULL;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset + index * sizeof(*map));

        /* Firmware bug! */
        if (!map->output_reference) {
                pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                       node, node->type);
                return NULL;
        }

        parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                               map->output_reference);

        if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    node->type == ACPI_IORT_NODE_IWB ||
                    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
                    node->type == ACPI_IORT_NODE_SMMU_V3 ||
                    node->type == ACPI_IORT_NODE_PMCG) {
                        *id_out = map->output_base;
                        return parent;
                }
        }

        return NULL;
}

#ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
#endif

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        struct acpi_iort_pmcg *pmcg;

        switch (node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                /*
                 * The SMMUv3 dev ID mapping index was introduced in the
                 * revision 1 table; it is not available in revision 0.
                 */
                if (node->revision < 1)
                        return -EINVAL;

                smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
                /*
                 * Until IORT E.e (node rev. 5), the ID mapping index was
                 * defined to be valid unless all interrupts are GSIV-based.
                 */
                if (node->revision < 5) {
                        if (smmu->event_gsiv && smmu->pri_gsiv &&
                            smmu->gerr_gsiv && smmu->sync_gsiv)
                                return -EINVAL;
                } else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
                        return -EINVAL;
                }

                if (smmu->id_mapping_index >= node->mapping_count) {
                        pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
                               node, node->type);
                        return -EINVAL;
                }

                return smmu->id_mapping_index;
        case ACPI_IORT_NODE_PMCG:
                pmcg = (struct acpi_iort_pmcg *)node->node_data;
                if (pmcg->overflow_gsiv || node->mapping_count == 0)
                        return -EINVAL;

                return 0;
        default:
                return -EINVAL;
        }
}
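
/*
 * Example (derived from the logic above): an SMMUv3 node at IORT node
 * revision 3 with event_gsiv == 0 (an MSI-based event queue interrupt)
 * and the other three GSIVs wired still reports a valid
 * id_mapping_index, whereas the same node with all four GSIVs wired
 * returns -EINVAL. From node revision 5 onwards, the DEVICEID_VALID
 * flag alone decides.
 */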

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
                                               u32 id_in, u32 *id_out,
                                               u8 type_mask)
{
        u32 id = id_in;

        /* Parse the ID mapping tree to find specified node type */
        while (node) {
                struct acpi_iort_id_mapping *map;
                int i, index, rc = 0;
                u32 out_ref = 0, map_id = id;

                if (IORT_TYPE_MASK(node->type) & type_mask) {
                        if (id_out)
                                *id_out = id;
                        return node;
                }

                if (!node->mapping_offset || !node->mapping_count)
                        goto fail_map;

                map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                                   node->mapping_offset);

                /* Firmware bug! */
                if (!map->output_reference) {
                        pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                               node, node->type);
                        goto fail_map;
                }

                /*
                 * Get the special ID mapping index (if any) and skip its
                 * associated ID map to prevent erroneous multi-stage
                 * IORT ID translations.
                 */
                index = iort_get_id_mapping_index(node);

                /* Do the ID translation */
                for (i = 0; i < node->mapping_count; i++, map++) {
                        /* if it is special mapping index, skip it */
                        if (i == index)
                                continue;

                        rc = iort_id_map(map, node->type, map_id, &id, out_ref);
                        if (!rc)
                                break;
                        if (rc == -EAGAIN)
                                out_ref = map->output_reference;
                }

                if (i == node->mapping_count && !out_ref)
                        goto fail_map;

                node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                    rc ? out_ref : map->output_reference);
        }

fail_map:
        /* Map input ID to output ID unchanged on mapping failure */
        if (id_out)
                *id_out = id_in;

        return NULL;
}
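
/*
 * Walk example (hypothetical topology): with type_mask = IORT_MSI_TYPE
 * and a named component whose ID mapping points at an SMMUv3 that in
 * turn maps to an ITS group, iort_node_map_id() translates
 *
 *      NC (id_in) -> SMMUv3 (StreamID) -> ITS group (DeviceID)
 *
 * applying iort_id_map() at every hop and skipping the SMMUv3's own
 * device ID mapping index, so the SMMU's MSI mapping is never applied
 * to a master's StreamID.
 */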

static struct acpi_iort_node *iort_node_map_platform_id(
                struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
                int index)
{
        struct acpi_iort_node *parent;
        u32 id;

        /* step 1: retrieve the initial dev id */
        parent = iort_node_get_id(node, &id, index);
        if (!parent)
                return NULL;

        /*
         * Optional step 2: if the parent is not the target type we want,
         * map the initial dev ID again, to cover cases such as
         * NC (named component) -> SMMU -> ITS. If the type matches,
         * return the initial dev ID and its parent pointer directly.
         */
        if (!(IORT_TYPE_MASK(parent->type) & type_mask))
                parent = iort_node_map_id(parent, id, id_out, type_mask);
        else
                if (id_out)
                        *id_out = id;

        return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
        struct pci_bus *pbus;

        if (!dev_is_pci(dev)) {
                struct acpi_iort_node *node;
                /*
                 * Scan iort_fwnode_list to see if it's an IORT platform
                 * device (such as an SMMU or PMCG); its IORT node was
                 * already cached and associated with the fwnode when the
                 * IORT platform devices were initialized.
                 */
                node = iort_get_iort_node(dev->fwnode);
                if (node)
                        return node;
                /*
                 * If not, then it should be a platform device defined in
                 * the DSDT/SSDT, described in the IORT by either a Named
                 * Component node or an IWB node.
                 */
                node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);
                if (node)
                        return node;
                return iort_scan_node(ACPI_IORT_NODE_IWB,
                                      iort_match_node_callback, dev);
        }

        pbus = to_pci_dev(dev)->bus;

        return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                              iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_id() - Map a MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
        struct acpi_iort_node *node;
        u32 dev_id;

        node = iort_find_dev_node(dev);
        if (!node)
                return input_id;

        iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
        return dev_id;
}

/**
 * iort_msi_xlate() - Map a MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 * @fwnode: Pointer to store the fwnode.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 *          On success, the fwnode pointer is initialized to the MSI
 *          controller fwnode handle.
 */
u32 iort_msi_xlate(struct device *dev, u32 input_id, struct fwnode_handle **fwnode)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *node;
        u32 dev_id;

        node = iort_find_dev_node(dev);
        if (!node)
                return input_id;

        node = iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
        if (!node)
                return input_id;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)node->node_data;

        *fwnode = iort_find_domain_token(its->identifiers[0]);

        return dev_id;
}
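
/*
 * Usage sketch (illustrative; the caller shown is an assumption, not a
 * kernel API): an MSI layer resolving both the device ID and the MSI
 * controller for a platform device might do, roughly:
 *
 *      struct fwnode_handle *fwnode = NULL;
 *      u32 dev_id = iort_msi_xlate(dev, input_id, &fwnode);
 *
 *      if (fwnode)
 *              domain = irq_find_matching_fwnode(fwnode,
 *                                                DOMAIN_BUS_PLATFORM_MSI);
 *
 * The DOMAIN_BUS_PLATFORM_MSI token mirrors what iort_set_device_domain()
 * uses further below.
 */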

int iort_its_translate_pa(struct fwnode_handle *node, phys_addr_t *base)
{
        struct iort_its_msi_chip *its_msi_chip;
        int ret = -ENODEV;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
                if (its_msi_chip->fw_node == node) {
                        *base = its_msi_chip->base_addr;
                        ret = 0;
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);

        return ret;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
        struct fwnode_handle *fwnode = iort_find_domain_token(its_id);

        if (!fwnode)
                return -ENODEV;

        return iort_its_translate_pa(fwnode, base);
}

/**
 * iort_pmsi_get_msi_info() - Get the device ID and ITS translation frame PA
 *                            for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 * @pa: Optional pointer to store the ITS translation frame physical address.
 *
 * Returns: 0 on successful dev_id (and pa) retrieval, -ENODEV on error
 */
int iort_pmsi_get_msi_info(struct device *dev, u32 *dev_id, phys_addr_t *pa)
{
        struct acpi_iort_node *node, *parent = NULL;
        struct acpi_iort_its_group *its;
        int i, index;

        node = iort_find_dev_node(dev);
        if (!node)
                return -ENODEV;

        index = iort_get_id_mapping_index(node);
        /* if there is a valid index, go get the dev_id directly */
        if (index >= 0) {
                parent = iort_node_get_id(node, dev_id, index);
        } else {
                for (i = 0; i < node->mapping_count; i++) {
                        parent = iort_node_map_platform_id(node, dev_id,
                                                      IORT_MSI_TYPE, i);
                        if (parent)
                                break;
                }
        }

        if (!parent)
                return -ENODEV;

        if (pa) {
                int ret;

                its = (struct acpi_iort_its_group *)node->node_data;
                ret = iort_find_its_base(its->identifiers[0], pa);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
                                unsigned int idx, int *its_id)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *node;

        node = iort_find_dev_node(dev);
        if (!node)
                return -ENXIO;

        node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
        if (!node)
                return -ENXIO;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)node->node_data;
        if (idx >= its->its_count) {
                dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
                        idx, its->its_count);
                return -ENXIO;
        }

        *its_id = its->identifiers[idx];
        return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
                                          enum irq_domain_bus_token bus_token)
{
        struct fwnode_handle *handle;
        int its_id;

        if (iort_dev_find_its_id(dev, id, 0, &its_id))
                return NULL;

        handle = iort_find_domain_token(its_id);
        if (!handle)
                return NULL;

        return irq_find_matching_fwnode(handle, bus_token);
}

struct fwnode_handle *iort_iwb_handle(u32 iwb_id)
{
        struct fwnode_handle *fwnode;
        struct acpi_iort_node *node;
        struct acpi_device *device;
        struct acpi_iort_iwb *iwb;
        acpi_status status;
        acpi_handle handle;

        /* find its associated IWB node */
        node = iort_scan_node(ACPI_IORT_NODE_IWB, iort_match_iwb_callback, &iwb_id);
        if (!node)
                return NULL;

        iwb = (struct acpi_iort_iwb *)node->node_data;
        status = acpi_get_handle(NULL, iwb->device_name, &handle);
        if (ACPI_FAILURE(status))
                return NULL;

        device = acpi_get_acpi_dev(handle);
        if (!device)
                return NULL;

        fwnode = acpi_fwnode_handle(device);
        acpi_put_acpi_dev(device);

        return fwnode;
}

static void iort_set_device_domain(struct device *dev,
                                   struct acpi_iort_node *node)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *msi_parent;
        struct acpi_iort_id_mapping *map;
        struct fwnode_handle *iort_fwnode;
        struct irq_domain *domain;
        int index;

        index = iort_get_id_mapping_index(node);
        if (index < 0)
                return;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset + index * sizeof(*map));

        /* Firmware bug! */
        if (!map->output_reference ||
            !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
                pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
                       node, node->type);
                return;
        }

        msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                  map->output_reference);

        if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
                return;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)msi_parent->node_data;

        iort_fwnode = iort_find_domain_token(its->identifiers[0]);
        if (!iort_fwnode)
                return;

        domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
        if (domain)
                dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
        struct acpi_iort_node *node, *msi_parent = NULL;
        struct fwnode_handle *iort_fwnode;
        struct acpi_iort_its_group *its;
        int i;

        /* find its associated iort node */
        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                              iort_match_node_callback, dev);
        if (!node) {
                /* if not, look for an associated IWB node instead */
                node = iort_scan_node(ACPI_IORT_NODE_IWB,
                                      iort_match_node_callback, dev);

                if (!node)
                        return NULL;
        }

        /* then find its msi parent node */
        for (i = 0; i < node->mapping_count; i++) {
                msi_parent = iort_node_map_platform_id(node, NULL,
                                                       IORT_MSI_TYPE, i);
                if (msi_parent)
                        break;
        }

        if (!msi_parent)
                return NULL;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)msi_parent->node_data;

        iort_fwnode = iort_find_domain_token(its->identifiers[0]);
        if (!iort_fwnode)
                return NULL;

        return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
        struct irq_domain *msi_domain;

        msi_domain = iort_get_platform_device_domain(dev);
        if (msi_domain)
                dev_set_msi_domain(dev, msi_domain);
}

#ifdef CONFIG_IOMMU_API
static void iort_rmr_free(struct device *dev,
                          struct iommu_resv_region *region)
{
        struct iommu_iort_rmr_data *rmr_data;

        rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
        kfree(rmr_data->sids);
        kfree(rmr_data);
}

static struct iommu_iort_rmr_data *iort_rmr_alloc(
                                        struct acpi_iort_rmr_desc *rmr_desc,
                                        int prot, enum iommu_resv_type type,
                                        u32 *sids, u32 num_sids)
{
        struct iommu_iort_rmr_data *rmr_data;
        struct iommu_resv_region *region;
        u32 *sids_copy;
        u64 addr = rmr_desc->base_address, size = rmr_desc->length;

        rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
        if (!rmr_data)
                return NULL;

        /* Create a copy of SIDs array to associate with this rmr_data */
        sids_copy = kmemdup_array(sids, num_sids, sizeof(*sids), GFP_KERNEL);
        if (!sids_copy) {
                kfree(rmr_data);
                return NULL;
        }
        rmr_data->sids = sids_copy;
        rmr_data->num_sids = num_sids;

        if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
                /* PAGE align base addr and size */
                addr &= PAGE_MASK;
                size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));

                pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
                       rmr_desc->base_address,
                       rmr_desc->base_address + rmr_desc->length - 1,
                       addr, addr + size - 1);
        }

        region = &rmr_data->rr;
        INIT_LIST_HEAD(&region->list);
        region->start = addr;
        region->length = size;
        region->prot = prot;
        region->type = type;
        region->free = iort_rmr_free;

        return rmr_data;
}
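
/*
 * Alignment example (illustrative, assuming a 4K PAGE_SIZE): an RMR
 * descriptor with base_address = 0x20001234 and length = 0x5000 is not
 * 64K aligned, so the code above falls back to page granularity:
 * addr = 0x20001000 and size = PAGE_ALIGN(0x5000 + 0x234) = 0x6000,
 * i.e. the reservation [0x20001000 - 0x20006fff] still covers the whole
 * descriptor.
 */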

static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
                                        u32 count)
{
        int i, j;

        for (i = 0; i < count; i++) {
                u64 end, start = desc[i].base_address, length = desc[i].length;

                if (!length) {
                        pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
                               start);
                        continue;
                }

                end = start + length - 1;

                /* Check for address overlap */
                for (j = i + 1; j < count; j++) {
                        u64 e_start = desc[j].base_address;
                        u64 e_end = e_start + desc[j].length - 1;

                        if (start <= e_end && end >= e_start)
                                pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
                                       start, end);
                }
        }
}

/*
 * Please note, we will keep the already allocated RMR reserve
 * regions in case of a memory allocation failure.
 */
static void iort_get_rmrs(struct acpi_iort_node *node,
                          struct acpi_iort_node *smmu,
                          u32 *sids, u32 num_sids,
                          struct list_head *head)
{
        struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
        struct acpi_iort_rmr_desc *rmr_desc;
        int i;

        rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
                                rmr->rmr_offset);

        iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);

        for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
                struct iommu_iort_rmr_data *rmr_data;
                enum iommu_resv_type type;
                int prot = IOMMU_READ | IOMMU_WRITE;

                if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
                        type = IOMMU_RESV_DIRECT_RELAXABLE;
                else
                        type = IOMMU_RESV_DIRECT;

                if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
                        prot |= IOMMU_PRIV;

                /* Attributes 0x00 - 0x03 represent device memory */
                if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
                                ACPI_IORT_RMR_ATTR_DEVICE_GRE)
                        prot |= IOMMU_MMIO;
                else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
                                ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
                        prot |= IOMMU_CACHE;

                rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
                                          sids, num_sids);
                if (!rmr_data)
                        return;

                list_add_tail(&rmr_data->rr.list, head);
        }
}

static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
                                u32 new_count)
{
        u32 *new_sids;
        u32 total_count = count + new_count;
        int i;

        new_sids = krealloc_array(sids, count + new_count,
                                  sizeof(*new_sids), GFP_KERNEL);
        if (!new_sids) {
                kfree(sids);
                return NULL;
        }

        for (i = count; i < total_count; i++)
                new_sids[i] = id_start++;

        return new_sids;
}
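
/*
 * Growth example (illustrative): starting from sids == NULL and
 * count == 0, a call with id_start = 0x40 and new_count = 4 yields
 * { 0x40, 0x41, 0x42, 0x43 }; a second call with the returned pointer,
 * count = 4, id_start = 0x100 and new_count = 2 extends it to
 * { 0x40, 0x41, 0x42, 0x43, 0x100, 0x101 }. On allocation failure the
 * old array is freed, so callers must not reuse their stale pointer.
 */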

static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
                             u32 id_count)
{
        int i;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        /*
         * Make sure the kernel has preserved the boot firmware PCIe
         * configuration. This is required to ensure that the RMR PCIe
         * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
         */
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);

                if (!host->preserve_config)
                        return false;
        }

        for (i = 0; i < fwspec->num_ids; i++) {
                if (fwspec->ids[i] >= id_start &&
                    fwspec->ids[i] <= id_start + id_count)
                        return true;
        }

        return false;
}

static void iort_node_get_rmr_info(struct acpi_iort_node *node,
                                   struct acpi_iort_node *iommu,
                                   struct device *dev, struct list_head *head)
{
        struct acpi_iort_node *smmu = NULL;
        struct acpi_iort_rmr *rmr;
        struct acpi_iort_id_mapping *map;
        u32 *sids = NULL;
        u32 num_sids = 0;
        int i;

        if (!node->mapping_offset || !node->mapping_count) {
                pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
                       node);
                return;
        }

        rmr = (struct acpi_iort_rmr *)node->node_data;
        if (!rmr->rmr_offset || !rmr->rmr_count)
                return;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset);

        /*
         * Go through the ID mappings and see if we have a match for the
         * SMMU and dev (if dev is not NULL). If found, get the SIDs for
         * the node. Please note, id_count is equal to the number of IDs
         * in the range minus one.
         */
        for (i = 0; i < node->mapping_count; i++, map++) {
                struct acpi_iort_node *parent;

                parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                      map->output_reference);
                if (parent != iommu)
                        continue;

                /* If dev is valid, check RMR node corresponds to the dev SID */
                if (dev && !iort_rmr_has_dev(dev, map->output_base,
                                             map->id_count))
                        continue;

                /* Retrieve SIDs associated with the Node. */
                sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
                                           map->id_count + 1);
                if (!sids)
                        return;

                num_sids += map->id_count + 1;
        }

        if (!sids)
                return;

        iort_get_rmrs(node, smmu, sids, num_sids, head);
        kfree(sids);
}

static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
                           struct list_head *head)
{
        struct acpi_table_iort *iort;
        struct acpi_iort_node *iort_node, *iort_end;
        int i;

        /* Only supports ARM DEN 0049E.d onwards */
        if (iort_table->revision < 5)
                return;

        iort = (struct acpi_table_iort *)iort_table;

        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
                               "IORT node pointer overflows, bad table!\n"))
                        return;

                if (iort_node->type == ACPI_IORT_NODE_RMR)
                        iort_node_get_rmr_info(iort_node, iommu, dev, head);

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }
}

/*
 * Populate the RMR list associated with a given IOMMU and dev (if provided).
 * If dev is NULL, the function populates all the RMRs associated with the
 * given IOMMU.
 */
static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
                                            struct device *dev,
                                            struct list_head *head)
{
        struct acpi_iort_node *iommu;

        iommu = iort_get_iort_node(iommu_fwnode);
        if (!iommu)
                return;

        iort_find_rmrs(iommu, dev, head);
}

static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
        struct acpi_iort_node *iommu;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        iommu = iort_get_iort_node(fwspec->iommu_fwnode);

        if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
                struct acpi_iort_smmu_v3 *smmu;

                smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
                if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
                        return iommu;
        }

        return NULL;
}

/*
 * Retrieve platform specific HW MSI reserve regions.
 * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
 * associated with the device are the HW MSI reserved regions.
 */
static void iort_iommu_msi_get_resv_regions(struct device *dev,
                                            struct list_head *head)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *iommu_node, *its_node = NULL;
        int i;

        iommu_node = iort_get_msi_resv_iommu(dev);
        if (!iommu_node)
                return;

        /*
         * The current logic to reserve ITS regions relies on HW topologies
         * where a given PCI or named component maps its IDs to only one
         * ITS group; if a PCI or named component can map its IDs to
         * different ITS groups through IORT mappings, this function has
         * to be reworked to ensure we reserve regions for all the ITS
         * groups a given PCI or named component may map IDs to.
         */

        for (i = 0; i < fwspec->num_ids; i++) {
                its_node = iort_node_map_id(iommu_node,
                                        fwspec->ids[i],
                                        NULL, IORT_MSI_TYPE);
                if (its_node)
                        break;
        }

        if (!its_node)
                return;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)its_node->node_data;

        for (i = 0; i < its->its_count; i++) {
                phys_addr_t base;

                if (!iort_find_its_base(its->identifiers[i], &base)) {
                        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
                        struct iommu_resv_region *region;

                        region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
                                                         prot, IOMMU_RESV_MSI,
                                                         GFP_KERNEL);
                        if (region)
                                list_add_tail(&region->list, head);
                }
        }
}
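
/*
 * Worked example (illustrative address): for an ITS with
 * base_addr = 0x80000000, the region reserved above is
 * [0x80010000 - 0x8001ffff], i.e. the 64K ITS translation space
 * (containing GITS_TRANSLATER) that gets a fixed IOMMU_RESV_MSI mapping
 * for the HiSilicon Hi161x SMMUv3 model selected by
 * iort_get_msi_resv_iommu().
 */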

/**
 * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 */
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        iort_iommu_msi_get_resv_regions(dev, head);
        iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
}

/**
 * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
 *                     associated StreamIDs information.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
                       struct list_head *head)
{
        iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
}
EXPORT_SYMBOL_GPL(iort_get_rmr_sids);

/**
 * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
                       struct list_head *head)
{
        struct iommu_resv_region *entry, *next;

        list_for_each_entry_safe(entry, next, head, list)
                entry->free(NULL, entry);
}
EXPORT_SYMBOL_GPL(iort_put_rmr_sids);

static inline bool iort_iommu_driver_enabled(u8 type)
{
        switch (type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return IS_ENABLED(CONFIG_ARM_SMMU_V3);
        case ACPI_IORT_NODE_SMMU:
                return IS_ENABLED(CONFIG_ARM_SMMU);
        default:
                pr_warn("IORT node type %u does not describe an SMMU\n", type);
                return false;
        }
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
        struct acpi_iort_root_complex *pci_rc;

        pci_rc = (struct acpi_iort_root_complex *)node->node_data;
        return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
{
        struct acpi_iort_memory_access *memory_access;
        struct acpi_iort_root_complex *pci_rc;

        pci_rc = (struct acpi_iort_root_complex *)node->node_data;
        memory_access =
                (struct acpi_iort_memory_access *)&pci_rc->memory_properties;
        return memory_access->memory_flags & ACPI_IORT_MF_CANWBS;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
                            u32 streamid)
{
        struct fwnode_handle *iort_fwnode;

        /* If there's no SMMU driver at all, give up now */
        if (!node || !iort_iommu_driver_enabled(node->type))
                return -ENODEV;

        iort_fwnode = iort_get_fwnode(node);
        if (!iort_fwnode)
                return -ENODEV;

        /*
         * If the SMMU drivers are enabled but not loaded/probed
         * yet, this will defer.
         */
        return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode);
}

struct iort_pci_alias_info {
        struct device *dev;
        struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
        struct iort_pci_alias_info *info = data;
        struct acpi_iort_node *parent;
        u32 streamid;

        parent = iort_node_map_id(info->node, alias, &streamid,
                                  IORT_IOMMU_TYPE);
        return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
                                      struct acpi_iort_node *node)
{
        struct property_entry props[3] = {};
        struct acpi_iort_named_component *nc;

        nc = (struct acpi_iort_named_component *)node->node_data;
        props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
                                      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
                                                nc->node_flags));
        if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
                props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");

        if (device_create_managed_software_node(dev, props, NULL))
                dev_warn(dev, "Could not add device properties\n");
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
        struct acpi_iort_node *parent;
        int err = -ENODEV, i = 0;
        u32 streamid = 0;

        do {
                parent = iort_node_map_platform_id(node, &streamid,
                                                   IORT_IOMMU_TYPE,
                                                   i++);

                if (parent)
                        err = iort_iommu_xlate(dev, parent, streamid);
        } while (parent && !err);

        return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
                                struct acpi_iort_node *node,
                                const u32 *in_id)
{
        struct acpi_iort_node *parent;
        u32 streamid;

        parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
        if (parent)
                return iort_iommu_xlate(dev, parent, streamid);

        return -ENODEV;
}

/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: 0 on success, <0 on failure
 */
int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
        struct acpi_iort_node *node;
        int err = -ENODEV;

        if (dev_is_pci(dev)) {
                struct iommu_fwspec *fwspec;
                struct pci_bus *bus = to_pci_dev(dev)->bus;
                struct iort_pci_alias_info info = { .dev = dev };

                node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                                      iort_match_node_callback, &bus->dev);
                if (!node)
                        return -ENODEV;

                info.node = node;
                err = pci_for_each_dma_alias(to_pci_dev(dev),
                                             iort_pci_iommu_init, &info);

                fwspec = dev_iommu_fwspec_get(dev);
                if (fwspec && iort_pci_rc_supports_ats(node))
                        fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
                if (fwspec && iort_pci_rc_supports_canwbs(node))
                        fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS;
        } else {
                node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);
                if (!node)
                        return -ENODEV;

                err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
                              iort_nc_iommu_map(dev, node);

                if (!err)
                        iort_named_component_init(dev, node);
        }

        return err;
}
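
/*
 * Flow sketch (illustrative): for a PCI device behind an IORT-described
 * root complex, the PCI branch above amounts to
 *
 *      pci_for_each_dma_alias(pdev, ...)       // for every alias RID
 *        -> iort_node_map_id(RC node, alias, &streamid, IORT_IOMMU_TYPE)
 *        -> iort_iommu_xlate(dev, SMMU node, streamid)
 *             -> acpi_iommu_fwspec_init(dev, streamid, smmu_fwnode)
 *
 * so the device's iommu_fwspec ends up with one StreamID per DMA alias
 * before the ATS/CANWBS capability flags are folded in.
 */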

#else
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *limit)
{
        struct acpi_iort_node *node;
        struct acpi_iort_named_component *ncomp;

        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                              iort_match_node_callback, dev);
        if (!node)
                return -ENODEV;

        ncomp = (struct acpi_iort_named_component *)node->node_data;

        if (!ncomp->memory_address_limit) {
                pr_warn(FW_BUG "Named component missing memory address limit\n");
                return -EINVAL;
        }

        *limit = ncomp->memory_address_limit >= 64 ? U64_MAX :
                        (1ULL << ncomp->memory_address_limit) - 1;

        return 0;
}

static int rc_dma_get_range(struct device *dev, u64 *limit)
{
        struct acpi_iort_node *node;
        struct acpi_iort_root_complex *rc;
        struct pci_bus *pbus = to_pci_dev(dev)->bus;

        node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                              iort_match_node_callback, &pbus->dev);
        if (!node || node->revision < 1)
                return -ENODEV;

        rc = (struct acpi_iort_root_complex *)node->node_data;

        if (!rc->memory_address_limit) {
                pr_warn(FW_BUG "Root complex missing memory address limit\n");
                return -EINVAL;
        }

        *limit = rc->memory_address_limit >= 64 ? U64_MAX :
                        (1ULL << rc->memory_address_limit) - 1;

        return 0;
}

/**
 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
 * @dev: device to lookup
 * @limit: DMA limit result pointer
 *
 * Return: 0 on success, an error otherwise.
 */
int iort_dma_get_ranges(struct device *dev, u64 *limit)
{
        if (dev_is_pci(dev))
                return rc_dma_get_range(dev, limit);
        else
                return nc_dma_get_range(dev, limit);
}
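
/*
 * Limit examples (illustrative): a memory_address_limit of 32 yields
 * *limit = (1ULL << 32) - 1 = 0xffffffff, while any value >= 64 yields
 * U64_MAX (the shift would be undefined otherwise). A zero field is
 * treated as a firmware bug rather than as "no limit".
 */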

static void __init acpi_iort_register_irq(int hwirq, const char *name,
                                          int trigger,
                                          struct resource *res)
{
        int irq = acpi_register_gsi(NULL, hwirq, trigger,
                                    ACPI_ACTIVE_HIGH);

        if (irq <= 0) {
                pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
                                                                      name);
                return;
        }

        res->start = irq;
        res->end = irq;
        res->flags = IORESOURCE_IRQ;
        res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        /* Always present mem resource */
        int num_res = 1;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        if (smmu->event_gsiv)
                num_res++;

        if (smmu->pri_gsiv)
                num_res++;

        if (smmu->gerr_gsiv)
                num_res++;

        if (smmu->sync_gsiv)
                num_res++;

        return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
        /*
         * The Cavium ThunderX2 implementation doesn't support unique IRQ
         * lines; use a single IRQ line for all the SMMUv3 interrupts.
         */
        if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
                return false;

        /*
         * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
         * SPI numbers here.
         */
        return smmu->event_gsiv == smmu->pri_gsiv &&
               smmu->event_gsiv == smmu->gerr_gsiv &&
               smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
        /*
         * Override the size, for Cavium ThunderX2 implementation
         * which doesn't support the page 1 SMMU register space.
         */
        if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
                return SZ_64K;

        return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
                                              struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        int num_res = 0;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address +
                                arm_smmu_v3_resource_size(smmu) - 1;
        res[num_res].flags = IORESOURCE_MEM;

        num_res++;
        if (arm_smmu_v3_is_combined_irq(smmu)) {
                if (smmu->event_gsiv)
                        acpi_iort_register_irq(smmu->event_gsiv, "combined",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);
        } else {

                if (smmu->event_gsiv)
                        acpi_iort_register_irq(smmu->event_gsiv, "eventq",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->pri_gsiv)
                        acpi_iort_register_irq(smmu->pri_gsiv, "priq",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->gerr_gsiv)
                        acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->sync_gsiv)
                        acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);
        }
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
                                             struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        enum dev_dma_attr attr;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
                        DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

        /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
        dev->dma_mask = &dev->coherent_dma_mask;

        /* Configure DMA for the page table walker */
        acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/**
 * arm_smmu_v3_set_proximity() - Set the NUMA proximity domain for an SMMUv3
 *
 * @dev: platform device created for the SMMUv3 node
 * @node: IORT table node for the SMMUv3
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
                                            struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
        if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
                int dev_node = pxm_to_node(smmu->pxm);

                if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
                        return -EINVAL;

                set_dev_node(dev, dev_node);
                pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
                        smmu->base_address,
                        smmu->pxm);
        }
        return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

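/**
 * arm_smmu_count_resources() - Number of resources for an SMMU v1/v2 node
 *
 * @node: IORT table node for the SMMU
 *
 * Returns: resource count (MMIO region + global IRQ + context IRQs)
 */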
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        /*
         * Only consider the global fault interrupt and ignore the
         * configuration access interrupt.
         *
         * MMIO address and global fault interrupt resources are always
         * present so add them to the context interrupt count as a static
         * value.
         */
        return smmu->context_interrupt_count + 2;
}

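/**
 * arm_smmu_init_resources() - Initialize SMMU v1/v2 platform resources
 *
 * @res: resource array, sized by arm_smmu_count_resources()
 * @node: IORT table node for the SMMU
 *
 * Each interrupt table entry is a 64-bit word carrying both the GSIV
 * and the trigger type, decoded here with IORT_IRQ_MASK() and
 * IORT_IRQ_TRIGGER_MASK().
 */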
static void __init arm_smmu_init_resources(struct resource *res,
                                           struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;
        int i, hw_irq, trigger, num_res = 0;
        u64 *ctx_irq, *glb_irq;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address + smmu->span - 1;
        res[num_res].flags = IORESOURCE_MEM;
        num_res++;

        glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
        /* Global IRQs */
        hw_irq = IORT_IRQ_MASK(glb_irq[0]);
        trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

        acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
                               &res[num_res++]);

        /* Context IRQs */
        ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
        for (i = 0; i < smmu->context_interrupt_count; i++) {
                hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
                trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

                acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
                                       &res[num_res++]);
        }
}

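/**
 * arm_smmu_dma_configure() - Set up DMA for the SMMU page table walker
 *
 * @dev: platform device created for the SMMU node
 * @node: IORT table node for the SMMU
 */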
static void __init arm_smmu_dma_configure(struct device *dev,
                                          struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;
        enum dev_dma_attr attr;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
                        DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

        /* We expect the dma masks to be equivalent for SMMU set-ups */
        dev->dma_mask = &dev->coherent_dma_mask;

        /* Configure DMA for the page table walker */
        acpi_dma_configure(dev, attr);
}

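/**
 * arm_smmu_v3_pmcg_count_resources() - Number of resources for a PMCG node
 *
 * @node: IORT table node for the PMCG
 *
 * Returns: 2 for the memory resources, 3 if an overflow GSIV is present
 */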
static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_pmcg *pmcg;

        /* Retrieve PMCG specific data */
        pmcg = (struct acpi_iort_pmcg *)node->node_data;

        /*
         * There are always 2 memory resources.
         * If the overflow_gsiv is present then add that for a total of 3.
         */
        return pmcg->overflow_gsiv ? 3 : 2;
}

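/**
 * arm_smmu_v3_pmcg_init_resources() - Initialize PMCG platform resources
 *
 * @res: resource array, sized by arm_smmu_v3_pmcg_count_resources()
 * @node: IORT table node for the PMCG
 */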
static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
                                                   struct acpi_iort_node *node)
{
        struct acpi_iort_pmcg *pmcg;

        /* Retrieve PMCG specific data */
        pmcg = (struct acpi_iort_pmcg *)node->node_data;

        res[0].start = pmcg->page0_base_address;
        res[0].end = pmcg->page0_base_address + SZ_4K - 1;
        res[0].flags = IORESOURCE_MEM;
        /*
         * The initial version in DEN0049C lacked a way to describe register
         * page 1, which makes it broken for most PMCG implementations; in
         * that case, just let the driver fail gracefully if it expects to
         * find a second memory resource.
         */
        if (node->revision > 0) {
                res[1].start = pmcg->page1_base_address;
                res[1].end = pmcg->page1_base_address + SZ_4K - 1;
                res[1].flags = IORESOURCE_MEM;
        }

        if (pmcg->overflow_gsiv)
                acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
                                       ACPI_EDGE_SENSITIVE, &res[2]);
}

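/*
 * Platforms whose PMCG implementations are affected by known errata;
 * matched by OEM and table ID so the driver can pick the right model.
 */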
static struct acpi_platform_list pmcg_plat_info[] __initdata = {
        /* HiSilicon Hip08 Platform */
        {"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
         "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
        /* HiSilicon Hip09 Platform */
        {"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
         "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
        {"HISI  ", "HIP09A  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
         "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
        /* HiSilicon Hip10/11 platforms use the same SMMU IP as Hip09 */
        {"HISI  ", "HIP10   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
         "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
        {"HISI  ", "HIP10C  ", 0, ACPI_SIG_IORT, greater_than_or_equal,
         "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
        {"HISI  ", "HIP11   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
         "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
        { }
};

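/**
 * arm_smmu_v3_pmcg_add_platdata() - Attach the PMCG model as platform data
 *
 * @pdev: platform device created for the PMCG node
 *
 * Returns: 0 on success
 *          <0 on failure
 */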
static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
        u32 model;
        int idx;

        idx = acpi_match_platform_list(pmcg_plat_info);
        if (idx >= 0)
                model = pmcg_plat_info[idx].data;
        else
                model = IORT_SMMU_V3_PMCG_GENERIC;

        return platform_device_add_data(pdev, &model, sizeof(model));
}

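/**
 * struct iort_dev_config - Per node-type platform device setup callbacks
 *
 * @name: name of the platform device to create
 * @dev_init: optional early initialization hook for the node
 * @dev_dma_configure: configure DMA (coherency attribute, masks)
 * @dev_count_resources: return the number of resources the node needs
 * @dev_init_resources: fill in the resource array for the node
 * @dev_set_proximity: bind the device to its NUMA node, if described
 * @dev_add_platdata: attach model specific platform_data to the device
 */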
struct iort_dev_config {
        const char *name;
        int (*dev_init)(struct acpi_iort_node *node);
        void (*dev_dma_configure)(struct device *dev,
                                  struct acpi_iort_node *node);
        int (*dev_count_resources)(struct acpi_iort_node *node);
        void (*dev_init_resources)(struct resource *res,
                                   struct acpi_iort_node *node);
        int (*dev_set_proximity)(struct device *dev,
                                 struct acpi_iort_node *node);
        int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
        .name = "arm-smmu-v3",
        .dev_dma_configure = arm_smmu_v3_dma_configure,
        .dev_count_resources = arm_smmu_v3_count_resources,
        .dev_init_resources = arm_smmu_v3_init_resources,
        .dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
        .name = "arm-smmu",
        .dev_dma_configure = arm_smmu_dma_configure,
        .dev_count_resources = arm_smmu_count_resources,
        .dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
        .name = "arm-smmu-v3-pmcg",
        .dev_count_resources = arm_smmu_v3_pmcg_count_resources,
        .dev_init_resources = arm_smmu_v3_pmcg_init_resources,
        .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

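/**
 * iort_get_dev_cfg() - Retrieve the device config for an IORT node type
 *
 * @node: IORT table node to be matched
 *
 * Returns: iort_dev_config pointer for SMMU, SMMUv3 and PMCG nodes
 *          NULL for node types with no platform device to create
 */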
static __init const struct iort_dev_config *iort_get_dev_cfg(
                        struct acpi_iort_node *node)
{
        switch (node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return &iort_arm_smmu_v3_cfg;
        case ACPI_IORT_NODE_SMMU:
                return &iort_arm_smmu_cfg;
        case ACPI_IORT_NODE_PMCG:
                return &iort_arm_smmu_v3_pmcg_cfg;
        default:
                return NULL;
        }
}

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 on failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
                                           const struct iort_dev_config *ops)
{
        struct fwnode_handle *fwnode;
        struct platform_device *pdev;
        struct resource *r;
        int ret, count;

        pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
        if (!pdev)
                return -ENOMEM;

        if (ops->dev_set_proximity) {
                ret = ops->dev_set_proximity(&pdev->dev, node);
                if (ret)
                        goto dev_put;
        }

        count = ops->dev_count_resources(node);

        r = kzalloc_objs(*r, count, GFP_KERNEL);
        if (!r) {
                ret = -ENOMEM;
                goto dev_put;
        }

        ops->dev_init_resources(r, node);

        ret = platform_device_add_resources(pdev, r, count);
        /*
         * Resources are duplicated in platform_device_add_resources();
         * free the locally allocated copy.
         */
        kfree(r);

        if (ret)
                goto dev_put;

        /*
         * Platform devices based on PMCG nodes use platform_data to
         * pass the hardware model info to the driver. For the others,
         * add a copy of the IORT node pointer to platform_data so the
         * driver can retrieve its IORT data.
         */
        if (ops->dev_add_platdata)
                ret = ops->dev_add_platdata(pdev);
        else
                ret = platform_device_add_data(pdev, &node, sizeof(node));

        if (ret)
                goto dev_put;

        fwnode = iort_get_fwnode(node);

        if (!fwnode) {
                ret = -ENODEV;
                goto dev_put;
        }

        pdev->dev.fwnode = fwnode;

        if (ops->dev_dma_configure)
                ops->dev_dma_configure(&pdev->dev, node);

        iort_set_device_domain(&pdev->dev, node);

        ret = platform_device_add(pdev);
        if (ret)
                goto dma_deconfigure;

        return 0;

dma_deconfigure:
        arch_teardown_dma_ops(&pdev->dev);
dev_put:
        platform_device_put(pdev);

        return ret;
}

#ifdef CONFIG_PCI
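/*
 * Request PCI ACS (once) if any root complex described in the IORT has
 * an ID mapping targeting an SMMU, so that peer-to-peer traffic cannot
 * bypass the IOMMU translation.
 */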
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
        static bool acs_enabled __initdata;

        if (acs_enabled)
                return;

        if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_node *parent;
                struct acpi_iort_id_mapping *map;
                int i;

                map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
                                   iort_node->mapping_offset);

                for (i = 0; i < iort_node->mapping_count; i++, map++) {
                        if (!map->output_reference)
                                continue;

                        parent = ACPI_ADD_PTR(struct acpi_iort_node,
                                              iort_table, map->output_reference);
                        /*
                         * If we detect an RC->SMMU mapping, make sure
                         * we enable ACS on the system.
                         */
                        if ((parent->type == ACPI_IORT_NODE_SMMU) ||
                                (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
                                pci_request_acs();
                                acs_enabled = true;
                                return;
                        }
                }
        }
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

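/*
 * Walk all IORT nodes once at init: enable PCI ACS where required and
 * create a platform device, with fwnode and resources attached, for
 * every node type that has an iort_dev_config.
 */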
static void __init iort_init_platform_devices(void)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        struct fwnode_handle *fwnode;
        int i, ret;
        const struct iort_dev_config *ops;

        /*
         * iort_table and iort both point to the start of the IORT table,
         * but have different struct types
         */
        iort = (struct acpi_table_iort *)iort_table;

        /* Get the first IORT node */
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (iort_node >= iort_end) {
                        pr_err("iort node pointer overflows, bad table\n");
                        return;
                }

                iort_enable_acs(iort_node);

                ops = iort_get_dev_cfg(iort_node);
                if (ops) {
                        fwnode = acpi_alloc_fwnode_static();
                        if (!fwnode)
                                return;

                        iort_set_fwnode(iort_node, fwnode);

                        ret = iort_add_platform_device(iort_node, ops);
                        if (ret) {
                                iort_delete_fwnode(iort_node);
                                acpi_free_fwnode_static(fwnode);
                                return;
                        }
                }

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }
}

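/**
 * acpi_iort_init() - Map the IORT table and scan it for platform devices
 *
 * Entry point for IORT handling, called once at boot.
 */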
void __init acpi_iort_init(void)
{
        acpi_status status;

        /*
         * iort_table will be used at runtime after the iort init,
         * so we don't need to call acpi_put_table() to release
         * the IORT table mapping.
         */
        status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        const char *msg = acpi_format_exception(status);

                        pr_err("Failed to get table, %s\n", msg);
                }

                return;
        }

        iort_init_platform_devices();
}

#ifdef CONFIG_ZONE_DMA
/*
 * Extract the highest CPU physical address accessible to all DMA masters in
 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
 */
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
        phys_addr_t limit = PHYS_ADDR_MAX;
        struct acpi_iort_node *node, *end;
        struct acpi_table_iort *iort;
        acpi_status status;
        int i;

        if (acpi_disabled)
                return limit;

        status = acpi_get_table(ACPI_SIG_IORT, 0,
                                (struct acpi_table_header **)&iort);
        if (ACPI_FAILURE(status))
                return limit;

        node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
        end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);

        for (i = 0; i < iort->node_count; i++) {
                if (node >= end)
                        break;

                switch (node->type) {
                        struct acpi_iort_named_component *ncomp;
                        struct acpi_iort_root_complex *rc;
                        phys_addr_t local_limit;

                case ACPI_IORT_NODE_NAMED_COMPONENT:
                        ncomp = (struct acpi_iort_named_component *)node->node_data;
                        local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
                        limit = min_not_zero(limit, local_limit);
                        break;

                case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
                        if (node->revision < 1)
                                break;

                        rc = (struct acpi_iort_root_complex *)node->node_data;
                        local_limit = DMA_BIT_MASK(rc->memory_address_limit);
                        limit = min_not_zero(limit, local_limit);
                        break;
                }
                node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
        }
        acpi_put_table(&iort->header);
        return limit;
}
#endif