#include <linux/acpi_iort.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include "msi.h"
/*
 * Allocate @nvec interrupts of @type (MSI or MSI-X) for @dev.
 *
 * When a hierarchical MSI irqdomain is attached to the device, allocate
 * through the generic MSI domain code; otherwise fall back to the legacy
 * arch-specific setup path.
 */
int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct irq_domain *domain = dev_get_msi_domain(&dev->dev);

	if (!domain || !irq_domain_is_hierarchy(domain))
		return pci_msi_legacy_setup_msi_irqs(dev, nvec, type);

	return msi_domain_alloc_irqs_all_locked(&dev->dev, MSI_DEFAULT_DOMAIN, nvec);
}
/*
 * Free all MSI/MSI-X interrupts previously allocated for @dev.
 *
 * The hierarchical-domain path frees the descriptors itself; the legacy
 * path needs an explicit descriptor cleanup afterwards.
 */
void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct irq_domain *domain = dev_get_msi_domain(&dev->dev);

	if (domain && irq_domain_is_hierarchy(domain)) {
		msi_domain_free_irqs_all_locked(&dev->dev, MSI_DEFAULT_DOMAIN);
		return;
	}

	pci_msi_legacy_teardown_msi_irqs(dev);
	msi_free_msi_descs(&dev->dev);
}
/*
 * irq_write_msi_msg() callback: write @msg to the device's MSI capability.
 *
 * Only the base interrupt of a multi-vector MSI block (desc->irq) carries
 * the message; skip the write for the secondary vectors.
 */
static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);

	if (desc->irq != irq_data->irq)
		return;

	__pci_write_msi_msg(desc, msg);
}
/*
 * msi_domain_ops::set_desc(): record the descriptor in the allocation info
 * and use the descriptor's MSI index as the hardware interrupt number.
 */
static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->hwirq = desc->msi_index;
	arg->desc = desc;
}
/*
 * Conditionally propagate a shutdown to the parent domain.
 *
 * STARTUP_PARENT takes precedence: when set, forward a full shutdown to
 * the parent chip. Otherwise, when only MASK_PARENT is set, mask the
 * parent interrupt instead. With neither flag set this is a no-op.
 */
static void cond_shutdown_parent(struct irq_data *data)
{
	struct msi_domain_info *info = data->domain->host_data;

	if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
		irq_chip_shutdown_parent(data);
	else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
		irq_chip_mask_parent(data);
}
/*
 * Conditionally propagate a startup to the parent domain.
 *
 * Mirror image of cond_shutdown_parent(): STARTUP_PARENT takes precedence
 * and its parent-startup return value is forwarded to the caller; with
 * only MASK_PARENT set, the parent is unmasked and 0 is returned.
 */
static unsigned int cond_startup_parent(struct irq_data *data)
{
	struct msi_domain_info *info = data->domain->host_data;

	if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
		return irq_chip_startup_parent(data);
	else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
		irq_chip_unmask_parent(data);

	return 0;
}
/*
 * irq_shutdown() for MSI: mask the vector at the device first, then let
 * the parent domain shut down (or mask) if its flags require it.
 *
 * BIT(data->irq - desc->irq) selects this vector's mask bit within a
 * multi-vector MSI block whose Linux irq numbers are consecutive.
 */
static void pci_irq_shutdown_msi(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	pci_msi_mask(desc, BIT(data->irq - desc->irq));
	cond_shutdown_parent(data);
}
/*
 * irq_startup() for MSI: start up (or unmask) the parent domain first,
 * then unmask the vector at the device — the reverse order of
 * pci_irq_shutdown_msi(). Returns the parent's startup result.
 */
static unsigned int pci_irq_startup_msi(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);
	unsigned int ret = cond_startup_parent(data);

	pci_msi_unmask(desc, BIT(data->irq - desc->irq));
	return ret;
}
static void pci_irq_mask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
pci_msi_mask(desc, BIT(data->irq - desc->irq));
}
static void pci_irq_unmask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
pci_msi_unmask(desc, BIT(data->irq - desc->irq));
}
/*
 * With reservation mode enabled, interrupts must be reactivated at
 * request time so the reserved vector is replaced by a real one.
 */
#ifdef CONFIG_GENERIC_IRQ_RESERVATION_MODE
# define MSI_REACTIVATE		MSI_FLAG_MUST_REACTIVATE
#else
# define MSI_REACTIVATE		0
#endif

/* Flags shared by the MSI and MSI-X device domain templates below. */
#define MSI_COMMON_FLAGS	(MSI_FLAG_FREE_MSI_DESCS |	\
				 MSI_FLAG_ACTIVATE_EARLY |	\
				 MSI_FLAG_DEV_SYSFS |		\
				 MSI_REACTIVATE)
/*
 * Template for per-device MSI irqdomains, instantiated by
 * pci_setup_msi_device_domain(). Supports multi-vector MSI.
 */
static const struct msi_domain_template pci_msi_template = {
	.chip = {
		.name			= "PCI-MSI",
		.irq_startup		= pci_irq_startup_msi,
		.irq_shutdown		= pci_irq_shutdown_msi,
		.irq_mask		= pci_irq_mask_msi,
		.irq_unmask		= pci_irq_unmask_msi,
		.irq_write_msi_msg	= pci_msi_domain_write_msg,
		.flags			= IRQCHIP_ONESHOT_SAFE,
	},

	.ops = {
		.set_desc		= pci_device_domain_set_desc,
	},

	.info = {
		.flags			= MSI_COMMON_FLAGS | MSI_FLAG_MULTI_PCI_MSI,
		.bus_token		= DOMAIN_BUS_PCI_DEVICE_MSI,
	},
};
/*
 * irq_shutdown() for MSI-X: mask the vector at the device first, then
 * let the parent domain shut down (or mask) if its flags require it.
 */
static void pci_irq_shutdown_msix(struct irq_data *data)
{
	pci_msix_mask(irq_data_get_msi_desc(data));
	cond_shutdown_parent(data);
}
/*
 * irq_startup() for MSI-X: start up (or unmask) the parent domain first,
 * then unmask the vector at the device. Returns the parent's startup
 * result.
 */
static unsigned int pci_irq_startup_msix(struct irq_data *data)
{
	unsigned int ret = cond_startup_parent(data);

	pci_msix_unmask(irq_data_get_msi_desc(data));
	return ret;
}
/* irq_mask() for MSI-X: mask this vector in the device's MSI-X table. */
static void pci_irq_mask_msix(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	pci_msix_mask(desc);
}
/* irq_unmask() for MSI-X: unmask this vector in the device's MSI-X table. */
static void pci_irq_unmask_msix(struct irq_data *data)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	pci_msix_unmask(desc);
}
/*
 * msi_domain_ops::prepare_desc() for MSI-X: lazily initialize the
 * descriptor's MSI-X table state (mask base etc.) on first use. A
 * descriptor with pci.mask_base already set has been prepared before
 * and is left untouched.
 */
void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
			   struct msi_desc *desc)
{
	if (!desc->pci.mask_base)
		msix_prepare_msi_desc(to_pci_dev(desc->dev), desc);
}
EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
/*
 * Template for per-device MSI-X irqdomains, instantiated by
 * pci_setup_msix_device_domain(). Supports dynamic vector allocation.
 */
static const struct msi_domain_template pci_msix_template = {
	.chip = {
		.name			= "PCI-MSIX",
		.irq_startup		= pci_irq_startup_msix,
		.irq_shutdown		= pci_irq_shutdown_msix,
		.irq_mask		= pci_irq_mask_msix,
		.irq_unmask		= pci_irq_unmask_msix,
		.irq_write_msi_msg	= pci_msi_domain_write_msg,
		.flags			= IRQCHIP_ONESHOT_SAFE,
	},

	.ops = {
		.prepare_desc		= pci_msix_prepare_desc,
		.set_desc		= pci_device_domain_set_desc,
	},

	.info = {
		.flags			= MSI_COMMON_FLAGS | MSI_FLAG_PCI_MSIX |
					  MSI_FLAG_PCI_MSIX_ALLOC_DYN,
		.bus_token		= DOMAIN_BUS_PCI_DEVICE_MSIX,
	},
};
/*
 * Check whether @pdev already has a per-device MSI domain with the given
 * @bus_token (MSI vs. MSI-X).
 */
static bool pci_match_device_domain(struct pci_dev *pdev, enum irq_domain_bus_token bus_token)
{
	struct device *dev = &pdev->dev;

	return msi_match_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, bus_token);
}
/*
 * Create a per-device MSI domain for @pdev from @tmpl with @hwsize
 * hardware vectors. Without an MSI-parent capable domain attached there
 * is nothing to create — report success so legacy paths proceed.
 */
static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_domain_template *tmpl,
				     unsigned int hwsize)
{
	struct irq_domain *parent = dev_get_msi_domain(&pdev->dev);

	if (!(parent && irq_domain_is_msi_parent(parent)))
		return true;

	return msi_create_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN, tmpl,
					    hwsize, NULL, NULL);
}
/*
 * Set up a per-device MSI domain for @pdev with @hwsize vectors.
 *
 * Fails if MSI-X is already enabled (the two modes are mutually
 * exclusive). Reuses an existing MSI domain; a leftover MSI-X domain is
 * removed before creating the MSI one. Returns true on success or when
 * no per-device domain is applicable.
 */
bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize)
{
	if (WARN_ON_ONCE(pdev->msix_enabled))
		return false;

	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSI))
		return true;

	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX))
		msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN);

	return pci_create_device_domain(pdev, &pci_msi_template, hwsize);
}
/*
 * Set up a per-device MSI-X domain for @pdev with @hwsize vectors.
 *
 * Mirror of pci_setup_msi_device_domain(): fails if MSI is already
 * enabled, reuses an existing MSI-X domain, and replaces a leftover MSI
 * domain with an MSI-X one.
 */
bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize)
{
	if (WARN_ON_ONCE(pdev->msi_enabled))
		return false;

	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX))
		return true;

	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSI))
		msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN);

	return pci_create_device_domain(pdev, &pci_msix_template, hwsize);
}
/*
 * Query whether the MSI domain serving @pdev supports all features in
 * @feature_mask.
 *
 * Without a hierarchical domain, support depends on the arch fallback:
 * legacy arch callbacks (if configured) are only usable when the caller
 * passed ALLOW_LEGACY. For an MSI parent domain the supported flags come
 * from its msi_parent_ops; otherwise from the domain's own info flags.
 */
bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
			     enum support_mode mode)
{
	struct msi_domain_info *info;
	struct irq_domain *domain;
	unsigned int supported;

	domain = dev_get_msi_domain(&pdev->dev);

	if (!domain || !irq_domain_is_hierarchy(domain)) {
		if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS))
			return mode == ALLOW_LEGACY;
		return false;
	}

	if (!irq_domain_is_msi_parent(domain)) {
		/* Non-parent hierarchical domain: read its own info flags */
		info = domain->host_data;
		supported = info->flags;
	} else {
		supported = domain->msi_parent_ops->supported_flags;
	}

	/* All requested feature bits must be present */
	return (supported & feature_mask) == feature_mask;
}
/*
 * pci_for_each_dma_alias() callback: accumulate the requester ID used for
 * MSI routing into *data.
 *
 * The alias replaces the current candidate only when it lives on a
 * different bus than the current candidate (either the aliasing device's
 * bus or the alias's own bus number differs) — i.e. the RID seen past a
 * bus-translating bridge wins. Always returns 0 to keep iterating.
 */
static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
{
	u32 *pa = data;
	u8 bus = PCI_BUS_NUM(*pa);

	if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
		*pa = alias;

	return 0;
}
/*
 * Compute the requester ID @domain uses for @pdev: start from the device
 * ID, fold in DMA aliases, then translate through the firmware mapping —
 * OF "msi-map" when the domain has an OF node, IORT otherwise.
 */
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
	u32 rid = pci_dev_id(pdev);
	struct device_node *node;

	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);

	node = irq_domain_get_of_node(domain);
	if (node)
		return of_msi_xlate(&pdev->dev, &node, rid);

	return iort_msi_map_id(&pdev->dev, rid);
}
/*
 * Like pci_msi_domain_get_msi_rid(), but additionally report the MSI
 * controller's firmware node through @node.
 *
 * On the OF path, *node is only written when the translation yields a
 * controller node; on the IORT path, iort_msi_xlate() handles @node
 * itself. Returns the translated requester ID.
 */
u32 pci_msi_map_rid_ctlr_node(struct irq_domain *domain, struct pci_dev *pdev,
			      struct fwnode_handle **node)
{
	u32 rid = pci_dev_id(pdev);

	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);

	if (irq_domain_get_of_node(domain)) {
		struct device_node *msi_ctlr_node = NULL;

		rid = of_msi_xlate(&pdev->dev, &msi_ctlr_node, rid);
		if (msi_ctlr_node)
			*node = of_fwnode_handle(msi_ctlr_node);
	} else {
		rid = iort_msi_xlate(&pdev->dev, rid, node);
	}

	return rid;
}
/*
 * Look up the MSI irqdomain for @pdev via firmware: resolve the
 * alias-adjusted requester ID, then try the OF "msi-map" first and fall
 * back to ACPI IORT. Returns NULL when neither yields a domain.
 */
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	u32 rid = pci_dev_id(pdev);
	struct irq_domain *dom;

	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);

	dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
	if (dom)
		return dom;

	return iort_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
}