#include <linux/units.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
#include "trace.h"
/*
 * Upper bound, in seconds, for the Memory_Active poll performed by
 * cxl_dvsec_mem_range_active(); writable at runtime via sysfs (0644).
 */
static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
/*
 * pci_get_port_num() - retrieve the Port Number of a PCIe port
 * @pdev: a Root Port or Downstream Switch Port
 *
 * Returns the Port Number field of the Link Capabilities register,
 * -EINVAL when @pdev is not a port type that carries one, or -ENXIO
 * when the config read fails.
 */
static int pci_get_port_num(struct pci_dev *pdev)
{
	u32 lnkcap;

	switch (pci_pcie_type(pdev)) {
	case PCI_EXP_TYPE_DOWNSTREAM:
	case PCI_EXP_TYPE_ROOT_PORT:
		break;
	default:
		return -EINVAL;
	}

	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return -ENXIO;

	return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
}
struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
struct device *dport_dev)
{
struct cxl_register_map map;
struct pci_dev *pdev;
int port_num, rc;
if (!dev_is_pci(dport_dev))
return ERR_PTR(-EINVAL);
pdev = to_pci_dev(dport_dev);
port_num = pci_get_port_num(pdev);
if (port_num < 0)
return ERR_PTR(port_num);
rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
if (rc)
return ERR_PTR(rc);
device_lock_assert(&port->dev);
return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport_by_dev, "CXL");
/*
 * Poll the Memory_Info_Valid bit of DVSEC range register @id. The
 * device gets two 1s-spaced attempts before a timeout is declared.
 *
 * Returns 0 when valid, -EINVAL for an out-of-range @id, -ETIMEDOUT on
 * timeout, or a config accessor error.
 */
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	int attempt, rc;
	u32 size_lo;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check Memory_Info_Valid, retrying once after a 1s delay */
	for (attempt = 0; attempt < 2; attempt++) {
		rc = pci_read_config_dword(pdev,
					   d + PCI_DVSEC_CXL_RANGE_SIZE_LOW(id),
					   &size_lo);
		if (rc)
			return rc;
		if (FIELD_GET(PCI_DVSEC_CXL_MEM_INFO_VALID, size_lo))
			return 0;
		msleep(1000);
	}

	dev_err(&pdev->dev,
		"Timeout awaiting memory range %d valid after 1s.\n", id);
	return -ETIMEDOUT;
}
/*
 * Poll the Memory_Active bit of DVSEC range register @id roughly once
 * per second, for up to media_ready_timeout seconds.
 *
 * Returns 0 when active, -EINVAL for an out-of-range @id, -ETIMEDOUT on
 * timeout, or a config accessor error.
 */
static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	unsigned short retries = media_ready_timeout;
	int d = cxlds->cxl_dvsec;
	u32 size_lo;
	int rc;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	while (retries--) {
		rc = pci_read_config_dword(pdev,
					   d + PCI_DVSEC_CXL_RANGE_SIZE_LOW(id),
					   &size_lo);
		if (rc)
			return rc;
		if (FIELD_GET(PCI_DVSEC_CXL_MEM_ACTIVE, size_lo))
			return 0;
		msleep(1000);
	}

	dev_err(&pdev->dev,
		"timeout awaiting memory active after %d seconds\n",
		media_ready_timeout);
	return -ETIMEDOUT;
}
/**
 * cxl_await_media_ready() - wait for CXL.mem media to become usable
 * @cxlds: device state carrying the DVSEC offset and memdev registers
 *
 * For every HDM range advertised in the CXL DVSEC capability, wait first
 * for the range's Memory_Info_Valid bit and then for its Memory_Active
 * bit. Finally confirm the memdev status register reports the device as
 * ready.
 *
 * Returns 0 on success, a config-accessor error, -ETIMEDOUT when a range
 * never became valid/active, or -EIO when the memdev status register
 * does not indicate ready.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	int rc, i, hdm_count;
	u64 md_status;
	u16 cap;

	rc = pci_read_config_word(pdev,
				  d + PCI_DVSEC_CXL_CAP, &cap);
	if (rc)
		return rc;

	hdm_count = FIELD_GET(PCI_DVSEC_CXL_HDM_COUNT, cap);

	/* All ranges report Memory_Info_Valid before Memory_Active is polled */
	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_valid(cxlds, i);
		if (rc)
			return rc;
	}

	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_active(cxlds, i);
		if (rc)
			return rc;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, "CXL");
/*
 * Set or clear the Mem_Enable bit in the CXL DVSEC Control register.
 * @val: either PCI_DVSEC_CXL_MEM_ENABLE or 0
 *
 * Returns 1 when the bit already matched @val (no write issued), 0 after
 * a successful update, or a negative config accessor error.
 */
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + PCI_DVSEC_CXL_CTRL, &ctrl);
	if (rc < 0)
		return rc;

	/* Nothing to do if the enable state already matches */
	if ((ctrl & PCI_DVSEC_CXL_MEM_ENABLE) == val)
		return 1;

	ctrl = (ctrl & ~PCI_DVSEC_CXL_MEM_ENABLE) | val;
	rc = pci_write_config_word(pdev, d + PCI_DVSEC_CXL_CTRL, ctrl);

	return rc < 0 ? rc : 0;
}
/* devm release action: undo the Mem_Enable set by devm_cxl_enable_mem() */
static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}
/*
 * Enable CXL.mem decode via the DVSEC Control register and register a
 * devm action on @host to clear it at teardown. If Mem_Enable was
 * already set the device is left untouched and no action is registered.
 */
static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc = cxl_set_mem_enable(cxlds, PCI_DVSEC_CXL_MEM_ENABLE);

	if (rc < 0)
		return rc;
	if (rc > 0)	/* already enabled elsewhere, not ours to clear */
		return 0;

	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}
/*
 * device_find_child() match callback: does @dev identify a RAM-capable
 * root decoder whose HPA window fully contains the range in @arg?
 */
static int dvsec_range_allowed(struct device *dev, const void *arg)
{
	const struct range *query = arg;
	struct cxl_decoder *cxld;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	if (cxld->flags & CXL_DECODER_F_RAM)
		return range_contains(&cxld->hpa_range, query);

	return 0;
}
static void disable_hdm(void *_cxlhdm)
{
u32 global_ctrl;
struct cxl_hdm *cxlhdm = _cxlhdm;
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}
static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
u32 global_ctrl;
global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
/**
 * cxl_dvsec_rr_decode() - decode the DVSEC range registers into @info
 * @cxlds: device state with the cached CXL DVSEC offset
 * @info: output; receives mem_enabled plus every non-zero decoded range
 *
 * Reads the HDM count and Mem_Enable state from the DVSEC capability and,
 * when memory decode is enabled, assembles each range's 64-bit size and
 * base from the paired HIGH/LOW config registers. Zero-sized ranges are
 * skipped and do not consume a dvsec_range[] slot.
 *
 * Returns 0 on success (including the mem-disabled case), -ENXIO when
 * the DVSEC is absent or not mem-capable, -EINVAL for an unsupported HDM
 * count, or a config accessor error.
 */
int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
			struct cxl_endpoint_dvsec_info *info)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct device *dev = cxlds->dev;
	int hdm_count, rc, i, ranges = 0;
	int d = cxlds->cxl_dvsec;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + PCI_DVSEC_CXL_CAP, &cap);
	if (rc)
		return rc;

	if (!(cap & PCI_DVSEC_CXL_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/* Only 1 or 2 HDM ranges are supported */
	hdm_count = FIELD_GET(PCI_DVSEC_CXL_HDM_COUNT, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = pci_read_config_word(pdev, d + PCI_DVSEC_CXL_CTRL, &ctrl);
	if (rc)
		return rc;

	/* Range contents are only meaningful while Mem_Enable is set */
	info->mem_enabled = FIELD_GET(PCI_DVSEC_CXL_MEM_ENABLE, ctrl);
	if (!info->mem_enabled)
		return 0;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		/* The SIZE_LOW register also carries Memory_Info_Valid */
		rc = cxl_dvsec_mem_range_valid(cxlds, i);
		if (rc)
			return rc;

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & PCI_DVSEC_CXL_MEM_SIZE_LOW;
		if (!size) {
			/* Zero-sized range: nothing to record */
			continue;
		}

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + PCI_DVSEC_CXL_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & PCI_DVSEC_CXL_MEM_BASE_LOW;

		info->dvsec_range[ranges++] = (struct range) {
			.start = base,
			.end = base + size - 1
		};
	}

	info->ranges = ranges;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, "CXL");
/**
 * cxl_hdm_decode_init() - reconcile DVSEC range decode with HDM decoders
 * @cxlds: device state (DVSEC offset, parent device)
 * @cxlhdm: mapped HDM decoder capability for the endpoint port
 * @info: DVSEC range state previously filled by cxl_dvsec_rr_decode()
 *
 * Decide whether the endpoint decodes memory through its HDM decoders or
 * through the legacy DVSEC ranges, enable the chosen mechanism, and
 * validate any active DVSEC ranges against the platform's RAM-capable
 * root decoders.
 *
 * Returns 0 on success, -ENODEV when no usable decode path exists, or
 * -ENXIO when every active DVSEC range falls outside the platform's CXL
 * windows.
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
			struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl = 0;

	/* hdm may be NULL when the endpoint relies on DVSEC ranges alone */
	if (hdm)
		global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * HDM decoders already active, or no decoder registers but DVSEC
	 * decode is on: just ensure Mem_Enable stays set.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
		return devm_cxl_enable_mem(&port->dev, cxlds);
	if (!hdm)
		return -ENODEV;

	/* No DVSEC ranges active: hand decode over to the HDM decoders */
	if (!info->mem_enabled) {
		rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
		if (rc)
			return rc;

		return devm_cxl_enable_mem(&port->dev, cxlds);
	}

	/* Walk up the topology to find the CXL root port */
	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return -ENODEV;
	}

	/* Each active DVSEC range must sit inside a platform RAM window */
	for (i = 0, allowed = 0; i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed) {
		dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
/* DOE Table Access request/response dword field layout (CDAT retrieval) */
#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff
#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE 0x0000ff00
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA 0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE 0xffff0000
/* Handle value carried by the response for the final table entry */
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2

/* Build a little-endian Table Access read request for @entry_handle */
#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
/*
 * Issue a Table Access read for entry handle 0 and extract the total
 * CDAT table length from the second response dword.
 *
 * Returns 0 with *length set on success, -EIO on a short response, or
 * the pci_doe() error.
 */
static int cxl_cdat_get_length(struct device *dev,
			       struct pci_doe_mb *doe_mb,
			       size_t *length)
{
	__le32 req = CDAT_DOE_REQ(0);
	__le32 rsp[2];
	int rc;

	rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS,
		     &req, sizeof(req), &rsp, sizeof(rsp));
	if (rc < 0) {
		dev_err(dev, "DOE failed: %d", rc);
		return rc;
	}
	/* Need the DOE header dword plus the length dword */
	if (rc < sizeof(rsp))
		return -EIO;

	*length = le32_to_cpu(rsp[1]);
	dev_dbg(dev, "CDAT length %zu\n", *length);

	return 0;
}
/*
 * cxl_cdat_read_table() - stream the CDAT table into @rsp via DOE
 * @dev: device for diagnostics
 * @doe_mb: DOE mailbox speaking the Table Access protocol
 * @rsp: buffer of at least sizeof(*rsp) + *length bytes
 * @length: in: maximum table bytes to read; out: bytes actually read
 *
 * Entries are fetched one at a time into a sliding window of the same
 * buffer. Each response begins with a DOE header that lands on top of
 * the tail of the previously received entry, so the dword about to be
 * clobbered is saved (saved_dw) and restored once the header has been
 * consumed.
 *
 * Returns 0 on success, -EIO on a malformed response, or the pci_doe()
 * error.
 */
static int cxl_cdat_read_table(struct device *dev,
			       struct pci_doe_mb *doe_mb,
			       struct cdat_doe_rsp *rsp, size_t *length)
{
	size_t received, remaining = *length;
	unsigned int entry_handle = 0;
	union cdat_data *data;
	__le32 saved_dw = 0;

	do {
		__le32 request = CDAT_DOE_REQ(entry_handle);
		int rc;

		rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
			     &request, sizeof(request),
			     rsp, sizeof(*rsp) + remaining);
		if (rc < 0) {
			dev_err(dev, "DOE failed: %d", rc);
			return rc;
		}
		/* Response must at least carry the DOE header */
		if (rc < sizeof(*rsp))
			return -EIO;

		data = (union cdat_data *)rsp->data;
		received = rc - sizeof(*rsp);

		/* Entry 0 is the fixed-size CDAT header, the rest are entries */
		if (entry_handle == 0) {
			if (received != sizeof(data->header))
				return -EIO;
		} else {
			if (received < sizeof(data->entry) ||
			    received != le16_to_cpu(data->entry.length))
				return -EIO;
		}

		/* Handle of the next entry, or LAST_ENTRY when done */
		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
					 le32_to_cpu(rsp->doe_header));

		/* Restore the dword this response's header overwrote */
		rsp->doe_header = saved_dw;
		remaining -= received;
		rsp = (void *)rsp + received;
		/* Remember the dword the next response's header will clobber */
		saved_dw = rsp->doe_header;
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

	*length -= remaining;

	return 0;
}
/* Sum all bytes of @buf modulo 256; a valid CDAT table sums to zero. */
static unsigned char cdat_checksum(void *buf, size_t size)
{
	const unsigned char *bytes = buf;
	unsigned char sum = 0;

	while (size--)
		sum += *bytes++;

	return sum;
}
/**
 * read_cdat_data() - retrieve and cache the CDAT table for @port
 * @port: port whose uport device (memdev host or PCI function) may
 *        expose CDAT over DOE
 *
 * Locates a DOE mailbox speaking the Table Access protocol, queries the
 * table length, reads the whole table into a devm-allocated buffer, and
 * publishes it in @port->cdat after checksum validation. Any failure
 * after the mailbox was found leaves port->cdat empty while
 * cdat_available remains set.
 */
void read_cdat_data(struct cxl_port *port)
{
	struct device *uport = port->uport_dev;
	struct device *dev = &port->dev;
	struct pci_doe_mb *doe_mb;
	struct pci_dev *pdev = NULL;
	struct cxl_memdev *cxlmd;
	struct cdat_doe_rsp *buf;
	size_t table_length, length;
	int rc;

	/* Resolve the PCI device backing the uport, if any */
	if (is_cxl_memdev(uport)) {
		struct device *host;

		cxlmd = to_cxl_memdev(uport);
		host = cxlmd->dev.parent;
		if (dev_is_pci(host))
			pdev = to_pci_dev(host);
	} else if (dev_is_pci(uport)) {
		pdev = to_pci_dev(uport);
	}

	if (!pdev)
		return;

	doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL,
				      CXL_DOE_PROTOCOL_TABLE_ACCESS);
	if (!doe_mb) {
		dev_dbg(dev, "No CDAT mailbox\n");
		return;
	}

	port->cdat_available = true;

	if (cxl_cdat_get_length(dev, doe_mb, &length)) {
		dev_dbg(dev, "No CDAT length\n");
		return;
	}

	/* One DOE response header followed by the full table body */
	buf = devm_kzalloc(dev, sizeof(*buf) + length, GFP_KERNEL);
	if (!buf)
		goto err;

	table_length = length;

	rc = cxl_cdat_read_table(dev, doe_mb, buf, &length);
	if (rc)
		goto err;

	/* A shorter-than-advertised table is tolerated, just trimmed */
	if (table_length != length)
		dev_warn(dev, "Malformed CDAT table length (%zu:%zu), discarding trailing data\n",
			table_length, length);

	/* CDAT bytes must sum to zero */
	if (cdat_checksum(buf->data, length))
		goto err;

	port->cdat.table = buf->data;
	port->cdat.length = length;

	return;

err:
	/* buf may be NULL here (failed allocation) -- presumably
	 * devm_kfree() tolerates that; confirm for the minimum supported
	 * kernel baseline. */
	devm_kfree(dev, buf);
	dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, "CXL");
/* Flit size in bytes: 256B when negotiated, otherwise the 68B format. */
static int cxl_flit_size(struct pci_dev *pdev)
{
	return cxl_pci_flit_256(pdev) ? 256 : 68;
}
/*
 * Approximate the link's latency contribution as the time to transfer
 * one flit at the negotiated link speed. Returns 0 when the speed is
 * unknown.
 */
long cxl_pci_get_latency(struct pci_dev *pdev)
{
	long mbps = pcie_link_speed_mbps(pdev);

	if (mbps < 0)
		return 0;

	return cxl_flit_size(pdev) * MEGA / (mbps / BITS_PER_BYTE);
}
/*
 * device_for_each_child() callback: returns non-zero (stopping the walk)
 * when @dev is an enabled endpoint decoder whose hardware COMMITTED bit
 * is no longer set, i.e. a reset wiped previously committed state.
 */
static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
{
	struct cxl_port *port = data;
	struct cxl_decoder *cxld;
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;

	if (!is_endpoint_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	if (!(cxld->flags & CXL_DECODER_F_ENABLE))
		return 0;

	cxlhdm = dev_get_drvdata(&port->dev);
	hdm = cxlhdm->regs.hdm_decoder;
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));

	return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
}
/*
 * cxl_endpoint_decoder_reset_detected() - did a reset wipe decoder state?
 * @port: endpoint port whose child decoders are checked
 *
 * Returns true when any enabled endpoint decoder under @port has lost
 * its hardware COMMITTED bit (see the walk callback above).
 */
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
{
	return device_for_each_child(&port->dev, port,
				     __cxl_endpoint_decoder_reset_detected);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, "CXL");
/*
 * Fill @c with the link bandwidth for every access coordinate class:
 * negotiated speed (converted to MB/s) times negotiated lane count.
 *
 * Returns 0 on success or the negative pcie_link_speed_mbps() error when
 * the link speed is unknown.
 */
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
{
	int mbps, bw;
	u16 lnksta;
	u32 lanes;

	mbps = pcie_link_speed_mbps(pdev);
	if (mbps < 0)
		return mbps;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
	lanes = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	bw = (mbps / BITS_PER_BYTE) * lanes;

	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		c[i].read_bandwidth = bw;
		c[i].write_bandwidth = bw;
	}

	return 0;
}
/*
 * Field values programmed into the GPF phase timeout base/scale by
 * update_gpf_port_dvsec(). Per the debug message there, base 2 with
 * scale 7 encodes a 20 second timeout -- NOTE(review): confirm the
 * scale encoding against the CXL GPF DVSEC definition.
 */
#define GPF_TIMEOUT_BASE_MAX 2
#define GPF_TIMEOUT_SCALE_MAX 7
/*
 * cxl_gpf_get_dvsec() - locate the GPF DVSEC for @dev
 * @dev: a PCI device; anything other than a PCIe endpoint gets the Port
 *       flavor of the capability
 *
 * Returns the DVSEC offset, or 0 when @dev is not PCI or the capability
 * is absent (with a warning in the latter case).
 */
u16 cxl_gpf_get_dvsec(struct device *dev)
{
	struct pci_dev *pdev;
	bool is_port;
	u16 dvsec;

	if (!dev_is_pci(dev))
		return 0;

	pdev = to_pci_dev(dev);
	is_port = pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT;

	dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
					  is_port ? PCI_DVSEC_CXL_PORT_GPF :
						    PCI_DVSEC_CXL_DEVICE_GPF);
	if (!dvsec)
		dev_warn(dev, "%s GPF DVSEC not present\n",
			 is_port ? "Port" : "Device");

	return dvsec;
}
EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
/*
 * Program one GPF phase timeout in a port's GPF DVSEC to the maximum
 * base/scale encoding.
 * @phase: 1 or 2, selecting the Phase 1 or Phase 2 control register.
 *
 * Returns 0 when the timeout was already maxed or after a successful
 * write, -EINVAL for a bad @phase, or a config accessor error.
 */
static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
{
	u64 base, scale;
	int rc, offset;
	u16 ctrl;

	switch (phase) {
	case 1:
		offset = PCI_DVSEC_CXL_PORT_GPF_PHASE_1_CONTROL;
		base = PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_BASE;
		scale = PCI_DVSEC_CXL_PORT_GPF_PHASE_1_TMO_SCALE;
		break;
	case 2:
		offset = PCI_DVSEC_CXL_PORT_GPF_PHASE_2_CONTROL;
		base = PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_BASE;
		scale = PCI_DVSEC_CXL_PORT_GPF_PHASE_2_TMO_SCALE;
		break;
	default:
		return -EINVAL;
	}

	rc = pci_read_config_word(pdev, dvsec + offset, &ctrl);
	if (rc)
		return rc;

	/* Skip the write if the timeout is already at its maximum */
	if (FIELD_GET(base, ctrl) == GPF_TIMEOUT_BASE_MAX &&
	    FIELD_GET(scale, ctrl) == GPF_TIMEOUT_SCALE_MAX)
		return 0;

	/*
	 * NOTE(review): ctrl is rebuilt from scratch, zeroing any other
	 * bits in the phase control register -- presumably the remaining
	 * bits are reserved; confirm against the CXL GPF DVSEC layout.
	 */
	ctrl = FIELD_PREP(base, GPF_TIMEOUT_BASE_MAX);
	ctrl |= FIELD_PREP(scale, GPF_TIMEOUT_SCALE_MAX);

	rc = pci_write_config_word(pdev, dvsec + offset, ctrl);
	if (!rc)
		pci_dbg(pdev, "Port GPF phase %d timeout: %d0 secs\n",
			phase, GPF_TIMEOUT_BASE_MAX);

	return rc;
}
/*
 * Locate and cache the dport's GPF DVSEC, programming both phase
 * timeouts to their maximum the first time through. Subsequent calls
 * with a cached offset are no-ops.
 *
 * Returns 0 on success, -EINVAL on a NULL @dport or missing DVSEC.
 */
int cxl_gpf_port_setup(struct cxl_dport *dport)
{
	struct pci_dev *pdev;
	int dvsec;

	if (!dport)
		return -EINVAL;

	if (dport->gpf_dvsec)
		return 0;

	dvsec = cxl_gpf_get_dvsec(dport->dport_dev);
	if (!dvsec)
		return -EINVAL;

	dport->gpf_dvsec = dvsec;
	pdev = to_pci_dev(dport->dport_dev);
	update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1);
	update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2);

	return 0;
}
/*
 * Context for pci_walk_bus() callbacks scanning a port's secondary bus.
 * @bus: only devices directly on this bus are considered
 * @port: the CXL port being scanned (unused by count_dports)
 * @type: PCIe port type to match (PCI_EXP_TYPE_*)
 * @error: sticky error latch for walk callbacks
 * @count: number of matching devices found so far
 */
struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};
/*
 * pci_walk_bus() callback: count PCIe ports directly on ctx->bus that
 * match ctx->type. Always returns 0 so the walk visits every device.
 */
static int count_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;

	if (pdev->bus != ctx->bus)
		return 0;
	/*
	 * pci_pcie_type() is only valid for PCIe devices, so establish
	 * pci_is_pcie() before reading the type (previously the type was
	 * read unconditionally).
	 */
	if (!pci_is_pcie(pdev))
		return 0;
	if (pci_pcie_type(pdev) != ctx->type)
		return 0;

	ctx->count++;

	return 0;
}
/*
 * Count the candidate dports below @port: Root Ports when the port sits
 * on a root bus, Downstream Switch Ports otherwise.
 *
 * Returns the count, or -ENXIO when no PCI bus backs the port.
 */
int cxl_port_get_possible_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;

	if (!bus) {
		dev_err(&port->dev, "No PCI bus found for port %s\n",
			dev_name(&port->dev));
		return -ENXIO;
	}

	ctx = (struct cxl_walk_context) {
		.bus = bus,
		.type = pci_is_root_bus(bus) ? PCI_EXP_TYPE_ROOT_PORT :
					       PCI_EXP_TYPE_DOWNSTREAM,
	};

	pci_walk_bus(bus, count_dports, &ctx);

	return ctx.count;
}