#include "xe_mmio.h"
#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/minmax.h>
#include <linux/pci.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "regs/xe_bars.h"
#include "xe_device.h"
#include "xe_gt_sriov_vf.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "xe_wa.h"
#include "generated/xe_device_wa_oob.h"
/* devm action: forget the MMIO mappings of all remote tiles on teardown. */
static void tiles_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	int tile_id;

	/* Only the pointers are cleared; the root mapping is torn down in mmio_fini(). */
	for_each_remote_tile(tile, xe, tile_id)
		tile->mmio.regs = NULL;
}
/*
 * Set up the per-tile MMIO views for multi-tile devices.
 *
 * Each remote tile's register block sits at a fixed stride of
 * @tile_mmio_size from the start of the root mapping; only the first
 * SZ_4M of each tile's block is exposed through its xe_mmio.
 */
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
	struct xe_tile *tile;
	u8 id;

	/* Nothing to do on single-tile parts. */
	if (xe->info.tile_count == 1)
		return;

	for_each_remote_tile(tile, xe, id)
		xe_mmio_init(&tile->mmio, tile,
			     xe->mmio.regs + (size_t)id * tile_mmio_size, SZ_4M);
}
/*
 * Probe the MMIO views of all remote tiles and register their cleanup.
 *
 * Returns 0 on success or the error from registering the devm action.
 */
int xe_mmio_probe_tiles(struct xe_device *xe)
{
	const size_t tile_mmio_size = SZ_16M;	/* per-tile register stride */

	mmio_multi_tile_setup(xe, tile_mmio_size);

	return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
/* devm action: unmap the register BAR and drop the stale pointers. */
static void mmio_fini(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *root = xe_device_get_root_tile(xe);

	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);

	/* The root tile aliases the start of the device mapping; clear both. */
	root->mmio.regs = NULL;
	xe->mmio.regs = NULL;
}
/*
 * Map the register BAR early in probe and set up the root tile's MMIO view.
 *
 * Returns 0 on success, -EIO if the BAR cannot be mapped, or the error from
 * registering the devm cleanup action.
 */
int xe_mmio_probe_early(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);

	xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
	xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
	if (!xe->mmio.regs) {
		drm_err(&xe->drm, "failed to map registers\n");
		return -EIO;
	}

	/* The root tile's registers start at the beginning of the BAR. */
	xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);

	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */
/*
 * Initialize an MMIO view over @size bytes of registers starting at @ptr,
 * owned by @tile.  @size must not exceed XE_REG_ADDR_MAX.
 */
void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size)
{
	xe_tile_assert(tile, size <= XE_REG_ADDR_MAX);

	mmio->tile = tile;
	mmio->regs = ptr;
	mmio->regs_size = size;
}
/*
 * Wa_15015404425: issue four dummy register writes so that any pending
 * posted writes are pushed out before a subsequent read.  No-op on
 * devices that do not need the workaround.
 */
static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
#define DUMMY_REG_OFFSET	0x130030
	int n;

	if (!XE_DEVICE_WA(mmio->tile->xe, 15015404425))
		return;

	/* 4 dummy writes */
	for (n = 4; n--;)
		writel(0, mmio->regs + DUMMY_REG_OFFSET);
}
/* Read an 8-bit register through the MMIO view, with tracing. */
u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u8 value;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	value = readb(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, value, sizeof(value));

	return value;
}
/* Read a 16-bit register through the MMIO view, with tracing. */
u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u16 value;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	value = readw(mmio->regs + addr);
	trace_xe_reg_rw(mmio, false, addr, value, sizeof(value));

	return value;
}
/*
 * Write a 32-bit register through the MMIO view, with tracing.
 *
 * On SR-IOV VFs, registers not flagged VF-accessible are written via the
 * SR-IOV VF helper instead of a direct MMIO store.
 */
void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

	if (reg.vf || !IS_SRIOV_VF(mmio->tile->xe))
		writel(val, mmio->regs + addr);
	else
		xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
				       mmio->tile->primary_gt, reg, val);
}
/*
 * Read a 32-bit register through the MMIO view, with tracing.
 *
 * On SR-IOV VFs, registers not flagged VF-accessible are read via the
 * SR-IOV VF helper instead of a direct MMIO load.
 */
u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	u32 value;

	/* Wa_15015404425 */
	mmio_flush_pending_writes(mmio);

	if (reg.vf || !IS_SRIOV_VF(mmio->tile->xe))
		value = readl(mmio->regs + addr);
	else
		value = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
					      mmio->tile->primary_gt, reg);

	trace_xe_reg_rw(mmio, false, addr, value, sizeof(value));

	return value;
}
/*
 * Read-modify-write: clear the @clr bits, then set the @set bits.
 *
 * Returns the value read before modification.  Not atomic with respect
 * to concurrent accessors of the same register.
 */
u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
{
	u32 old = xe_mmio_read32(mmio, reg);

	xe_mmio_write32(mmio, reg, (old & ~clr) | set);

	return old;
}
/*
 * Write @val to @reg, then read it back and verify that the masked
 * readback equals @eval.
 *
 * Returns 0 when the readback matches, -EINVAL otherwise.
 */
int xe_mmio_write32_and_verify(struct xe_mmio *mmio,
			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
	u32 readback;

	xe_mmio_write32(mmio, reg, val);
	readback = xe_mmio_read32(mmio, reg);

	if ((readback & mask) == eval)
		return 0;

	return -EINVAL;
}
bool xe_mmio_in_range(const struct xe_mmio *mmio,
const struct xe_mmio_range *range,
struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
return range && addr >= range->start && addr <= range->end;
}
/*
 * Read a 64-bit register as two 32-bit reads (lower dword at @reg, upper
 * dword at @reg + 4), retrying until the upper dword is stable across the
 * pair so a carry between the halves cannot be missed.
 */
u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
{
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	/*
	 * NOTE(review): asserts the address-adjustment limit does not fall
	 * within this 8-byte register, so both halves see the same
	 * adjustment — confirm against xe_mmio_adjusted_addr() semantics.
	 */
	xe_tile_assert(mmio->tile, !in_range(mmio->adj_limit, reg.addr + 1, 7));

	oldudw = xe_mmio_read32(mmio, reg_udw);
	/* Up to 5 lo/hi read pairs before giving up on a stable sample. */
	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(mmio, reg);
		udw = xe_mmio_read32(mmio, reg_udw);
		/* Upper dword unchanged across the pair: consistent sample. */
		if (udw == oldudw)
			break;
		oldudw = udw;
	}
	/* Retries exhausted: warn but still return the last sample read. */
	drm_WARN(&mmio->tile->xe->drm, retries == 0,
		 "64-bit read of %#x did not stabilize\n", reg.addr);
	return (u64)udw << 32 | ldw;
}
/*
 * Poll @reg until (value & @mask) equals @val (or differs from it, when
 * @expect_match is false), or until @timeout_us elapses.
 *
 * Uses an exponential backoff starting at 10us, clamped so the last wait
 * does not overshoot the deadline.  @atomic selects udelay() busy-waiting
 * instead of usleep_range() for callers in atomic context.
 *
 * Returns 0 on success, -ETIMEDOUT on timeout.  The last value read is
 * stored through @out_val when it is non-NULL.
 */
static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
			    u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10; /* initial backoff step, in microseconds */
	u32 read;
	bool check;

	for (;;) {
		read = xe_mmio_read32(mmio, reg);
		check = (read & mask) == val;
		if (!expect_match)
			check = !check;
		if (check) {
			ret = 0;
			break;
		}
		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;
		/* Clamp the next wait so it never sleeps past the deadline. */
		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);
		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1; /* exponential backoff */
	}
	/*
	 * The deadline may pass between a failed read and the time check
	 * (e.g. after preemption); re-check once so a success landing in
	 * that window is not reported as a timeout.
	 */
	if (ret != 0) {
		read = xe_mmio_read32(mmio, reg);
		check = (read & mask) == val;
		if (!expect_match)
			check = !check;
		if (check)
			ret = 0;
	}
	if (out_val)
		*out_val = read;
	return ret;
}
/**
 * xe_mmio_wait32() - Wait for a register to match an expected value
 * @mmio: MMIO view to read from
 * @reg: register to poll
 * @mask: mask applied to the value read
 * @val: value that (read & @mask) must equal for success
 * @timeout_us: timeout in microseconds
 * @out_val: if not NULL, receives the last (unmasked) value read
 * @atomic: true when called from atomic context (busy-waits instead of sleeping)
 *
 * Return: 0 on success, -ETIMEDOUT on timeout.
 */
int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
}
/**
 * xe_mmio_wait32_not() - Wait for a register to stop matching a value
 * @mmio: MMIO view to read from
 * @reg: register to poll
 * @mask: mask applied to the value read
 * @val: value that (read & @mask) must differ from for success
 * @timeout_us: timeout in microseconds
 * @out_val: if not NULL, receives the last (unmasked) value read
 * @atomic: true when called from atomic context (busy-waits instead of sleeping)
 *
 * Inverse of xe_mmio_wait32(): succeeds once the masked value no longer
 * equals @val.
 *
 * Return: 0 on success, -ETIMEDOUT on timeout.
 */
int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic)
{
	return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}
#ifdef CONFIG_PCI_IOV
/*
 * Per-VF register block stride, in bytes.
 *
 * NOTE(review): 0x400 for graphics IP newer than 12.00, 0x1000 otherwise —
 * values taken as-is; confirm against the hardware programming spec.
 */
static size_t vf_regs_stride(struct xe_device *xe)
{
	if (GRAPHICS_VERx100(xe) > 1200)
		return 0x400;

	return 0x1000;
}
/*
 * Derive from @base the PF's MMIO view of VF @vfid's registers by shifting
 * the mapping forward by the per-VF stride.
 *
 * Only valid on a PF, for a non-zero @vfid, and on a @base that is not
 * itself a VF view; the offset must lie inside @base's register range.
 */
void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid)
{
	struct xe_tile *tile = base->tile;
	struct xe_device *xe = tile->xe;
	size_t shift = vf_regs_stride(xe) * vfid;

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_assert(xe, vfid);
	xe_assert(xe, !base->sriov_vf_gt);
	xe_assert(xe, base->regs_size > shift);

	/* Start from a copy of @base, then narrow it onto the VF's window. */
	*mmio = *base;
	mmio->regs += shift;
	mmio->regs_size -= shift;
}
#endif