mmio_read: representative uses across kernel drivers
/* TDX guest (arch/x86/coco/tdx/tdx.c): service an MMIO read taken via a
 * #VE by asking the VMM for the value */
if (!mmio_read(size, ve->gpa, &val))
	return -EIO;
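/* A minimal userspace sketch of the call pattern above, assuming a
 * hypothetical mmio_read() that returns true on success.  The real TDX
 * helper issues a hypercall to the VMM; everything below is illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t fake_mmio_space[16] = { 0xde, 0xad, 0xbe, 0xef };

/* hypothetical stand-in: copy `size` bytes at guest-physical `gpa` */
static bool mmio_read(int size, uint64_t gpa, uint64_t *val)
{
	if (size > 8 || gpa + size > sizeof(fake_mmio_space))
		return false;
	*val = 0;
	for (int i = 0; i < size; i++)
		*val |= (uint64_t)fake_mmio_space[gpa + i] << (8 * i);
	return true;
}

int main(void)
{
	uint64_t val;

	if (!mmio_read(4, 0, &val))	/* same shape as the kernel check */
		return 1;
	printf("val = 0x%llx\n", (unsigned long long)val);
	return 0;
}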
/* FPGA DFL FME perf (drivers/fpga/dfl-fme-perf.c): fabric perf events
 * counting MMIO reads, device-wide and per-port */
FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
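/* Macros like these typically stamp out an event descriptor per counter.
 * A generic, illustrative sketch of the technique; the struct, macro body,
 * and event-id values here are made up, not the DFL driver's: */
#include <stdio.h>

struct fab_event { const char *name; unsigned int id; };

/* pastes the name token into a string and records the hardware event id */
#define FAB_EVENT(name_, id_)	{ #name_, (id_) }

#define MY_EVNT_MMIO_RD	0x06	/* hypothetical id */
#define MY_EVNT_MMIO_WR	0x07	/* hypothetical id */

static const struct fab_event fab_events[] = {
	FAB_EVENT(mmio_read, MY_EVNT_MMIO_RD),
	FAB_EVENT(mmio_write, MY_EVNT_MMIO_WR),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(fab_events) / sizeof(fab_events[0]); i++)
		printf("%s -> 0x%02x\n", fab_events[i].name, fab_events[i].id);
	return 0;
}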
/* i915 (drivers/gpu/drm/i915): macro-generated register readers; x__ is
 * the bit width and s__ the size suffix (b/w/l/q), so this pastes into
 * mmio_readb/mmio_readw/mmio_readl/mmio_readq calls */
	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
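/* A self-contained sketch of the token-pasting trick the macro relies on:
 * one macro stamps out read8/read16/read32 wrappers that dispatch through
 * a function-pointer table.  Types and names are illustrative, not i915's. */
#include <stdint.h>
#include <stdio.h>

struct uncore_funcs {
	uint8_t  (*mmio_readb)(uint32_t reg);
	uint16_t (*mmio_readw)(uint32_t reg);
	uint32_t (*mmio_readl)(uint32_t reg);
};

static uint8_t  rb(uint32_t reg) { (void)reg; return 0x11; }
static uint16_t rw(uint32_t reg) { (void)reg; return 0x2222; }
static uint32_t rl(uint32_t reg) { (void)reg; return 0x33333333; }

static struct uncore_funcs funcs = { rb, rw, rl };

/* x__ is the bit width, s__ the size suffix, exactly as in the fragment */
#define DEFINE_READ(x__, s__) \
static uint##x__##_t uncore_read##x__(uint32_t reg) \
{ \
	return funcs.mmio_read##s__(reg); \
}

DEFINE_READ(8, b)
DEFINE_READ(16, w)
DEFINE_READ(32, l)

int main(void)
{
	printf("0x%x 0x%x 0x%x\n",
	       uncore_read8(0), uncore_read16(0), uncore_read32(0));
	return 0;
}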
/* Amazon EFA (drivers/infiniband/hw/efa/efa_com.c): "readless" register
 * reads.  The device DMA-writes the value into a host response buffer;
 * completion is detected by matching the DMA'd req_id to a sequence number. */

/* efa_com_mmio_reg_read_init(): allocate the coherent response buffer */
struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

spin_lock_init(&mmio_read->lock);
mmio_read->read_resp =
	dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
			   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (!mmio_read->read_resp)
	return -ENOMEM;

mmio_read->read_resp->req_id = 0;
mmio_read->seq_num = 0;
mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;

/* efa_com_mmio_reg_read_resp_addr_init(): hand the buffer's DMA address
 * to the device as two 32-bit halves */
addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);

/* efa_com_reg_read32(): trigger a readless read, then poll the buffer */
struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

read_resp = mmio_read->read_resp;

spin_lock(&mmio_read->lock);
mmio_read->seq_num++;

/* trash req_id so a stale response cannot match the new seq_num */
read_resp->req_id = mmio_read->seq_num + 0x9aL;
EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
	mmio_read->seq_num);

exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
do {
	if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
		break;
	udelay(1);
} while (time_is_after_jiffies(exp_time));

if (read_resp->req_id != mmio_read->seq_num) {
	/* timed out: log expected vs. actual request id (ratelimited) */
		mmio_read->seq_num, offset, read_resp->req_id,
}
spin_unlock(&mmio_read->lock);

/* efa_com_mmio_reg_read_destroy(): free the response buffer */
struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
		  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
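/* To make the handshake concrete, a self-contained userspace sketch of the
 * seq_num/req_id protocol.  fake_device_complete() stands in for the
 * device's DMA write; all names and values here are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct read_resp { uint16_t req_id; uint32_t reg_val; };
struct mmio_state { uint16_t seq_num; struct read_resp *resp; };

/* pretend device: DMA-writes the value and the matching req_id */
static void fake_device_complete(struct mmio_state *s, uint32_t val)
{
	s->resp->reg_val = val;
	s->resp->req_id = s->seq_num;	/* completion marker */
}

static int readless_read(struct mmio_state *s, uint32_t *out)
{
	s->seq_num++;
	/* trash req_id so a stale response cannot match the new seq_num */
	s->resp->req_id = s->seq_num + 0x9a;

	fake_device_complete(s, 0x12345678);	/* the writel() doorbell */

	/* the real driver polls here with udelay() until a timeout */
	if (s->resp->req_id != s->seq_num)
		return -1;
	*out = s->resp->reg_val;
	return 0;
}

int main(void)
{
	struct read_resp resp = { 0 };
	struct mmio_state s = { .seq_num = 0, .resp = &resp };
	uint32_t val;

	if (!readless_read(&s, &val))
		printf("reg = 0x%x\n", val);
	return 0;
}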
/* struct efa_com_dev embeds the readless-read state: */
struct efa_com_mmio_read mmio_read;

/* callers can adjust the poll timeout: */
edev->mmio_read.mmio_read_timeout =
/* Amazon ENA (drivers/net/ethernet/amazon/ena/ena_com.c) uses the same
 * readless scheme, with an explicit opt-out for devices that support
 * plain register reads. */

/* ena_com_mmio_reg_read_request_init() */
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

spin_lock_init(&mmio_read->lock);
mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
					  &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
	return -ENOMEM;

mmio_read->read_resp->req_id = 0x0;
mmio_read->seq_num = 0x0;
mmio_read->readless_supported = true;

/* ena_com_set_mmio_read_mode(): enable or disable readless reads */
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

mmio_read->readless_supported = readless_supported;

/* ena_com_mmio_reg_read_request_destroy() */
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
		  mmio_read->read_resp_dma_addr);
mmio_read->read_resp = NULL;
/* ena_com_mmio_reg_read_request_write_dev_addr(): program the response
 * buffer's DMA address into the device */
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
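/* What the LOW/HIGH helpers presumably reduce to: splitting a 64-bit DMA
 * address into two 32-bit halves for two device registers.  The standalone
 * arithmetic below is just an illustration of that split: */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_addr = 0x0000001234567890ULL;
	uint32_t addr_low  = (uint32_t)(dma_addr & 0xffffffffu);
	uint32_t addr_high = (uint32_t)(dma_addr >> 32);

	printf("low=0x%08x high=0x%08x\n", addr_low, addr_high);
	return 0;
}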
/* ena_com_reg_bar_read32() */
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
	mmio_read->read_resp;
u32 timeout = mmio_read->reg_read_to;

/* fall back to a plain MMIO read when readless mode is disabled */
if (!mmio_read->readless_supported)
	return readl(ena_dev->reg_bar + offset);

spin_lock_irqsave(&mmio_read->lock, flags);
mmio_read->seq_num++;

/* trash req_id so it cannot equal seq_num until the device writes it */
read_resp->req_id = mmio_read->seq_num + 0xDEAD;
mmio_read_reg |= mmio_read->seq_num &
		 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

/* polled inside a udelay() loop until the timeout expires: */
if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
	break;

/* on timeout the driver logs expected vs. actual id and offset: */
	mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
spin_unlock_irqrestore(&mmio_read->lock, flags);
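/* The doorbell value packs the register offset and the sequence number
 * into one 32-bit write, as the |= line above suggests.  Shift and mask
 * values below are made up for illustration, not ENA's actual layout: */
#include <stdint.h>
#include <stdio.h>

#define REG_OFF_SHIFT	16		/* hypothetical field layout */
#define REG_OFF_MASK	0xffff0000u
#define REQ_ID_MASK	0x0000ffffu

int main(void)
{
	uint16_t offset = 0x58, seq_num = 7;
	uint32_t doorbell;

	doorbell  = ((uint32_t)offset << REG_OFF_SHIFT) & REG_OFF_MASK;
	doorbell |= seq_num & REQ_ID_MASK;

	printf("doorbell = 0x%08x\n", doorbell);
	return 0;
}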
/* struct ena_com_dev embeds the readless-read state: */
struct ena_com_mmio_read mmio_read;

/* the netdev driver tunes the poll timeout (reg_read_to): */
adapter->ena_dev->mmio_read.reg_read_to =
/* broadsheetfb: boards may supply an MMIO-based register accessor; the
 * driver falls back to a default path when the hook is absent */
if (par->board->mmio_read)
	return par->board->mmio_read(par);

/* the hook in the board-specific ops: */
u16 (*mmio_read)(struct broadsheetfb_par *);

/* elsewhere, a local mmio_read helper bound into an ops table: */
.read = mmio_read,
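/* The broadsheetfb fragment shows the optional-override pattern: use the
 * board's hook when provided, otherwise take a default path.  A minimal
 * sketch with hypothetical names: */
#include <stdint.h>
#include <stdio.h>

struct par;
struct board_ops { uint16_t (*mmio_read)(struct par *); };
struct par { struct board_ops *board; };

static uint16_t fast_mmio_read(struct par *p) { (void)p; return 0xbeef; }

static uint16_t get_data(struct par *p)
{
	if (p->board->mmio_read)	/* board override, if any */
		return p->board->mmio_read(p);
	return 0xffff;			/* default (e.g. bit-banged) path */
}

int main(void)
{
	struct board_ops ops = { .mmio_read = fast_mmio_read };
	struct par p = { .board = &ops };

	printf("0x%x\n", get_data(&p));
	return 0;
}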