CIRC_SPACE
(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
return CIRC_SPACE(drvdata->circ.head,
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
avail = CIRC_SPACE(bchan->tail, bchan->head,
#define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\
if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
space = CIRC_SPACE(ctb->tail, head, ctb->size);
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
space = CIRC_SPACE(header->write_index, header->read_index,
(CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
u32 credits = CIRC_SPACE(0, 0, CTB_G2H_BUFFER_DWORDS) - G2H_ROOM_BUFFER_DWORDS;
h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
return CIRC_SPACE(pf_queue->head, pf_queue->tail, pf_queue->size) <=
if (CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) >= 1) {
if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
if (!CIRC_SPACE(req->setup_head, req->acked_tail,
if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
if (CIRC_SPACE(head, tail, XMIT_SIZE) < n + 2) {
return CIRC_SPACE(ring->head, ring->tail, ring->size);
while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
/* Free slots in a DMA descriptor ring: head h, tail t, ring size s (power of 2). */
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
num = CIRC_SPACE(start, end, RX_DESC_NUM);
CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len,
return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size))
for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
buf_space = CIRC_SPACE(cb->head, cb->tail, LOG_SIZE);
avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
if (CIRC_SPACE(rb->head, rb->tail, CUSTOM_BIOS_INPUT_RING_ENTRIES) == 0) {
return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
if (CIRC_SPACE(info->xmit.head,
return CIRC_SPACE(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE);
count = CIRC_SPACE(bc->head, bc->tail, BUF_SIZE);
if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
return CIRC_SPACE(head, tail, data_size) >= size;
return CIRC_SPACE(tail, head, data_size) >= size;
handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))