// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/unroll.h>
#include <net/libeth/xdp.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
        return &rx_ring->xdp_buf[idx];
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
                     bool enable)
{
        if (!vsi->netdev || !q_vector)
                return;

        if (enable)
                napi_enable(&q_vector->napi);
        else
                napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains the queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
                 struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        u16 reg;
        u32 val;

        /* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring(), so only
         * QINT_RQCTL needs to be handled here
         */
        reg = rx_ring->reg_idx;
        val = rd32(hw, QINT_RQCTL(reg));
        val &= ~QINT_RQCTL_CAUSE_ENA_M;
        wr32(hw, QINT_RQCTL(reg), val);

        if (q_vector) {
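                /* disable interrupt generation for the vector and wait for
                 * any in-flight IRQ handler to finish
                 */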
                wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
                ice_flush(hw);
                synchronize_irq(q_vector->irq.virq);
        }
}

/**
 * ice_qvec_cfg_msix - Configure MSI-X interrupt for a given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 * @qid: queue index
 */
void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
        u16 reg_idx = q_vector->reg_idx;
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        int q, _qid = qid;

        ice_cfg_itr(hw, q_vector);

        for (q = 0; q < q_vector->num_ring_tx; q++) {
                ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
                _qid++;
        }

        _qid = qid;

        for (q = 0; q < q_vector->num_ring_rx; q++) {
                ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
                _qid++;
        }

        ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;

        ice_irq_dynamic_ena(hw, vsi, q_vector);

        ice_flush(hw);
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
        struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

        if (!pool)
                return -EINVAL;

        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

        return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        int err;

        if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
                return -EINVAL;

        if (qid >= vsi->netdev->real_num_rx_queues ||
            qid >= vsi->netdev->real_num_tx_queues)
                return -EINVAL;

        err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
                               ICE_RX_DMA_ATTR);
        if (err)
                return err;

        return 0;
}

/**
 * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate memory and return -ENOMEM if the allocation fails.
 * If the allocation succeeds, substitute the buffer with the allocated one.
 * Returns 0 on success, negative on failure
 */
int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
        if (pool_present) {
                rx_ring->xdp_buf = kzalloc_objs(*rx_ring->xdp_buf,
                                                rx_ring->count);
                if (!rx_ring->xdp_buf)
                        return -ENOMEM;
        } else {
                kfree(rx_ring->xdp_buf);
                rx_ring->xdp_buf = NULL;
        }

        return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
        bool if_running, pool_present = !!pool;
        int ret = 0, pool_failure = 0;

        if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
                netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
                pool_failure = -EINVAL;
                goto failure;
        }

        if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
                     ice_is_xdp_ena_vsi(vsi);

        if (if_running) {
                ret = ice_qp_dis(vsi, qid);
                if (ret) {
                        netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
                        goto xsk_pool_if_up;
                }

                ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
                if (ret)
                        goto xsk_pool_if_up;
        }

        pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
                                      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
        if (if_running) {
                ret = ice_qp_ena(vsi, qid);
                if (!ret && pool_present)
                        napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
                else if (ret)
                        netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }

failure:
        if (pool_failure) {
                netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
                           pool_present ? "en" : "dis", pool_failure);
                return pool_failure;
        }

        return ret;
}

/**
 * ice_fill_rx_descs - pick buffers from the XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by caller of this function.
 *
 * Returns the number of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
                             union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
        dma_addr_t dma;
        u16 buffs;
        int i;

        buffs = xsk_buff_alloc_batch(pool, xdp, count);
        for (i = 0; i < buffs; i++) {
                dma = xsk_buff_xdp_get_dma(*xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
                rx_desc->wb.status_error0 = 0;

                rx_desc++;
                xdp++;
        }

        return buffs;
}

/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Place @count descriptors onto the Rx ring. Handle the ring wrap for the
 * case where the space from next_to_use up to the end of the ring is less
 * than @count. Finally, do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
                                   struct xsk_buff_pool *xsk_pool, u16 count)
{
        u32 nb_buffs_extra = 0, nb_buffs = 0;
        union ice_32b_rx_flex_desc *rx_desc;
        u16 ntu = rx_ring->next_to_use;
        u16 total_count = count;
        struct xdp_buff **xdp;

        rx_desc = ICE_RX_DESC(rx_ring, ntu);
        xdp = ice_xdp_buf(rx_ring, ntu);

        if (ntu + count >= rx_ring->count) {
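                /* the request crosses the end of the ring: fill descriptors
                 * up to the ring end first, bump the tail for that chunk,
                 * then wrap to index 0 and fill the rest below
                 */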
                nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
                                                   rx_ring->count - ntu);
                if (nb_buffs_extra != rx_ring->count - ntu) {
                        ntu += nb_buffs_extra;
                        goto exit;
                }
                rx_desc = ICE_RX_DESC(rx_ring, 0);
                xdp = ice_xdp_buf(rx_ring, 0);
                ntu = 0;
                count -= nb_buffs_extra;
                ice_release_rx_desc(rx_ring, 0);
        }

        nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);

        ntu += nb_buffs;
        if (ntu == rx_ring->count)
                ntu = 0;

exit:
        if (rx_ring->next_to_use != ntu)
                ice_release_rx_desc(rx_ring, ntu);

        return total_count == (nb_buffs_extra + nb_buffs);
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Wrapper for the internal allocation routine; figures out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to the internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
                          struct xsk_buff_pool *xsk_pool, u16 count)
{
        u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
        u16 leftover, i, tail_bumps;

        tail_bumps = count / rx_thresh;
        leftover = count - (tail_bumps * rx_thresh);
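        /* each full-threshold chunk gets its own fill and tail bump; the
         * final call below handles the leftover
         */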

        for (i = 0; i < tail_bumps; i++)
                if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
                        return false;
        return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}

/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 * @xdp_ring: XDP Tx ring
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns the number of Tx descriptors cleaned on this call.
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
                                struct xsk_buff_pool *xsk_pool)
{
        u16 ntc = xdp_ring->next_to_clean;
        struct ice_tx_desc *tx_desc;
        u16 cnt = xdp_ring->count;
        struct ice_tx_buf *tx_buf;
        u16 completed_frames = 0;
        u16 xsk_frames = 0;
        u16 last_rs;
        int i;

        last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
        tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
        if (tx_desc->cmd_type_offset_bsz &
            cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
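                /* DD set on the descriptor that carried the last RS bit means
                 * HW is done with all descriptors up to and including it;
                 * count them, accounting for a possible ring wrap
                 */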
                if (last_rs >= ntc)
                        completed_frames = last_rs - ntc + 1;
                else
                        completed_frames = last_rs + cnt - ntc + 1;
        }

        if (!completed_frames)
                return 0;

        if (likely(!xdp_ring->xdp_tx_active)) {
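                /* no XDP_TX buffers are in flight, so every completed
                 * descriptor belongs to the XSK Tx ring and can be reported
                 * back to the pool in one go
                 */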
                xsk_frames = completed_frames;
                goto skip;
        }

        ntc = xdp_ring->next_to_clean;
        for (i = 0; i < completed_frames; i++) {
                tx_buf = &xdp_ring->tx_buf[ntc];

                if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
                        tx_buf->type = ICE_TX_BUF_EMPTY;
                        xsk_buff_free(tx_buf->xdp);
                        xdp_ring->xdp_tx_active--;
                } else {
                        xsk_frames++;
                }

                ntc++;
                if (ntc >= xdp_ring->count)
                        ntc = 0;
        }
skip:
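        /* clear the checked RS descriptor so its DD bit is not interpreted
         * as a new completion on the next cleaning pass
         */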
        tx_desc->cmd_type_offset_bsz = 0;
        xdp_ring->next_to_clean += completed_frames;
        if (xdp_ring->next_to_clean >= cnt)
                xdp_ring->next_to_clean -= cnt;
        if (xsk_frames)
                xsk_tx_completed(xsk_pool, xsk_frames);

        return completed_frames;
}

/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Note that this function works directly on the xdp_buff; there is no need
 * to convert it to an xdp_frame. The xdp_buff pointer is stored in ice_tx_buf
 * so that the cleaning side will be able to xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there
 * was not enough space on XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
                              struct ice_tx_ring *xdp_ring,
                              struct xsk_buff_pool *xsk_pool)
{
        struct skb_shared_info *sinfo = NULL;
        u32 size = xdp->data_end - xdp->data;
        u32 ntu = xdp_ring->next_to_use;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
        struct xdp_buff *head;
        u32 nr_frags = 0;
        u32 free_space;
        u32 frag = 0;

        free_space = ICE_DESC_UNUSED(xdp_ring);
        if (free_space < ICE_RING_QUARTER(xdp_ring))
                free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);

        if (unlikely(!free_space))
                goto busy;

        if (unlikely(xdp_buff_has_frags(xdp))) {
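                /* multi-buffer frame: make sure there is a free descriptor
                 * for the head and every fragment before producing anything
                 */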
                sinfo = xdp_get_shared_info_from_buff(xdp);
                nr_frags = sinfo->nr_frags;
                if (free_space < nr_frags + 1)
                        goto busy;
        }

        tx_desc = ICE_TX_DESC(xdp_ring, ntu);
        tx_buf = &xdp_ring->tx_buf[ntu];
        head = xdp;

        for (;;) {
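                /* produce one Tx descriptor per buffer: the head first, then
                 * each fragment pulled from the head via xsk_buff_get_frag()
                 */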
                dma_addr_t dma;

                dma = xsk_buff_xdp_get_dma(xdp);
                xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);

                tx_buf->xdp = xdp;
                tx_buf->type = ICE_TX_BUF_XSK_TX;
                tx_desc->buf_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
                /* account for each xdp_buff from xsk_buff_pool */
                xdp_ring->xdp_tx_active++;

                if (++ntu == xdp_ring->count)
                        ntu = 0;

                if (frag == nr_frags)
                        break;

                tx_desc = ICE_TX_DESC(xdp_ring, ntu);
                tx_buf = &xdp_ring->tx_buf[ntu];

                xdp = xsk_buff_get_frag(head);
                size = skb_frag_size(&sinfo->frags[frag]);
                frag++;
        }

        xdp_ring->next_to_use = ntu;
        /* update last descriptor from a frame with EOP */
        tx_desc->cmd_type_offset_bsz |=
                cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

        return ICE_XDP_TX;

busy:
        ice_stats_inc(xdp_ring->ring_stats, tx_busy);

        return ICE_XDP_CONSUMED;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
               struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
               struct xsk_buff_pool *xsk_pool)
{
        int err, result = ICE_XDP_PASS;
        u32 act;

        act = bpf_prog_run_xdp(xdp_prog, xdp);

        if (likely(act == XDP_REDIRECT)) {
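                /* XDP_REDIRECT is the likely verdict for AF_XDP; -ENOBUFS
                 * with need_wakeup in use means the AF_XDP Rx ring is full,
                 * so return ICE_XDP_EXIT to stop processing instead of just
                 * dropping the buffer
                 */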
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                if (!err)
                        return ICE_XDP_REDIR;
                if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
                        result = ICE_XDP_EXIT;
                else
                        result = ICE_XDP_CONSUMED;
                goto out_failure;
        }

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
                if (result == ICE_XDP_CONSUMED)
                        goto out_failure;
                break;
        case XDP_DROP:
                result = ICE_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                result = ICE_XDP_CONSUMED;
out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                break;
        }

        return result;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @xsk_pool: AF_XDP buffer pool pointer
 * @budget: NAPI budget
 *
 * Returns the number of processed packets on success, or the remaining
 * budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
                        struct xsk_buff_pool *xsk_pool,
                        int budget)
{
        struct xdp_buff *first = (struct xdp_buff *)rx_ring->xsk;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u32 ntc = rx_ring->next_to_clean;
        u32 ntu = rx_ring->next_to_use;
        struct ice_tx_ring *xdp_ring;
        unsigned int xdp_xmit = 0;
        struct bpf_prog *xdp_prog;
        u32 cnt = rx_ring->count;
        bool failure = false;
        int entries_to_alloc;

        /* ZC path is enabled only when an XDP program is set,
         * so it cannot be NULL here
         */
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
        xdp_ring = rx_ring->xdp_ring;

        while (likely(total_rx_packets < (unsigned int)budget)) {
                union ice_32b_rx_flex_desc *rx_desc;
                unsigned int size, xdp_res = 0;
                struct xdp_buff *xdp;
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tci;

                rx_desc = ICE_RX_DESC(rx_ring, ntc);

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
                if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we have
                 * verified the descriptor has been written back.
                 */
                dma_rmb();

                if (unlikely(ntc == ntu))
                        break;

                xdp = *ice_xdp_buf(rx_ring, ntc);

                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                                   ICE_RX_FLX_DESC_PKT_LEN_M;

                xsk_buff_set_size(xdp, size);
                xsk_buff_dma_sync_for_cpu(xdp);

                if (!first) {
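                        /* first buffer of the frame becomes the head */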
                        first = xdp;
                } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
                        xsk_buff_free(first);
                        first = NULL;
                }

                if (++ntc == cnt)
                        ntc = 0;

                if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!first))
                        continue;

                ((struct libeth_xdp_buff *)first)->desc = rx_desc;

                xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
                                         xsk_pool);
                if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
                        xdp_xmit |= xdp_res;
                } else if (xdp_res == ICE_XDP_EXIT) {
                        failure = true;
                        first = NULL;
                        break;
                } else if (xdp_res == ICE_XDP_CONSUMED) {
                        xsk_buff_free(first);
                } else if (xdp_res == ICE_XDP_PASS) {
                        goto construct_skb;
                }

                total_rx_bytes += xdp_get_buff_len(first);
                total_rx_packets++;

                first = NULL;
                continue;

construct_skb:
                /* XDP_PASS path */
                skb = xdp_build_skb_from_zc(first);
                if (!skb) {
                        xsk_buff_free(first);
                        first = NULL;

                        ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
                        continue;
                }

                first = NULL;

                total_rx_bytes += skb->len;
                total_rx_packets++;

                vlan_tci = ice_get_vlan_tci(rx_desc);

                ice_process_skb_fields(rx_ring, rx_desc, skb);
                ice_receive_skb(rx_ring, skb, vlan_tci);
        }

        rx_ring->next_to_clean = ntc;
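        /* stash the head of a frame that is still being assembled, if any,
         * so the next NAPI poll can resume where we left off
         */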
        rx_ring->xsk = (struct libeth_xdp_buff *)first;

        entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
        if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
                failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
                                                 entries_to_alloc);

        ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

        if (xsk_uses_need_wakeup(xsk_pool)) {
                /* ntu could have changed when allocating entries above, so
                 * use rx_ring value instead of stack based one
                 */
                if (failure || ntc == rx_ring->next_to_use)
                        xsk_set_rx_need_wakeup(xsk_pool);
                else
                        xsk_clear_rx_need_wakeup(xsk_pool);

                return (int)total_rx_packets;
        }

        return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
                         struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
                         unsigned int *total_bytes)
{
        struct ice_tx_desc *tx_desc;
        dma_addr_t dma;

        dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
        xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);

        tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
        tx_desc->buf_addr = cpu_to_le64(dma);
        tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
                                                      0, desc->len, 0);

        *total_bytes += desc->len;
}

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
                               struct xsk_buff_pool *xsk_pool,
                               struct xdp_desc *descs,
                               unsigned int *total_bytes)
{
        u16 ntu = xdp_ring->next_to_use;
        struct ice_tx_desc *tx_desc;
        u32 i;

        unrolled_count(PKTS_PER_BATCH)
        for (i = 0; i < PKTS_PER_BATCH; i++) {
                dma_addr_t dma;

                dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
                xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);

                tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
                tx_desc->buf_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
                                                              0, descs[i].len, 0);

                *total_bytes += descs[i].len;
        }

        xdp_ring->next_to_use = ntu;
}

/**
 * ice_fill_tx_hw_ring - produce a given number of Tx descriptors onto the ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
                                struct xsk_buff_pool *xsk_pool,
                                struct xdp_desc *descs, u32 nb_pkts,
                                unsigned int *total_bytes)
{
        u32 batched, leftover, i;

        batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
        leftover = nb_pkts & (PKTS_PER_BATCH - 1);
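        /* descriptors are produced in unrolled batches of PKTS_PER_BATCH;
         * whatever does not fill a full batch is sent one packet at a time
         */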
        for (i = 0; i < batched; i += PKTS_PER_BATCH)
                ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
        for (; i < batched + leftover; i++)
                ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
        struct xdp_desc *descs = xsk_pool->tx_descs;
        u32 nb_pkts, nb_processed = 0;
        unsigned int total_bytes = 0;
        int budget;

        ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);

        if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
            !netif_running(xdp_ring->vsi->netdev))
                return true;

        budget = ICE_DESC_UNUSED(xdp_ring);
        budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

        nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
        if (!nb_pkts)
                return true;

        if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
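                /* the request wraps the ring: fill descriptors up to the ring
                 * end here, then continue from index 0 below
                 */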
                nb_processed = xdp_ring->count - xdp_ring->next_to_use;
                ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
                                    &total_bytes);
                xdp_ring->next_to_use = 0;
        }

        ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
                            nb_pkts - nb_processed, &total_bytes);

        ice_set_rs_bit(xdp_ring);
        ice_xdp_ring_update_tail(xdp_ring);
        ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

        if (xsk_uses_need_wakeup(xsk_pool))
                xsk_set_tx_need_wakeup(xsk_pool);

        return nb_pkts < budget;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
               u32 __always_unused flags)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_q_vector *q_vector;
        struct ice_vsi *vsi = np->vsi;
        struct ice_tx_ring *ring;

        if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
                return -ENETDOWN;

        if (!ice_is_xdp_ena_vsi(vsi))
                return -EINVAL;

        if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
                return -EINVAL;

        ring = vsi->rx_rings[queue_id]->xdp_ring;

        if (!READ_ONCE(ring->xsk_pool))
                return -EINVAL;

        /* The idea here is that if NAPI is running, mark a miss, so
         * it will run again. If not, trigger an interrupt and
         * schedule the NAPI from interrupt context. If NAPI would be
         * scheduled here, the interrupt affinity would not be
         * honored.
         */
        q_vector = ring->q_vector;
        if (!napi_if_scheduled_mark_missed(&q_vector->napi))
                ice_trigger_sw_intr(&vsi->back->hw, q_vector);

        return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
        int i;

        ice_for_each_rxq(vsi, i) {
                if (xsk_get_pool_from_qid(vsi->netdev, i))
                        return true;
        }

        return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
        u16 ntc = rx_ring->next_to_clean;
        u16 ntu = rx_ring->next_to_use;

        if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
                xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

        while (ntc != ntu) {
                struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

                xsk_buff_free(xdp);
                ntc++;
                if (ntc >= rx_ring->count)
                        ntc = 0;
        }
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
        u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
        u32 xsk_frames = 0;

        while (ntc != ntu) {
                struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

                if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
                        tx_buf->type = ICE_TX_BUF_EMPTY;
                        xsk_buff_free(tx_buf->xdp);
                } else {
                        xsk_frames++;
                }

                ntc++;
                if (ntc >= xdp_ring->count)
                        ntc = 0;
        }

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}