/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_
#include <linux/dim.h>
#include <net/libeth/cache.h>
#include <net/libeth/types.h>
#include <net/netdev_queues.h>
#include <net/tcp.h>
#include <net/xdp.h>
#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"
#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
#define IDPF_MIN_Q 2
#define IDPF_MAX_MBXQ 1
#define IDPF_MIN_TXQ_DESC 64
#define IDPF_MIN_RXQ_DESC 64
#define IDPF_MIN_TXQ_COMPLQ_DESC 256
#define IDPF_MAX_QIDS 256
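/* The number of descriptors in a queue must be a multiple of 32. RX queue
 * descriptors must additionally be a multiple of the number of buffer
 * queues per RX queue group, so that the ring divides evenly between them.
 */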
#define IDPF_REQ_DESC_MULTIPLE 32
#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
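/* Worst-case number of free descriptors needed to accept one more SKB:
 * one per fragment plus slack for header/context descriptors. A stopped
 * queue is only restarted once IDPF_TX_WAKE_THRESH descriptors are free.
 */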
#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)
#define IDPF_MAX_DESCS 8160
#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
#define MIN_SUPPORT_TXDID (\
VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
VIRTCHNL2_TXDID_FLEX_TSO_CTX)
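/* Default queue-group and per-group queue counts for both queue models.
 * In splitq, each RX queue group owns up to IDPF_MAX_BUFQS_PER_RXQ_GRP
 * buffer queues.
 */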
#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4
#define IDPF_COMPLQ_PER_GROUP 1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP 2
#define IDPF_BUFQ2_ENA 1
#define IDPF_NUMQ_PER_CHUNK 1
#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1
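/* Vector accounting: one vector is reserved for the mailbox, each vport
 * needs at least one queue vector, and RDMA needs at least two vectors.
 */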
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
#define IDPF_MIN_RDMA_VEC 2
#define IDPF_RESERVED_VECS 1
#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
#define IDPF_DFLT_RX_Q_DESC_COUNT 512
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ))
#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
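/* Bump the next-to-clean index on an RX queue, wrapping at the ring size
 * and toggling the expected generation bit on wrap. A sketch of the
 * intended use in an RX clean loop (illustrative; process_desc() is a
 * hypothetical placeholder):
 *
 *	u32 ntc = rxq->next_to_clean;
 *
 *	process_desc(&rxq->rx[ntc]);
 *	IDPF_RX_BUMP_NTC(rxq, ntc);
 *	rxq->next_to_clean = ntc;
 */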
#define IDPF_RX_BUMP_NTC(rxq, ntc) \
do { \
if (unlikely(++(ntc) == (rxq)->desc_count)) { \
ntc = 0; \
idpf_queue_change(GEN_CHK, rxq); \
} \
} while (0)
#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \
do { \
if (unlikely(++(idx) == (q)->desc_count)) \
idx = 0; \
} while (0)
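/* RX buffer sizing: data buffers are capped just below 16K and sized in
 * 32-byte strides; buffer queue descriptors are posted to HW in multiples
 * of IDPF_RX_BUF_POST_STRIDE.
 */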
#define IDPF_RX_MAX_BUF_SZ (16384 - 128)
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
#define IDPF_TX_TSO_MIN_MSS 88
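/* Minimum number of descriptors between two descriptors with the RE
 * (report event) bit set; only relevant in flow scheduling mode.
 */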
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
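/* Refill queue ring entries carry a software generation bit on top of the
 * 16-bit buffer ID.
 */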
#define IDPF_RFL_BI_GEN_M BIT(16)
#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
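/* Number of unused descriptors in a ring, with wraparound handling; one
 * slot is always kept empty so a full ring can be told apart from an empty
 * one. E.g. with desc_count = 512, next_to_clean = 10 and next_to_use =
 * 500, this yields 512 + 10 - 500 - 1 = 21 free descriptors.
 */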
#define IDPF_DESC_UNUSED(txq) \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
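/* Number of completions a TX queue group is still waiting for. Both
 * counters are free-running, so the U32_MAX term compensates for the case
 * where the pending counter has already wrapped around while the
 * completion counter has not yet caught up.
 */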
#define IDPF_TX_COMPLQ_PENDING(txq) \
(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
#define IDPF_TXBUF_NULL U32_MAX
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
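/* Bits for idpf_tx_offload_params.tx_flags */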
#define IDPF_TX_FLAGS_TSO BIT(0)
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
#define IDPF_TX_FLAGS_TSYN BIT(4)
struct libeth_rq_napi_stats;
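/* Flex TX data descriptor: @q is used with queue-based scheduling, @flow
 * with flow-based (completion tag) scheduling.
 */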
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q;
struct idpf_flex_tx_sched_desc flow;
};
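/* idpf TX buffers are managed by the generic &libeth_sqe structure */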
#define idpf_tx_buf libeth_sqe
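/**
 * struct idpf_tx_offload_params - offload parameters for a given packet
 * @tx_flags: feature flags enabled for this packet (IDPF_TX_FLAGS_*)
 * @hdr_offsets: offset parameter for the single queue model
 * @cd_tunneling: type of tunneling enabled, single queue model only
 * @tso_len: total length of the payload to segment
 * @mss: segment size
 * @tso_segs: number of segments to be sent
 * @tso_hdr_len: length of the headers to be duplicated per segment
 * @td_cmd: command field to be inserted into the descriptor
 */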
struct idpf_tx_offload_params {
u32 tx_flags;
u32 hdr_offsets;
u32 cd_tunneling;
u32 tso_len;
u16 mss;
u16 tso_segs;
u16 tso_hdr_len;
u16 td_cmd;
};
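/**
 * struct idpf_tx_splitq_params - parameters for building a splitq descriptor
 * @dtype: general descriptor type
 * @eop_cmd: type of EOP
 * @compl_tag: associated tag for completion (flow scheduling)
 * @td_tag: descriptor tunneling tag (queue-based scheduling)
 * @offload: offload parameters
 * @prev_ntu: stored TX queue next_to_use in case of rollback
 * @prev_refill_ntc: stored refill queue next_to_clean in case of rollback
 * @prev_refill_gen: stored refill queue generation bit in case of rollback
 */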
struct idpf_tx_splitq_params {
enum idpf_tx_desc_dtype_value dtype;
u16 eop_cmd;
union {
u16 compl_tag;
u16 td_tag;
};
struct idpf_tx_offload_params offload;
u16 prev_ntu;
u16 prev_refill_ntc;
bool prev_refill_gen;
};
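/* External IP header type (EIPT) values for the TX context descriptor */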
enum idpf_tx_ctx_desc_eipt_offload {
IDPF_TX_CTX_EXT_IP_NONE = 0x0,
IDPF_TX_CTX_EXT_IP_IPV6 = 0x1,
IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};
#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \
sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX 1
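/* TX descriptors needed, worst case */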
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
IDPF_TX_DESCS_PER_CACHE_LINE + \
IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
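/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */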
#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K
#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
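/* idpf RX buffers are managed by the generic &libeth_fqe structure */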
#define idpf_rx_buf libeth_fqe
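/* Sizing for GET_PTYPE_INFO messages: each ptype entry carries at most 32
 * protocol IDs, and IDPF_RX_MAX_PTYPES_PER_BUF is how many such entries
 * fit into one control queue buffer after the message header.
 */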
#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
(sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF \
DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
IDPF_RX_MAX_PTYPE_SZ)
#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
#define IDPF_TUN_IP_GRE (\
IDPF_PTYPE_TUNNEL_IP |\
IDPF_PTYPE_TUNNEL_IP_GRENAT)
#define IDPF_TUN_IP_GRE_MAC (\
IDPF_TUN_IP_GRE |\
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)
#define IDPF_RX_MAX_PTYPE 1024
#define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF
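/**
 * enum idpf_tunnel_state - bits of the ptype tunnel parsing state machine
 * @IDPF_PTYPE_TUNNEL_IP: outer IP header has been parsed
 * @IDPF_PTYPE_TUNNEL_IP_GRENAT: GRE/NAT tunnel header has been parsed
 * @IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC: inner MAC of the GRE/NAT tunnel has
 *				     been parsed
 */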
enum idpf_tunnel_state {
IDPF_PTYPE_TUNNEL_IP = BIT(0),
IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2),
};
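/**
 * struct idpf_ptype_state - packet type parsing state
 * @outer_ip: set when the outer IP header has been processed
 * @outer_frag: set when the outer IP header is fragmented
 * @tunnel_state: tunnel packet state, e.g. IDPF_PTYPE_TUNNEL_IP
 */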
struct idpf_ptype_state {
bool outer_ip:1;
bool outer_frag:1;
u8 tunnel_state:6;
};
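/**
 * enum idpf_queue_flags_t - queue state flags
 * @__IDPF_Q_GEN_CHK: queues operating in splitq mode use a generation bit
 *		      to identify new descriptor writebacks on the ring. HW
 *		      sets the gen bit to 1 on the first pass through the
 *		      ring and flips it after each wrap; this flag tracks
 *		      the value the driver currently expects.
 * @__IDPF_Q_RFL_GEN_CHK: refill queues are SW only, so Q_GEN acts as the
 *			  HW bit and Q_RFL_GEN is the SW bit
 * @__IDPF_Q_FLOW_SCH_EN: enable flow scheduling
 * @__IDPF_Q_SW_MARKER: used to indicate TX queue marker completions
 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 * @__IDPF_Q_RSC_EN: receive side coalescing is enabled on the queue
 * @__IDPF_Q_HSPLIT_EN: enable header split on RX (splitq)
 * @__IDPF_Q_PTP: RX timestamping is enabled for the queue
 * @__IDPF_Q_NOIRQ: the queue is polling-driven and has no interrupt
 * @__IDPF_Q_XDP: this is an XDP queue
 * @__IDPF_Q_XSK: the queue works in XSk mode
 * @__IDPF_Q_FLAGS_NBITS: must be last
 */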
enum idpf_queue_flags_t {
__IDPF_Q_GEN_CHK,
__IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_CRC_EN,
__IDPF_Q_RSC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_PTP,
__IDPF_Q_NOIRQ,
__IDPF_Q_XDP,
__IDPF_Q_XSK,
__IDPF_Q_FLAGS_NBITS,
};
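/* Accessors for the flags above. The flag name is passed without the
 * __IDPF_Q_ prefix, e.g. idpf_queue_has(GEN_CHK, rxq) expands to
 * test_bit(__IDPF_Q_GEN_CHK, (rxq)->flags).
 */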
#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_has_clear(f, q) \
__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
#define idpf_queue_assign(f, q, v) \
__assign_bit(__IDPF_Q_##f, (q)->flags, v)
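/**
 * struct idpf_vec_regs - vector register offsets
 * @dyn_ctl_reg: dynamic control interrupt register offset
 * @itrn_reg: interrupt throttling rate register offset
 * @itrn_index_spacing: register spacing between ITR registers of the same
 *			vector
 */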
struct idpf_vec_regs {
u32 dyn_ctl_reg;
u32 itrn_reg;
u32 itrn_index_spacing;
};
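/**
 * struct idpf_intr_reg - vector interrupt registers
 * @dyn_ctl: dynamic control interrupt register
 * @dyn_ctl_intena_m: mask for dyn_ctl interrupt enable
 * @dyn_ctl_intena_msk_m: mask for dyn_ctl interrupt enable mask
 * @dyn_ctl_itridx_s: register bit offset for ITR index
 * @dyn_ctl_itridx_m: mask for ITR index
 * @dyn_ctl_intrvl_s: register bit offset for ITR interval
 * @dyn_ctl_wb_on_itr_m: mask for WB on ITR feature
 * @dyn_ctl_sw_itridx_ena_m: mask for SW ITR index
 * @dyn_ctl_swint_trig_m: mask for dyn_ctl SW triggered interrupt enable
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: interrupt cause register offset
 * @icr_ena_ctlq_m: mask for ICR register enabling control queue support
 */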
struct idpf_intr_reg {
void __iomem *dyn_ctl;
u32 dyn_ctl_intena_m;
u32 dyn_ctl_intena_msk_m;
u32 dyn_ctl_itridx_s;
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
u32 dyn_ctl_wb_on_itr_m;
u32 dyn_ctl_sw_itridx_ena_m;
u32 dyn_ctl_swint_trig_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
u32 icr_ena_ctlq_m;
};
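/**
 * struct idpf_q_vector - interrupt vector and the queues it services
 * @vport: vport back pointer
 * @num_rxq: number of RX queues serviced by this vector
 * @num_txq: number of TX queues
 * @num_bufq: number of buffer queues
 * @num_complq: number of completion queues
 * @num_xsksq: number of XSk send queues
 * @rx: array of RX queues to service
 * @tx: array of TX queues to service
 * @bufq: array of buffer queues to service
 * @complq: array of completion queues to service
 * @xsksq: array of XSk send queues to service
 * @intr_reg: see struct idpf_intr_reg
 * @csd: XDP SQ cleanup CSD
 * @total_events: number of interrupts processed
 * @wb_on_itr: whether WB on ITR is enabled
 * @napi: NAPI handler
 * @tx_dim: data for the TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @rx_dim: data for the RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @v_idx: vector index
 */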
struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
struct idpf_vport *vport;
u16 num_rxq;
u16 num_txq;
u16 num_bufq;
u16 num_complq;
u16 num_xsksq;
struct idpf_rx_queue **rx;
struct idpf_tx_queue **tx;
struct idpf_buf_queue **bufq;
struct idpf_compl_queue **complq;
struct idpf_tx_queue **xsksq;
struct idpf_intr_reg intr_reg;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
call_single_data_t csd;
u16 total_events;
bool wb_on_itr;
struct napi_struct napi;
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
u32 tx_itr_idx;
struct dim rx_dim;
u16 rx_itr_value;
bool rx_intr_mode;
u32 rx_itr_idx;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u16 v_idx;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 136,
56 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
8);
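/* Per-queue RX statistics; updated under the queue's &u64_stats_sync */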
struct idpf_rx_queue_stats {
u64_stats_t packets;
u64_stats_t bytes;
u64_stats_t rsc_pkts;
u64_stats_t hw_csum_err;
u64_stats_t hsplit_pkts;
u64_stats_t hsplit_buf_ovf;
u64_stats_t bad_descs;
};
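/* Per-queue TX statistics; updated under the queue's &u64_stats_sync */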
struct idpf_tx_queue_stats {
u64_stats_t packets;
u64_stats_t bytes;
u64_stats_t lso_pkts;
u64_stats_t linearize;
u64_stats_t q_busy;
u64_stats_t skb_drops;
u64_stats_t dma_map_errs;
u64_stats_t tstamp_skipped;
};
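/* ITR (interrupt throttling rate) values are in microseconds; bit 0 is
 * reserved (2 us hardware granularity), hence the mask. The 50 us (0x32)
 * default corresponds to roughly 20K interrupts/sec.
 */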
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
#define IDPF_ITR_GRAN_S 1
#define IDPF_ITR_MASK 0x1FFE
#define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
#define IDPF_SW_ITR_UPDATE_IDX 2
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
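/**
 * struct idpf_rx_queue - software structure representing an RX queue
 * @rx: universal receive descriptor array
 * @single_buf: buffer descriptor array in singleq
 * @desc_ring: virtual descriptor ring address
 * @bufq_sets: pointer to the array of buffer queue sets, splitq only
 * @napi: NAPI instance corresponding to this queue, splitq only
 * @xdp_prog: attached XDP program
 * @rx_buf: RX buffer array in singleq
 * @pp: page pool for the data buffers in singleq
 * @tail: tail offset register, used in both queue models
 * @flags: see enum idpf_queue_flags_t
 * @idx: index into the vport's total RX queues, also used for skb reporting
 * @desc_count: number of descriptors
 * @num_xdp_txq: total number of XDP TX queues
 * @xdpsqs: shortcut to the XDP TX queues array
 * @rxdids: supported RX descriptor IDs
 * @truesize: data buffer truesize in singleq
 * @rx_ptype_lkup: lookup table of RX ptypes
 * @xdp_rxq: XDP queue info
 * @next_to_use: next descriptor to use
 * @next_to_clean: next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @xdp: XDP buffer with the current frame
 * @xsk: current XDP buffer in XSk mode
 * @pool: XSk buffer pool if installed
 * @cached_phc_time: cached PHC time for RX timestamp extension
 * @stats_sync: synchronization for @q_stats updates
 * @q_stats: see struct idpf_rx_queue_stats
 * @q_id: queue ID
 * @size: length of the descriptor ring in bytes
 * @dma: physical address of the ring
 * @q_vector: back reference to the associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: header buffer size
 * @rx_buf_size: data buffer size
 * @rx_max_pkt_size: RX max packet size
 */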
struct idpf_rx_queue {
__cacheline_group_begin_aligned(read_mostly);
union {
union virtchnl2_rx_desc *rx;
struct virtchnl2_singleq_rx_buf_desc *single_buf;
void *desc_ring;
};
union {
struct {
struct idpf_bufq_set *bufq_sets;
struct napi_struct *napi;
struct bpf_prog __rcu *xdp_prog;
};
struct {
struct libeth_fqe *rx_buf;
struct page_pool *pp;
void __iomem *tail;
};
};
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
u32 num_xdp_txq;
union {
struct idpf_tx_queue **xdpsqs;
struct {
u32 rxdids;
u32 truesize;
};
};
const struct libeth_rx_pt *rx_ptype_lkup;
struct xdp_rxq_info xdp_rxq;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
u32 next_to_alloc;
union {
struct libeth_xdp_buff_stash xdp;
struct {
struct libeth_xdp_buff *xsk;
struct xsk_buff_pool *pool;
};
};
u64 cached_phc_time;
struct u64_stats_sync stats_sync;
struct idpf_rx_queue_stats q_stats;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
u16 rx_buffer_low_watermark;
u16 rx_hbuf_size;
u16 rx_buf_size;
u16 rx_max_pkt_size;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue,
ALIGN(64, __alignof(struct xdp_rxq_info)) +
sizeof(struct xdp_rxq_info),
96 + offsetof(struct idpf_rx_queue, q_stats) -
offsetofend(struct idpf_rx_queue, cached_phc_time),
32);
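/**
 * struct idpf_tx_queue - software structure representing a TX queue
 * @base_tx: base TX descriptor array
 * @base_ctx: base TX context descriptor array
 * @flex_tx: flex TX descriptor array
 * @flex_ctx: flex TX context descriptor array
 * @desc_ring: virtual descriptor ring address
 * @tx_buf: see struct idpf_tx_buf
 * @txq_grp: see struct idpf_txq_group
 * @complq: corresponding completion queue in XDP mode
 * @dev: device back pointer for DMA mapping
 * @pool: corresponding XSk pool if installed
 * @tail: tail offset register, used in both queue models
 * @flags: see enum idpf_queue_flags_t
 * @idx: index into the vport's total TX queues
 * @desc_count: number of descriptors
 * @tx_min_pkt_len: minimum supported packet length
 * @thresh: XDP queue cleaning threshold
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: next descriptor to use
 * @next_to_clean: next descriptor to clean
 * @last_re: last descriptor index on which the RE bit was set
 * @tx_max_bufs: max buffers that can be transmitted with scatter-gather
 * @cleaned_bytes: number of bytes cleaned in the current cycle, splitq only
 * @clean_budget: queue cleaning budget, singleq only
 * @cleaned_pkts: number of packets cleaned in the current cycle
 * @refillq: pointer to the refill queue holding free buffer IDs
 * @pending: number of pending descriptors to send, XDP only
 * @xdp_tx: number of pending XDP frames, XDP only
 * @timer: timer for XDP TX queue cleanup
 * @xdp_lock: lock for XDP TX queues sharing
 * @cached_tstamp_caps: TX timestamp capabilities negotiated with the CP
 * @tstamp_task: work that handles TX timestamp reads
 * @stats_sync: synchronization for @q_stats updates
 * @q_stats: see struct idpf_tx_queue_stats
 * @q_id: queue ID
 * @size: length of the descriptor ring in bytes
 * @dma: physical address of the ring
 * @q_vector: back reference to the associated vector
 * @buf_pool_size: total number of TX buffers in @tx_buf
 * @rel_q_id: queue ID relative to the vport
 */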
struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_mostly);
union {
struct idpf_base_tx_desc *base_tx;
struct idpf_base_tx_ctx_desc *base_ctx;
union idpf_tx_flex_desc *flex_tx;
union idpf_flex_tx_ctx_desc *flex_ctx;
void *desc_ring;
};
struct libeth_sqe *tx_buf;
union {
struct idpf_txq_group *txq_grp;
struct idpf_compl_queue *complq;
};
union {
struct device *dev;
struct xsk_buff_pool *pool;
};
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
union {
u16 tx_min_pkt_len;
u32 thresh;
};
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
union {
struct {
u16 last_re;
u16 tx_max_bufs;
union {
u32 cleaned_bytes;
u32 clean_budget;
};
u16 cleaned_pkts;
struct idpf_sw_queue *refillq;
};
struct {
u32 pending;
u32 xdp_tx;
struct libeth_xdpsq_timer *timer;
struct libeth_xdpsq_lock xdp_lock;
};
};
struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
struct work_struct *tstamp_task;
struct u64_stats_sync stats_sync;
struct idpf_tx_queue_stats q_stats;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
u32 buf_pool_size;
u32 rel_q_id;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
104 +
offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
offsetofend(struct idpf_tx_queue, timer) +
offsetof(struct idpf_tx_queue, q_stats) -
offsetofend(struct idpf_tx_queue, tstamp_task),
32);
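/**
 * struct idpf_buf_queue - software structure representing a buffer queue
 * @split_buf: buffer descriptor array
 * @buf: &libeth_fqe array for the data buffers
 * @pp: page pool for the data buffers
 * @xsk_buf: XDP buffer array in XSk mode
 * @pool: XSk buffer pool if installed
 * @hdr_buf: &libeth_fqe array for the header buffers
 * @hdr_pp: page pool for the header buffers
 * @tail: tail offset register
 * @flags: see enum idpf_queue_flags_t
 * @desc_count: number of descriptors
 * @thresh: refill threshold
 * @next_to_use: next descriptor to use
 * @next_to_clean: next descriptor to clean
 * @next_to_alloc: RX buffer to allocate at
 * @pending: number of buffers pending refill
 * @hdr_truesize: truesize of the header buffers
 * @truesize: truesize of the data buffers
 * @q_id: queue ID
 * @size: length of the descriptor ring in bytes
 * @dma: physical address of the ring
 * @q_vector: back reference to the associated vector
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rx_hbuf_size: header buffer size
 * @rx_buf_size: data buffer size
 */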
struct idpf_buf_queue {
__cacheline_group_begin_aligned(read_mostly);
struct virtchnl2_splitq_rx_buf_desc *split_buf;
union {
struct {
struct libeth_fqe *buf;
struct page_pool *pp;
};
struct {
struct libeth_xdp_buff **xsk_buf;
struct xsk_buff_pool *pool;
};
};
struct libeth_fqe *hdr_buf;
struct page_pool *hdr_pp;
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
u32 thresh;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
u32 next_to_alloc;
u32 pending;
u32 hdr_truesize;
u32 truesize;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
u16 rx_buffer_low_watermark;
u16 rx_hbuf_size;
u16 rx_buf_size;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
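/**
 * struct idpf_compl_queue - software structure representing a completion
 *			     queue
 * @comp: 8B completion descriptor array
 * @comp_4b: 4B completion descriptor array
 * @desc_ring: virtual descriptor ring address
 * @txq_grp: see struct idpf_txq_group
 * @flags: see enum idpf_queue_flags_t
 * @desc_count: number of descriptors
 * @clean_budget: queue cleaning budget
 * @netdev: &net_device corresponding to this queue
 * @next_to_use: next descriptor to use
 * @next_to_clean: next descriptor to clean
 * @num_completions: number of completions received, compared against the
 *		     number of completions pending as accumulated by the
 *		     TX queues of the group
 * @q_id: queue ID
 * @size: length of the descriptor ring in bytes
 * @dma: physical address of the ring
 * @q_vector: back reference to the associated vector
 */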
struct idpf_compl_queue {
__cacheline_group_begin_aligned(read_mostly);
union {
struct idpf_splitq_tx_compl_desc *comp;
struct idpf_splitq_4b_tx_compl_desc *comp_4b;
void *desc_ring;
};
struct idpf_txq_group *txq_grp;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
u32 clean_budget;
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
aligned_u64 num_completions;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
u32 q_id;
u32 size;
dma_addr_t dma;
struct idpf_q_vector *q_vector;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
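/**
 * struct idpf_sw_queue - software ring of buffer IDs
 * @ring: array of ring entries (buffer IDs)
 * @flags: see enum idpf_queue_flags_t
 * @desc_count: number of ring entries
 * @next_to_use: entry to produce at
 * @next_to_clean: entry to consume at
 *
 * Software queues are used in splitq mode to manage buffers between the
 * RX queue producer and the buffer queue consumer. They are required in
 * order to maintain a lockless buffer management system and are strictly
 * software-only constructs.
 */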
struct idpf_sw_queue {
__cacheline_group_begin_aligned(read_mostly);
u32 *ring;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
u32 next_to_use;
u32 next_to_clean;
__cacheline_group_end_aligned(read_write);
};
libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
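/**
 * struct idpf_rxq_set - an RX queue with its refill queues, splitq only
 * @rxq: RX queue
 * @refillq: pointers to refill queues
 *
 * An rxq_set associates one RX queue with an array of refill queues: each
 * RX queue needs one refill queue per buffer queue to return used buffers,
 * and the buffer queues then drain the refill queues to repost buffers to
 * hardware.
 */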
struct idpf_rxq_set {
struct idpf_rx_queue rxq;
struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};
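/**
 * struct idpf_bufq_set - a buffer queue with its refill queues, splitq only
 * @bufq: buffer queue
 * @num_refillqs: number of refill queues; always equal to the number of
 *		  rxq_sets in the group
 * @refillqs: array of refill queues
 */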
struct idpf_bufq_set {
struct idpf_buf_queue bufq;
int num_refillqs;
struct idpf_sw_queue *refillqs;
};
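/**
 * struct idpf_rxq_group - RX queue group
 * @vport: vport back pointer
 * @singleq: single queue model members
 * @singleq.num_rxq: number of RX queues in the group
 * @singleq.rxqs: array of RX queue pointers
 * @splitq: split queue model members
 * @splitq.num_rxq_sets: number of RX queue sets
 * @splitq.num_bufq_sets: number of buffer queue sets
 * @splitq.rxq_sets: array of RX queue sets
 * @splitq.bufq_sets: array of buffer queue sets
 *
 * In singleq mode an rxq_group is simply an array of RX queues; in splitq
 * mode it additionally ties those queues to the buffer and refill queues
 * needed to manage their buffers.
 */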
struct idpf_rxq_group {
struct idpf_vport *vport;
union {
struct {
u16 num_rxq;
struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
} singleq;
struct {
u16 num_rxq_sets;
u16 num_bufq_sets;
struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
struct idpf_bufq_set *bufq_sets;
} splitq;
};
};
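/**
 * struct idpf_txq_group - TX queue group
 * @vport: vport back pointer
 * @num_txq: number of TX queues in the group
 * @txqs: array of TX queue pointers
 * @complq: associated completion queue, splitq only
 * @num_completions_pending: total number of completions pending for the
 *			     completion queue, accumulated across all TX
 *			     queues associated with it
 *
 * Singleq and splitq groups are largely the same, except that in splitq a
 * single completion queue handles completions for all TX queues in the
 * group.
 */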
struct idpf_txq_group {
struct idpf_vport *vport;
u16 num_txq;
struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
struct idpf_compl_queue *complq;
aligned_u64 num_completions_pending;
};
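/**
 * idpf_q_vector_to_mem - get the memory node for the given queue vector
 * @q_vector: vector to get the node for
 *
 * Return: the NUMA memory node of the vector's affine CPU, or NUMA_NO_NODE
 * if the vector is %NULL or has no affinity set.
 */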
static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
{
	u32 cpu;

	if (!q_vector)
		return NUMA_NO_NODE;

	cpu = cpumask_first(&q_vector->napi.config->affinity_mask);

	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
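/**
 * idpf_size_to_txd_count - get the number of descriptors needed for a
 *			    TX buffer of the given size
 * @size: transmit request size in bytes
 *
 * A frag larger than the max descriptor data size (16K - 1) must be split
 * across multiple descriptors, each carrying at most the 4K-aligned
 * portion (12K) to respect the maximum read request size.
 */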
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
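/**
 * idpf_tx_singleq_build_ctob - populate command, tag, offset and size
 * @td_cmd: command to be filled in the descriptor
 * @td_offset: offset to be filled in the descriptor
 * @size: size of the buffer
 * @td_tag: L2TAG1 (VLAN) tag to be filled
 *
 * Return: the 64-bit descriptor quad word populated with the inputs.
 */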
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
unsigned int size, u64 td_tag)
{
return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
(td_cmd << IDPF_TXD_QW1_CMD_S) |
(td_offset << IDPF_TXD_QW1_OFFSET_S) |
((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
(td_tag << IDPF_TXD_QW1_L2TAG1_S));
}
void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size);
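/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to
 *			       build
 * @desc: descriptor to populate
 * @params: pointer to TX params struct
 * @td_cmd: command to be filled in the descriptor
 * @size: size of the buffer
 */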
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size)
{
if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
else
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}
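/**
 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled
 *				   interrupts
 * @q_vector: queue vector to enable WB on ITR for
 *
 * Keeps the HW writing back descriptors on ITR expiry while the vector's
 * interrupt stays masked.
 */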
static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
{
	struct idpf_intr_reg *reg;

	if (q_vector->wb_on_itr)
		return;

	q_vector->wb_on_itr = true;

	reg = &q_vector->intr_reg;
	writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
	       (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
	       reg->dyn_ctl);
}
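/**
 * idpf_tx_splitq_get_free_bufs - get the number of free buffer IDs in the
 *				  refill queue
 * @refillq: refill queue containing the buffer IDs
 *
 * Uses the same wraparound arithmetic as IDPF_DESC_UNUSED(), keeping one
 * slot unused.
 */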
static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
{
return (refillq->next_to_use > refillq->next_to_clean ?
0 : refillq->desc_count) +
refillq->next_to_use - refillq->next_to_clean - 1;
}
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg,
struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
struct virtchnl2_create_vport *vport_msg,
struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_queues_alloc(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_queues_rel(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_intr_alloc(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_intr_init(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_ena(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
struct idpf_rss_data *rss_data);
int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data);
int idpf_rx_bufs_init_all(struct idpf_vport *vport,
struct idpf_q_vec_rsrc *rsrc);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num);
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
u32 q_num);
int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
bool idpf_rx_process_skb_fields(struct sk_buff *skb,
const struct libeth_xdp_buff *xdp,
struct libeth_rq_napi_stats *rs);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);
#endif /* _IDPF_TXRX_H_ */