cqe_bcnt: tracing the CQE byte count through the mlx5e RX path

Each fragment below is a use of cqe_bcnt, the per-packet byte count the NIC
reports in a receive completion (CQE), grouped by where it appears in the
driver.
/* XSK (AF_XDP) RX: the byte count is bounds-checked against the device MTU,
 * then sizes the zero-copy XDP buffer. */
u16 cqe_bcnt,                                  /* striding-RQ variant: 16-bit count */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {         /* oversize frame: drop */
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
u32 cqe_bcnt)                                  /* legacy-RQ variant: 32-bit count */
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
u16 cqe_bcnt,                                  /* matching prototypes */
u32 cqe_bcnt);
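As a minimal userspace model of what these two uses amount to (the struct and
model_set_size() are stand-ins I made up for the kernel's xdp_buff and
xsk_buff_set_size(); all values are hypothetical), the length ends up encoded
in the buffer's data/data_end pointer pair:

/* Userspace model only: mimics sizing an XDP buffer from the CQE byte count.
 * The struct and model_set_size() stand in for the kernel's xdp_buff and
 * xsk_buff_set_size(); hw_mtu and the drop path are modeled, not copied. */
#include <stdint.h>
#include <stdio.h>

struct model_xdp_buff {
	uint8_t *data;      /* start of packet data */
	uint8_t *data_end;  /* one past the last valid byte */
};

static void model_set_size(struct model_xdp_buff *xdp, uint32_t size)
{
	xdp->data_end = xdp->data + size; /* length lives in the pointer pair */
}

int main(void)
{
	uint8_t frame[2048];
	struct model_xdp_buff xdp = { .data = frame, .data_end = frame };
	uint32_t hw_mtu = 1522, cqe_bcnt = 1514;

	if (cqe_bcnt > hw_mtu) {          /* the unlikely() oversize check */
		puts("oversize: drop");
		return 0;
	}
	model_set_size(&xdp, cqe_bcnt);
	printf("buffer length: %td\n", xdp.data_end - xdp.data); /* 1514 */
	return 0;
}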
/* kTLS RX: the handler takes the byte count by pointer and accounts it as
 * decrypted TLS bytes. */
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
stats->tls_decrypted_bytes += *cqe_bcnt;
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);        /* prototype */
u32 *cqe_bcnt)
/* mlx5e_build_rx_skb(): cqe_bcnt feeds the kTLS handler and, for LRO
 * completions, determines the GSO segment size of the aggregated skb. */
u32 cqe_bcnt,
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
stats->lro_bytes += cqe_bcnt;
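The gso_size line is plain arithmetic that is easier to trust with numbers. A
sketch of the same computation in userspace, with DIV_ROUND_UP reimplemented
and all values hypothetical:

/* Userspace sketch of the LRO gso_size computation.
 * Example values are made up: a 45014-byte aggregate (cqe_bcnt) built from
 * 30 segments (lro_num_seg) behind a 54-byte Ethernet+IP+TCP header. */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t cqe_bcnt = 45014;   /* total bytes of the merged packet */
	uint32_t hdrlen = 54;        /* header bytes shared by all segments */
	uint32_t lro_num_seg = 30;   /* segments the NIC coalesced */

	/* Rounding up lets the last, possibly short, segment count as one. */
	uint32_t gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
	printf("gso_size = %u\n", gso_size); /* 44960 / 30 rounded up = 1499 */
	return 0;
}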
/* The two completion helpers (regular and SHAMPO) both account the packet
 * into stats->bytes and hand cqe_bcnt to mlx5e_build_rx_skb(). */
u32 cqe_bcnt,
stats->bytes += cqe_bcnt;
if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
u32 cqe_bcnt,
stats->bytes += cqe_bcnt;
return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
/* mlx5e_build_linear_skb(): the byte count becomes the skb's linear data
 * length. */
u32 cqe_bcnt, u32 metasize)
skb_put(skb, cqe_bcnt);
/* Legacy-RQ linear path: frag_size is derived from headroom plus cqe_bcnt;
 * after an XDP program runs, the count is re-derived from the buffer's data
 * pointers before the skb is built. */
struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
cqe_bcnt, mxbuf);
cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
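An XDP program may grow or shrink the packet (bpf_xdp_adjust_head() /
bpf_xdp_adjust_tail()), so the driver re-derives the byte count from the
buffer pointers instead of trusting the CQE value. A minimal model of that
recompute, with a hypothetical 14-byte header pull:

/* Userspace model: why cqe_bcnt is recomputed as data_end - data after XDP.
 * The struct is a stand-in for the kernel's xdp_buff; the 14-byte example
 * (an XDP program stripping an Ethernet header) is hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct model_xdp_buff {
	uint8_t *data;
	uint8_t *data_end;
};

int main(void)
{
	uint8_t frame[2048];
	struct model_xdp_buff xdp = { .data = frame + 256,           /* headroom */
				      .data_end = frame + 256 + 1514 };
	uint32_t cqe_bcnt = 1514;                 /* as reported by the CQE */

	xdp.data += 14;                           /* program pulled 14 bytes */

	cqe_bcnt = (uint32_t)(xdp.data_end - xdp.data);
	printf("new byte count: %u\n", cqe_bcnt); /* 1500, not the CQE's 1514 */
	return 0;
}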
/* Legacy-RQ nonlinear path: cqe_bcnt is consumed fragment by fragment, each
 * iteration taking min(frag_size, remaining), until it reaches zero. */
struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
cqe_bcnt -= frag_consumed_bytes;
while (cqe_bcnt) {
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
cqe_bcnt -= frag_consumed_bytes;
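A self-contained model of the consumption loop, using a made-up fragment plan
in place of the driver's per-RQ fragment info:

/* Userspace model of the nonlinear fragment-consumption loop.
 * The fragment sizes are a hypothetical plan; the real ones come from the
 * RQ's fragment info array in the driver. */
#include <stdint.h>
#include <stdio.h>

#define MIN_U32(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	uint32_t frag_sizes[] = { 2048, 2048, 2048 }; /* hypothetical plan */
	uint32_t cqe_bcnt = 5000;                     /* bytes to place */
	unsigned int i = 0;

	/* Consume the first fragment, then keep going while bytes remain. */
	uint32_t frag_consumed_bytes = MIN_U32(frag_sizes[i], cqe_bcnt);
	cqe_bcnt -= frag_consumed_bytes;
	printf("frag %u: %u bytes\n", i, frag_consumed_bytes);

	while (cqe_bcnt) {
		i++;
		frag_consumed_bytes = MIN_U32(frag_sizes[i], cqe_bcnt);
		cqe_bcnt -= frag_consumed_bytes;
		printf("frag %u: %u bytes\n", i, frag_consumed_bytes);
	}
	return 0; /* prints 2048, 2048, 904 */
}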
/* Legacy-RQ completion handlers (native and representor follow the same
 * pattern): the count is read straight from the big-endian CQE field. */
u32 cqe_bcnt;
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
rq, wi, cqe, cqe_bcnt);
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
u32 cqe_bcnt;
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
rq, wi, cqe, cqe_bcnt);
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
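The device writes CQE fields big-endian, which is why the raw field goes
through be32_to_cpu() before use. A userspace model that assembles the value
explicitly (the kernel helper does this per-architecture):

/* Userspace model of reading a big-endian 32-bit byte count, as
 * be32_to_cpu(cqe->byte_cnt) does in the driver. The raw bytes here are a
 * hypothetical CQE field holding the value 1514 (0x000005EA). */
#include <stdint.h>
#include <stdio.h>

static uint32_t model_be32_to_cpu(const uint8_t b[4])
{
	/* Assemble from most-significant byte first, regardless of host order. */
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	const uint8_t byte_cnt_field[4] = { 0x00, 0x00, 0x05, 0xEA };
	uint32_t cqe_bcnt = model_be32_to_cpu(byte_cnt_field);
	printf("cqe_bcnt = %u\n", cqe_bcnt); /* 1514 */
	return 0;
}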
/* Striding RQ (MPWRQ): a 16-bit count comes from mpwrq_get_cqe_byte_cnt().
 * The nonlinear builder copies at most MLX5E_RX_MAX_HEAD bytes into the
 * skb's linear area and accounts the rest as page fragments. */
u16 cqe_bcnt;
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
u32 byte_cnt = cqe_bcnt;
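A sketch of the resulting linear/fragment split, assuming 256 for
MLX5E_RX_MAX_HEAD (its usual value, but treat the number as an assumption
here):

/* Userspace sketch of the linear/frag split in the MPWRQ nonlinear builder.
 * MODEL_RX_MAX_HEAD stands in for MLX5E_RX_MAX_HEAD (assumed 256). */
#include <stdint.h>
#include <stdio.h>

#define MODEL_RX_MAX_HEAD 256

int main(void)
{
	uint16_t cqe_bcnt = 1514;
	uint16_t headlen = cqe_bcnt < MODEL_RX_MAX_HEAD ? cqe_bcnt
							: MODEL_RX_MAX_HEAD;
	uint32_t byte_cnt = cqe_bcnt;    /* total to split across head + frags */

	printf("linear (copied) bytes: %u\n", headlen);            /* 256 */
	printf("fragment bytes:        %u\n", byte_cnt - headlen); /* 1258 */
	return 0;
}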
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
cqe_bcnt, mxbuf);
cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
/* SHAMPO (header/data split): the payload size, cqe_bcnt - head_size,
 * becomes the GRO skb's gso_size. */
u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
cqe_bcnt,
skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
/* Representor MPWRQ handler: same extraction and completion as above. */
u16 cqe_bcnt;
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
rq, wi, cqe, cqe_bcnt, head_offset,
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
/* IPoIB (mlx5i): the same byte count drives stats accounting and completion
 * on the InfiniBand ULP path. */
u32 cqe_bcnt,
stats->bytes += cqe_bcnt;
u32 cqe_bcnt;
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
rq, wi, cqe, cqe_bcnt);
mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
/* One more legacy-RQ handler builds its skb via the nonlinear path
 * unconditionally. */
u32 cqe_bcnt;
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
/* Prototypes for the MPWRQ skb builders. */
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
/* mlx5e_lro_update_hdr(): the merged super-packet's IP total length is
 * cqe_bcnt minus the bytes preceding the network header. */
u32 cqe_bcnt)
tot_len = cqe_bcnt - network_depth;
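A worked model of that fixup, with hypothetical values; for IPv4 the result
is what lands in the header's total-length field:

/* Userspace model of the LRO total-length fixup. network_depth is the byte
 * offset of the IP header (14 for plain Ethernet); the 45014-byte aggregate
 * is the same hypothetical packet as in the gso_size example above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cqe_bcnt = 45014;   /* merged super-packet, hypothetical */
	uint32_t network_depth = 14; /* Ethernet header precedes the IP header */

	uint32_t tot_len = cqe_bcnt - network_depth;
	printf("IP total length = %u\n", tot_len); /* 45000, fits in 16 bits */
	return 0;
}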