/*
 * NOTE(review): the lines below are a non-contiguous extract; the enclosing
 * function(s) are not visible here.  The field names (tx_vlan_err,
 * over_max_recursion, skb2sgl_err, map_sg_err, ...) match the hns3 driver's
 * struct ring_stats -- presumably this is its ndo_get_stats64 aggregation
 * path, folding one ring's counters into the netdev-wide totals; TODO
 * confirm against the full file.
 */
ring_stats
struct ring_stats stats;
struct ring_stats ring_stats;
/*
 * Snapshot the ring's counters into a local copy before accumulating.
 * NOTE(review): 'stats' is dereferenced with '->' below but declared as a
 * value above -- these lines come from different scopes; in the original,
 * 'stats' is presumably a struct rtnl_link_stats64 * output parameter.
 */
ring_stats = ring->stats;
stats->tx_bytes += ring_stats.tx_bytes;
stats->tx_packets += ring_stats.tx_pkts;
/*
 * Each TX failure cause is counted into tx_dropped AND (below) tx_errors:
 * the same ten per-cause counters are accumulated into both totals.
 */
stats->tx_dropped += ring_stats.sw_err_cnt;
stats->tx_dropped += ring_stats.tx_vlan_err;
stats->tx_dropped += ring_stats.tx_l4_proto_err;
stats->tx_dropped += ring_stats.tx_l2l3l4_err;
stats->tx_dropped += ring_stats.tx_tso_err;
stats->tx_dropped += ring_stats.over_max_recursion;
stats->tx_dropped += ring_stats.hw_limitation;
stats->tx_dropped += ring_stats.copy_bits_err;
stats->tx_dropped += ring_stats.skb2sgl_err;
stats->tx_dropped += ring_stats.map_sg_err;
stats->tx_errors += ring_stats.sw_err_cnt;
stats->tx_errors += ring_stats.tx_vlan_err;
stats->tx_errors += ring_stats.tx_l4_proto_err;
stats->tx_errors += ring_stats.tx_l2l3l4_err;
stats->tx_errors += ring_stats.tx_tso_err;
stats->tx_errors += ring_stats.over_max_recursion;
stats->tx_errors += ring_stats.hw_limitation;
stats->tx_errors += ring_stats.copy_bits_err;
stats->tx_errors += ring_stats.skb2sgl_err;
stats->tx_errors += ring_stats.map_sg_err;
stats->rx_bytes += ring_stats.rx_bytes;
stats->rx_packets += ring_stats.rx_pkts;
/*
 * l2_err contributes to three RX totals: dropped, errors, and CRC errors.
 * Checksum failures count only as rx_errors, not drops.
 */
stats->rx_dropped += ring_stats.l2_err;
stats->rx_errors += ring_stats.l2_err;
stats->rx_errors += ring_stats.l3l4_csum_err;
stats->rx_crc_errors += ring_stats.l2_err;
stats->multicast += ring_stats.rx_multicast;
stats->rx_length_errors += ring_stats.err_pkt_len;
/*
 * NOTE(review): non-contiguous extract; from here on the identifiers
 * (ice_ring_stats, xdp_rings, vsi) match the Intel ice driver -- these
 * fragments presumably come from its per-ring statistics helpers.  The
 * surrounding function bodies are not visible; comments below describe
 * only what each visible line does.
 */
struct ring_stats stats;
/* Macro fragment: computes a member's byte offset within struct ring_stats
 * (line ends in '\' -- part of a multi-line #define not visible here). */
offsetof(struct ring_stats, _member), \
/* Zero the embedded counter struct of an XDP ring's stats object. */
memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
/* Guard clauses: the ring, or its lazily-allocated stats, may be absent. */
if (!tx_ring || !tx_ring->ring_stats) {
if (!rx_ring || !rx_ring->ring_stats) {
/*
 * Writer side of the u64_stats seqcount protocol: bump pkts/bytes for the
 * TX ring, then the RX ring, each inside a begin/end critical section so
 * 32-bit readers never observe a torn 64-bit value.
 */
u64_stats_update_begin(&tx_ring->ring_stats->syncp);
u64_stats_add(&tx_ring->ring_stats->pkts, pkts);
u64_stats_add(&tx_ring->ring_stats->bytes, bytes);
u64_stats_update_end(&tx_ring->ring_stats->syncp);
u64_stats_update_begin(&rx_ring->ring_stats->syncp);
u64_stats_add(&rx_ring->ring_stats->pkts, pkts);
u64_stats_add(&rx_ring->ring_stats->bytes, bytes);
u64_stats_update_end(&rx_ring->ring_stats->syncp);
/*
 * Reader side: retry loop re-reads pkts/bytes until the sequence count is
 * stable (no concurrent writer).  The pattern appears twice -- presumably
 * once each for a TX-ring and an RX-ring helper; the enclosing do { ... }
 * openers are not visible in this extract.
 */
start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
*pkts = u64_stats_read(&ring->ring_stats->pkts);
*bytes = u64_stats_read(&ring->ring_stats->bytes);
} while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
*pkts = u64_stats_read(&ring->ring_stats->pkts);
*bytes = u64_stats_read(&ring->ring_stats->bytes);
} while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
/*
 * NOTE(review): extract of the ice driver's per-ring stats lifecycle.
 * Two parallel lazy-allocation sequences (TX then RX): reuse the cached
 * ice_ring_stats if one exists in the per-VSI cache array, otherwise
 * zero-allocate one, init its seqcount, and publish it with WRITE_ONCE
 * (presumably paired with a READ_ONCE on a lockless reader -- not visible
 * here; the error paths after the '!ring_stats' checks are also cut off).
 */
struct ice_ring_stats *ring_stats;
ring_stats = tx_ring_stats[i];
if (!ring_stats) {
ring_stats = kzalloc_obj(*ring_stats);
if (!ring_stats)
u64_stats_init(&ring_stats->syncp);
WRITE_ONCE(tx_ring_stats[i], ring_stats);
ring->ring_stats = ring_stats;
struct ice_ring_stats *ring_stats;
ring_stats = rx_ring_stats[i];
if (!ring_stats) {
ring_stats = kzalloc_obj(*ring_stats);
if (!ring_stats)
u64_stats_init(&ring_stats->syncp);
WRITE_ONCE(rx_ring_stats[i], ring_stats);
ring->ring_stats = ring_stats;
/*
 * TX-hang detection fragment: compare the current packet count (masked to
 * a non-negative int) with the previously recorded value; equality means
 * no forward progress since the last check.  prev_pkt assignment is
 * truncated mid-statement in this extract.
 */
struct ice_ring_stats *ring_stats;
ring_stats = tx_ring->ring_stats;
if (!ring_stats)
packets = ice_stats_read(ring_stats, pkts) & INT_MAX;
if (ring_stats->tx.prev_pkt == packets) {
ring_stats->tx.prev_pkt =
/* XDP ring setup: allocate a fresh stats object and attach it. */
struct ice_ring_stats *ring_stats;
ring_stats = kzalloc_obj(*ring_stats);
if (!ring_stats) {
xdp_ring->ring_stats = ring_stats;
/*
 * Teardown: defer the free past any RCU readers, then clear the pointer
 * so later guards ('if (!...->ring_stats)') see it as gone.  The sequence
 * appears twice -- presumably two distinct cleanup paths.
 */
kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
vsi->xdp_rings[i]->ring_stats = NULL;
kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
vsi->xdp_rings[i]->ring_stats = NULL;
/*
 * NOTE(review): extract of scattered call sites that bump individual
 * ice_ring_stats counters (ice_stats_inc looks like an increment helper;
 * its definition is not visible here).  Each site is preceded elsewhere
 * by a NULL guard on ring_stats, consistent with the lazy allocation
 * above -- the guards visible below confirm that convention.
 */
struct ice_ring_stats *stats = ring->ring_stats;
struct ice_ring_stats *stats = ring->ring_stats;
if (!ring || !ring->ring_stats)
if (!ring || !ring->ring_stats)
ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
if (rx_ring->ring_stats)
if (!tx_ring->ring_stats)
if (!rx_ring->ring_stats)
ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
ice_stats_inc(tx_ring->ring_stats, tx_linearize);
ice_stats_inc(tx_ring->ring_stats, tx_busy);
ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
/* Sentinel: -1 marks "no packet count recorded yet" for hang detection. */
tx_ring->ring_stats->tx.prev_pkt = -1;
ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
struct ice_ring_stats *ring_stats;
struct ice_ring_stats *ring_stats;
ice_stats_inc(xdp_ring->ring_stats, tx_busy);
ice_stats_inc(rx_ring->ring_stats, rx_non_eop_descs);
ice_stats_inc(xdp_ring->ring_stats, tx_busy);
ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);