#include "ice_iflib.h"
#include "ice_common_txrx.h"
/*
 * Shared ring-manipulation helpers. These operate directly on a queue
 * structure and are called by both the PF and subinterface wrappers below.
 */
static int _ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi);
static int _ice_ift_txd_credits_update(struct ice_softc *sc, struct ice_tx_queue *txq, bool clear);
static int _ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget);
static int _ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri);
static void _ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
uint64_t *paddrs, uint16_t count);
static void _ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq,
uint32_t pidx);
/*
 * iflib if_txrx callbacks for the PF interface; `arg` is the driver softc.
 */
static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi);
/*
 * iflib if_txrx callbacks for subinterfaces; `arg` is a struct ice_mirr_if.
 */
static int ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear);
static int ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi);
static void ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static int ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri);
static void ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru);
static void ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
/*
 * View a generic 32-byte flex Rx descriptor as the "NIC" profile layout so
 * profile-specific writeback fields (e.g. rss_hash) can be accessed.
 */
#define RX_FLEX_NIC(desc, field) \
(((struct ice_32b_rx_flex_desc_nic *)desc)->field)
/*
 * iflib Tx/Rx operations table for the PF interface. Each entry resolves
 * queues through the PF VSI (sc->pf_vsi) and delegates to the shared
 * _ice_ift_* helpers.
 */
struct if_txrx ice_txrx = {
.ift_txd_encap = ice_ift_txd_encap,
.ift_txd_flush = ice_ift_txd_flush,
.ift_txd_credits_update = ice_ift_txd_credits_update,
.ift_rxd_available = ice_ift_rxd_available,
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
.ift_rxd_refill = ice_ift_rxd_refill,
.ift_rxd_flush = ice_ift_rxd_flush,
.ift_txq_select_v2 = ice_ift_queue_select,
};
/*
 * iflib Tx/Rx operations table for subinterfaces (struct ice_mirr_if).
 * Queues are looked up through the subinterface's own VSI rather than the
 * PF softc. No custom queue selection is provided (ift_txq_select_v2 is
 * NULL), so iflib uses its default selection.
 */
struct if_txrx ice_subif_txrx = {
.ift_txd_credits_update = ice_ift_txd_credits_update_subif,
.ift_txd_encap = ice_ift_txd_encap_subif,
.ift_txd_flush = ice_ift_txd_flush_subif,
.ift_rxd_available = ice_ift_rxd_available_subif,
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get_subif,
.ift_rxd_refill = ice_ift_rxd_refill_subif,
.ift_rxd_flush = ice_ift_rxd_flush_subif,
.ift_txq_select_v2 = NULL,
};
/**
 * _ice_ift_txd_encap - write Tx data descriptors for one packet
 * @txq: Tx queue to place descriptors on
 * @pi: iflib packet info (DMA segments, offload flags, VLAN tag, start pidx)
 *
 * Writes one data descriptor per DMA segment beginning at pi->ipi_pidx,
 * applying checksum/TSO offload context and hardware VLAN insertion as
 * requested.  Sets EOP|RS on the final descriptor and records its index in
 * the RS tracking queue so credits_update can later detect completion.
 *
 * Returns 0 on success, or EFBIG for a sparse TSO mbuf chain that the stack
 * must defragment and resubmit.
 */
static int
_ice_ift_txd_encap(struct ice_tx_queue *txq, if_pkt_info_t pi)
{
int nsegs = pi->ipi_nsegs;
bus_dma_segment_t *segs = pi->ipi_segs;
struct ice_tx_desc *txd = NULL;
int i, j, mask, pidx_last;
u32 cmd, off;
cmd = off = 0;
i = pi->ipi_pidx;
/* Set up checksum/TSO offload; TSO consumes a context descriptor and
 * advances i past it via ice_tso_setup(). */
if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
if (pi->ipi_csum_flags & CSUM_TSO) {
if (ice_tso_detect_sparse(pi))
return (EFBIG);
i = ice_tso_setup(txq, pi);
}
ice_tx_setup_offload(txq, pi, &cmd, &off);
}
/* Request hardware insertion of the VLAN tag from the descriptor. */
if (pi->ipi_mflags & M_VLANTAG)
cmd |= ICE_TX_DESC_CMD_IL2TAG1;
/* Ring size is a power of two, so wrap with a mask. */
mask = txq->desc_count - 1;
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
txd = &txq->tx_base[i];
seglen = segs[j].ds_len;
txd->buf_addr = htole64(segs[j].ds_addr);
txd->cmd_type_offset_bsz =
htole64(ICE_TX_DESC_DTYPE_DATA
| ((u64)cmd << ICE_TXD_QW1_CMD_S)
| ((u64)off << ICE_TXD_QW1_OFFSET_S)
| ((u64)seglen << ICE_TXD_QW1_TX_BUF_SZ_S)
| ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));
txq->stats.tx_bytes += seglen;
pidx_last = i;
i = (i+1) & mask;
}
/* Mark the last descriptor: end-of-packet plus report-status, so the
 * hardware writes back completion for this packet. */
#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
txd->cmd_type_offset_bsz |=
htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));
/* Remember the RS descriptor index for credits_update to poll. */
txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
txq->tx_rs_pidx = (txq->tx_rs_pidx+1) & mask;
MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);
pi->ipi_new_pidx = i;
++txq->stats.tx_packets;
return (0);
}
/**
 * ice_ift_txd_encap - iflib Tx encap callback for the PF interface
 * @arg: driver softc
 * @pi: packet info from iflib
 *
 * Resolves the PF VSI Tx queue for pi->ipi_qsidx and hands off to the
 * shared encap helper.
 */
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;

	return _ice_ift_txd_encap(&sc->pf_vsi.tx_queues[pi->ipi_qsidx], pi);
}
/**
 * ice_ift_txd_flush - notify hardware of new Tx descriptors (PF)
 * @arg: driver softc
 * @txqid: Tx queue index within the PF VSI
 * @pidx: producer index to write to the queue's tail register
 */
static void
ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];

	wr32(&sc->hw, txq->tail, pidx);
}
/**
 * _ice_ift_txd_credits_update - count completed Tx descriptors
 * @sc: driver softc (unused; kept for wrapper symmetry)
 * @txq: Tx queue to check
 * @clear: if true, advance the tracking state; if false, only peek
 *
 * Walks the RS tracking queue (tx_rsq) populated by _ice_ift_txd_encap and
 * counts how many descriptors the hardware has completed since the last
 * call.  With clear == false this only reports whether at least one packet
 * completed (returns 1 or 0) without consuming state.
 *
 * Returns the number of descriptors reclaimed (or 0/1 in peek mode).
 */
static int
_ice_ift_txd_credits_update(struct ice_softc *sc __unused, struct ice_tx_queue *txq, bool clear)
{
qidx_t processed = 0;
qidx_t cur, prev, ntxd, rs_cidx;
int32_t delta;
bool is_done;
rs_cidx = txq->tx_rs_cidx;
/* No outstanding RS descriptors to check. */
if (rs_cidx == txq->tx_rs_pidx)
return (0);
cur = txq->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
if (!is_done)
return (0);
else if (clear == false)
return (1);
prev = txq->tx_cidx_processed;
ntxd = txq->desc_count;
do {
MPASS(prev != cur);
/* Distance from the previously processed index to this completed
 * RS index, accounting for ring wrap. */
delta = (int32_t)cur - (int32_t)prev;
if (delta < 0)
delta += ntxd;
MPASS(delta > 0);
processed += delta;
prev = cur;
rs_cidx = (rs_cidx + 1) & (ntxd-1);
if (rs_cidx == txq->tx_rs_pidx)
break;
cur = txq->tx_rsq[rs_cidx];
MPASS(cur != QIDX_INVALID);
is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
} while (is_done);
txq->tx_rs_cidx = rs_cidx;
txq->tx_cidx_processed = prev;
return (processed);
}
/**
 * ice_ift_txd_credits_update - iflib Tx credits callback for the PF
 * @arg: driver softc
 * @txqid: Tx queue index within the PF VSI
 * @clear: whether to consume completed descriptors or just peek
 */
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ice_softc *sc = (struct ice_softc *)arg;

	return _ice_ift_txd_credits_update(sc,
	    &sc->pf_vsi.tx_queues[txqid], clear);
}
/**
 * _ice_ift_rxd_available - count received packets ready for processing
 * @rxq: Rx queue to scan
 * @pidx: descriptor index to start scanning from
 * @budget: maximum number of packets to report
 *
 * Scans forward from pidx over descriptors the hardware has marked done
 * (DD set), counting completed packets (EOF set).  Stops at the first
 * not-done descriptor, at budget, or after nearly a full ring.
 */
static int
_ice_ift_rxd_available(struct ice_rx_queue *rxq, qidx_t pidx, qidx_t budget)
{
	union ice_32b_rx_flex_desc *desc;
	uint16_t status0;
	int idx = pidx;
	int ring_size = rxq->desc_count;
	int pkts = 0;

	while (pkts < ring_size - 1 && pkts < budget) {
		desc = &rxq->rx_base[idx];
		status0 = le16toh(desc->wb.status_error0);

		/* Stop at the first descriptor hardware hasn't finished. */
		if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)))
			break;
		idx++;
		if (idx == ring_size)
			idx = 0;
		/* Only end-of-frame descriptors count as whole packets. */
		if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))
			pkts++;
	}

	return (pkts);
}
/**
 * ice_ift_rxd_available - iflib Rx available callback for the PF
 * @arg: driver softc
 * @rxqid: Rx queue index within the PF VSI
 * @pidx: descriptor index to begin scanning at
 * @budget: maximum number of packets to report
 */
static int
ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
	struct ice_softc *sc = (struct ice_softc *)arg;

	return _ice_ift_rxd_available(&sc->pf_vsi.rx_queues[rxqid], pidx,
	    budget);
}
/**
 * ice_ift_rxd_pkt_get - iflib Rx packet-get callback for the PF
 * @arg: driver softc
 * @ri: iflib receive info to fill in from descriptor writeback
 */
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct ice_softc *sc = (struct ice_softc *)arg;

	return _ice_ift_rxd_pkt_get(&sc->pf_vsi.rx_queues[ri->iri_qsidx], ri);
}
/**
 * _ice_ift_rxd_pkt_get - extract one received packet from the descriptor ring
 * @rxq: Rx queue the packet arrived on
 * @ri: iflib receive info; fragments, length, VLAN, RSS info are filled in
 *
 * Walks descriptors from ri->iri_cidx until the end-of-frame descriptor,
 * recording one iflib fragment per descriptor.  Error, VLAN, checksum and
 * RSS fields are taken from the final (EOF) descriptor's writeback, which
 * is where the hardware reports them.
 *
 * Returns 0 on success, EBADMSG if the descriptor reports a receive error.
 */
static int
_ice_ift_rxd_pkt_get(struct ice_rx_queue *rxq, if_rxd_info_t ri)
{
union ice_32b_rx_flex_desc *cur;
u16 status0, plen, ptype;
bool eop;
size_t cidx;
int i;
cidx = ri->iri_cidx;
i = 0;
do {
MPASS(i < ICE_MAX_RX_SEGS);
cur = &rxq->rx_base[cidx];
status0 = le16toh(cur->wb.status_error0);
plen = le16toh(cur->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
/* Caller (rxd_available) guaranteed this descriptor is done. */
MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);
ri->iri_len += plen;
/* Clear the status so a stale DD bit is never re-read after wrap. */
cur->wb.status_error0 = 0;
eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
ri->iri_frags[i].irf_flid = 0;
ri->iri_frags[i].irf_idx = cidx;
ri->iri_frags[i].irf_len = plen;
if (++cidx == rxq->desc_count)
cidx = 0;
i++;
} while (!eop);
/* status0/cur below refer to the EOF descriptor from the last pass. */
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S)) {
rxq->stats.desc_errs++;
return (EBADMSG);
}
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
ri->iri_vtag = le16toh(cur->wb.l2tag1);
ri->iri_flags |= M_VLANTAG;
}
rxq->stats.rx_packets++;
rxq->stats.rx_bytes += ri->iri_len;
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
if ((if_getcapenable(ri->iri_ifp) & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
ri->iri_rsstype = ice_ptype_to_hash(ptype);
ri->iri_nfrags = i;
return (0);
}
/**
 * ice_ift_rxd_refill - iflib Rx refill callback for the PF
 * @arg: driver softc
 * @iru: iflib refill update (queue index, start pidx, buffer addresses)
 *
 * Resolves the PF VSI Rx queue and programs the supplied buffer physical
 * addresses into its descriptors.
 */
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[iru->iru_qsidx];

	_ice_ift_rxd_refill(rxq, iru->iru_pidx, iru->iru_paddrs,
	    iru->iru_count);
}
/**
 * _ice_ift_rxd_refill - program receive buffer addresses into descriptors
 * @rxq: Rx queue to refill
 * @pidx: first descriptor index to write
 * @paddrs: array of buffer physical addresses
 * @count: number of descriptors to fill
 *
 * Writes each buffer address (little-endian) into consecutive descriptors,
 * wrapping at the end of the ring.
 */
static void
_ice_ift_rxd_refill(struct ice_rx_queue *rxq, uint32_t pidx,
    uint64_t *paddrs, uint16_t count)
{
	uint32_t ndesc = rxq->desc_count;
	uint32_t idx = pidx;
	int j;

	for (j = 0; j < count; j++) {
		rxq->rx_base[idx].read.pkt_addr = htole64(paddrs[j]);
		idx++;
		if (idx == ndesc)
			idx = 0;
	}
}
/**
 * ice_ift_rxd_flush - iflib Rx flush callback for the PF
 * @arg: driver softc
 * @rxqid: Rx queue index within the PF VSI
 * @flidx: free-list index (unused; single free list)
 * @pidx: last valid descriptor index to publish to hardware
 */
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
    qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;

	_ice_ift_rxd_flush(sc, &sc->pf_vsi.rx_queues[rxqid], (uint32_t)pidx);
}
/**
 * _ice_ift_rxd_flush - write the Rx tail register
 * @sc: driver softc (provides register access)
 * @rxq: Rx queue whose tail is updated
 * @pidx: descriptor index to publish
 */
static void
_ice_ift_rxd_flush(struct ice_softc *sc, struct ice_rx_queue *rxq, uint32_t pidx)
{
	struct ice_hw *hw = &sc->hw;

	wr32(hw, rxq->tail, pidx);
}
/**
 * ice_ift_queue_select - pick a Tx queue for an outgoing packet
 * @arg: driver softc
 * @m: the mbuf being transmitted
 * @pi: iflib packet info (used for IP TOS / address family flags)
 *
 * Without multiple traffic classes configured, this is plain RSS-style
 * spreading by flowid.  With DCB TCs active, the packet's traffic class is
 * derived from the DSCP field (in DSCP PFC mode) or the VLAN priority, and
 * a queue is chosen from that TC's queue range.
 */
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_vsi *vsi = &sc->pf_vsi;
u16 tc_base_queue, tc_qcount;
u8 up, tc;
#ifdef ALTQ
/* ALTQ needs everything on one queue to preserve its ordering. */
struct ifnet *ifp = (struct ifnet *)iflib_get_ifp(sc->ctx);
if (if_altq_is_enabled(ifp))
return (0);
#endif
/* Single TC: simple flowid-based spreading across all queues. */
if (!ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) {
if (M_HASHTYPE_GET(m)) {
return (m->m_pkthdr.flowid % sc->pf_vsi.num_tx_queues);
} else
return (0);
}
tc = 0;
local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg;
#if defined(INET) || defined(INET6)
/* DSCP PFC mode: map the DSCP value (upper 6 bits of TOS) to a TC. */
if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
(pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) {
u8 dscp_val = pi->ipi_ip_tos >> 2;
tc = local_dcbx_cfg->dscp_map[dscp_val];
} else
#endif
/* Otherwise map the VLAN user priority to a TC, if tagged. */
if (m->m_flags & M_VLANTAG) {
up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
tc = local_dcbx_cfg->etscfg.prio_table[up];
}
/* Spread by flowid within the selected TC's queue range. */
tc_base_queue = vsi->tc_info[tc].qoffset;
tc_qcount = vsi->tc_info[tc].qcount_tx;
if (M_HASHTYPE_GET(m))
return ((m->m_pkthdr.flowid % tc_qcount) + tc_base_queue);
else
return (tc_base_queue);
}
/**
 * ice_ift_txd_credits_update_subif - Tx credits callback for subinterfaces
 * @arg: subinterface (struct ice_mirr_if)
 * @txqid: Tx queue index within the subinterface VSI
 * @clear: whether to consume completed descriptors or just peek
 */
static int
ice_ift_txd_credits_update_subif(void *arg, uint16_t txqid, bool clear)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;

	return _ice_ift_txd_credits_update(mif->back,
	    &mif->vsi->tx_queues[txqid], clear);
}
/**
 * ice_ift_txd_encap_subif - Tx encap callback for subinterfaces
 * @arg: subinterface (struct ice_mirr_if)
 * @pi: packet info from iflib
 */
static int
ice_ift_txd_encap_subif(void *arg, if_pkt_info_t pi)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;

	return _ice_ift_txd_encap(&mif->vsi->tx_queues[pi->ipi_qsidx], pi);
}
/**
 * ice_ift_txd_flush_subif - Tx tail-bump callback for subinterfaces
 * @arg: subinterface (struct ice_mirr_if)
 * @txqid: Tx queue index within the subinterface VSI
 * @pidx: producer index to write to the queue's tail register
 */
static void
ice_ift_txd_flush_subif(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_tx_queue *txq = &mif->vsi->tx_queues[txqid];

	wr32(&mif->back->hw, txq->tail, pidx);
}
/**
 * ice_ift_rxd_available_subif - Rx available callback for subinterfaces
 * @arg: subinterface (struct ice_mirr_if)
 * @rxqid: Rx queue index within the subinterface VSI
 * @pidx: descriptor index to begin scanning at
 * @budget: maximum number of packets to report
 */
static int
ice_ift_rxd_available_subif(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;

	return _ice_ift_rxd_available(&mif->vsi->rx_queues[rxqid], pidx,
	    budget);
}
/**
 * ice_ift_rxd_pkt_get_subif - Rx packet-get callback for subinterfaces
 * @arg: subinterface (struct ice_mirr_if)
 * @ri: iflib receive info to fill in from descriptor writeback
 */
static int
ice_ift_rxd_pkt_get_subif(void *arg, if_rxd_info_t ri)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;

	return _ice_ift_rxd_pkt_get(&mif->vsi->rx_queues[ri->iri_qsidx], ri);
}
/**
 * ice_ift_rxd_refill_subif - Rx refill callback for subinterfaces
 * @arg: subinterface (struct ice_mirr_if)
 * @iru: iflib refill update (queue index, start pidx, buffer addresses)
 */
static void
ice_ift_rxd_refill_subif(void *arg, if_rxd_update_t iru)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;
	struct ice_rx_queue *rxq = &mif->vsi->rx_queues[iru->iru_qsidx];

	_ice_ift_rxd_refill(rxq, iru->iru_pidx, iru->iru_paddrs,
	    iru->iru_count);
}
/**
 * ice_ift_rxd_flush_subif - Rx tail-bump callback for subinterfaces
 * @arg: subinterface (struct ice_mirr_if)
 * @rxqid: Rx queue index within the subinterface VSI
 * @flidx: free-list index (unused; single free list)
 * @pidx: last valid descriptor index to publish to hardware
 */
static void
ice_ift_rxd_flush_subif(void *arg, uint16_t rxqid, uint8_t flidx __unused,
    qidx_t pidx)
{
	struct ice_mirr_if *mif = (struct ice_mirr_if *)arg;

	_ice_ift_rxd_flush(mif->back, &mif->vsi->rx_queues[rxqid], pidx);
}