#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>
#include "opt_acpi.h"
#include "opt_platform.h"
#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"
#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"
#include "dpaa2_frame.h"
/* Generic bit/align helpers. */
#define BIT(x) (1ul << (x))
#define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* Frame dequeue response carries no frame when bit 4 of the status is clear. */
#define IS_NULL_RESPONSE(stat) ((((stat) >> 4) & 1) == 0)
#define ALIGN_UP(x, y) roundup2((x), (y))
#define ALIGN_DOWN(x, y) rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x) ALIGN_UP((x), CACHE_LINE_SIZE)
/*
 * Softc mutex helpers. DPNI_LOCK asserts the lock is NOT already owned
 * (the lock is non-recursive); DPNI_UNLOCK asserts it IS owned.
 */
#define DPNI_LOCK(__sc) do { \
	mtx_assert(&(__sc)->lock, MA_NOTOWNED); \
	mtx_lock(&(__sc)->lock); \
} while (0)
#define DPNI_UNLOCK(__sc) do { \
	mtx_assert(&(__sc)->lock, MA_OWNED); \
	mtx_unlock(&(__sc)->lock); \
} while (0)
#define DPNI_LOCK_ASSERT(__sc) do { \
	mtx_assert(&(__sc)->lock, MA_OWNED); \
} while (0)
/* Tx ring for a given (channel, traffic class) pair. */
#define DPAA2_TX_RING(sc, chan, tc) \
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
/* Per-pass budgets: max frames processed per cleanup/Tx/Rx iteration. */
#define DPAA2_CLEAN_BUDGET 64
#define DPAA2_TX_BUDGET 128
#define DPAA2_RX_BUDGET 256
/* DPNI interrupt index and status bits reported via DPNI_IRQ_INDEX. */
#define DPNI_IRQ_INDEX 0
#define DPNI_IRQ_LINK_CHANGED 1
#define DPNI_IRQ_EP_CHANGED 2
/* Maximum frame length accepted on Rx (jumbo + VLAN tag, without CRC). */
#define DPAA2_ETH_MFL (ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN - \
    ETHER_CRC_LEN)
/* Minimum DPNI API version this driver supports. */
#define DPNI_VER_MAJOR 7
#define DPNI_VER_MINOR 0
/* Rx buffer alignment: 256 bytes on WRIOP v1.0.0, 64 bytes afterwards. */
#define BUF_ALIGN_V1 256
#define BUF_ALIGN 64
/* Software/hardware annotation area sizes in frame buffers. */
#define BUF_SWA_SIZE 64
#define BUF_RX_HWA_SIZE 64
#define BUF_TX_HWA_SIZE 128
/* Sizes of the per-queue software buffer rings. */
#define DPAA2_RX_BUFRING_SZ (4096u)
#define DPAA2_RXE_BUFRING_SZ (1024u)
#define DPAA2_TXC_BUFRING_SZ (4096u)
/* DMA areas for the QoS key config and Rx classifier tables. */
#define ETH_QOS_KCFG_BUF_SIZE (PAGE_SIZE)
#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)
/* Buffer layout option flags (which fields to enable in frame buffers). */
#define BUF_LOPT_TIMESTAMP 0x1
#define BUF_LOPT_PARSER_RESULT 0x2
#define BUF_LOPT_FRAME_STATUS 0x4
#define BUF_LOPT_PRIV_DATA_SZ 0x8
#define BUF_LOPT_DATA_ALIGN 0x10
#define BUF_LOPT_DATA_HEAD_ROOM 0x20
#define BUF_LOPT_DATA_TAIL_ROOM 0x40
/*
 * Layout of the 64-bit buffer cookie: physical address in the low bits,
 * with channel/index metadata packed into the upper bits.
 */
#define DPAA2_NI_BUF_ADDR_MASK (0x1FFFFFFFFFFFFul)
#define DPAA2_NI_BUF_CHAN_MASK (0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT (60)
#define DPAA2_NI_BUF_IDX_MASK (0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT (49)
#define DPAA2_NI_TX_IDX_MASK (0x7u)
#define DPAA2_NI_TX_IDX_SHIFT (57)
#define DPAA2_NI_TXBUF_IDX_MASK (0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
/* DPNI creation option: QoS table supports key masking. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10
/* Header fields available for ingress traffic distribution (hashing). */
#define DPAA2_ETH_DIST_ETHDST BIT(0)
#define DPAA2_ETH_DIST_ETHSRC BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
#define DPAA2_ETH_DIST_VLAN BIT(3)
#define DPAA2_ETH_DIST_IPSRC BIT(4)
#define DPAA2_ETH_DIST_IPDST BIT(5)
#define DPAA2_ETH_DIST_IPPROTO BIT(6)
#define DPAA2_ETH_DIST_L4SRC BIT(7)
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
/* RXNFC-style hash field flags (mirrors the Linux ethtool encoding). */
#define RXH_L2DA (1 << 1)
#define RXH_VLAN (1 << 2)
#define RXH_L3_PROTO (1 << 3)
#define RXH_IP_SRC (1 << 4)
#define RXH_IP_DST (1 << 5)
#define RXH_L4_B_0_1 (1 << 6)
#define RXH_L4_B_2_3 (1 << 7)
#define RXH_DISCARD (1 << 31)
/* Checksum offloads advertised via if_sethwassist(). */
#define DPAA2_CSUM_TX_OFFLOAD (CSUM_IP | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)
/* Default hash: 4-tuple (IP src/dst + L4 src/dst ports). */
#define DPAA2_RXH_DEFAULT (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
/*
 * DPAA2 resources a DPNI needs from its resource container: one MCP
 * (optional), up to 16 DPIO/DPCON pairs (one pair per channel; all but
 * the first optional), and a single mandatory buffer pool (DPBP).
 */
struct resource_spec dpaa2_ni_spec[] = {
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/* A single buffer pool is mandatory. */
	{ DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE },
	/* First DPCON is mandatory; the rest are optional. */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL },
	RESOURCE_SPEC_END
};
/*
 * Header fields supported by the hardware key generator for ingress
 * traffic distribution. Each entry maps an RXNFC-style flag to the
 * network protocol/field identifiers used by the key-config profile,
 * plus the field size in bytes.
 *
 * NOTE(review): the ETHSRC/ETHTYPE entries intentionally leave
 * .rxnfc_field zero-initialized — they are selectable by .id only.
 */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 destination MAC address. */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		/* L2 source MAC address. */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* Ethertype. */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN TCI. */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP source address. */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		/* IP destination address. */
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		/* IP protocol number. */
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* L4 source port (UDP field id covers TCP too in WRIOP). */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		/* L4 destination port. */
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
/*
 * DPNI hardware statistics exported via sysctl. Each counter lives at
 * (page, cnt) in the DPNI statistics pages read by
 * dpaa2_ni_collect_stats().
 */
static struct dpni_stat {
	int page;	/* DPNI statistics page number */
	int cnt;	/* counter index within the page */
	char *name;	/* sysctl leaf name */
	char *desc;	/* sysctl description */
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* Page 0: ingress counters. */
	{ 0, 0, "in_all_frames", "All accepted ingress frames" },
	{ 0, 1, "in_all_bytes", "Bytes in all accepted ingress frames" },
	{ 0, 2, "in_multi_frames", "Multicast accepted ingress frames" },
	/* Page 1: egress counters. */
	{ 1, 0, "eg_all_frames", "All egress frames transmitted" },
	{ 1, 1, "eg_all_bytes", "Bytes in all frames transmitted" },
	{ 1, 2, "eg_multi_frames", "Multicast egress frames transmitted" },
	/* Page 2: ingress drop counters. */
	{ 2, 0, "in_filtered_frames", "All ingress frames discarded due to "
	    "filtering" },
	{ 2, 1, "in_discarded_frames", "All frames discarded due to errors" },
	{ 2, 2, "in_nobuf_discards", "Discards on ingress side due to buffer "
	    "depletion in DPNI buffer pools" },
};
/*
 * Context accumulated across dpaa2_ni_rx() calls while draining a batch
 * of Rx frames: a chain of received mbufs handed to the stack when the
 * batch ends.
 */
struct dpaa2_ni_rx_ctx {
	struct mbuf *head;	/* first mbuf of the accumulated chain */
	struct mbuf *tail;	/* last mbuf; next frame is appended here */
	int cnt;		/* number of frames in the chain */
	bool last;		/* true once the final frame was processed */
};
/* Device interface. */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);
/* DPNI setup helpers. */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
/* Frame queue (flow) configuration. */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);
/* DPNI configuration commands. */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
/* Network interface (ifnet) callbacks. */
static void dpaa2_ni_init(void *);
static int dpaa2_ni_transmit(if_t , struct mbuf *);
static void dpaa2_ni_qflush(if_t );
static int dpaa2_ni_ioctl(if_t , u_long, caddr_t);
static int dpaa2_ni_update_mac_filters(if_t );
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
/* Interrupt and media handling. */
static void dpaa2_ni_intr(void *);
static void dpaa2_ni_miibus_statchg(device_t);
static int dpaa2_ni_media_change(if_t );
static void dpaa2_ni_media_status(if_t , struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);
/* Datapath: frame consumption and cleanup. */
static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_cleanup_task(void *, int);
static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
    uint32_t *);
static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
/* Sysctl handlers. */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
/*
 * DPNI devices are enumerated by the parent DPAA2 resource container,
 * so probing is unconditional: just set a description.
 */
static int
dpaa2_ni_probe(device_t dev)
{
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}
/*
 * Attach a DPNI: allocate DPAA2 resources, create the ifnet, open the
 * resource container and the network interface over the MC portal,
 * configure the DPNI (queues, channels, IRQs, sysctls, capabilities)
 * and finally attach to the ethernet layer.
 *
 * NOTE(review): every failure path returns ENXIO (not the underlying
 * errno) after closing whatever MC objects were opened.
 */
static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	/* Reset the softc fields used before/independently of setup. */
	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* Debug counters for anomalous frame handling. */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/*
	 * Use the parent's MC portal for commands. NOTE(review): the MCP
	 * resource is RF_OPTIONAL in dpaa2_ni_spec but is dereferenced
	 * here without a NULL check — presumably it is always granted in
	 * practice; confirm against the resource container.
	 */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate the network interface and set its callbacks. */
	ifp = if_alloc(IFT_ETHER);
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_sethwassist(sc->ifp, DPAA2_CSUM_TX_OFFLOAD);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open the resource container and then the DPNI object itself. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Taskqueue used to re-seed the buffer pool in the background. */
	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_if_caps(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup interface capabilities: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}
/*
 * ifmedia status callback used when the connected DPMAC operates in
 * FIXED mode: report the configured fixed media as valid and active,
 * and promote an unknown link state to UP on the first query.
 */
static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);

	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;
	ifmr->ifm_current = ifmr->ifm_active;

	/* Fixed link: consider it up as soon as anyone asks. */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	DPNI_UNLOCK(sc);
}
/*
 * Configure a fixed-link (no PHY) media object for this interface,
 * advertising a single 1000BASE-T media word.
 */
static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	const int media = IFM_ETHER | IFM_1000_T;

	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
	    dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, media, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, media);
}
static int
dpaa2_ni_detach(device_t dev)
{
	/*
	 * NOTE(review): detach is a stub — resources allocated in attach
	 * (bus resources, mutex, ifnet, taskqueue, channels) are not
	 * released here. The driver presumably does not support detach;
	 * confirm before allowing module unload.
	 */
	return (0);
}
/*
 * Configure the DPNI object: verify the API version, reset the object,
 * read its attributes, set up the buffer layout and DMA resources,
 * discover the endpoint this DPNI is connected to (DPMAC/DPNI/DPMUX/
 * DPSW) and configure link handling accordingly (MII attach for a
 * PHY-mode DPMAC, fixed-link otherwise), then program pause frames,
 * the QoS table, the broadcast MAC filter entry and the maximum frame
 * length.
 *
 * Returns 0 on success or an errno from the failing step; MC objects
 * opened here are closed on every path.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check the DPNI API version against the minimum we support. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Start from a clean hardware state. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
		/* %#x already prints the "0x" prefix (was "0x%#x" -> 0x0x). */
		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	error = dpaa2_ni_set_buf_layout(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		goto close_ni;
	}
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto close_ni;
	}

	/* Find the object this DPNI is connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		/* Not fatal: continue without a known endpoint. */
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * Query the connected DPMAC to decide between PHY
			 * and fixed-link operation. If it cannot be opened
			 * (e.g. it lives in another DPRC), assume fixed.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;
			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
				    sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				/* Locate the PHY and attach via miibus. */
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					/* ENODEV: no PHY location; tolerated. */
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
			   ep2_desc.type == DPAA2_DEV_MUX ||
			   ep2_desc.type == DPAA2_DEV_SW) {
			/* Back-to-back connection: treat as a fixed link. */
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Accept broadcast frames. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Create QBMan channels for this DPNI. Each channel needs one DPIO and
 * one DPCON resource, so the channel count is the smaller of the two
 * granted counts, capped by the driver limit and the number of DPNI
 * queues. The single RxError queue is serviced by channel 0.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t iodev, condev, bpdev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint32_t io_n, con_n, num_chan;
	uint32_t i;
	int error;

	/* Count contiguously-granted DPIO resources. */
	for (io_n = 0; io_n < DPAA2_NI_IO_RES_NUM; io_n++) {
		if (!sc->res[DPAA2_NI_IO_RID(io_n)]) {
			break;
		}
	}
	/* Count contiguously-granted DPCON resources. */
	for (con_n = 0; con_n < DPAA2_NI_CON_RES_NUM; con_n++) {
		if (!sc->res[DPAA2_NI_CON_RID(con_n)]) {
			break;
		}
	}

	num_chan = MIN(io_n, con_n);
	num_chan = MIN(num_chan, DPAA2_MAX_CHANNELS);
	sc->chan_n = MIN(num_chan, sc->attr.num.queues);

	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
	    "chan_n=%d", __func__, sc->chan_n));

	device_printf(dev, "channels=%d\n", sc->chan_n);

	/* All channels share the single buffer pool. */
	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

	for (i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);

		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
			    "error=%d, chan_id=%d\n", __func__, error, i);
			return (error);
		}
	}

	/* There is exactly one RxError queue; attach it to channel 0. */
	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}
/*
 * Bind the DPNI to its datapath resources: attach the buffer pool,
 * configure ingress traffic distribution and error behavior, set up
 * every Rx/Tx/RxError frame queue, and fetch the Tx queuing
 * destination ID used on the transmit path.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Attach the (single, primary) buffer pool to the DPNI. */
	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0; /* not a backup pool */
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Hash-based distribution; EOPNOTSUPP is tolerated (single queue). */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Discard erroneous ingress frames instead of delivering them. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure all Rx queues and the Tx confirmation queue per channel. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Tx queuing destination ID used when enqueuing frames. */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Enable hash-based ingress traffic distribution using the default
 * 4-tuple (IP src/dst + L4 src/dst port) field set.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	int error;

	error = dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT);

	return (error);
}
/*
 * Configure a single Rx frame queue: read back its FQID and redirect
 * its frame-arrival notifications to the DPCON channel servicing the
 * flow, passing the fq pointer as the 64-bit user context so the
 * datapath can recover it from dequeued frames.
 */
static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* DPCON associated with the channel that services this flow. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	/* Deliver notifications to our DPCON with the fq as user context. */
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uint64_t) fq);
	}

	/*
	 * Close with an explicit NI token, matching the error path and
	 * every other open/close pair in this file (the success path
	 * previously relied on the token left in cmd).
	 */
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Configure the Tx side of one flow: for every Tx traffic class, read
 * the queue's FQID, create a software buffer ring and pre-seed it with
 * DMA-mapped Tx buffers; then configure the Tx confirmation queue to
 * notify our DPCON with the fq pointer as user context.
 *
 * Returns 0 on success or an errno; MC objects opened here are closed
 * on every path.
 */
static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_channel *ch = fq->chan;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* DPCON associated with the channel that services this flow. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings: one per traffic class. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate the software Tx ring buffer. */
		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
		    &tx->lock);
		if (tx->br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring buffer"
			    " (2) fqid=%d\n", __func__, tx->fqid);
			/*
			 * Report the allocation failure: "error" still holds
			 * 0 from the last successful command here, so jumping
			 * without setting it would return success.
			 */
			error = ENOMEM;
			goto close_ni;
		}

		/* Pre-allocate and seed the Tx buffers (M_WAITOK can't fail). */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);

			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);

			/*
			 * NOTE(review): a seeding failure is not treated as
			 * fatal — the buffer is enqueued regardless and
			 * "error" is overwritten by the next command;
			 * presumably the Tx path copes with unseeded
			 * buffers — confirm.
			 */
			error = dpaa2_buf_seed_txb(dev, buf);

			buf_ring_enqueue(tx->br, buf);
		}

		tx_rings_n++;
	}

	/* All Tx queues of the flow share the same Tx confirmation queue. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	/* Deliver TxConf notifications to our DPCON. */
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Configure the DPNI Rx error frame queue.
 *
 * Obtains the RxErr queue configuration from the MC firmware and re-targets
 * the queue at the DPCON channel associated with "fq", so erroneous frames
 * are delivered through the same software portal notification path as
 * regular traffic.
 *
 * Returns 0 on success or an MC command error code.
 */
static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
device_t pdev = device_get_parent(dev);
device_t child = dev;
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
struct dpaa2_devinfo *con_info;
struct dpaa2_ni_queue_cfg queue_cfg = {0};
struct dpaa2_cmd cmd;
uint16_t rc_token, ni_token;
int error;
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
if (error) {
device_printf(dev, "%s: failed to open resource container: "
"id=%d, error=%d\n", __func__, rcinfo->id, error);
goto err_exit;
}
error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
if (error) {
device_printf(dev, "%s: failed to open network interface: "
"id=%d, error=%d\n", __func__, dinfo->id, error);
goto close_rc;
}
/* DPCON (concentrator) device the channel is bound to. */
con_info = device_get_ivars(fq->chan->con_dev);
/* Read back the current RxErr queue configuration (provides fqid). */
queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
queue_cfg.tc = fq->tc;
queue_cfg.idx = fq->flowid;
error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
if (error) {
device_printf(dev, "%s: failed to obtain RxErr queue "
"configuration\n", __func__);
goto close_ni;
}
fq->fqid = queue_cfg.fqid;
/*
 * Point the queue at our DPCON and stash "fq" in the user context so
 * the dequeue path can find this frame queue again.
 * NOTE(review): priority 1 here vs. 0 for the Tx confirmation queue —
 * presumably intentional for error frames; confirm against the DPNI docs.
 */
queue_cfg.dest_id = con_info->id;
queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
queue_cfg.priority = 1;
queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
queue_cfg.options =
DPAA2_NI_QUEUE_OPT_USER_CTX |
DPAA2_NI_QUEUE_OPT_DEST;
error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
if (error) {
device_printf(dev, "%s: failed to update RxErr queue "
"configuration\n", __func__);
goto close_ni;
}
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
return (0);
close_ni:
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
return (error);
}
/*
 * Allocate an MSI vector for the DPNI, hook up the interrupt handler and
 * enable link/endpoint change notifications from the MC firmware.
 *
 * Returns 0 on success, or an errno/MC error code otherwise.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		goto close_ni;
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		/* Bug fix: "error" was still 0 here, reporting success. */
		error = ENXIO;
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		/* Bug fix: "error" was still 0 here, reporting success. */
		error = ENXIO;
		goto close_ni;
	}

	/* Request notifications about link and endpoint changes only. */
	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Allocate MSI vectors for the DPNI and record the IRQ resource IDs.
 *
 * Returns 0 on success or EINVAL if MSI allocation fails.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		/*
		 * Bug fix: the message printed DPAA2_IO_MSI_COUNT (copy-paste
		 * from the DPIO driver) while the comparison is against
		 * DPAA2_NI_MSI_COUNT.
		 */
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	/* MSI resource IDs start at 1 (0 is the legacy INTx rid). */
	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}
/*
 * Push the interface checksum-offload capabilities (IFCAP_RXCSUM/TXCSUM)
 * down to the DPNI via the MC firmware.
 *
 * Returns 0 on success or an MC command error code.
 */
static int
dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
{
	bool en_rxcsum, en_txcsum;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	en_rxcsum = if_getcapenable(sc->ifp) &
	    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
	en_txcsum = if_getcapenable(sc->ifp) &
	    (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* L3/L4 checksum validation (Rx path). */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		goto close_ni;
	}

	/* L3/L4 checksum generation (Tx path). */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "%s: L3/L4 checksum validation %s\n",
		    __func__, en_rxcsum ? "enabled" : "disabled");
		device_printf(dev, "%s: L3/L4 checksum generation %s\n",
		    __func__, en_txcsum ? "enabled" : "disabled");
	}

	/*
	 * Consistency fix: close the NI with an explicit token like every
	 * other exit path in this file (was a bare "&cmd").
	 */
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Push the interface promiscuity flags (IFF_PROMISC/IFF_ALLMULTI) down to
 * the DPNI via the MC firmware.
 *
 * Returns 0 on success or an MC command error code.
 */
static int
dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
{
	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Full promiscuity implies multicast promiscuity as well. */
	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
	    en_promisc ? true : en_allmulti);
	if (error) {
		device_printf(dev, "%s: failed to %s multicast promiscuous "
		    "mode\n", __func__, en_allmulti ? "enable" : "disable");
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
	if (error) {
		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
		    __func__, en_promisc ? "enable" : "disable");
		goto close_ni;
	}

	/*
	 * Consistency fix: close the NI with an explicit token like every
	 * other exit path in this file (was a bare "&cmd").
	 */
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Create the sysctl tree for the DPNI: hardware statistics ("stats" node),
 * driver software counters and per-channel Tx counters ("channels" node).
 *
 * Always returns 0.
 */
static int
dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node, *node2;
	struct sysctl_oid_list *parent, *parent2;
	char cbuf[128];

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* Add hardware (DPNI) statistics. */
	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
	parent = SYSCTL_CHILDREN(node);
	/*
	 * Fix: the loop index was a function-scope "int i" shadowed by the
	 * channels loop below; declare it in the for statement instead.
	 * The index is deliberately used as the OID number so the collector
	 * can tell the statistics apart.
	 */
	for (int i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
		    "IU", dpni_stat_sysctls[i].desc);
	}

	/* Driver software counters. */
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
	    CTLFLAG_RD, &sc->rx_anomaly_frames,
	    "Rx frames in the buffers outside of the buffer pools");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
	    CTLFLAG_RD, &sc->rx_single_buf_frames,
	    "Rx frames in single buffers");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
	    "Rx frames in scatter/gather list");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
	    "Enqueue rejected by QMan");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
	    "QMan IEOI error");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
	    CTLFLAG_RD, &sc->tx_single_buf_frames,
	    "Tx single buffer frames");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
	    CTLFLAG_RD, &sc->tx_sg_frames,
	    "Tx S/G frames");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
	    "IU", "number of Rx buffers in the buffer pool");
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
	    "IU", "number of free Rx buffers in the buffer pool");

	/* Per-channel Tx counters under a "channels.<N>" sub-tree. */
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
	parent = SYSCTL_CHILDREN(node);

	for (int i = 0; i < sc->chan_n; i++) {
		snprintf(cbuf, sizeof(cbuf), "%d", i);
		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
		parent2 = SYSCTL_CHILDREN(node2);

		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
		    "Tx frames counter");
		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
		    "Tx dropped counter");
	}

	return (0);
}
static int
dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
{
device_t dev = sc->dev;
int error;
KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
("unexpected buffer alignment: %d\n", sc->buf_align));
error = bus_dma_tag_create(
bus_get_dma_tag(dev),
PAGE_SIZE, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
DPAA2_CLASSIFIER_DMA_SIZE, 1,
DPAA2_CLASSIFIER_DMA_SIZE, 0,
NULL, NULL,
&sc->rxd_dmat);
if (error) {
device_printf(dev, "%s: failed to create DMA tag for Rx "
"distribution key\n", __func__);
return (error);
}
error = bus_dma_tag_create(
bus_get_dma_tag(dev),
PAGE_SIZE, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
ETH_QOS_KCFG_BUF_SIZE, 1,
ETH_QOS_KCFG_BUF_SIZE, 0,
NULL, NULL,
&sc->qos_dmat);
if (error) {
device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
__func__);
return (error);
}
return (0);
}
/*
 * Configure the Tx, TxConf and Rx buffer layouts of the DPNI, and derive the
 * driver's Rx/Tx buffer size and alignment from the WRIOP version.
 *
 * Returns 0 on success or an MC command error code.
 */
static int
dpaa2_ni_set_buf_layout(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_buf_layout buf_layout = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Bug fix: the message said "failed to open DPMAC" (copy-paste). */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(sc->dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Pre-1.x WRIOP revisions need the stricter V1 alignment. */
	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
	    ? BUF_ALIGN_V1 : BUF_ALIGN;
	sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
	if (bootverbose) {
		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
		    sc->buf_sz, sc->buf_align);
	}

	/* Tx queue layout: software annotation + status + timestamp. */
	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
	buf_layout.pd_size = BUF_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.pass_frame_status = true;
	buf_layout.options =
	    BUF_LOPT_PRIV_DATA_SZ |
	    BUF_LOPT_TIMESTAMP |
	    BUF_LOPT_FRAME_STATUS;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set Tx buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Tx confirmation queue layout: status + timestamp only. */
	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
	buf_layout.options =
	    BUF_LOPT_TIMESTAMP |
	    BUF_LOPT_FRAME_STATUS;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Tx data offset is needed to compute the Rx head room below. */
	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
	if (error) {
		device_printf(dev, "%s: failed to obtain Tx data offset\n",
		    __func__);
		goto close_ni;
	}
	if (bootverbose) {
		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
	}
	if ((sc->tx_data_off % 64) != 0) {
		device_printf(dev, "Tx data offset (%d) is not a multiplication "
		    "of 64 bytes\n", sc->tx_data_off);
	}

	/*
	 * Rx queue layout: head room is the Tx data offset minus the hardware
	 * and software annotation areas that precede the frame data.
	 */
	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
	buf_layout.pd_size = BUF_SWA_SIZE;
	buf_layout.fd_align = sc->buf_align;
	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
	buf_layout.tail_size = 0;
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.pass_timestamp = true;
	buf_layout.options =
	    BUF_LOPT_PRIV_DATA_SZ |
	    BUF_LOPT_DATA_ALIGN |
	    BUF_LOPT_DATA_HEAD_ROOM |
	    BUF_LOPT_DATA_TAIL_ROOM |
	    BUF_LOPT_FRAME_STATUS |
	    BUF_LOPT_PARSER_RESULT |
	    BUF_LOPT_TIMESTAMP;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set Rx buffer layout\n",
		    __func__);
		goto close_ni;
	}

	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Enable symmetric pause frames on the DPNI link: set the PAUSE option and
 * clear ASYM_PAUSE in the link configuration, caching the result in
 * sc->link_options.
 *
 * Returns 0 on success or an MC command error code.
 */
static int
dpaa2_ni_set_pause_frame(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_link_cfg link_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Bug fix: the message said "failed to open DPMAC" (copy-paste). */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(sc->dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain link configuration: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Enable both Rx and Tx pause frames; asymmetric pause is disabled. */
	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;

	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set link configuration: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	sc->link_options = link_cfg.options;
	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Configure the QoS (ingress traffic classification) table of the DPNI.
 *
 * Allocates a DMA buffer for the QoS key configuration, installs an empty
 * QoS table pointing at it, and clears any existing entries. A no-op
 * (returns 0) when the DPNI has a single Rx traffic class or no key
 * masking support.
 *
 * Returns 0 on success or an errno/MC error code.
 */
static int
dpaa2_ni_set_qos_table(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_qos_table tbl;
	struct dpaa2_buf *buf = &sc->qos_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	if (sc->attr.num.rx_tcs == 1 ||
	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
		if (bootverbose) {
			device_printf(dev, "Ingress traffic classification is "
			    "not supported\n");
		}
		return (0);
	}

	/*
	 * Allocate a buffer visible to the device to hold the QoS table key
	 * configuration.
	 */
	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->qos_dmat;
	}
	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error) {
		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
		    "configuration\n", __func__);
		goto err_exit;
	}
	/*
	 * NOTE(review): on load failure the memory allocated above is not
	 * freed here; presumably released via sc->qos_kcfg on detach — confirm.
	 */
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "%s: failed to map QoS key configuration "
		    "buffer into bus space\n", __func__);
		goto err_exit;
	}

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Bug fix: the message said "failed to open DPMAC" (copy-paste). */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(sc->dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	tbl.default_tc = 0;
	tbl.discard_on_miss = false;
	tbl.keep_entries = false;
	tbl.kcfg_busaddr = buf->paddr;
	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
	if (error) {
		device_printf(dev, "%s: failed to set QoS table\n", __func__);
		goto close_ni;
	}
	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
		goto close_ni;
	}

	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * Select and program the primary MAC address of the DPNI.
 *
 * Preference order: (1) the MAC address tied to the physical port, if any;
 * (2) the DPNI's current primary MAC address; (3) a randomly generated one.
 * The chosen address is cached in sc->mac.addr.
 *
 * Returns 0 on success or an MC command error code.
 */
static int
dpaa2_ni_set_mac_addr(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	if_t ifp = sc->ifp;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	struct ether_addr rnd_mac_addr;
	uint16_t rc_token, ni_token;
	uint8_t mac_addr[ETHER_ADDR_LEN];
	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	/* Bug fix: the message said "failed to open DPMAC" (copy-paste). */
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(sc->dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
	if (error) {
		device_printf(dev, "%s: failed to obtain the MAC address "
		    "associated with the physical port\n", __func__);
		goto close_ni;
	}
	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
	if (error) {
		device_printf(dev, "%s: failed to obtain primary MAC address\n",
		    __func__);
		goto close_ni;
	}

	if (!ETHER_IS_ZERO(mac_addr)) {
		/* Prefer the address associated with the physical port. */
		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
		    mac_addr);
		if (error) {
			device_printf(dev, "%s: failed to set primary MAC "
			    "address\n", __func__);
			goto close_ni;
		}
		memcpy(sc->mac.addr, mac_addr, ETHER_ADDR_LEN);
	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
		/* No port address and no DPNI address: generate a random one. */
		ether_gen_addr(ifp, &rnd_mac_addr);
		memcpy(mac_addr, rnd_mac_addr.octet, ETHER_ADDR_LEN);
		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
		    mac_addr);
		if (error) {
			device_printf(dev, "%s: failed to set random primary "
			    "MAC address\n", __func__);
			goto close_ni;
		}
		memcpy(sc->mac.addr, mac_addr, ETHER_ADDR_LEN);
	} else {
		/* Keep the DPNI's existing primary MAC address. */
		memcpy(sc->mac.addr, dpni_mac_addr, ETHER_ADDR_LEN);
	}

	error = 0;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
/*
 * miibus status-change callback: propagate the PHY link state to the DPMAC.
 *
 * Does nothing for fixed-link configurations, when no MII is attached, or
 * when the interface is not running. Only talks to the MC firmware when the
 * derived link state actually changed.
 */
static void
dpaa2_ni_miibus_statchg(device_t dev)
{
device_t pdev = device_get_parent(dev);
device_t child = dev;
struct dpaa2_ni_softc *sc = device_get_softc(dev);
struct dpaa2_mac_link_state mac_link = { 0 };
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_cmd cmd;
uint16_t rc_token, mac_token;
int error, link_state;
if (sc->fixed_link || sc->mii == NULL) {
return;
}
if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
/* Interface is down: no need to update the link state. */
return;
}
/* Derive the link state from the MII media status bits. */
if (sc->mii->mii_media_status & IFM_AVALID) {
if (sc->mii->mii_media_status & IFM_ACTIVE) {
link_state = LINK_STATE_UP;
} else {
link_state = LINK_STATE_DOWN;
}
} else {
link_state = LINK_STATE_UNKNOWN;
}
if (link_state != sc->link_state) {
sc->link_state = link_state;
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
&rc_token);
if (error) {
device_printf(dev, "%s: failed to open resource "
"container: id=%d, error=%d\n", __func__, rcinfo->id,
error);
goto err_exit;
}
error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
&mac_token);
if (error) {
device_printf(sc->dev, "%s: failed to open DPMAC: "
"id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
error);
goto close_rc;
}
if (link_state == LINK_STATE_UP ||
link_state == LINK_STATE_DOWN) {
/*
 * NOTE(review): rate is hard-coded to 1000 and AUTONEG/PAUSE
 * are always advertised regardless of the negotiated media —
 * confirm whether this should reflect sc->mii instead.
 */
mac_link.supported = sc->mii->mii_media.ifm_media;
mac_link.advert = sc->mii->mii_media.ifm_media;
mac_link.rate = 1000;
mac_link.options =
DPAA2_MAC_LINK_OPT_AUTONEG |
DPAA2_MAC_LINK_OPT_PAUSE;
mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
mac_link.state_valid = true;
error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
&mac_link);
if (error) {
device_printf(sc->dev, "%s: failed to set DPMAC "
"link state: id=%d, error=%d\n", __func__,
sc->mac.dpmac_id, error);
}
}
/* cmd still carries mac_token here, so a bare close is fine. */
(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
rc_token));
}
return;
close_rc:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
return;
}
static int
dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
{
DPNI_LOCK_ASSERT(sc);
if (sc->mii) {
mii_mediachg(sc->mii);
sc->media_status = sc->mii->mii_media.ifm_media;
} else if (sc->fixed_link) {
if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
__func__);
}
return (0);
}
/*
 * ifmedia change callback: take the DPNI lock and delegate to the locked
 * variant.
 */
static int
dpaa2_ni_media_change(if_t ifp)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	int rc;

	DPNI_LOCK(sc);
	rc = dpaa2_ni_media_change_locked(sc);
	DPNI_UNLOCK(sc);

	return (rc);
}
/*
 * ifmedia status callback: report the current MII media state, if an MII
 * bus is attached.
 */
static void
dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii;

	DPNI_LOCK(sc);
	mii = sc->mii;
	if (mii != NULL) {
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
	DPNI_UNLOCK(sc);
}
static void
dpaa2_ni_media_tick(void *arg)
{
struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
if (sc->mii) {
mii_tick(sc->mii);
if (sc->media_status != sc->mii->mii_media.ifm_media) {
printf("%s: media type changed (ifm_media=%x)\n",
__func__, sc->mii->mii_media.ifm_media);
dpaa2_ni_media_change(sc->ifp);
}
}
callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
}
/*
 * Bring the interface up: enable the DPNI in the MC firmware, mark the
 * interface running, apply the current media selection and start the MII
 * tick callout. A no-op when the interface is already running.
 */
static void
dpaa2_ni_init(void *arg)
{
struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
if_t ifp = sc->ifp;
device_t pdev = device_get_parent(sc->dev);
device_t dev = sc->dev;
device_t child = dev;
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
struct dpaa2_cmd cmd;
uint16_t rc_token, ni_token;
int error;
/* Bail out early if already running; don't hold the lock over MC I/O. */
DPNI_LOCK(sc);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
DPNI_UNLOCK(sc);
return;
}
DPNI_UNLOCK(sc);
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
if (error) {
device_printf(dev, "%s: failed to open resource container: "
"id=%d, error=%d\n", __func__, rcinfo->id, error);
goto err_exit;
}
error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
if (error) {
device_printf(dev, "%s: failed to open network interface: "
"id=%d, error=%d\n", __func__, dinfo->id, error);
goto close_rc;
}
/* NOTE(review): failure to enable is logged but init still proceeds. */
error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
if (error) {
device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
__func__, error);
}
DPNI_LOCK(sc);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
if (sc->mii) {
dpaa2_ni_media_change_locked(sc);
}
callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
DPNI_UNLOCK(sc);
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
return;
close_rc:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
return;
}
/*
 * if_transmit callback: map the mbuf's flow ID to a channel (falling back
 * to channel 0), enqueue it on that channel's transmit buf_ring and kick
 * the channel's cleanup task to drain it.
 *
 * Returns 0 on success (or when the interface is down) or the
 * buf_ring_enqueue() error.
 */
static int
dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	struct dpaa2_channel *ch;
	uint32_t flowid;
	int chidx = 0, rc;

	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		return (0);
	}

	/* Find the channel owning the Rx queue this flow hashed to. */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		flowid = m->m_pkthdr.flowid;
		for (int i = 0; i < sc->chan_n; i++) {
			struct dpaa2_channel *c = sc->channels[i];

			for (int j = 0; j < c->rxq_n; j++) {
				if (c->rx_queues[j].fqid == flowid) {
					chidx = c->flowid;
					goto enqueue;
				}
			}
		}
	}
enqueue:
	ch = sc->channels[chidx];
	rc = buf_ring_enqueue(ch->xmit_br, m);
	if (__predict_false(rc != 0)) {
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		m_freem(m);
	} else {
		taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
	}

	return (rc);
}
/*
 * if_qflush callback: flush the generic ifnet send queue.
 *
 * NOTE(review): the per-channel xmit_br buf_rings are not drained here —
 * confirm whether they should be flushed as well.
 */
static void
dpaa2_ni_qflush(if_t ifp)
{
if_qflush(ifp);
}
/*
 * ioctl handler for the DPNI network interface.
 *
 * Opens the resource container and the DPNI for the duration of the call so
 * that MTU/capability changes can be pushed to the MC firmware directly.
 * Returns "rc" (per-command result) on the normal path, or "error" when the
 * RC/NI could not be opened or an MC command failed.
 */
static int
dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
{
struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
struct ifreq *ifr = (struct ifreq *) data;
device_t pdev = device_get_parent(sc->dev);
device_t dev = sc->dev;
device_t child = dev;
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
struct dpaa2_cmd cmd;
uint32_t changed = 0;
uint16_t rc_token, ni_token;
int mtu, error, rc = 0;
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
if (error) {
device_printf(dev, "%s: failed to open resource container: "
"id=%d, error=%d\n", __func__, rcinfo->id, error);
goto err_exit;
}
error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
if (error) {
device_printf(dev, "%s: failed to open network interface: "
"id=%d, error=%d\n", __func__, dinfo->id, error);
goto close_rc;
}
switch (c) {
case SIOCSIFMTU:
/* Validate and set the MTU, then update the maximum frame length. */
DPNI_LOCK(sc);
mtu = ifr->ifr_mtu;
if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
DPNI_UNLOCK(sc);
error = EINVAL;
goto close_ni;
}
if_setmtu(ifp, mtu);
DPNI_UNLOCK(sc);
/* MFL covers the Ethernet header (and VLAN tag if enabled). */
mtu += ETHER_HDR_LEN;
if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
mtu += ETHER_VLAN_ENCAP_LEN;
error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, mtu);
if (error) {
device_printf(dev, "%s: failed to update maximum frame "
"length: error=%d\n", __func__, error);
goto close_ni;
}
break;
case SIOCSIFCAP:
/* Toggle checksum capabilities and re-apply them to the DPNI. */
changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
if ((changed & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 0)
if_togglecapenable(ifp, IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
if ((changed & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 0) {
if_togglecapenable(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
if_togglehwassist(ifp, DPAA2_CSUM_TX_OFFLOAD);
}
rc = dpaa2_ni_setup_if_caps(sc);
if (rc) {
printf("%s: failed to update iface capabilities: "
"error=%d\n", __func__, rc);
rc = ENXIO;
}
break;
case SIOCSIFFLAGS:
DPNI_LOCK(sc);
if (if_getflags(ifp) & IFF_UP) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Re-apply promiscuity flags if they changed. */
changed = if_getflags(ifp) ^ sc->if_flags;
if (changed & IFF_PROMISC ||
changed & IFF_ALLMULTI) {
rc = dpaa2_ni_setup_if_flags(sc);
}
} else {
/* Drop the lock: dpaa2_ni_init() takes it itself. */
DPNI_UNLOCK(sc);
dpaa2_ni_init(sc);
DPNI_LOCK(sc);
}
} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* NOTE(review): stop path intentionally left unimplemented. */
}
sc->if_flags = if_getflags(ifp);
DPNI_UNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
DPNI_LOCK(sc);
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
/* Drop the lock: update_mac_filters issues MC commands. */
DPNI_UNLOCK(sc);
rc = dpaa2_ni_update_mac_filters(ifp);
if (rc) {
device_printf(dev, "%s: failed to update MAC "
"filters: error=%d\n", __func__, rc);
}
DPNI_LOCK(sc);
}
DPNI_UNLOCK(sc);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
/* Delegate to MII when attached, or to the fixed-link media. */
if (sc->mii)
rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
else if(sc->fixed_link) {
rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
}
break;
default:
rc = ether_ioctl(ifp, c, data);
break;
}
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
return (rc);
close_ni:
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
return (error);
}
/*
 * Rebuild the DPNI multicast MAC filters from the interface's current
 * link-level multicast address list.
 *
 * Clears the existing multicast filters, then re-adds every multicast
 * address via dpaa2_ni_add_maddr(). Returns 0 on success or an MC command
 * error code (ctx.error carries the first add failure).
 */
static int
dpaa2_ni_update_mac_filters(if_t ifp)
{
struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
struct dpaa2_ni_mcaddr_ctx ctx;
device_t pdev = device_get_parent(sc->dev);
device_t dev = sc->dev;
device_t child = dev;
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
struct dpaa2_cmd cmd;
uint16_t rc_token, ni_token;
int error;
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
if (error) {
device_printf(dev, "%s: failed to open resource container: "
"id=%d, error=%d\n", __func__, rcinfo->id, error);
goto err_exit;
}
error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
if (error) {
device_printf(dev, "%s: failed to open network interface: "
"id=%d, error=%d\n", __func__, dinfo->id, error);
goto close_rc;
}
/* Remove multicast (but not unicast) filters before re-adding. */
error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
if (error) {
device_printf(dev, "%s: failed to clear multicast MAC filters: "
"error=%d\n", __func__, error);
goto close_ni;
}
ctx.ifp = ifp;
ctx.error = 0;
ctx.nent = 0;
if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
error = ctx.error;
close_ni:
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
return (error);
}
/*
 * if_foreach_llmaddr() callback: add one multicast MAC address to the DPNI
 * filter table.
 *
 * On the first add failure (filter table full), falls back to multicast
 * promiscuous mode and stops further iteration. Returns 1 when the address
 * was counted, 0 to stop or skip.
 */
static u_int
dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	/* A previous iteration already failed: stop adding. */
	if (ctx->error != 0) {
		return (0);
	}

	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			return (0);
		}
		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
		    &ni_token);
		if (error) {
			device_printf(dev, "%s: failed to open network interface: "
			    "id=%d, error=%d\n", __func__, dinfo->id, error);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (0);
		}

		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
		    LLADDR(sdl));

		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    ni_token));
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));

		if (ctx->error != 0) {
			/* Typo fix: "more then" -> "more than". */
			device_printf(dev, "%s: can't add more than %d MAC "
			    "addresses, switching to the multicast promiscuous "
			    "mode\n", __func__, ctx->nent);

			/* Filter table is full: enable ALLMULTI instead. */
			DPNI_LOCK(sc);
			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
			sc->if_flags |= IFF_ALLMULTI;
			ctx->error = dpaa2_ni_setup_if_flags(sc);
			DPNI_UNLOCK(sc);

			return (0);
		}
		ctx->nent++;
	}

	return (1);
}
static void
dpaa2_ni_intr(void *arg)
{
struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
device_t pdev = device_get_parent(sc->dev);
device_t dev = sc->dev;
device_t child = dev;
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
struct dpaa2_cmd cmd;
uint32_t status = ~0u;
uint16_t rc_token, ni_token;
int error;
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
if (error) {
device_printf(dev, "%s: failed to open resource container: "
"id=%d, error=%d\n", __func__, rcinfo->id, error);
goto err_exit;
}
error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
if (error) {
device_printf(dev, "%s: failed to open network interface: "
"id=%d, error=%d\n", __func__, dinfo->id, error);
goto close_rc;
}
error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
&status);
if (error) {
device_printf(sc->dev, "%s: failed to obtain IRQ status: "
"error=%d\n", __func__, error);
}
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
return;
}
/*
 * Taskqueue handler: drain Rx and Tx work on a channel and then re-arm the
 * channel for further notifications.
 */
static void
dpaa2_ni_cleanup_task(void *arg, int count)
{
	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
	int err;
	int pass = 0;

	while (pass < DPAA2_CLEAN_BUDGET) {
		int rx_done = dpaa2_ni_rx_cleanup(ch);
		int tx_done = dpaa2_ni_tx_cleanup(ch);

		/* Bail out as soon as the interface is no longer running. */
		if (__predict_false((if_getdrvflags(sc->ifp) &
		    IFF_DRV_RUNNING) == 0)) {
			return;
		}
		/* Neither direction used its full budget: nothing is left. */
		if (tx_done != DPAA2_TX_BUDGET && rx_done != DPAA2_RX_BUDGET) {
			break;
		}
		pass++;
	}

	err = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
	if (err != 0) {
		panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
		    __func__, ch->id, err);
	}
}
static int
dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
{
struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
struct dpaa2_swp *swp = iosc->swp;
struct dpaa2_ni_fq *fq;
struct dpaa2_buf *buf = &ch->store;
int budget = DPAA2_RX_BUDGET;
int error, consumed = 0;
do {
error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
if (error) {
device_printf(ch->ni_dev, "%s: failed to pull frames: "
"chan_id=%d, error=%d\n", __func__, ch->id, error);
break;
}
error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
if (error == ENOENT || error == EALREADY) {
break;
}
if (error == ETIMEDOUT) {
device_printf(ch->ni_dev, "%s: timeout to consume "
"frames: chan_id=%d\n", __func__, ch->id);
}
} while (--budget);
return (DPAA2_RX_BUDGET - budget);
}
/*
 * Dequeue up to DPAA2_TX_BUDGET mbufs from the channel's transmit buffer
 * ring and hand each one to dpaa2_ni_tx().  Returns the number of mbufs
 * processed.
 */
static int
dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
{
	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
	struct dpaa2_ni_tx_ring *ring = &ch->txc_queue.tx_rings[0];
	int left;

	for (left = DPAA2_TX_BUDGET; left > 0; left--) {
		struct mbuf *pkt;

		mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
		mtx_lock(&ch->xmit_mtx);
		pkt = buf_ring_dequeue_sc(ch->xmit_br);
		mtx_unlock(&ch->xmit_mtx);

		if (__predict_false(pkt == NULL)) {
			break;
		}
		dpaa2_ni_tx(sc, ch, ring, pkt);
	}
	return (DPAA2_TX_BUDGET - left);
}
/*
 * Transmit one mbuf on the given Tx ring.
 *
 * Takes a free Tx slot (struct dpaa2_buf) from the ring, DMA-maps the mbuf
 * (collapsing the chain first when the initial mapping fails), builds a
 * frame descriptor and enqueues it to the software portal with a bounded
 * number of retries.  On any failure the mbuf is freed, the interface
 * OERRORS counter is bumped and the slot is returned to the ring.
 */
static void
dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
    struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
{
	device_t dev = sc->dev;
	struct dpaa2_ni_fq *fq = tx->fq;
	struct dpaa2_buf *buf, *sgt;
	struct dpaa2_fd fd;
	struct mbuf *md;
	bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
	int rc, nsegs;
	int error;
	int len;
	bool mcast;

	/* Dequeue a free Tx slot; drop the packet when the ring is empty. */
	mtx_assert(&tx->lock, MA_NOTOWNED);
	mtx_lock(&tx->lock);
	buf = buf_ring_dequeue_sc(tx->br);
	mtx_unlock(&tx->lock);
	if (__predict_false(buf == NULL)) {
		m_freem(m);
		return;
	} else {
		DPAA2_BUF_ASSERT_TXREADY(buf);
		buf->m = m;
		sgt = buf->sgt;
	}

	/* Capture length/mcast now: "m" may be replaced by m_collapse(). */
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & M_MCAST) != 0;

#if defined(INVARIANTS)
	struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
	KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
	KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
#endif

	BPF_MTAP(sc->ifp, m);

	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (__predict_false(error != 0)) {
		/* Mapping failed: compact the chain and try once more. */
		md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
		if (md == NULL) {
			device_printf(dev, "%s: m_collapse() failed\n", __func__);
			fq->chan->tx_dropped++;
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
			goto err;
		}
		buf->m = m = md;
		error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
		    &nsegs, BUS_DMA_NOWAIT);
		if (__predict_false(error != 0)) {
			device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
			    "failed: error=%d\n", __func__, error);
			fq->chan->tx_dropped++;
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
			goto err;
		}
	}

	error = dpaa2_fd_build(dev, sc->tx_data_off, buf, segs, nsegs, &fd);
	if (__predict_false(error != 0)) {
		device_printf(dev, "%s: failed to build frame descriptor: "
		    "error=%d\n", __func__, error);
		fq->chan->tx_dropped++;
		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		goto err_unload;
	} else
		sc->tx_sg_frames++;

	/* Flush the data and S/G table buffers before the hardware reads them. */
	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): "rc" is only assigned inside this loop; this relies
	 * on DPAA2_NI_ENQUEUE_RETRIES being > 0, otherwise the check below
	 * would read an uninitialized value — confirm.
	 */
	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
		if (rc == 1) {
			break;
		}
	}

	if (rc != 1) {
		fq->chan->tx_dropped++;
		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
		goto err_unload;
	} else {
		if (mcast)
			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
		if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, len);
		fq->chan->tx_frames++;
	}
	return;

err_unload:
	bus_dmamap_unload(buf->dmat, buf->dmap);
	if (sgt->paddr != 0) {
		bus_dmamap_unload(sgt->dmat, sgt->dmap);
	}
err:
	/*
	 * NOTE(review): buf->m is left pointing at the mbuf freed below when
	 * the slot is re-enqueued — confirm DPAA2_BUF_ASSERT_TXREADY and the
	 * next dequeuer tolerate a stale buf->m.
	 */
	m_freem(buf->m);
	buf_ring_enqueue(tx->br, buf);
}
/*
 * Drain the dequeue responses currently available on the channel and
 * dispatch each frame to its queue-specific handler (Rx, Rx error or Tx
 * confirmation).
 *
 * On return, "*src" (if not NULL) holds the frame queue of the last
 * dispatched frame and "*consumed" (if not NULL) the number of frames
 * processed.  Returns the last result of dpaa2_chan_next_frame(), i.e.
 * EALREADY or ENOENT once the final response has been seen.
 */
static int
dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
    uint32_t *consumed)
{
	struct dpaa2_ni_fq *fq = NULL;
	struct dpaa2_dq *dq;
	struct dpaa2_fd *fd;
	struct dpaa2_ni_rx_ctx ctx = {
		.head = NULL,
		.tail = NULL,
		.cnt = 0,
		.last = false
	};
	int rc, frames = 0;

	do {
		rc = dpaa2_chan_next_frame(chan, &dq);
		if (rc == EINPROGRESS) {
			/* More responses are expected: dispatch and loop. */
			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
				fd = &dq->fdr.fd;
				/*
				 * fqd_ctx carries the dpaa2_ni_fq pointer,
				 * presumably stored at queue setup — cast back.
				 */
				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
				switch (fq->type) {
				case DPAA2_NI_QUEUE_RX:
					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
					break;
				case DPAA2_NI_QUEUE_RX_ERR:
					(void)dpaa2_ni_rx_err(chan, fq, fd);
					break;
				case DPAA2_NI_QUEUE_TX_CONF:
					(void)dpaa2_ni_tx_conf(chan, fq, fd);
					break;
				default:
					panic("%s: unknown queue type (1)",
					    __func__);
				}
				frames++;
			}
		} else if (rc == EALREADY || rc == ENOENT) {
			/* Final (or no) response: dispatch once and stop. */
			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
				fd = &dq->fdr.fd;
				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
				switch (fq->type) {
				case DPAA2_NI_QUEUE_RX:
					/*
					 * Last frame: mark the Rx context so
					 * dpaa2_ni_rx() flushes the chained
					 * mbufs via if_input().
					 */
					ctx.last = true;
					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
					break;
				case DPAA2_NI_QUEUE_RX_ERR:
					(void)dpaa2_ni_rx_err(chan, fq, fd);
					break;
				case DPAA2_NI_QUEUE_TX_CONF:
					(void)dpaa2_ni_tx_conf(chan, fq, fd);
					break;
				default:
					panic("%s: unknown queue type (2)",
					    __func__);
				}
				frames++;
			}
			break;
		} else {
			panic("%s: should not reach here: rc=%d", __func__, rc);
		}
	} while (true);

	KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
	    "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));

	if (src != NULL) {
		*src = fq;
	}
	if (consumed != NULL) {
		*consumed = frames;
	}
	return (rc);
}
/*
 * Handle a single received frame.
 *
 * Validates the frame's software annotation and buffer ownership, unmaps
 * the Rx buffer, points its mbuf at the payload and links the mbuf onto the
 * per-poll chain in "ctx"; when ctx->last is set the whole chain is handed
 * to if_input().  Consumed buffers are collected in ch->recycled[] and,
 * once DPAA2_SWP_BUFS_PER_CMD of them have accumulated, re-seeded and
 * released back to the buffer pool in a single portal command.
 *
 * Returns 0 on success or the error from releasing buffers to the pool.
 */
static int
dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
    struct dpaa2_fd *fd, struct dpaa2_ni_rx_ctx *ctx)
{
	bus_addr_t paddr;
	struct dpaa2_swa *swa;
	struct dpaa2_buf *buf;
	struct dpaa2_channel *bch;
	struct dpaa2_ni_softc *sc;
	struct dpaa2_bp_softc *bpsc;
	struct mbuf *m;
	device_t bpdev;
	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
	void *buf_data;
	int buf_len, error, released_n = 0;

	error = dpaa2_fa_get_swa(fd, &swa);
	if (__predict_false(error != 0))
		panic("%s: frame has no software annotation: error=%d",
		    __func__, error);

	paddr = (bus_addr_t)fd->addr;
	buf = swa->buf;
	bch = (struct dpaa2_channel *)buf->opt;
	sc = device_get_softc(bch->ni_dev);

	KASSERT(swa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
	if (__predict_false(paddr != buf->paddr)) {
		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
		    __func__, paddr, buf->paddr);
	}

	/* Per-frame error statistics. */
	switch (dpaa2_fd_err(fd)) {
	case 1:
		sc->rx_enq_rej_frames++;
		break;
	case 2:
		sc->rx_ieoi_err_frames++;
		break;
	default:
		break;
	}
	/* Per-frame format statistics. */
	switch (dpaa2_fd_format(fd)) {
	case DPAA2_FD_SINGLE:
		sc->rx_single_buf_frames++;
		break;
	case DPAA2_FD_SG:
		sc->rx_sg_buf_frames++;
		break;
	default:
		break;
	}

	/* Detach the mbuf from the Rx buffer and reset the buffer's state. */
	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
	mtx_lock(&bch->dma_mtx);

	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(buf->dmat, buf->dmap);
	m = buf->m;
	buf_len = dpaa2_fd_data_len(fd);
	buf_data = (uint8_t *)buf->vaddr + dpaa2_fd_offset(fd);
	buf->m = NULL;
	buf->paddr = 0;
	buf->vaddr = NULL;
	buf->seg.ds_addr = 0;
	buf->seg.ds_len = 0;
	buf->nseg = 0;

	mtx_unlock(&bch->dma_mtx);

	m->m_flags |= M_PKTHDR;
	m->m_data = buf_data;
	m->m_len = buf_len;
	m->m_pkthdr.len = buf_len;
	m->m_pkthdr.rcvif = sc->ifp;
	m->m_pkthdr.flowid = fq->fqid;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

	if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);

	/* Chain the mbuf so the whole poll makes one if_input() call. */
	if (ctx->head == NULL) {
		KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
		ctx->head = m;
		ctx->tail = m;
	} else {
		KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
		ctx->tail->m_nextpkt = m;
		ctx->tail = m;
	}
	ctx->cnt++;

	if (ctx->last) {
		ctx->tail->m_nextpkt = NULL;
		if_input(sc->ifp, ctx->head);
	}

	/* Batch recycling: one release command per BUFS_PER_CMD buffers. */
	ch->recycled[ch->recycled_n++] = buf;
	if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
		/*
		 * NOTE(review): the buffer-pool task is queued here while the
		 * same buffers are also re-seeded and released inline below —
		 * confirm the task does not double-process them.
		 */
		taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);

		for (int i = 0; i < ch->recycled_n; i++) {
			buf = ch->recycled[i];
			bch = (struct dpaa2_channel *)buf->opt;

			mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
			mtx_lock(&bch->dma_mtx);
			error = dpaa2_buf_seed_rxb(sc->dev, buf,
			    DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
			mtx_unlock(&bch->dma_mtx);

			if (__predict_false(error != 0)) {
				panic("%s: failed to recycle buffer: error=%d",
				    __func__, error);
			}
			released[released_n++] = buf->paddr;
		}

		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
		bpsc = device_get_softc(bpdev);

		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
		    released, released_n);
		if (__predict_false(error != 0)) {
			/*
			 * NOTE(review): ch->recycled_n is not reset on this
			 * error path; the next call would index past
			 * recycled[] — confirm whether this is recoverable.
			 */
			device_printf(sc->dev, "%s: failed to release buffers "
			    "to the pool: error=%d\n", __func__, error);
			return (error);
		}
		ch->recycled_n = 0;
	}
	return (0);
}
/*
 * Handle a frame delivered on the Rx error queue: validate the backing
 * buffer and return it straight to the buffer pool.
 */
static int
dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
    struct dpaa2_fd *fd)
{
	struct dpaa2_swa *swa;
	struct dpaa2_buf *rxb;
	struct dpaa2_channel *bufch;
	struct dpaa2_ni_softc *sc;
	struct dpaa2_bp_softc *bpsc;
	device_t bpdev;
	bus_addr_t phys;
	int err;

	err = dpaa2_fa_get_swa(fd, &swa);
	if (__predict_false(err != 0))
		panic("%s: frame has no software annotation: error=%d",
		    __func__, err);

	phys = (bus_addr_t)fd->addr;
	rxb = swa->buf;
	bufch = (struct dpaa2_channel *)rxb->opt;
	sc = device_get_softc(bufch->ni_dev);

	KASSERT(swa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
	KASSERT(bufch != NULL, ("%s: buffer channel is NULL", __func__));
	if (__predict_false(phys != rxb->paddr)) {
		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
		    __func__, phys, rxb->paddr);
	}

	/* Hand the buffer back to the pool it belongs to. */
	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bpsc = device_get_softc(bpdev);

	err = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &phys, 1);
	if (err != 0) {
		device_printf(sc->dev, "%s: failed to release frame buffer to "
		    "the pool: error=%d\n", __func__, err);
		return (err);
	}
	return (0);
}
/*
 * Handle a Tx confirmation frame: tear down the DMA state of the completed
 * transmit buffer, free its mbuf and return the slot to its Tx ring.
 */
static int
dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
    struct dpaa2_fd *fd)
{
	struct dpaa2_swa *swa;
	struct dpaa2_buf *txb;
	struct dpaa2_buf *sgtb;
	struct dpaa2_ni_tx_ring *ring;
	struct dpaa2_channel *bufch;
	bus_addr_t phys;
	int err;

	err = dpaa2_fa_get_swa(fd, &swa);
	if (__predict_false(err != 0))
		panic("%s: frame has no software annotation: error=%d",
		    __func__, err);

	phys = (bus_addr_t)fd->addr;
	txb = swa->buf;
	sgtb = txb->sgt;
	ring = (struct dpaa2_ni_tx_ring *)txb->opt;
	bufch = ring->fq->chan;

	KASSERT(swa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
	KASSERT(ring != NULL, ("%s: Tx ring is NULL", __func__));
	KASSERT(sgtb != NULL, ("%s: S/G table is NULL", __func__));
	KASSERT(bufch != NULL, ("%s: buffer channel is NULL", __func__));
	if (phys != txb->paddr) {
		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
		    __func__, phys, txb->paddr);
	}

	/* Unmap the data and S/G buffers and clear the slot's state. */
	mtx_assert(&bufch->dma_mtx, MA_NOTOWNED);
	mtx_lock(&bufch->dma_mtx);

	bus_dmamap_sync(txb->dmat, txb->dmap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sgtb->dmat, sgtb->dmap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(txb->dmat, txb->dmap);
	bus_dmamap_unload(sgtb->dmat, sgtb->dmap);
	m_freem(txb->m);
	txb->m = NULL;
	txb->paddr = 0;
	txb->vaddr = NULL;
	sgtb->paddr = 0;

	mtx_unlock(&bufch->dma_mtx);

	/* Make the slot available for the next transmission. */
	buf_ring_enqueue(ring->br, txb);
	return (0);
}
/*
 * Compare the DPNI API version stored in the softc against the given
 * major/minor pair.
 *
 * Returns zero when equal, a negative value when the stored version is
 * older and a positive value when it is newer (strcmp-like ordering).
 *
 * Fixed: return expressions are now parenthesized for consistency with
 * the rest of the file (style(9)).
 */
static int
dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
    uint16_t minor)
{
	if (sc->api_major == major) {
		return (sc->api_minor - minor);
	}
	return (sc->api_major - major);
}
static int
dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
{
struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
device_t pdev = device_get_parent(sc->dev);
device_t dev = sc->dev;
device_t child = dev;
struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
struct dpaa2_cmd cmd;
uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
uint64_t result = 0;
uint16_t rc_token, ni_token;
int error;
DPAA2_CMD_INIT(&cmd);
error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
if (error) {
device_printf(dev, "%s: failed to open resource container: "
"id=%d, error=%d\n", __func__, rcinfo->id, error);
goto exit;
}
error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
if (error) {
device_printf(dev, "%s: failed to open network interface: "
"id=%d, error=%d\n", __func__, dinfo->id, error);
goto close_rc;
}
error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
if (!error) {
result = cnt[stat->cnt];
}
(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
exit:
return (sysctl_handle_64(oidp, &result, 0, req));
}
/* Sysctl handler: report the current value of the buf_num counter. */
static int
dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *)arg1;
	uint32_t val;

	val = DPAA2_ATOMIC_READ(&sc->buf_num);
	return (sysctl_handle_32(oidp, &val, 0, req));
}
/* Sysctl handler: report the current value of the buf_free counter. */
static int
dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *)arg1;
	uint32_t val;

	val = DPAA2_ATOMIC_READ(&sc->buf_free);
	return (sysctl_handle_32(oidp, &val, 0, req));
}
/*
 * Translate the given flags into a distribution key and program hash-based
 * Rx traffic distribution with it.
 */
static int
dpaa2_ni_set_hash(device_t dev, uint64_t flags)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint64_t key = 0;

	/* Distributing traffic needs more than one queue. */
	if (sc->attr.num.queues <= 1) {
		return (EOPNOTSUPP);
	}

	for (size_t i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if ((dist_fields[i].rxnfc_field & flags) != 0) {
			key |= dist_fields[i].id;
		}
	}
	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
}
/*
 * Build a key-extraction profile from "flags" and program the DPNI's Rx
 * traffic distribution with it (only hash mode is acted upon).
 *
 * The key configuration is serialized into sc->rxd_kcfg, DMA-mapped, and
 * its physical address is handed to the MC firmware.
 */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;
	struct dpaa2_buf *buf = &sc->rxd_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int i, error = 0;

	/* Lazily bind the key-config buffer to its DMA tag. */
	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->rxd_dmat;
	}

	/* Translate the selected flags into header-extraction rules. */
	memset(&cls_cfg, 0, sizeof(cls_cfg));
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		key = &cls_cfg.extracts[cls_cfg.num_extracts];
		if (!(flags & dist_fields[i].id)) {
			continue;
		}
		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			device_printf(dev, "%s: failed to add key extraction "
			    "rule\n", __func__);
			return (E2BIG);
		}
		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	/*
	 * NOTE(review): on the error returns below, the DMA memory allocated
	 * above is not freed here — confirm it is reclaimed elsewhere
	 * (rxd_kcfg lives in the softc).
	 */
	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	if (type == DPAA2_NI_DIST_MODE_HASH) {
		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
		    &ni_token);
		if (error) {
			device_printf(dev, "%s: failed to open network "
			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
			    error);
			goto close_rc;
		}

		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
		if (error != 0) {
			device_printf(dev, "%s: failed to set distribution mode "
			    "and size for the traffic class\n", __func__);
		}

		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    ni_token));
		/* NOTE(review): this label sits inside the if-block on purpose. */
close_rc:
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

err_exit:
	return (error);
}
/*
 * Flatten a key-generation profile (struct dpkg_profile_cfg) into the
 * buffer layout described by struct dpni_ext_set_rx_tc_dist.
 *
 * Returns EINVAL when there are too many extractions or an extraction type
 * is unknown; 0 on success.
 */
static int
dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
{
	struct dpni_ext_set_rx_tc_dist *ext;
	struct dpni_dist_extract *dst;
	const struct dpkg_extract *src;
	int idx, msk;

	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
		return (EINVAL);

	ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
	ext->num_extracts = cfg->num_extracts;

	for (idx = 0; idx < cfg->num_extracts; idx++) {
		src = &cfg->extracts[idx];
		dst = &ext->extracts[idx];

		switch (src->type) {
		case DPKG_EXTRACT_FROM_HDR:
			dst->prot = src->extract.from_hdr.prot;
			dst->efh_type = src->extract.from_hdr.type & 0x0Fu;
			dst->size = src->extract.from_hdr.size;
			dst->offset = src->extract.from_hdr.offset;
			dst->field = src->extract.from_hdr.field;
			dst->hdr_index = src->extract.from_hdr.hdr_index;
			break;
		case DPKG_EXTRACT_FROM_DATA:
			dst->size = src->extract.from_data.size;
			dst->offset = src->extract.from_data.offset;
			break;
		case DPKG_EXTRACT_FROM_PARSE:
			dst->size = src->extract.from_parse.size;
			dst->offset = src->extract.from_parse.offset;
			break;
		default:
			return (EINVAL);
		}
		dst->num_of_byte_masks = src->num_of_byte_masks;
		dst->extract_type = src->type & 0x0Fu;
		for (msk = 0; msk < DPKG_NUM_OF_MASKS; msk++) {
			dst->masks[msk].mask = src->masks[msk].mask;
			dst->masks[msk].offset = src->masks[msk].offset;
		}
	}
	return (0);
}
/* Device and MII bus method table for the DPAA2 network interface driver. */
static device_method_t dpaa2_ni_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, dpaa2_ni_probe),
	DEVMETHOD(device_attach, dpaa2_ni_attach),
	DEVMETHOD(device_detach, dpaa2_ni_detach),

	/* MII bus interface */
	DEVMETHOD(miibus_statchg, dpaa2_ni_miibus_statchg),

	DEVMETHOD_END
};

static driver_t dpaa2_ni_driver = {
	"dpaa2_ni",
	dpaa2_ni_methods,
	sizeof(struct dpaa2_ni_softc),
};

/* miibus attaches under dpaa2_ni; dpaa2_ni attaches under dpaa2_rc. */
DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif