dpaa_fq
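/* dpaa_fq_init() creates the QMan frame queue backing a dpaa_fq and, unless
 * the FQ was created with QMAN_FQ_FLAG_NO_MODIFY, programs its placement,
 * congestion and stashing parameters. RX default and PCD queues are also
 * registered with the XDP core. Elided code is marked with ellipses.
 */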
static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{
	/* ... local declarations elided ... */

	priv = netdev_priv(dpaa_fq->net_dev);
	dev = dpaa_fq->net_dev->dev.parent;

	if (dpaa_fq->fqid == 0)
		dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
	dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

	err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
	if (err)
		return err;
	fq = &dpaa_fq->fq_base;

	if (dpaa_fq->init) {
		if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
			initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
		qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);

		if (dpaa_fq->fq_type == FQ_TYPE_TX ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
		    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
			/* ... egress congestion group setup ... */
		}
		if (dpaa_fq->fq_type == FQ_TYPE_TX) {
			queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
			/* ... link the matching confirmation queue ... */
		}
		if (priv->use_ingress_cgr &&
		    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
		     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
		     dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
			/* ... ingress congestion group setup ... */
		}
		if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
			/* ... stashing setup common to ingress queues ... */
		}
		/* ... qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq) ... */
	}

	dpaa_fq->fqid = qman_fq_fqid(fq);

	if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
	    dpaa_fq->fq_type == FQ_TYPE_RX_PCD) {
		err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
				       dpaa_fq->fqid, 0);
		if (err)
			return err;
		err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq,
						 MEM_TYPE_PAGE_ORDER0, NULL);
		if (err) {
			xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
			return err;
		}
	}
	return 0;
}
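/* dpaa_fq_free_entry() tears down a single frame queue: initialized FQs are
 * retired and taken out of service, the XDP registration is undone for RX
 * default/PCD queues, and the entry is unlinked from the driver's FQ list.
 */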
static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{
	const struct dpaa_priv *priv;
	struct dpaa_fq *dpaa_fq;
	int err = 0;

	dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
	priv = netdev_priv(dpaa_fq->net_dev);

	if (dpaa_fq->init) {
		/* ... retire the FQ and take it out of service ... */
	}
	if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
	     dpaa_fq->fq_type == FQ_TYPE_RX_PCD) &&
	    xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq))
		xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);

	qman_destroy_fq(fq);
	list_del(&dpaa_fq->list);
	return err;
}
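/* dpaa_fq_free() frees every FQ on a list, reporting the first error it
 * encounters.
 */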
static int dpaa_fq_free(struct device *dev, struct list_head *list)
{
	struct dpaa_fq *dpaa_fq, *tmp;
	int err = 0, error;

	list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
		error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
		if (error < 0 && err >= 0)
			err = error;
	}
	return err;
}
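/* Port-side initialization: each FMan port is told which FQIDs carry its
 * error, default and (on RX) PCD traffic.
 */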
static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout);

static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
				 struct dpaa_fq *errq,
				 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
				 struct dpaa_buffer_layout *buf_layout);
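/* The per-port FQ bundle filled in by dpaa_alloc_all_fqs() and consumed by
 * the port initialization helpers above:
 */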
struct fm_port_fqs {
	struct dpaa_fq *tx_defq;
	struct dpaa_fq *tx_errq;
	struct dpaa_fq *rx_defq;
	struct dpaa_fq *rx_errq;
	struct dpaa_fq *rx_pcdq;
};
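/* The QMan dequeue callbacks receive a bare qman_fq and recover the
 * enclosing dpaa_fq; fq_base is the first member of struct dpaa_fq, so a
 * direct cast (seen further below) works as well.
 */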
	struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);

	net_dev = dpaa_fq->net_dev;
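/* dpaa_run_xdp() executes the attached XDP program on a received buffer,
 * using the xdp_rxq_info that dpaa_fq_init() registered for this queue.
 */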
static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_dqrr_entry *dq,
			void *vaddr, struct dpaa_fq *dpaa_fq,
			unsigned int *xdp_meta_len)
{
	/* ... */
	xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
		      &dpaa_fq->xdp_rxq);
	/* ... */
}
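/* rx_default_dqrr(), the default RX dequeue callback, recovers the dpaa_fq
 * and hands the frame to dpaa_run_xdp() when an XDP program is attached.
 */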
	struct dpaa_fq *dpaa_fq;
	/* ... */
	dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
	net_dev = dpaa_fq->net_dev;
	/* ... */
	xdp_act = dpaa_run_xdp(priv, (struct qm_dqrr_entry *)dq, vaddr,
			       dpaa_fq, &xdp_meta_len);
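/* The confirmation and error callbacks take the cast shortcut to reach the
 * owning net_device:
 */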
	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	/* ... */
	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	/* ... */
	net_dev = ((struct dpaa_fq *)fq)->net_dev;
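/* At probe time, every FQ on the driver's list is initialized; tail drop is
 * left disabled (td_enable == false):
 */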
	struct dpaa_fq *dpaa_fq, *tmp;
	/* ... */
	list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
		err = dpaa_fq_init(dpaa_fq, false);
		if (err < 0)
			goto free_dpaa_fqs;
	}
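/* dpaa_fq_alloc() allocates count FQ descriptors of one type, optionally
 * giving them consecutive fixed FQIDs (start != 0), links them into the
 * caller's list and assigns each a QMan work queue.
 */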
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
	/* ... choose fq->wq based on fq->fq_type ... */
}

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
				     u32 start, u32 count,
				     struct list_head *list,
				     enum dpaa_fq_type fq_type)
{
	struct dpaa_fq *dpaa_fq;
	int i;

	dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
			       GFP_KERNEL);
	if (!dpaa_fq)
		return NULL;

	for (i = 0; i < count; i++) {
		dpaa_fq[i].fq_type = fq_type;
		dpaa_fq[i].fqid = start ? start + i : 0;
		list_add_tail(&dpaa_fq[i].list, list);
	}
	for (i = 0; i < count; i++)
		dpaa_assign_wq(dpaa_fq + i, i);

	return dpaa_fq;
}
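/* dpaa_alloc_all_fqs() builds the complete FQ set for a port pair. The
 * error/default queues use dynamic FQIDs (start == 0); the PCD queues need
 * an aligned, pre-reserved FQID range. The per-CPU TX and TX-confirmation
 * queue allocations are elided below.
 */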
static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
			      struct fm_port_fqs *port_fqs)
{
	struct dpaa_fq *dpaa_fq;
	u32 fq_base, fq_base_aligned, i;

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;
	port_fqs->rx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
	if (!dpaa_fq)
		goto fq_alloc_failed;
	port_fqs->rx_defq = &dpaa_fq[0];

	/* ... reserve an aligned FQID range, fq_base_aligned, for PCD ... */
	dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
				list, FQ_TYPE_RX_PCD);
	if (!dpaa_fq)
		goto fq_alloc_failed;
	port_fqs->rx_pcdq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
	if (!dpaa_fq)
		goto fq_alloc_failed;
	port_fqs->tx_errq = &dpaa_fq[0];

	dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
	if (!dpaa_fq)
		goto fq_alloc_failed;
	port_fqs->tx_defq = &dpaa_fq[0];

	return 0;

fq_alloc_failed:
	dev_err(dev, "dpaa_fq_alloc() failed\n");
	return -ENOMEM;
}
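/* dpaa_setup_ingress() and dpaa_setup_egress() stamp a dpaa_fq with the
 * flags, channel and callbacks of a template qman_fq:
 */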
static void dpaa_setup_ingress(const struct dpaa_priv *priv,
			       struct dpaa_fq *fq,
			       const struct qman_fq *template);
static void dpaa_setup_egress(const struct dpaa_priv *priv,
			      struct dpaa_fq *fq,
			      struct fman_port *port,
			      const struct qman_fq *template);
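/* dpaa_fq_setup() walks the FQ list with a local cursor: */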
	struct dpaa_fq *fq;
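/* These appear to be the locals of the sysfs FQID dump
 * (dpaa_eth_show_fqids() in dpaa_eth_sysfs.c), which coalesces contiguous
 * FQIDs into ranges:
 */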
	struct dpaa_fq *prev = NULL;
	struct dpaa_fq *tmp;
	struct dpaa_fq *fq;