#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/endian.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
/*
 * Map a TX completion queue to the CPU that should service it.
 *
 * NOTE(review): integer division groups consecutive queue IDs onto the
 * same CPU (e.g. with 4 CPUs, queues 0..3 all map to CPU 0) — confirm
 * this matches the intended queue/CPU distribution.
 */
int
qcom_ess_edma_tx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
{
	int cpu;

	cpu = queue / mp_ncpus;
	return (cpu);
}
/*
 * Initialise the software side of a TX descriptor ring.
 *
 * Clears the mbuf pointer in each software descriptor and creates a
 * busdma map for it, ready for frames to be loaded at transmit time.
 *
 * Returns 0 on success or an errno on failure.  On failure, any
 * dmamaps created before the failing index are destroyed so the ring
 * is left with no dangling busdma state.
 */
int
qcom_ess_edma_tx_ring_setup(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_desc_ring *ring)
{
	struct qcom_ess_edma_sw_desc_tx *txd;
	int i, j, ret;

	for (i = 0; i < EDMA_TX_RING_SIZE; i++) {
		txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, i);
		if (txd == NULL) {
			device_printf(sc->sc_dev,
			    "ERROR; couldn't get sw desc (idx %d)\n", i);
			ret = EINVAL;
			goto fail;
		}
		txd->m = NULL;
		ret = bus_dmamap_create(ring->buffer_dma_tag,
		    BUS_DMA_NOWAIT,
		    &txd->m_dmamap);
		if (ret != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to create dmamap (%d)\n",
			    __func__, ret);
			/*
			 * Previously this error was only logged, leaving
			 * txd->m_dmamap undefined and later used; fail
			 * the setup instead.
			 */
			goto fail;
		}
	}

	return (0);

fail:
	/* Unwind: destroy the dmamaps created before the failure. */
	for (j = 0; j < i; j++) {
		txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, j);
		if (txd != NULL)
			bus_dmamap_destroy(ring->buffer_dma_tag,
			    txd->m_dmamap);
	}
	return (ret);
}
/*
 * Tear down the software state of a TX descriptor ring.
 *
 * XXX TODO: not yet implemented.  When written, this should free any
 * mbufs still attached to the software descriptors and destroy the
 * per-descriptor dmamaps created in qcom_ess_edma_tx_ring_setup().
 *
 * Currently just logs and returns 0 (success).
 */
int
qcom_ess_edma_tx_ring_clean(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_desc_ring *ring)
{
	device_printf(sc->sc_dev, "%s: TODO\n", __func__);
	return (0);
}
/*
 * Release the transmit state held by one ring slot.
 *
 * If the slot owns an mbuf, its DMA map is unloaded and the mbuf is
 * freed; the first/last flags are reset so the slot can be reused.
 * Slots belonging to the middle of a multi-segment frame have no mbuf
 * attached and need no unload.
 *
 * Returns 0, or EINVAL if either descriptor lookup fails.
 */
static int
qcom_ess_edma_tx_unmap_and_clean(struct qcom_ess_edma_softc *sc,
    struct qcom_ess_edma_desc_ring *ring, uint16_t idx)
{
	struct qcom_ess_edma_sw_desc_tx *sw;
	struct qcom_ess_edma_tx_desc *hw;

	sw = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
	if (sw == NULL) {
		device_printf(sc->sc_dev,
		    "ERROR; couldn't get sw desc (idx %d)\n", idx);
		return (EINVAL);
	}

	hw = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
	if (hw == NULL) {
		device_printf(sc->sc_dev,
		    "ERROR; couldn't get hw desc (idx %d)\n", idx);
		return (EINVAL);
	}

	/* Only the slot that was handed the mbuf has a loaded map. */
	if (sw->m != NULL) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
		    "%s: idx %d, unmap/free\n", __func__, idx);
		bus_dmamap_unload(ring->buffer_dma_tag, sw->m_dmamap);
		m_freem(sw->m);
		sw->m = NULL;
		sw->is_first = 0;
		sw->is_last = 0;
	}

#ifdef ESS_EDMA_DEBUG_CLEAR_DESC
	/* Scrub the hardware descriptor; debug aid only. */
	memset(hw, 0, sizeof(struct qcom_ess_edma_tx_desc));
#endif

	return (0);
}
/*
 * Reap completed transmit descriptors for the given queue.
 *
 * Reads the hardware consumer index, releases every descriptor between
 * the software clean pointer and that index (wrapping at the ring
 * size), then writes the new consumer index back to the hardware.
 *
 * The ring lock must be held.  Returns 0.
 */
int
qcom_ess_edma_tx_ring_complete(struct qcom_ess_edma_softc *sc, int queue)
{
	struct qcom_ess_edma_desc_ring *ring;
	uint32_t cleaned;
	uint16_t idx, hw_idx;

	ring = &sc->sc_tx_ring[queue];
	EDMA_RING_LOCK_ASSERT(ring);

	/* Pick up descriptor writebacks from the hardware. */
	qcom_ess_edma_desc_ring_flush_postupdate(sc, ring);

	idx = ring->next_to_clean;
	hw_idx = 0;
	(void) qcom_ess_edma_hw_tx_read_tpd_cons_idx(sc, queue, &hw_idx);

	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
	    "%s: called; sw=%d, hw=%d\n", __func__, idx, hw_idx);

	for (cleaned = 0; idx != hw_idx; cleaned++) {
		qcom_ess_edma_tx_unmap_and_clean(sc, ring, idx);
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
		    "%s cleaning %d\n", __func__, idx);
		idx++;
		if (idx >= ring->ring_count)
			idx = 0;
	}

	ring->stats.num_cleaned += cleaned;
	ring->stats.num_tx_complete++;
	ring->next_to_clean = idx;

	/* Tell the hardware how far we've consumed. */
	qcom_ess_edma_hw_tx_update_cons_idx(sc, queue, idx);

	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING_COMPLETE,
	    "%s: cleaned %d descriptors\n", __func__, cleaned);

	return (0);
}
/*
 * Enqueue a single frame onto the given TX ring.
 *
 * The mbuf chain is DMA-loaded and spread over one hardware descriptor
 * per DMA segment.  The mbuf itself is attached to the LAST segment's
 * software descriptor, and the dmamap actually used for the load is
 * swapped onto that descriptor, so TX completion can unload and free
 * everything from a single slot.
 *
 * word3 carries the destination port bitmap and, unless the frame
 * already carries an inline 802.1Q header, an inserted CVLAN tag
 * (either the mbuf's M_VLANTAG tag or 'default_vlan').
 *
 * The ring lock must be held.  Returns 0 on success, or ENOBUFS if
 * the frame can't be mapped or the ring lacks space; on error the
 * mbuf is NOT freed and the caller retains ownership of *m0.
 */
int
qcom_ess_edma_tx_ring_frame(struct qcom_ess_edma_softc *sc, int queue,
    struct mbuf **m0, uint16_t port_bitmap, int default_vlan)
{
	struct qcom_ess_edma_sw_desc_tx *txd_first;
	struct qcom_ess_edma_desc_ring *ring;
	struct ether_vlan_header *eh;
	bus_dma_segment_t txsegs[QCOM_ESS_EDMA_MAX_TXFRAGS];
	uint32_t word1, word3;
	uint32_t eop;
	int vlan_id;
	int num_left, ret, nsegs, i;
	uint16_t next_to_fill;
	uint16_t svlan_tag;
	struct mbuf *m;

	ring = &sc->sc_tx_ring[queue];
	EDMA_RING_LOCK_ASSERT(ring);

	m = *m0;

	/* Cheap early check before we bother mapping. */
	num_left = qcom_ess_edma_desc_ring_get_num_available(sc, ring);
	if (num_left < 2) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: num_left=%d\n", __func__, num_left);
		ring->stats.num_enqueue_full++;
		return (ENOBUFS);
	}

	next_to_fill = ring->next_to_fill;
	txd_first = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
	    next_to_fill);
	QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
	    "%s: starting at idx %d\n", __func__, next_to_fill);

	/* Load the frame using the first slot's dmamap. */
	ret = bus_dmamap_load_mbuf_sg(ring->buffer_dma_tag,
	    txd_first->m_dmamap,
	    m,
	    txsegs,
	    &nsegs,
	    BUS_DMA_NOWAIT);
	if (ret != 0) {
		ring->stats.num_tx_mapfail++;
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: map failed (%d)\n", __func__, ret);
		return (ENOBUFS);
	}
	if (nsegs == 0) {
		/*
		 * A successful load with zero segments leaves the map
		 * loaded; unload it before bailing so it isn't leaked.
		 */
		bus_dmamap_unload(ring->buffer_dma_tag, txd_first->m_dmamap);
		ring->stats.num_tx_maxfrags++;
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: too many segs\n", __func__);
		return (ENOBUFS);
	}

	/* +2 slack keeps producer/consumer indices from aliasing. */
	if (nsegs + 2 > num_left) {
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: nsegs=%d, num_left=%d\n", __func__, nsegs, num_left);
		bus_dmamap_unload(ring->buffer_dma_tag, txd_first->m_dmamap);
		ring->stats.num_enqueue_full++;
		return (ENOBUFS);
	}

	bus_dmamap_sync(ring->buffer_dma_tag, txd_first->m_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m->m_pkthdr.rcvif = NULL;

	svlan_tag = 0;
	word1 = 0;
	word3 = 0;
	word3 |= (port_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT);

	/*
	 * Decide whether to have the hardware insert a CVLAN tag.
	 * A frame that already carries an inline 802.1Q header gets
	 * vlan_id = -1 (no insertion); otherwise use the mbuf's
	 * offloaded tag or fall back to the per-port default.
	 */
	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		vlan_id = -1;
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: no vlan id\n", __func__);
	} else if ((m->m_flags & M_VLANTAG) != 0) {
		vlan_id = m->m_pkthdr.ether_vtag & 0x0fff;
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: header tag vlan id=%d\n", __func__, vlan_id);
	} else {
		vlan_id = default_vlan;
		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: no vlan tag/hdr; vlan id=%d\n", __func__,
		    vlan_id);
	}
	if (vlan_id != -1) {
		word3 |= (1U << EDMA_TX_INS_CVLAN);
		word3 |= (vlan_id << EDMA_TX_CVLAN_TAG_SHIFT);
	}

	/* Fill one hardware descriptor per DMA segment. */
	eop = 0;
	for (i = 0; i < nsegs; i++) {
		struct qcom_ess_edma_sw_desc_tx *txd;
		struct qcom_ess_edma_tx_desc *ds;

		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: filling idx %d\n", __func__, next_to_fill);

		txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
		    next_to_fill);
		ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring,
		    next_to_fill);

		txd->m = NULL;
		/*
		 * Reset flags left over from a previous frame: the
		 * completion path only clears them on the slot that
		 * held the mbuf, so reused first/middle slots could
		 * otherwise carry a stale is_first/is_last.
		 */
		txd->is_first = 0;
		txd->is_last = 0;

		if (i == 0)
			txd->is_first = 1;
		if (i == (nsegs - 1)) {
			bus_dmamap_t dm;

			txd->is_last = 1;
			eop = EDMA_TPD_EOP;
			/*
			 * The mbuf and the loaded map travel together:
			 * swap the first slot's (loaded) map onto this
			 * last slot so completion frees both at once.
			 */
			txd->m = m;
			dm = txd_first->m_dmamap;
			txd_first->m_dmamap = txd->m_dmamap;
			txd->m_dmamap = dm;
		}

		ds->word1 = word1 | eop;
		ds->word3 = word3;
		ds->svlan_tag = svlan_tag;
		/* NOTE(review): assumes DMA addresses fit in 32 bits —
		 * presumably enforced by the buffer dma tag; confirm. */
		ds->addr = htole32(txsegs[i].ds_addr);
		ds->len = htole16(txsegs[i].ds_len);

		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
		    "%s: addr=0x%lx len=%ld eop=0x%x\n",
		    __func__,
		    txsegs[i].ds_addr,
		    txsegs[i].ds_len,
		    eop);

		next_to_fill++;
		if (next_to_fill >= ring->ring_count)
			next_to_fill = 0;
	}

	ring->stats.num_added += nsegs;
	ring->next_to_fill = next_to_fill;
	ring->stats.num_tx_ok++;

	return (0);
}
/*
 * Kick the hardware: publish the ring's producer index so the DMA
 * engine starts (or continues) fetching everything queued so far.
 *
 * The ring lock must be held.  Returns 0.
 */
int
qcom_ess_edma_tx_ring_frame_update(struct qcom_ess_edma_softc *sc, int queue)
{
	struct qcom_ess_edma_desc_ring *ring = &sc->sc_tx_ring[queue];

	EDMA_RING_LOCK_ASSERT(ring);

	/* Make the descriptor writes visible to the device first. */
	qcom_ess_edma_desc_ring_flush_preupdate(sc, ring);

	(void) qcom_ess_edma_hw_tx_update_tpd_prod_idx(sc, queue,
	    ring->next_to_fill);

	return (0);
}