#include "bpfilter.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <net/if_media.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <machine/bus.h>
#include <machine/intr.h>
#include <dev/mii/miivar.h>
#include <dev/mii/lxtphyreg.h>
#include <dev/ic/smc83c170reg.h>
#include <dev/ic/smc83c170var.h>
/*
 * Core driver for the SMC 83c170 ("EPIC/100") Ethernet controller:
 * internal function prototypes.  Bus front-ends call epic_attach()
 * and epic_intr(); everything below is private to this file.
 */
void epic_start(struct ifnet *);
void epic_watchdog(struct ifnet *);
int epic_ioctl(struct ifnet *, u_long, caddr_t);
int epic_init(struct ifnet *);
void epic_stop(struct ifnet *, int);
void epic_reset(struct epic_softc *);
void epic_rxdrain(struct epic_softc *);
int epic_add_rxbuf(struct epic_softc *, int);
void epic_read_eeprom(struct epic_softc *, int, int, u_int16_t *);
void epic_set_mchash(struct epic_softc *);
void epic_fixup_clock_source(struct epic_softc *);
int epic_mii_read(struct device *, int, int);
void epic_mii_write(struct device *, int, int, int);
int epic_mii_wait(struct epic_softc *, u_int32_t);
void epic_tick(void *);
void epic_statchg(struct device *);
int epic_mediachange(struct ifnet *);
void epic_mediastatus(struct ifnet *, struct ifmediareq *);

/* Autoconfiguration glue: "epic" devices are network interfaces. */
struct cfdriver epic_cd = {
	NULL, "epic", DV_IFNET
};

/* Interrupt sources serviced by epic_intr(). */
#define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
	INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

/*
 * Tunable: if non-zero, received packets that fit in a plain mbuf
 * are copied out so the cluster can be recycled (see epic_intr()).
 */
int epic_copy_small = 0;

/* Minimum Ethernet frame length, excluding the trailing CRC. */
#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
/*
 * epic_attach:
 *
 *	Attach an EPIC interface: allocate and map the DMA-able control
 *	data (descriptor rings, fragment lists, plus an always-zero pad
 *	buffer used to pad short TX frames), read the Ethernet address
 *	and device name from the EEPROM, attach the MII/media layer and
 *	register the network interface.
 *
 *	intrstr is a printable description of the established interrupt,
 *	used only in the attach message.
 *
 *	Fix: the fail_2 unwind path previously unmapped only
 *	sizeof(struct epic_control_data) even though the region was
 *	mapped with sizeof(struct epic_control_data) + ETHER_PAD_LEN;
 *	bus_dmamem_unmap() must be passed the same size as the
 *	bus_dmamem_map() call.
 */
void
epic_attach(struct epic_softc *sc, const char *intrstr)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rseg, error, miiflags;
	u_int i;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	timeout_set(&sc->sc_mii_timeout, epic_tick, sc);

	/*
	 * Allocate the control data structures and create/load their
	 * DMA map.  ETHER_PAD_LEN extra bytes at the end hold the
	 * zero pad buffer for runt TX frames.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf =
	    (char *)sc->sc_control_data + sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/* Create and load a DMA map for the zero pad buffer. */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		printf(": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_5;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	/* The pad buffer never changes; sync it once here. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Bring the chip out of low-power mode and reset it so the
	 * EEPROM interface is usable.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/* Read the Ethernet address (three 16-bit words) from the EEPROM. */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/* Read the space-padded marketing name from the EEPROM. */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate the name and strip trailing spaces. */
	devname[sizeof(devname) - 1] = ' ';
	for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
		devname[i] = '\0';
		if (i == 0)
			break;
	}

	printf(", %s : %s, address %s\n", devname, intrstr,
	    ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/* Initialize the media structures and probe the MII bus. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: offer only a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Add a 10base2 medium on its own instance if the board has BNC. */
	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
		    sc->sc_serinst),
		    0, NULL);
	} else
		sc->sc_serinst = -1;

	/* Fill in the ifnet and attach the interface. */
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	ifq_init_maxlen(&ifp->if_snd, EPIC_NTXDESC - 1);
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	/* Unmap the full region mapped above, including the pad buffer. */
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
/*
 * ifp->if_start handler: drain the interface send queue into the TX
 * descriptor ring and kick the transmitter.
 */
void
epic_start(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;
	u_int len;

	/*
	 * Remember the previous txpending count and the first free
	 * descriptor so we can tell below whether anything was queued.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/* Loop while there are packets and free TX descriptors. */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;
		m = NULL;

		/* Next free descriptor, its fragment list and soft state. */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the mbuf chain into the DMA map.  If that fails,
		 * or if the frame is short (will need the extra pad
		 * fragment below) but the map is already full, coalesce
		 * the chain into a single mbuf (cluster if necessary)
		 * and load that instead.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* No mbuf: leave the packet on the queue. */
				ifq_deq_rollback(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					ifq_deq_rollback(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				break;
			}
		}

		/* Committed: the packet will be transmitted. */
		ifq_deq_commit(&ifp->if_snd, m0);
		if (m != NULL) {
			/* Use the coalesced copy; free the original chain. */
			m_freem(m0);
			m0 = m;
		}

		/* Fill the fragment list from the DMA segments. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
		len = m0->m_pkthdr.len;
		if (len < ETHER_PAD_LEN) {
			/* Pad short frames with the always-zero buffer. */
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
			len = ETHER_PAD_LEN;
			seg++;
		}
		fr->ef_nfrags = seg;

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the packet data for the device. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		ds->ds_mbuf = m0;

		/*
		 * Set up the descriptor.  Ownership of the first
		 * descriptor of this batch is withheld until everything
		 * else is ready, then granted in one shot below.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		if (nexttx == firsttx)
			txd->et_txstatus = TXSTAT_TXLENGTH(len);
		else
			txd->et_txstatus =
			    TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the ring. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/* Hand the packet to any attached BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* Ring full; stall the queue until TX completions. */
		ifq_set_oactive(&ifp->if_snd);
	}

	if (sc->sc_txpending != opending) {
		/* First packet since the ring went idle starts the chain. */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/* Request an interrupt on the last descriptor we queued. */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Now hand ownership of the batch's first descriptor over. */
		EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Tell the chip there are packets waiting. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Arm the watchdog timer (seconds). */
		ifp->if_timer = 5;
	}
}
/*
 * ifp->if_watchdog handler: the transmitter failed to complete within
 * the timeout armed in epic_start().  Log it, count an output error,
 * and reinitialize the chip to recover.
 */
void
epic_watchdog(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void)epic_init(ifp);
}
/*
 * ifp->if_ioctl handler.  Runs at splnet().  ENETRESET from the
 * generic layers means the multicast filter (and media state) must be
 * reloaded if the interface is running.
 */
int
epic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int spl, rc = 0;

	spl = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		epic_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			epic_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			epic_stop(ifp, 1);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (rc == ENETRESET) {
		/* Refresh hardware state that depends on address lists. */
		if (ifp->if_flags & IFF_RUNNING) {
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
		}
		rc = 0;
	}

	splx(spl);
	return (rc);
}
/*
 * Interrupt handler.  Services receive completions/errors, transmit
 * completions/underruns, and fatal PCI errors.  Returns non-zero if
 * the interrupt belonged to this device.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

	/* Read the interrupt status and bail if it isn't ours. */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);
	claimed = 1;

	/* Acknowledge the sources we handle. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/* Receive: walk the RX ring from where we last stopped. */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/* Chip still owns this one; we're done. */
				break;
			}

			/* Count and recycle damaged frames. */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* Hardware length includes the CRC; strip it. */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			/* Drop runts and recycle the buffer. */
			if (len < sizeof(struct ether_header)) {
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * Small packets may optionally be copied into a
			 * plain mbuf so the cluster can be recycled in
			 * place.  Otherwise swap in a fresh cluster; if
			 * that fails, drop the packet and reuse the old
			 * buffer instead.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.len = m->m_len = len;
			ml_enqueue(&ml, m);
		}

		/* Remember where we stopped for next time. */
		sc->sc_rxptr = i;

		/*
		 * The receiver ran out of ready descriptors; restart it
		 * at the current ring position.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/* Pass everything we gathered up to the network stack. */
	if_input(ifp, &ml);

	/* Transmit: reclaim completed descriptors. */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifq_clr_oactive(&ifp->if_snd);
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		    i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER) {
				/* Chip hasn't finished this one yet. */
				break;
			}

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			/* Release the packet's DMA resources and mbuf. */
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/* Update statistics from the TX status bits. */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty pointer; cancel the watchdog if idle. */
		sc->sc_txdirty = i;
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/* Restart the transmitter after an underrun. */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/* Freed descriptors: try to queue more packets. */
		epic_start(ifp);
	}

	/* Fatal PCI errors: report and reinitialize the chip. */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	return (claimed);
}
/*
 * One-second timer callback: drive the MII state machine, then
 * rearm ourselves.
 */
void
epic_tick(void *arg)
{
	struct epic_softc *sc = arg;
	int spl;

	spl = splnet();
	mii_tick(&sc->sc_mii);
	splx(spl);

	timeout_add_sec(&sc->sc_mii_timeout, 1);
}
/*
 * Write the clock-test bit into the chip's test register a number of
 * times; presumably a workaround for a chip erratum to settle the
 * internal clock source (TODO: confirm against the 83c170 errata).
 */
void
epic_fixup_clock_source(struct epic_softc *sc)
{
	int n;

	for (n = 16; n > 0; n--)
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
		    TEST_CLOCKTEST);
}
/*
 * Soft-reset the chip: clear GENCTL, pulse the soft-reset bit, with
 * the clock-source fixup applied both before and after the sequence.
 */
void
epic_reset(struct epic_softc *sc)
{
	epic_fixup_clock_source(sc);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}
/*
 * ifp->if_init handler: reset and initialize the chip, program the
 * station address, RX mode and media, set up the TX/RX descriptor
 * rings, then enable interrupts and start the receiver.  Returns 0
 * on success or an errno if RX buffers could not be allocated.
 */
int
epic_init(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/* Cancel any pending I/O and reset to a known state. */
	epic_stop(ifp, 0);
	epic_reset(sc);

	/* Clear the chip's TX test register. */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Base general control value: one-copy RX and the RX FIFO
	 * threshold; big-endian descriptor format where needed.
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/* Pulse a PHY reset through GPIO1, then restore NVCTL. */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/* Program the station address: two bytes per LANn register. */
	reg0 = sc->sc_arpcom.ac_enaddr[1] << 8 | sc->sc_arpcom.ac_enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[3] << 8 | sc->sc_arpcom.ac_enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[5] << 8 | sc->sc_arpcom.ac_enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Receive control: preserve the external-buffer-size bits,
	 * accept multicast and broadcast, and honor IFF_PROMISC.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media and load the multicast hash table. */
	epic_mediachange(ifp);
	epic_set_mchash(sc);

	/* Initialize the circular transmit descriptor ring. */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/* Populate the receive ring, allocating buffers where needed. */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/* Free what we did allocate and bail. */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Enable interrupts and hand the rings to the chip. */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/* Start the receiver. */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/* Mark the interface up. */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Start the one-second MII tick. */
	timeout_add_sec(&sc->sc_mii_timeout, 1);

	/* Attempt to queue any pending output. */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
/*
 * Release every mbuf still attached to a receive descriptor, along
 * with its DMA mapping.
 */
void
epic_rxdrain(struct epic_softc *sc)
{
	struct epic_descsoft *rxs;
	int idx;

	for (idx = 0; idx < EPIC_NRXDESC; idx++) {
		rxs = EPIC_DSRX(sc, idx);
		if (rxs->ds_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sc_dmat, rxs->ds_dmamap);
		m_freem(rxs->ds_mbuf);
		rxs->ds_mbuf = NULL;
	}
}
/*
 * Stop transmission and reception: mask interrupts, halt the DMA
 * engines and the receiver, and free all queued transmit mbufs.
 * If `disable` is non-zero, the receive ring is drained as well.
 */
void
epic_stop(struct ifnet *ifp, int disable)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/* Stop the one-second MII tick. */
	timeout_del(&sc->sc_mii_timeout);

	/* Mark the interface down and cancel the watchdog. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Take the link down. */
	mii_down(&sc->sc_mii);

	epic_fixup_clock_source(sc);

	/* Disable interrupts both at GENCTL and the mask register. */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/* Halt both DMA engines and the receiver. */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/* Release any queued transmit buffers. */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);
}
/*
 * Read `wordcnt` 16-bit words starting at word offset `word` from the
 * serial EEPROM into `data`, bit-banging the EECTL interface: per
 * word, shift out a 3-bit READ opcode and a 6-bit address (both
 * MSB-first), then clock in 16 data bits MSB-first.
 */
void
epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, u_int16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	/* Spin until the EEPROM signals ready. */
#define EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);

	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Assert chip select. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift out the READ opcode, MSB first. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			/* Raise and drop the clock around the data bit. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clock in the 16 data bits, MSB first. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Deassert chip select between words. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/* Disable the EEPROM interface. */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
/*
 * Attach a fresh receive cluster to ring slot `idx`, map it for DMA
 * and (re)initialize the hardware descriptor.  Returns ENOBUFS when
 * no mbuf or cluster is available; in that case any previous buffer
 * in the slot is left untouched.
 */
int
epic_add_rxbuf(struct epic_softc *sc, int idx)
{
	struct epic_descsoft *rxs = EPIC_DSRX(sc, idx);
	struct mbuf *nm;
	int err;

	MGETHDR(nm, M_DONTWAIT, MT_DATA);
	if (nm == NULL)
		return (ENOBUFS);

	MCLGET(nm, M_DONTWAIT);
	if ((nm->m_flags & M_EXT) == 0) {
		m_freem(nm);
		return (ENOBUFS);
	}

	/* Tear down the old mapping before installing the new buffer. */
	if (rxs->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->ds_dmamap);
	rxs->ds_mbuf = nm;

	err = bus_dmamap_load(sc->sc_dmat, rxs->ds_dmamap,
	    nm->m_ext.ext_buf, nm->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* Loading a single cluster should never fail. */
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("epic_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->ds_dmamap, 0,
	    rxs->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}
/*
 * Program the 64-bit multicast hash filter (four 16-bit MC registers).
 * Promiscuous mode, a multicast address range, or 10baseT operation
 * (the latter presumably a hardware quirk — the original driver falls
 * back unconditionally here) all select all-multicast instead.
 */
void
epic_set_mchash(struct epic_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc, filter[4];

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;
	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T)
		goto allmulti;
	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	filter[0] = filter[1] = filter[2] = filter[3] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* The top 6 CRC bits select one of 64 filter bits. */
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		filter[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	filter[0] = filter[1] = filter[2] = filter[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, filter[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, filter[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, filter[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, filter[3]);
}
/*
 * Wait for the given MII management busy bit (`rw`) to clear.
 * Returns 0 on success, 1 (with a diagnostic) on timeout.
 */
int
epic_mii_wait(struct epic_softc *sc, u_int32_t rw)
{
	int tries;

	for (tries = 50; tries > 0; tries--) {
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) &
		    rw) == 0)
			return (0);
		delay(2);
	}

	printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
	return (1);
}
/*
 * MII readreg callback: read PHY register `reg` on PHY `phy` through
 * the chip's MII management interface.  Returns 0 on timeout.
 */
int
epic_mii_read(struct device *self, int phy, int reg)
{
	struct epic_softc *sc = (struct epic_softc *)self;

	/* Make sure no write cycle is still in progress. */
	if (epic_mii_wait(sc, MMCTL_WRITE))
		return (0);

	/* Start the read and wait for it to complete. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_READ));
	if (epic_mii_wait(sc, MMCTL_READ))
		return (0);

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
	    MMDATA_MASK);
}
/*
 * MII writereg callback: write `val` into PHY register `reg` on PHY
 * `phy`.  Silently gives up if the interface never goes idle.
 */
void
epic_mii_write(struct device *self, int phy, int reg, int val)
{
	struct epic_softc *sc = (struct epic_softc *)self;

	/* Make sure no write cycle is still in progress. */
	if (epic_mii_wait(sc, MMCTL_WRITE))
		return;

	/* Stage the data, then start the write cycle. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
}
/*
 * MII status-change callback: propagate the negotiated duplex into
 * the TX control register (and, on boards with the 694 duplex LED,
 * into MIICFG), then recompute the multicast filter, which depends
 * on the active media.
 */
void
epic_statchg(struct device *self)
{
	struct epic_softc *sc = (struct epic_softc *)self;
	u_int32_t reg;

	/* Full duplex enables the internal loopback-disable bits. */
	reg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	else
		reg &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, reg);

	/* Drive the duplex LED through MIICFG where the board needs it. */
	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
		reg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
			reg |= MIICFG_ENABLE;
		else
			reg &= ~MIICFG_ENABLE;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, reg);
	}

	epic_set_mchash(sc);
}
void
epic_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct epic_softc *sc = ifp->if_softc;
mii_pollstat(&sc->sc_mii);
ifmr->ifm_status = sc->sc_mii.mii_media_status;
ifmr->ifm_active = sc->sc_mii.mii_media_active;
}
/*
 * ifmedia change callback.  Switches the chip between the MII
 * ("parallel") interface and its internal serial interface (used for
 * the BNC instance, sc_serinst), programs the selected PHY, and on
 * fiber-capable LXT PHYs toggles the fiber transceiver config bits
 * according to the selected subtype.
 */
int
epic_mediachange(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	uint64_t media = ifm->ifm_cur->ifm_media;
	u_int32_t miicfg;
	struct mii_softc *miisc;
	int cfg;

	if (!(ifp->if_flags & IFF_UP))
		return (0);

	/* A non-BNC instance was selected: leave serial mode. */
	if (IFM_INST(media) != sc->sc_serinst) {
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	mii_mediachg(mii);

	/*
	 * The BNC port bypasses the MII entirely: enable serial mode
	 * and fake up the media state, then notify statchg by hand.
	 */
	if (IFM_INST(media) == sc->sc_serinst) {
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
		mii->mii_media_active = media;
		mii->mii_media_status = 0;
		epic_statchg(&sc->sc_dev);
		return (0);
	}

	/* Find the PHY that owns the selected media instance. */
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (!miisc) {
		printf("epic_mediachange: can't happen\n");
		return (0);
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	    miisc->mii_dev.dv_xname);
#endif

	/*
	 * On fiber-capable LXT PHYs, set or clear the LEDC config bits
	 * depending on whether 100baseFX was selected.
	 */
	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
	}

	return (0);
}