#include "bpfilter.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/if_bcereg.h>
#include <uvm/uvm.h>
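/* transmit/receive ring descriptor */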
struct bce_dma_slot {
u_int32_t ctrl; /* control bits and buffer byte count */
u_int32_t addr; /* bus address of the data buffer */
};
#define CTRL_BC_MASK 0x1fff /* buffer byte count */
#define CTRL_EOT 0x10000000 /* end of descriptor table */
#define CTRL_IOC 0x20000000 /* interrupt on completion */
#define CTRL_EOF 0x40000000 /* end of frame */
#define CTRL_SOF 0x80000000 /* start of frame */
#define BCE_RXBUF_LEN (MCLBYTES - 4) /* receive buffer size given in each rx descriptor */
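/* packet status is returned in a header prepended to each received packet */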
struct rx_pph {
u_int16_t len; /* packet length */
u_int16_t flags; /* status flags */
u_int16_t pad[12]; /* padding */
};
#define BCE_PREPKT_HEADER_SIZE 30
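/* pre-packet header status flag bits */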
#define RXF_NO 0x8 /* odd number of nibbles */
#define RXF_RXER 0x4 /* receive symbol error */
#define RXF_CRC 0x2 /* CRC error */
#define RXF_OV 0x1 /* FIFO overflow */
/* number of descriptors used in a ring */
#define BCE_NRXDESC 64
#define BCE_NTXDESC 64
#define BCE_TIMEOUT 100 /* # of 10us waits for MII operations */
struct bce_softc {
struct device bce_dev; /* base device */
bus_space_tag_t bce_btag; /* bus space tag */
bus_space_handle_t bce_bhandle; /* bus space handle */
bus_dma_tag_t bce_dmatag; /* bus DMA tag */
struct arpcom bce_ac; /* interface info */
void *bce_intrhand; /* interrupt handler cookie */
struct pci_attach_args bce_pa; /* saved PCI attach args */
struct mii_data bce_mii; /* MII media information */
u_int32_t bce_phy; /* PHY address */
struct bce_dma_slot *bce_rx_ring; /* receive ring */
struct bce_dma_slot *bce_tx_ring; /* transmit ring */
caddr_t bce_data; /* rx/tx data buffer area */
bus_dmamap_t bce_ring_map; /* DMA map for the rings */
bus_dmamap_t bce_rxdata_map; /* DMA map for rx data */
bus_dmamap_t bce_txdata_map; /* DMA map for tx data */
u_int32_t bce_intmask; /* current interrupt mask */
u_int32_t bce_rxin; /* last rx descriptor seen */
u_int32_t bce_txin; /* last tx descriptor seen */
int bce_txsfree; /* no. tx slots available */
int bce_txsnext; /* next available tx slot */
struct timeout bce_timeout; /* one-second tick */
};
int bce_probe(struct device *, void *, void *);
void bce_attach(struct device *, struct device *, void *);
int bce_activate(struct device *, int);
int bce_ioctl(struct ifnet *, u_long, caddr_t);
void bce_start(struct ifnet *);
void bce_watchdog(struct ifnet *);
int bce_intr(void *);
void bce_rxintr(struct bce_softc *);
void bce_txintr(struct bce_softc *);
int bce_init(struct ifnet *);
void bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
void bce_add_rxbuf(struct bce_softc *, int);
void bce_stop(struct ifnet *);
void bce_reset(struct bce_softc *);
void bce_iff(struct ifnet *);
int bce_mii_read(struct device *, int, int);
void bce_mii_write(struct device *, int, int, int);
void bce_statchg(struct device *);
int bce_mediachange(struct ifnet *);
void bce_mediastatus(struct ifnet *, struct ifmediareq *);
void bce_tick(void *);
#ifdef BCE_DEBUG
#define DPRINTF(x) do { \
if (bcedebug) \
printf x; \
} while (0)
#define DPRINTFN(n,x) do { \
if (bcedebug >= (n)) \
printf x; \
} while (0)
int bcedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
const struct cfattach bce_ca = {
sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
};
struct cfdriver bce_cd = {
NULL, "bce", DV_IFNET
};
const struct pci_matchid bce_devices[] = {
{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
};
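/* match against supported BCM440x devices */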
int
bce_probe(struct device *parent, void *match, void *aux)
{
return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
nitems(bce_devices)));
}
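/* attach: map registers and the interrupt, set up DMA areas, and hook into the network stack */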
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
struct bce_softc *sc = (struct bce_softc *) self;
struct pci_attach_args *pa = aux;
pci_chipset_tag_t pc = pa->pa_pc;
pci_intr_handle_t ih;
const char *intrstr = NULL;
caddr_t kva;
bus_dma_segment_t seg, dseg;
int rseg, drseg;
struct ifnet *ifp;
pcireg_t memtype;
bus_addr_t memaddr;
bus_size_t memsize;
int pmreg;
pcireg_t pmode;
int error;
sc->bce_pa = *pa;
sc->bce_dmatag = pa->pa_dmat;
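/* map the PCI memory window */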
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
&sc->bce_bhandle, &memaddr, &memsize, 0)) {
printf(": unable to find mem space\n");
return;
}
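/* get the chip out of powersave mode, if needed */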
if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
if (pmode == 3) {
printf(": unable to wake up from power state D3\n");
return;
}
if (pmode != 0) {
printf(": waking up from power state D%d\n",
pmode);
pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
}
}
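/* map and establish the interrupt */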
if (pci_intr_map(pa, &ih)) {
printf(": couldn't map interrupt\n");
return;
}
intrstr = pci_intr_string(pc, ih);
sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
self->dv_xname);
if (sc->bce_intrhand == NULL) {
printf(": couldn't establish interrupt");
if (intrstr != NULL)
printf(" at %s", intrstr);
printf("\n");
return;
}
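/* reset the chip to a known state */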
bce_reset(sc);
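/*
 * Allocate the rx/tx data buffers from the low 1GB of the address
 * space; the descriptor addresses handed to the chip are offset
 * by 0x40000000 below.
 */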
if ((error = bus_dmamem_alloc_range(sc->bce_dmatag,
(BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0, 0, &dseg, 1, &drseg,
BUS_DMA_NOWAIT, (bus_addr_t)0,
(bus_addr_t)(0x40000000 - 1))) != 0) {
printf(": unable to alloc space for data, error = %d", error);
return;
}
if ((error = bus_dmamem_map(sc->bce_dmatag, &dseg, drseg,
(BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, &sc->bce_data,
BUS_DMA_NOWAIT))) {
printf(": unable to map data, error = %d\n", error);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,
1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
&sc->bce_rxdata_map))) {
printf(": unable to create ring DMA map, error = %d\n", error);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
printf(": unable to load rx ring DMA map\n");
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,
1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
&sc->bce_txdata_map))) {
printf(": unable to create ring DMA map, error = %d\n", error);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,
sc->bce_data + BCE_NRXDESC * MCLBYTES,
BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
printf(": unable to load tx ring DMA map\n");
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
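/*
 * Allocate DMA-safe memory for the ring descriptors.
 * The receive and transmit rings cannot share the same 4k page,
 * so both pages are allocated at once here.
 */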
if ((error = bus_dmamem_alloc_range(sc->bce_dmatag, 2 * PAGE_SIZE,
PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT,
(bus_addr_t)0, (bus_addr_t)0x3fffffff))) {
printf(": unable to alloc space for ring descriptors, "
"error = %d\n", error);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
printf(": unable to map DMA buffers, error = %d\n", error);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
printf(": unable to create ring DMA map, error = %d\n", error);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
printf(": unable to load ring DMA map\n");
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
bus_dmamem_free(sc->bce_dmatag, &dseg, drseg);
return;
}
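/* set the virtual addresses of the two rings */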
sc->bce_rx_ring = (struct bce_dma_slot *)kva;
sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE);
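/* set up the ethernet interface */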
ifp = &sc->bce_ac.ac_if;
strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = bce_ioctl;
ifp->if_start = bce_start;
ifp->if_watchdog = bce_watchdog;
ifp->if_capabilities = IFCAP_VLAN_MTU;
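/* read the station address from the chip */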
sc->bce_ac.ac_enaddr[0] =
bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
sc->bce_ac.ac_enaddr[1] =
bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
sc->bce_ac.ac_enaddr[2] =
bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
sc->bce_ac.ac_enaddr[3] =
bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
sc->bce_ac.ac_enaddr[4] =
bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
sc->bce_ac.ac_enaddr[5] =
bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);
printf(": %s, address %s\n", intrstr,
ether_sprintf(sc->bce_ac.ac_enaddr));
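/* set up the MII and media information */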
sc->bce_mii.mii_ifp = ifp;
sc->bce_mii.mii_readreg = bce_mii_read;
sc->bce_mii.mii_writereg = bce_mii_write;
sc->bce_mii.mii_statchg = bce_statchg;
ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
bce_mediastatus);
mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
MII_OFFSET_ANY, 0);
if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
} else
ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
BCE_PHY) & 0x1f;
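/* enable the activity LED and traffic meter LED mode (magic PHY registers 26 and 27) */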
bce_mii_write((struct device *) sc, 1, 26,
bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);
bce_mii_write((struct device *) sc, 1, 27,
bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));
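/* attach the interface */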
if_attach(ifp);
ether_ifattach(ifp);
timeout_set(&sc->bce_timeout, bce_tick, sc);
}
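/* handle suspend/resume power state transitions */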
int
bce_activate(struct device *self, int act)
{
struct bce_softc *sc = (struct bce_softc *)self;
struct ifnet *ifp = &sc->bce_ac.ac_if;
switch (act) {
case DVACT_SUSPEND:
if (ifp->if_flags & IFF_RUNNING)
bce_stop(ifp);
break;
case DVACT_RESUME:
bce_reset(sc);
if (ifp->if_flags & IFF_UP) {
bce_init(ifp);
bce_start(ifp);
}
break;
}
return (0);
}
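/* ioctl handler */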
int
bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct bce_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
int s, error = 0;
s = splnet();
switch (cmd) {
case SIOCSIFADDR:
ifp->if_flags |= IFF_UP;
if (!(ifp->if_flags & IFF_RUNNING))
bce_init(ifp);
break;
case SIOCSIFFLAGS:
if (ifp->if_flags & IFF_UP) {
if (ifp->if_flags & IFF_RUNNING)
error = ENETRESET;
else
bce_init(ifp);
} else {
if (ifp->if_flags & IFF_RUNNING)
bce_stop(ifp);
}
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
break;
default:
error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
}
if (error == ENETRESET) {
if (ifp->if_flags & IFF_RUNNING)
bce_iff(ifp);
error = 0;
}
splx(s);
return error;
}
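/* start packet transmission on the interface */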
void
bce_start(struct ifnet *ifp)
{
struct bce_softc *sc = ifp->if_softc;
struct mbuf *m0;
u_int32_t ctrl;
int txstart;
int txsfree;
int newpkts = 0;
if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
return;
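/* count free tx slots; one slot is always kept empty to tell a full ring from an empty one */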
if (sc->bce_txsnext >= sc->bce_txin)
txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
else
txsfree = sc->bce_txin - sc->bce_txsnext - 1;
while (txsfree > 0) {
m0 = ifq_dequeue(&ifp->if_snd);
if (m0 == NULL)
break;
m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data +
(sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES);
ctrl = m0->m_pkthdr.len & CTRL_BC_MASK;
ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;
#if NBPFILTER > 0
if (ifp->if_bpf)
bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
m_freem(m0);
bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE);
txstart = sc->bce_txsnext;
if (sc->bce_txsnext == BCE_NTXDESC - 1)
ctrl |= CTRL_EOT;
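/* fill in the descriptor; the chip expects bus addresses offset by 0x40000000 */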
sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
sc->bce_tx_ring[sc->bce_txsnext].addr =
htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +
sc->bce_txsnext * MCLBYTES + 0x40000000);
if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
sc->bce_txsnext = 0;
else
sc->bce_txsnext++;
txsfree--;
bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
sizeof(struct bce_dma_slot),
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
sc->bce_txsnext * sizeof(struct bce_dma_slot));
newpkts++;
}
if (txsfree == 0) {
ifq_set_oactive(&ifp->if_snd);
}
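/* set a watchdog timer in case the chip flakes out */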
if (newpkts) {
ifp->if_timer = 5;
}
}
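/* watchdog handler: a transmission did not complete within the timeout */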
void
bce_watchdog(struct ifnet *ifp)
{
struct bce_softc *sc = ifp->if_softc;
printf("%s: device timeout\n", sc->bce_dev.dv_xname);
ifp->if_oerrors++;
(void) bce_init(ifp);
bce_start(ifp);
}
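/* interrupt handler */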
int
bce_intr(void *xsc)
{
struct bce_softc *sc;
struct ifnet *ifp;
u_int32_t intstatus;
int wantinit;
int handled = 0;
sc = xsc;
ifp = &sc->bce_ac.ac_if;
for (wantinit = 0; wantinit == 0;) {
intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_INT_STS);
intstatus &= sc->bce_intmask;
if (intstatus == 0)
break;
handled = 1;
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
intstatus);
if (intstatus & I_RI)
bce_rxintr(sc);
if (intstatus & I_XI)
bce_txintr(sc);
if (intstatus & ~(I_RI | I_XI)) {
if (intstatus & I_XU)
printf("%s: transmit fifo underflow\n",
sc->bce_dev.dv_xname);
if (intstatus & I_RO) {
printf("%s: receive fifo overflow\n",
sc->bce_dev.dv_xname);
ifp->if_ierrors++;
}
if (intstatus & I_RU)
printf("%s: receive descriptor underflow\n",
sc->bce_dev.dv_xname);
if (intstatus & I_DE)
printf("%s: descriptor protocol error\n",
sc->bce_dev.dv_xname);
if (intstatus & I_PD)
printf("%s: data error\n",
sc->bce_dev.dv_xname);
if (intstatus & I_PC)
printf("%s: descriptor error\n",
sc->bce_dev.dv_xname);
if (intstatus & I_TO)
printf("%s: general purpose timeout\n",
sc->bce_dev.dv_xname);
wantinit = 1;
}
}
if (handled) {
if (wantinit)
bce_init(ifp);
bce_start(ifp);
}
return (handled);
}
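/* receive interrupt: pass completed packets up the stack */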
void
bce_rxintr(struct bce_softc *sc)
{
struct ifnet *ifp = &sc->bce_ac.ac_if;
struct mbuf_list ml = MBUF_LIST_INITIALIZER();
struct rx_pph *pph;
struct mbuf *m;
int curr;
int len;
int i;
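/* get a pointer to the active receive slot */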
curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
& RS_CD_MASK;
curr = curr / sizeof(struct bce_dma_slot);
if (curr >= BCE_NRXDESC)
curr = BCE_NRXDESC - 1;
for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) {
bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,
i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD);
pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES);
if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
ifp->if_ierrors++;
pph->len = 0;
pph->flags = 0;
continue;
}
len = pph->len;
if (len == 0)
continue;
pph->len = 0;
pph->flags = 0;
len -= ETHER_CRC_LEN;
m = m_devget(sc->bce_data + i * MCLBYTES +
BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN);
if (m == NULL) {
ifp->if_ierrors++;
continue;
}
ml_enqueue(&ml, m);
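/* re-check the current descriptor, since the chip may have advanced */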
curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_DMA_RXSTATUS) & RS_CD_MASK) /
sizeof(struct bce_dma_slot);
if (curr >= BCE_NRXDESC)
curr = BCE_NRXDESC - 1;
}
if_input(ifp, &ml);
sc->bce_rxin = curr;
}
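/* transmit interrupt: reclaim descriptors the chip has finished with */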
void
bce_txintr(struct bce_softc *sc)
{
struct ifnet *ifp = &sc->bce_ac.ac_if;
int curr;
int i;
ifq_clr_oactive(&ifp->if_snd);
curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_DMA_TXSTATUS) & RS_CD_MASK;
curr = curr / sizeof(struct bce_dma_slot);
if (curr >= BCE_NTXDESC)
curr = BCE_NTXDESC - 1;
for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) {
bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE);
}
sc->bce_txin = curr;
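/* if all transmissions have completed, cancel the watchdog timer */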
if (sc->bce_txsnext == sc->bce_txin)
ifp->if_timer = 0;
}
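/* initialize the chip and bring the interface up */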
int
bce_init(struct ifnet *ifp)
{
struct bce_softc *sc = ifp->if_softc;
u_int32_t reg_win;
int i;
bce_stop(ifp);
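/* enable pci interrupts, bursts, and prefetch through the Sonics bus window */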
reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
BCE_REG_WIN);
pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
BCE_SONICS_WIN);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
SBIV_ENET0);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
SBTOPCI_PREF | SBTOPCI_BURST);
pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
reg_win);
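/* reset the chip to a known state */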
bce_reset(sc);
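/* initialize the transmit descriptor ring */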
memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
sc->bce_txsnext = 0;
sc->bce_txin = 0;
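/* enable crc32 generation, set the LED, and clear the powerdown bit */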
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
BCE_EMC_CRC32_ENAB | BCE_EMC_LED);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
~BCE_EMC_PDOWN);
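/* set up DMA interrupt control (magic value) */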
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24);
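/* program the receive filter */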
bce_iff(ifp);
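/* set max frame lengths, allowing for a VLAN tag */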
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
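/* set the tx watermark */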
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);
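/* enable the transmit engine and point it at the tx ring */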
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);
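/* initialize the receive side: status header size, ring address and buffers */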
sc->bce_rxin = 0;
memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);
for (i = 0; i < BCE_NRXDESC; i++)
bce_add_rxbuf(sc, i);
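/* enable the interrupts we are interested in */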
sc->bce_intmask =
I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
sc->bce_intmask);
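/* tell the chip the rx descriptor ring is ready */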
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
BCE_NRXDESC * sizeof(struct bce_dma_slot));
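/* set the current media and turn on the MAC */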
mii_mediachg(&sc->bce_mii);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_ENET_CTL) | EC_EE);
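/* start the one-second MII tick */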
timeout_add_sec(&sc->bce_timeout, 1);
ifp->if_flags |= IFF_RUNNING;
ifq_clr_oactive(&ifp->if_snd);
return 0;
}
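/* add a MAC address to the chip's CAM packet filter at the given index */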
void
bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
{
int i;
u_int32_t rval;
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
mac[0] << 8 | mac[1] | 0x10000);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
idx << 16 | 8);
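/* wait for the filter write to complete (busy bit clears) */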
for (i = 0; i < 100; i++) {
rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_FILT_CTL);
if (!(rval & 0x80000000))
break;
delay(10);
}
if (i == 100) {
printf("%s: timed out writing pkt filter ctl\n",
sc->bce_dev.dv_xname);
}
}
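/* hand a receive buffer in the data area over to the chip */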
void
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];
bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,
MCLBYTES, BUS_DMASYNC_PREREAD);
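/* clear the stale pre-packet header before giving the buffer to the chip */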
*(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0;
bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +
idx * MCLBYTES + 0x40000000);
if (idx != (BCE_NRXDESC - 1))
bced->ctrl = htole32(BCE_RXBUF_LEN);
else
bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);
bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
sizeof(struct bce_dma_slot) * idx,
sizeof(struct bce_dma_slot),
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
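/* stop the interface: disable interrupts and halt the MAC and DMA engines */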
void
bce_stop(struct ifnet *ifp)
{
struct bce_softc *sc = ifp->if_softc;
int i;
u_int32_t val;
timeout_del(&sc->bce_timeout);
ifp->if_flags &= ~IFF_RUNNING;
ifq_clr_oactive(&ifp->if_snd);
ifp->if_timer = 0;
mii_down(&sc->bce_mii);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
sc->bce_intmask = 0;
delay(10);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
for (i = 0; i < 200; i++) {
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_ENET_CTL);
if (!(val & EC_ED))
break;
delay(10);
}
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
delay(10);
}
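/* reset the chip's ethernet core through the Sonics backplane registers */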
void
bce_reset(struct bce_softc *sc)
{
u_int32_t val;
u_int32_t sbval;
int i;
sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBTMSTATELOW);
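/* if the core is up and running, stop it cleanly before resetting */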
if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
0);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
EC_ED);
for (i = 0; i < 200; i++) {
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_ENET_CTL);
if (!(val & EC_ED))
break;
delay(10);
}
if (i == 200)
printf("%s: timed out disabling ethernet mac\n",
sc->bce_dev.dv_xname);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
0);
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_DMA_RXSTATUS);
if (val & RS_ERROR) {
for (i = 0; i < 100; i++) {
val = bus_space_read_4(sc->bce_btag,
sc->bce_bhandle, BCE_DMA_RXSTATUS);
if (val & RS_DMA_IDLE)
break;
delay(10);
}
if (i == 100)
printf("%s: receive dma did not go idle after"
" error\n", sc->bce_dev.dv_xname);
}
bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
BCE_DMA_RXSTATUS, 0);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
EC_ES);
for (i = 0; i < 200; i++) {
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_ENET_CTL);
if (!(val & EC_ES))
break;
delay(10);
}
if (i == 200)
printf("%s: timed out resetting ethernet mac\n",
sc->bce_dev.dv_xname);
} else {
u_int32_t reg_win;
reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
BCE_REG_WIN);
pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
BCE_REG_WIN, BCE_SONICS_WIN);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBINTVEC) | SBIV_ENET0);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);
pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
reg_win);
}
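/* if the core is not already held in reset, request reject and wait for it to go idle */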
if (!(sbval & SBTML_RESET)) {
bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
for (i = 0; i < 200; i++) {
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBTMSTATELOW);
if (val & SBTML_REJ)
break;
delay(1);
}
if (i == 200)
printf("%s: while resetting core, reject did not set\n",
sc->bce_dev.dv_xname);
for (i = 0; i < 200; i++) {
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBTMSTATEHI);
if (!(val & 0x4))
break;
delay(1);
}
if (i == 200)
printf("%s: while resetting core, busy did not clear\n",
sc->bce_dev.dv_xname);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBTMSTATELOW,
SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBTMSTATELOW);
delay(10);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
delay(1);
}
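/* enable the clock, with forced gating, while the core is still held in reset */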
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
SBTML_FGC | SBTML_CLK | SBTML_RESET);
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
delay(1);
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
if (val & 1)
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
0);
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
if (val & SBIM_ERRORBITS)
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
val & ~SBIM_ERRORBITS);
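/* clear reset and allow it to propagate through the core */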
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
SBTML_FGC | SBTML_CLK);
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
delay(1);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
SBTML_CLK);
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
delay(1);
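/* set the MDC preamble and frequency (magic value) */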
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);
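/* enable the PHY; internal and external PHYs are handled differently */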
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
if (!(val & BCE_DC_IP)) {
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
EC_EP);
} else if (val & BCE_DC_ER) {
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
val & ~BCE_DC_ER);
delay(100);
}
}
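/* set up the receive filter */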
void
bce_iff(struct ifnet *ifp)
{
struct bce_softc *sc = ifp->if_softc;
struct arpcom *ac = &sc->bce_ac;
u_int32_t rxctl;
rxctl = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL);
rxctl &= ~(ERC_AM | ERC_DB | ERC_PE);
ifp->if_flags &= ~IFF_ALLMULTI;
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 0);
bce_add_mac(sc, ac->ac_enaddr, 0);
if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
ifp->if_flags |= IFF_ALLMULTI;
if (ifp->if_flags & IFF_PROMISC)
rxctl |= ERC_PE;
else
rxctl |= ERC_AM;
}
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, rxctl);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL) | 1);
}
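/* read a PHY register via the MII */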
int
bce_mii_read(struct device *self, int phy, int reg)
{
struct bce_softc *sc = (struct bce_softc *) self;
int i;
u_int32_t val;
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
BCE_MIINTR);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
(MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |
(MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));
for (i = 0; i < BCE_TIMEOUT; i++) {
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_MI_STS);
if (val & BCE_MIINTR)
break;
delay(10);
}
val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
if (i == BCE_TIMEOUT) {
printf("%s: PHY read timed out reading phy %d, reg %d, val = "
"0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
return (0);
}
return (val & BCE_MICOMM_DATA);
}
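/* write a PHY register via the MII */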
void
bce_mii_write(struct device *self, int phy, int reg, int val)
{
struct bce_softc *sc = (struct bce_softc *) self;
int i;
u_int32_t rval;
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
BCE_MIINTR);
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
(MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |
(MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |
BCE_MIPHY(phy) | BCE_MIREG(reg));
for (i = 0; i < BCE_TIMEOUT; i++) {
rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
BCE_MI_STS);
if (rval & BCE_MIINTR)
break;
delay(10);
}
rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
if (i == BCE_TIMEOUT) {
printf("%s: PHY timed out writing phy %d, reg %d, val "
"= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
}
}
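/* media status change callback: keep the MAC duplex setting in sync with the PHY */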
void
bce_statchg(struct device *self)
{
struct bce_softc *sc = (struct bce_softc *) self;
u_int32_t reg;
reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
reg | EXC_FD);
else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
reg & ~EXC_FD);
/* enable activity LED (magic PHY register 26) */
bce_mii_write((struct device *) sc, 1, 26,
bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);
/* enable traffic meter LED mode (magic PHY register 27) */
bce_mii_write((struct device *) sc, 1, 27,
bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));
}
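/* set hardware to the newly-selected media */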
int
bce_mediachange(struct ifnet *ifp)
{
struct bce_softc *sc = ifp->if_softc;
if (ifp->if_flags & IFF_UP)
mii_mediachg(&sc->bce_mii);
return (0);
}
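/* report the current media status */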
void
bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct bce_softc *sc = ifp->if_softc;
mii_pollstat(&sc->bce_mii);
ifmr->ifm_active = sc->bce_mii.mii_media_active;
ifmr->ifm_status = sc->bce_mii.mii_media_status;
}
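/* one-second timer: drive the MII state machine */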
void
bce_tick(void *v)
{
struct bce_softc *sc = v;
int s;
s = splnet();
mii_tick(&sc->bce_mii);
splx(s);
timeout_add_sec(&sc->bce_timeout, 1);
}