#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include <sys/ethernet.h>
#include <sys/pci.h>
#include "sfe_mii.h"
#include "sfe_util.h"
#include "sfereg.h"
char ident[] = "sis900/dp83815 driver v" "2.6.1t30os";
#ifdef DEBUG_LEVEL
static int sfe_debug = DEBUG_LEVEL;
#if DEBUG_LEVEL > 4
#define	CONS	"^"
#else
#define	CONS	"!"
#endif
/*
 * Debug printf.  Wrapped in do { } while (0) so that DPRINTF() is a
 * single statement: the original bare "if" form silently captures the
 * "else" of an enclosing unbraced if/else (dangling-else hazard).
 */
#define	DPRINTF(n, args)	do {		\
	if (sfe_debug > (n)) {			\
		cmn_err args;			\
	}					\
} while (0)
#else
#define	CONS	"!"
#define	DPRINTF(n, args)	do { } while (0)
#endif
/* one second expressed in system clock ticks */
#define	ONESEC	(drv_usectohz(1*1000000))

/* round x up to the next multiple of a (a must be a power of two) */
#define	ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* one DMA fragment per packet: each descriptor carries a single buffer */
#define	MAXTXFRAGS	1
#define	MAXRXFRAGS	1

/* number of tx buffers (compile-time tunable) */
#ifndef TX_BUF_SIZE
#define	TX_BUF_SIZE	64
#endif

/*
 * Tx descriptor ring size: one descriptor per buffer for
 * single-fragment packets, otherwise room for several fragments each.
 */
#ifndef TX_RING_SIZE
#if MAXTXFRAGS == 1
#define	TX_RING_SIZE	TX_BUF_SIZE
#else
#define	TX_RING_SIZE	(TX_BUF_SIZE * 4)
#endif
#endif

/* number of rx buffers and rx descriptors (one descriptor per buffer) */
#ifndef RX_BUF_SIZE
#define	RX_BUF_SIZE	256
#endif
#ifndef RX_RING_SIZE
#define	RX_RING_SIZE	RX_BUF_SIZE
#endif

/* the set of interrupt causes this driver services (see sfe_interrupt()) */
#define	OUR_INTR_BITS \
	(ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR | \
	ISR_TXURN | ISR_TXDESC | ISR_TXERR | \
	ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)

/* use the hardware multicast hash table in addition to perfect match */
#define	USE_MULTICAST_HASHTBL

/*
 * Copy-vs-bind thresholds in bytes; presumably consumed by the gem
 * framework to decide between bcopy and DMA binding -- TODO confirm.
 */
static int	sfe_tx_copy_thresh = 256;
static int	sfe_rx_copy_thresh = 256;

/* vendor-specific PHY registers (SiS900 internal PHY / ICS1893) */
#define	MII_CONFIG1	0x0010
#define	MII_CONFIG2	0x0011
#define	MII_MASK	0x0013
#define	MII_RESV	0x0014

/* mask off the revision nibble when comparing PHY ids */
#define	PHY_MASK		0xfffffff0
#define	PHY_SIS900_INTERNAL	0x001d8000
#define	PHY_ICS1893		0x0015f440

/* size of one hardware descriptor (struct sfe_desc) in bytes */
#define	SFE_DESC_SIZE	16
/*
 * Per-model identification record: PCI ids, a human readable name, and
 * the register-programming family the device belongs to.
 */
struct chip_info {
	uint16_t	venid;		/* PCI vendor id */
	uint16_t	devid;		/* PCI device id */
	char		*chip_name;	/* name used in messages */
	int		chip_type;	/* one of CHIPTYPE_* below */
#define	CHIPTYPE_DP83815	0
#define	CHIPTYPE_SIS900		1
};
/*
 * Per-instance private state, hung off gem_dev.private.
 */
struct sfe_dev {
	struct chip_info	*chip;		/* matched sfe_chiptbl[] entry */
	uint32_t	our_intr_bits;		/* ISR bits currently enabled */
	uint32_t	isr_pended;		/* ISR bits latched while masked */
	uint32_t	cr;			/* soft copy of extra CR bits */
	uint_t		tx_drain_threshold;	/* tx fifo drain, in bytes */
	uint_t		tx_fill_threshold;	/* tx fifo fill, in bytes */
	uint_t		rx_drain_threshold;	/* rx fifo drain, in bytes */
	uint_t		rx_fill_threshold;	/* rx fifo fill, in bytes */
	uint8_t		revid;			/* PCI revision id */
	/* chip-specific routine to extract the factory MAC address */
	boolean_t	(*get_mac_addr)(struct gem_dev *);
	/* last MAC address programmed into the rx filter (cache) */
	uint8_t		mac_addr[ETHERADDRL];
	/* south bridge revision, used by the SiS630 equalizer workaround */
	uint8_t		bridge_revid;
};
/*
 * Devices supported by this driver, keyed by PCI vendor/device id.
 */
struct chip_info sfe_chiptbl[] = {
	{ 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
	{ 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
	{ 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
};
#define	CHIPTABLESIZE	(sizeof (sfe_chiptbl)/sizeof (struct chip_info))
static void sfe_mii_sync_dp83815(struct gem_dev *);
static void sfe_mii_sync_sis900(struct gem_dev *);
static uint16_t sfe_mii_read_dp83815(struct gem_dev *, uint_t);
static uint16_t sfe_mii_read_sis900(struct gem_dev *, uint_t);
static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
static void sfe_set_eq_sis630(struct gem_dev *);
static int sfe_reset_chip_sis900(struct gem_dev *);
static int sfe_reset_chip_dp83815(struct gem_dev *);
static int sfe_init_chip(struct gem_dev *);
static int sfe_start_chip(struct gem_dev *);
static int sfe_stop_chip(struct gem_dev *);
static int sfe_set_media(struct gem_dev *);
static int sfe_set_rx_filter_dp83815(struct gem_dev *);
static int sfe_set_rx_filter_sis900(struct gem_dev *);
static int sfe_get_stats(struct gem_dev *);
static int sfe_attach_chip(struct gem_dev *);
static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
ddi_dma_cookie_t *dmacookie, int frags);
static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
static uint_t sfe_interrupt(struct gem_dev *dp);
/* register access attributes: chip registers are little-endian */
static struct ddi_device_acc_attr sfe_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* packet buffer access attributes: raw bytes, never byte-swapped */
static struct ddi_device_acc_attr sfe_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
/* DMA attributes for packet buffers (members per ddi_dma_attr(9S)) */
static ddi_dma_attr_t sfe_dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi: 32bit addressing only */
	0x00000fffull,		/* dma_attr_count_max */
	0,			/* dma_attr_align */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00000fffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	0,			/* dma_attr_sgllen: 0 here -- presumably
				 * filled in before use; TODO confirm */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/* DMA attributes for descriptor rings: 16-byte aligned, 32bit space */
static ddi_dma_attr_t sfe_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	16,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0xffffffffull,		/* dma_attr_count_max */
	16,			/* dma_attr_align: descriptor size */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen: single cookie */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
uint32_t sfe_use_pcimemspace = 0;
/*
 * Burn time between EEPROM clock edges by performing two dummy reads
 * of EROMAR (which also flushes any posted write).  do { } while (0)
 * keeps the macro a single statement, safe in unbraced if/else bodies,
 * where the original brace-block form would leave a stray ";".
 */
#define	SFE_EEPROM_DELAY(dp)	\
	do { (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); } while (0)
#define EE_CMD_READ 6
#define EE_CMD_SHIFT 6
/*
 * Read one 16-bit word from the serial EEPROM by bit-banging EROMAR.
 *
 * The sequence is: reset the interface, clock out a 9-bit command
 * (3-bit READ opcode 110b in the top bits, 6-bit word offset below),
 * then clock in the 16 data bits, MSB first.
 *
 * dp:	   target device
 * offset: EEPROM word index (not a byte offset)
 * Returns the word read.
 */
static uint16_t
sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
{
	int eedi;
	int i;
	uint16_t ret;

	/* reset the EEPROM interface: one clock with chip select low */
	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);
	OUTL(dp, EROMAR, EROMAR_EESK);
	SFE_EEPROM_DELAY(dp);

	/* prepend the READ opcode to the word offset */
	offset |= EE_CMD_READ << EE_CMD_SHIFT;

	/* shift the 9 command bits out, MSB first, one per clock */
	for (i = 8; i >= 0; i--) {
		eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;
		OUTL(dp, EROMAR, EROMAR_EECS | eedi);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);
	}
	OUTL(dp, EROMAR, EROMAR_EECS);

	/* shift the 16 data bits in, MSB first */
	ret = 0;
	for (i = 0; i < 16; i++) {
		OUTL(dp, EROMAR, EROMAR_EECS);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);
		ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
	}

	/* de-assert chip select */
	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);
	return (ret);
}
#undef SFE_EEPROM_DELAY
/*
 * Extract the factory MAC address of a DP83815 from the EEPROM.
 *
 * The 48 address bits are packed across EEPROM words 6..9 with a
 * one-bit offset: one bit from word 6, sixteen bits each from words
 * 7 and 8, and the final fifteen from word 9, each word contributing
 * its bits MSB first.  The result is assembled bit-by-bit into
 * dp->dev_addr.
 *
 * Always returns B_TRUE.
 */
static boolean_t
sfe_get_mac_addr_dp83815(struct gem_dev *dp)
{
	uint8_t *mac;
	uint_t val;
	int i;

/* set bit ix of byte array p when v is non-zero */
#define	BITSET(p, ix, v)	(p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	mac = dp->dev_addr.ether_addr_octet;
	bzero(mac, ETHERADDRL);

	/* mac[0] bit 0: the last bit of eeprom word 6 */
	val = sfe_read_eeprom(dp, 0x6);
	BITSET(mac, 0, val & 1);

	/* address bits 1..16 come from word 7 */
	val = sfe_read_eeprom(dp, 0x7);
	for (i = 0; i < 16; i++) {
		BITSET(mac, 1 + i, val & (1 << (15 - i)));
	}

	/* address bits 17..32 come from word 8 */
	val = sfe_read_eeprom(dp, 0x8);
	for (i = 0; i < 16; i++) {
		BITSET(mac, 17 + i, val & (1 << (15 - i)));
	}

	/* address bits 33..47 come from the top 15 bits of word 9 */
	val = sfe_read_eeprom(dp, 0x9);
	for (i = 0; i < 15; i++) {
		BITSET(mac, 33 + i, val & (1 << (15 - i)));
	}
	return (B_TRUE);
#undef BITSET
}
static boolean_t
sfe_get_mac_addr_sis900(struct gem_dev *dp)
{
uint_t val;
int i;
uint8_t *mac;
mac = dp->dev_addr.ether_addr_octet;
for (i = 0; i < ETHERADDRL/2; i++) {
val = sfe_read_eeprom(dp, 0x8 + i);
*mac++ = (uint8_t)val;
*mac++ = (uint8_t)(val >> 8);
}
return (B_TRUE);
}
/*
 * Depth-first search of the device subtree rooted at cur_node (and its
 * following siblings) for a node whose "vendor-id"/"device-id"
 * properties match the given pair.  Returns the matching node, or
 * NULL when none is found.
 */
static dev_info_t *
sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
{
	for (; cur_node != NULL; cur_node = ddi_get_next_sibling(cur_node)) {
		int	vid;
		int	did;
		dev_info_t	*child;

		vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
		    DDI_PROP_DONTPASS, "vendor-id", -1);
		did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
		    DDI_PROP_DONTPASS, "device-id", -1);

		/* does this node itself match? */
		if (vid == vendor_id && did == device_id) {
			return (cur_node);
		}

		/* otherwise recurse into its children */
		child = ddi_get_child(cur_node);
		if (child != NULL) {
			dev_info_t	*found;

			found = sfe_search_pci_dev_subr(child,
			    vendor_id, device_id);
			if (found != NULL) {
				return (found);
			}
		}
	}

	return (NULL);
}
/*
 * Find the first node in the entire device tree matching the given
 * PCI vendor/device id pair.
 */
static dev_info_t *
sfe_search_pci_dev(int vendor_id, int device_id)
{
	return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
}
/*
 * Extract the factory MAC address on SiS962.  The EEPROM is shared,
 * so first request ownership (EEREQ) and poll for the grant (EEGNT)
 * for up to ~2ms; on grant, read the address the same way as on
 * SiS900.  Ownership is always released (EEDONE), even on failure.
 *
 * Returns B_TRUE on success, B_FALSE when the grant times out.
 */
static boolean_t
sfe_get_mac_addr_sis962(struct gem_dev *dp)
{
	boolean_t ret;
	int i;

	ret = B_FALSE;

	/* request access to the shared EEPROM */
	OUTL(dp, MEAR, EROMAR_EEREQ);
	for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
		if (i > 200) {
			/* timeout: give up without touching the EEPROM */
			cmn_err(CE_NOTE,
			    CONS "%s: failed to access eeprom", dp->name);
			goto x;
		}
		drv_usecwait(10);
	}
	ret = sfe_get_mac_addr_sis900(dp);
x:
	/* always release the EEPROM */
	OUTL(dp, MEAR, EROMAR_EEDONE);
	return (ret);
}
/*
 * Reset the SiS900-family MAC.
 *
 * Masks all interrupts (latching any already-raised causes into
 * lp->isr_pended so they are not lost), disables the rx filter,
 * issues a combined software/tx/rx reset, and waits (up to ~10ms)
 * until the chip reports both tx and rx reset completion in ISR.
 * Revision-specific CFG fixups are applied afterwards.
 *
 * Returns GEM_SUCCESS, or GEM_FAILURE if the reset never completes.
 */
static int
sfe_reset_chip_sis900(struct gem_dev *dp)
{
	int i;
	uint32_t done;
	uint32_t val;
	struct sfe_dev *lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate the cached rx-filter station address */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* mask and latch pending interrupts */
	OUTL(dp, IMR, 0);
	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;

	/* disable the rx filter before resetting */
	OUTLINL(dp, RFCR, 0);

	/* software reset plus tx/rx logic reset */
	OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
	drv_usecwait(10);

	/* wait until both reset-complete bits have been seen */
	done = 0;
	for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
		if (i > 1000) {
			cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
		drv_usecwait(10);
	}

	/* SiS630E-T needs the alternate register access mode */
	if (lp->revid == SIS630ET_900_REV) {
		lp->cr |= CR_ACCESSMODE;
		OUTL(dp, CR, lp->cr | INL(dp, CR));
	}

	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_SIS900));

	/* newer revisions support/require the rnd-count workaround bit */
	val = 0;
	if (lp->revid >= SIS635A_900_REV ||
	    lp->revid == SIS900B_900_REV) {
		val |= CFG_RND_CNT;
	}
	OUTL(dp, CFG, val);

	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_SIS900));

	return (GEM_SUCCESS);
}
/*
 * Reset the DP83815 MAC.
 *
 * Masks all interrupts (latching raised causes into lp->isr_pended),
 * disables the rx filter, issues CR_RST and polls (up to ~1ms) for it
 * to self-clear.  Then clears any power-management event status and
 * rebuilds CFG, preserving only the autoneg/strap bits and enabling
 * pause advertisement.
 *
 * Returns GEM_SUCCESS, or GEM_FAILURE if CR_RST never clears.
 */
static int
sfe_reset_chip_dp83815(struct gem_dev *dp)
{
	int i;
	uint32_t val;
	struct sfe_dev *lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate the cached rx-filter station address */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* mask and latch pending interrupts */
	OUTL(dp, IMR, 0);
	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;

	/* disable the rx filter and reset the chip */
	OUTL(dp, RFCR, 0);
	OUTL(dp, CR, CR_RST);
	drv_usecwait(10);

	/* CR_RST self-clears when the reset is done */
	for (i = 0; INL(dp, CR) & CR_RST; i++) {
		if (i > 100) {
			cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		drv_usecwait(10);
	}
	DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));

	/* acknowledge and clear wake/PME status */
	OUTL(dp, CCSR, CCSR_PMESTS);
	OUTL(dp, CCSR, 0);

	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_DP83815));

	/* keep only strap/autoneg selection, advertise pause */
	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_PAUSE_ADV);

	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_DP83815));

	return (GEM_SUCCESS);
}
/*
 * Per-start hardware initialization: interrupts stay masked while
 * the tx and rx descriptor ring base addresses are loaded.
 * Interrupts are enabled later, in sfe_start_chip().
 */
static int
sfe_init_chip(struct gem_dev *dp)
{
	OUTL(dp, IMR, 0);
	OUTL(dp, TXDP, dp->tx_ring_dma);
	OUTL(dp, RXDP, dp->rx_ring_dma);
	return (GEM_SUCCESS);
}
/*
 * Multicast hash: big-endian CRC32 of the station address.  Callers
 * use the upper bits of the result to index the hardware hash table
 * (see the rx filter routines).
 */
static uint_t
sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
{
	return (gem_ether_crc_be(addr, ETHERADDRL));
}
#ifdef DEBUG_LEVEL
/*
 * Debug helper: dump the DP83815 receive-filter RAM from byte offset
 * `start` up to `end`, four 16-bit words per line.  Reads go through
 * the RFCR (address) / RFDR (data) indirection registers.
 */
static void
sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
{
	int i;
	int j;
	uint16_t ram[0x10];

	cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
#define	WORDS_PER_LINE	4
	for (i = start; i < end; i += WORDS_PER_LINE*2) {
		for (j = 0; j < WORDS_PER_LINE; j++) {
			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
			ram[j] = INL(dp, RFDR);
		}
		cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
		    i, ram[0], ram[1], ram[2], ram[3]);
	}
#undef WORDS_PER_LINE
}
#endif
/*
 * Receive-filter RAM base offsets of the four DP83815 perfect-match
 * pattern buffers, indexed by pattern number.
 */
static uint_t sfe_rf_perfect_base_dp83815[] = {
	RFADDR_PMATCH0_DP83815,
	RFADDR_PMATCH1_DP83815,
	RFADDR_PMATCH2_DP83815,
	RFADDR_PMATCH3_DP83815,
};
/*
 * Program the DP83815 receive filter from dp->rxmode and the
 * multicast list.
 *
 * Mode selection:
 *  - rx disabled:   filter off entirely
 *  - promiscuous:   accept all broadcast/multicast/physical
 *  - allmulti, or more groups than half the 512-bit hash holds:
 *                   accept all multicast
 *  - more than 4 groups: use the 512-bit multicast hash table
 *  - otherwise:     use up to 4 perfect-match pattern buffers
 *
 * The station address is only rewritten when it differs from the
 * cached copy in lp->mac_addr.  Always returns GEM_SUCCESS.
 */
static int
sfe_set_rx_filter_dp83815(struct gem_dev *dp)
{
	int i;
	int j;
	uint32_t mode;
	uint8_t *mac = dp->cur_addr.ether_addr_octet;
	uint16_t hash_tbl[32];
	struct sfe_dev *lp = dp->private;

	DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
	    dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));

#if DEBUG_LEVEL > 0
	for (i = 0; i < dp->mc_count; i++) {
		cmn_err(CE_CONT,
		    "!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
		    dp->name, i,
		    dp->mc_list[i].addr.ether_addr_octet[0],
		    dp->mc_list[i].addr.ether_addr_octet[1],
		    dp->mc_list[i].addr.ether_addr_octet[2],
		    dp->mc_list[i].addr.ether_addr_octet[3],
		    dp->mc_list[i].addr.ether_addr_octet[4],
		    dp->mc_list[i].addr.ether_addr_octet[5]);
	}
#endif
	/* receive disabled: turn the filter off and we are done */
	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		OUTL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	if (dp->rxmode & RXMODE_PROMISC) {
		/* promiscuous: accept everything */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
		/* all-multicast plus our own unicast address */
		mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
	} else if (dp->mc_count > 4) {
		/* multicast hash table: 32 words x 16 bits = 512 bins */
		mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;
		bzero(hash_tbl, sizeof (hash_tbl));
		for (i = 0; i < dp->mc_count; i++) {
			/* top 9 crc bits select one of the 512 bins */
			j = dp->mc_list[i].hash >> (32 - 9);
			hash_tbl[j / 16] |= 1 << (j % 16);
		}
	} else {
		/* one pattern-enable bit per multicast address */
		mode = RFCR_AAB | RFCR_APM_DP83815 |
		    (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
	}
#if DEBUG_LEVEL > 1
	cmn_err(CE_CONT,
	    "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
	    " cache %02x:%02x:%02x:%02x:%02x:%02x",
	    dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
	    lp->mac_addr[0], lp->mac_addr[1],
	    lp->mac_addr[2], lp->mac_addr[3],
	    lp->mac_addr[4], lp->mac_addr[5]);
#endif
	/* rewrite the station address only when it actually changed */
	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		for (i = 0; i < ETHERADDRL; i += 2) {
			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
			OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
		}
		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}
#if DEBUG_LEVEL > 3
	/* debug: wipe the pattern/hash region of the filter RAM */
	for (j = 0x200; j < 0x380; j += 2) {
		OUTL(dp, RFCR, j);
		OUTL(dp, RFDR, 0);
	}
#endif
	if (mode & RFCR_APAT_DP83815) {
		/* load the perfect-match patterns and their byte counts */
		for (j = 0; j < dp->mc_count; j++) {
			mac = &dp->mc_list[j].addr.ether_addr_octet[0];
			for (i = 0; i < ETHERADDRL; i += 2) {
				OUTL(dp, RFCR,
				    sfe_rf_perfect_base_dp83815[j] + i*2);
				OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
			}
		}
		OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
		OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
	}
	if (mode & RFCR_MHEN_DP83815) {
		/* load the 32-word multicast hash table */
		for (i = 0; i < 32; i++) {
			OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
			OUTL(dp, RFDR, hash_tbl[i]);
		}
	}
#if DEBUG_LEVEL > 2
	sfe_rxfilter_dump(dp, 0, 0x10);
	sfe_rxfilter_dump(dp, 0x200, 0x380);
#endif
	/* finally enable the filter in the chosen mode */
	OUTL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
/*
 * Program the SiS900 receive filter from dp->rxmode and the
 * multicast list.
 *
 * Older revisions have a 128-bit multicast hash (8 words, indexed by
 * the top 7 crc bits); SIS635A and newer, and SIS900B, have a 256-bit
 * hash (16 words, top 8 crc bits).  The station address is rewritten
 * only when it differs from the cached copy.  Always returns
 * GEM_SUCCESS.
 */
static int
sfe_set_rx_filter_sis900(struct gem_dev *dp)
{
	int i;
	uint32_t mode;
	uint16_t hash_tbl[16];
	uint8_t *mac = dp->cur_addr.ether_addr_octet;
	int hash_size;
	int hash_shift;
	struct sfe_dev *lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	/* receive disabled: turn the filter off and we are done */
	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		OUTLINL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	/* determine the hash width for this chip revision */
	hash_shift = 25;
	if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
		hash_shift = 24;
	}
	hash_size = (1 << (32 - hash_shift)) / 16;	/* in 16-bit words */
	bzero(hash_tbl, sizeof (hash_tbl));

	/* choose the acceptance mode */
	if (dp->rxmode & RXMODE_PROMISC) {
		/* promiscuous: accept everything */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) ||
	    dp->mc_count > hash_size*16/2) {
		/* accept all multicast */
		mode = RFCR_AAB | RFCR_AAM;
	} else {
		/* broadcast plus the hash table built below */
		mode = RFCR_AAB;
	}

	/* build the multicast hash table from the crc high bits */
	for (i = 0; i < dp->mc_count; i++) {
		uint_t h;
		h = dp->mc_list[i].hash >> hash_shift;
		hash_tbl[h / 16] |= 1 << (h % 16);
	}

	/* rewrite the station address only when it actually changed */
	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		for (i = 0; i < ETHERADDRL/2; i++) {
			OUTLINL(dp, RFCR,
			    (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
			OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
		}
		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}

	/* load the hash table through the RFCR/RFDR indirection */
	for (i = 0; i < hash_size; i++) {
		OUTLINL(dp, RFCR,
		    (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
		OUTLINL(dp, RFDR, hash_tbl[i]);
	}

	/* finally enable the filter in the chosen mode */
	OUTLINL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
/*
 * Start the MAC: enable our interrupt causes (unless running in
 * polled GEM_NOINTR mode) and kick the receiver.  The transmitter is
 * started on demand by sfe_tx_start().
 */
static int
sfe_start_chip(struct gem_dev *dp)
{
	struct sfe_dev *lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	lp->our_intr_bits = OUR_INTR_BITS;
	if ((dp->misc_flag & GEM_NOINTR) == 0) {
		OUTL(dp, IER, 1);
		OUTL(dp, IMR, lp->our_intr_bits);
	}

	/* enable the receiver */
	OUTL(dp, CR, lp->cr | CR_RXE);

	return (GEM_SUCCESS);
}
/*
 * Stop the MAC by resetting the tx and rx logic, polling (up to
 * ~10ms) for both reset-complete indications.  Interrupt causes seen
 * while polling are latched into lp->isr_pended so sfe_interrupt()
 * can still claim them later.
 *
 * Returns GEM_SUCCESS, or GEM_FAILURE on reset timeout.
 */
static int
sfe_stop_chip(struct gem_dev *dp)
{
	struct sfe_dev *lp = dp->private;
	uint32_t done;
	int i;
	uint32_t val;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	/* mask interrupts and reset both tx and rx */
	OUTL(dp, IMR, 0);
	OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);

	done = 0;
	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
		if (i > 1000) {
			cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
			    dp->name, __func__);
			return (GEM_FAILURE);
		}
		val = INL(dp, ISR);
		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
		/* don't lose interrupts raised while we were masked */
		lp->isr_pended |= val & lp->our_intr_bits;
		drv_usecwait(10);
	}
	return (GEM_SUCCESS);
}
#ifndef __sparc
/*
 * quiesce(9E)-path variant of sfe_stop_chip(): identical tx/rx reset
 * sequence, but returns DDI_SUCCESS/DDI_FAILURE and avoids cmn_err so
 * it stays safe in the restricted quiesce context.
 */
static int
sfe_stop_chip_quiesce(struct gem_dev *dp)
{
	struct sfe_dev *lp = dp->private;
	uint32_t done;
	int i;
	uint32_t val;

	/* mask interrupts and reset both tx and rx */
	OUTL(dp, IMR, 0);
	OUTL(dp, CR, CR_TXR | CR_RXR);

	done = 0;
	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
		if (i > 1000) {
			/* timeout: no logging allowed on this path */
			return (DDI_FAILURE);
		}
		val = INL(dp, ISR);
		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
		/* don't lose interrupts raised while we were masked */
		lp->isr_pended |= val & lp->our_intr_bits;
		drv_usecwait(10);
	}
	return (DDI_SUCCESS);
}
#endif
/* burst size in bytes for each MXDMA field encoding (0 means 512) */
static uint_t
sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };

/*
 * Map a DMA burst size in bytes to its MXDMA field encoding.
 * Requests above 256 bytes map to encoding 0 (512 bytes); otherwise
 * the smallest encoded size that covers the request is selected.
 */
static uint_t
sfe_encode_mxdma(uint_t burstsize)
{
	int	code;

	if (burstsize > 256) {
		/* only the 512-byte encoding (0) exceeds 256 */
		return (0);
	}

	/* pick the first encoding whose size covers the request */
	for (code = 1; code < 8 && sfe_mxdma_value[code] < burstsize; code++)
		;

	return (code);
}
/*
 * Program all media-dependent parameters once link speed/duplex are
 * known: duplex-sensitive TXCFG/RXCFG bits, tx/rx DMA burst sizes,
 * FIFO drain/fill thresholds, and the flow-control setup appropriate
 * for the chip family.  Always returns GEM_SUCCESS.
 */
static int
sfe_set_media(struct gem_dev *dp)
{
	uint32_t txcfg;
	uint32_t rxcfg;
	uint32_t pcr;
	uint32_t val;
	uint32_t txmxdma;
	uint32_t rxmxdma;
	struct sfe_dev *lp = dp->private;
#ifdef DEBUG_LEVEL
	extern int gem_speed_value[];
#endif
	DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
	    dp->name, __func__,
	    dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));

	/* duplex-dependent tx configuration */
	txcfg = TXCFG_ATP;
	if (dp->full_duplex) {
		txcfg |= (TXCFG_CSI | TXCFG_HBI);
	}
	/* duplex-dependent rx configuration */
	rxcfg = RXCFG_AEP | RXCFG_ARP;
	if (dp->full_duplex) {
		rxcfg |= RXCFG_ATX;
	}

	/* choose initial DMA burst sizes per chip family/bus mode */
	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
#ifdef DEBUG_SIS900_EDB
		val = CFG_EDB_MASTER;
#else
		val = INL(dp, CFG) & CFG_EDB_MASTER;
#endif
		if (val) {
			txmxdma = 64;
			rxmxdma = 64;
		} else {
			txmxdma = 512;
			rxmxdma = 512;
		}
	} else {
		txmxdma = max(dp->txmaxdma, 256);
		rxmxdma = max(dp->rxmaxdma, 256);
	}

	/* tx fifo thresholds: fill = what remains above drain */
	lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);
	lp->tx_fill_threshold =
	    TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;

	/* shrink the tx burst until it fits under the fill threshold */
	for (; ; ) {
		val = sfe_encode_mxdma(txmxdma);
		txmxdma = sfe_mxdma_value[val];
		if (txmxdma <= lp->tx_fill_threshold) {
			break;
		}
		txmxdma = txmxdma / 2;
	}
	txcfg |= val << TXCFG_MXDMA_SHIFT;

	/* rx burst size, clamped by the encoding table */
	val = sfe_encode_mxdma(rxmxdma);
	rxcfg |= val << RXCFG_MXDMA_SHIFT;
	rxmxdma = sfe_mxdma_value[val];

	/* rx drain threshold, bounded by the DRTH field maximum */
	val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
	lp->rx_drain_threshold =
	    min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);

	DPRINTF(0, (CE_CONT,
	    "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
	    " rx: drain:%d mxdma:%d",
	    dp->name, __func__,
	    lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
	    lp->tx_fill_threshold, txmxdma,
	    lp->rx_drain_threshold, rxmxdma));

	ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
	ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
	ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);

	txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
	    | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
	OUTL(dp, TXCFG, txcfg);

	rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		rxcfg |= RXCFG_ALP_DP83815;
	}
	OUTL(dp, RXCFG, rxcfg);

	DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
	    dp->name, __func__,
	    txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));

	/* flow control: PCR on DP83815, FLOWCTL on SiS900 */
	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		pcr = INL(dp, PCR);
		switch (dp->flow_control) {
		case FLOW_CONTROL_SYMMETRIC:
		case FLOW_CONTROL_RX_PAUSE:
			OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
			break;
		default:
			OUTL(dp, PCR,
			    pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
			break;
		}
		DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
		    INL(dp, PCR), PCR_BITS));
	} else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
		switch (dp->flow_control) {
		case FLOW_CONTROL_SYMMETRIC:
		case FLOW_CONTROL_RX_PAUSE:
			OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
			break;
		default:
			OUTL(dp, FLOWCTL, 0);
			break;
		}
		DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
		    dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
	}
	return (GEM_SUCCESS);
}
/*
 * Statistics-fetch callback.  All counters for this driver are
 * accumulated in software as descriptors are processed (see
 * sfe_tx_desc_stat() and sfe_rx_desc_stat()), so there is nothing to
 * read from the hardware here.
 */
static int
sfe_get_stats(struct gem_dev *dp)
{
	/* EMPTY */
	return (GEM_SUCCESS);
}
/*
 * Fill the tx descriptor at `slot` with a single DMA fragment.
 *
 * The cmdsts word carries CMDSTS_OWN (plus CMDSTS_INTR when gem asked
 * for a tx-done interrupt) or'ed with the fragment length.  For the
 * first descriptor of a batch (GEM_TXFLAG_HEAD), OWN is deliberately
 * left clear here and set afterwards by sfe_tx_start(), so the chip
 * can never see a partially built chain.
 *
 * Only single-fragment packets are supported (MAXTXFRAGS == 1).
 * Returns the number of descriptors consumed (i.e. frags, always 1).
 */
static int
sfe_tx_desc_write(struct gem_dev *dp, int slot,
    ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
{
	uint32_t mark;
	struct sfe_desc *tdp;
	ddi_dma_cookie_t *dcp;
	uint32_t tmp0;
#if DEBUG_LEVEL > 2
	int i;

	cmn_err(CE_CONT,
	    CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
	    dp->name, ddi_get_lbolt(), __func__,
	    dp->tx_desc_tail, slot, frags, flags);
	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
	}
#endif
#if DEBUG_LEVEL > 3
	flags |= GEM_TXFLAG_INTR;
#endif
	/* build the cmdsts word: ownership (+ optional interrupt) */
	mark = (flags & GEM_TXFLAG_INTR)
	    ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;

	ASSERT(frags == 1);
	dcp = &dmacookie[0];
	if (flags & GEM_TXFLAG_HEAD) {
		/* head of a batch: defer OWN until sfe_tx_start() */
		mark &= ~CMDSTS_OWN;
	}

	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
	tmp0 = (uint32_t)dcp->dmac_address;
	mark |= (uint32_t)dcp->dmac_size;
	tdp->d_bufptr = LE_32(tmp0);
	tdp->d_cmdsts = LE_32(mark);

	return (frags);
}
/*
 * Hand a batch of tx descriptors to the chip.  Descriptors after the
 * head are flushed first; then OWN is set on the head descriptor (it
 * was withheld by sfe_tx_desc_write()) and flushed, and the
 * transmitter is kicked if the MAC is running.
 */
static void
sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
{
	uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc *tdp;
	struct sfe_dev *lp = dp->private;

	/* flush all descriptors of the batch except the head */
	if (nslot > 1) {
		gem_tx_desc_dma_sync(dp,
		    SLOT(start_slot + 1, tx_ring_size),
		    nslot - 1, DDI_DMA_SYNC_FORDEV);
	}

	/* now grant ownership of the head descriptor to the chip */
	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
	tdp->d_cmdsts |= LE_32(CMDSTS_OWN);

	gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);

	/* kick the transmitter */
	if (dp->mac_active) {
		OUTL(dp, CR, lp->cr | CR_TXE);
	}
}
/*
 * Fill the rx descriptor at `slot` with a single receive buffer.
 * The cmdsts word holds the buffer size plus CMDSTS_INTR; OWN is left
 * clear so the chip will fill this descriptor.
 */
static void
sfe_rx_desc_write(struct gem_dev *dp, int slot,
    ddi_dma_cookie_t *dmacookie, int frags)
{
	struct sfe_desc *rdp;
	uint32_t tmp0;
	uint32_t tmp1;
#if DEBUG_LEVEL > 2
	int i;

	ASSERT(frags == 1);
	cmn_err(CE_CONT, CONS
	    "%s: %s seqnum: %d, slot %d, frags: %d",
	    dp->name, __func__, dp->rx_active_tail, slot, frags);
	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS "	frag: %d addr: 0x%llx, len: 0x%lx",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
	}
#endif
	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
	tmp0 = (uint32_t)dmacookie->dmac_address;
	tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size;
	rdp->d_bufptr = LE_32(tmp0);
	rdp->d_cmdsts = LE_32(tmp1);
}
/*
 * Examine the completion status of the tx packet that occupies
 * descriptors [slot .. slot + ndesc - 1] and update the software
 * statistics accordingly.
 *
 * Returns 0 while the chip still owns the last descriptor (the packet
 * has not been transmitted yet), otherwise GEM_TX_DONE.
 */
static uint_t
sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
{
	uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc *tdp;
	uint32_t status;
	int cols;
	struct sfe_dev *lp = dp->private;
#ifdef DEBUG_LEVEL
	int i;
	clock_t delay;
#endif
	/* status of the whole packet lives in its last descriptor */
	tdp = (void *)
	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)];

	status = tdp->d_cmdsts;
	status = LE_32(status);

	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
	    dp->name, ddi_get_lbolt(), __func__,
	    slot, status, TXSTAT_BITS));

	if (status & CMDSTS_OWN) {
		/*
		 * Not yet transmitted.  On DP83815, re-kick the
		 * transmitter while descriptors remain outstanding.
		 */
		if (lp->chip->chip_type == CHIPTYPE_DP83815 &&
		    dp->mac_active) {
			OUTL(dp, CR, lp->cr | CR_TXE);
		}
		return (0);
	}

	/* MORE must never be set on the last descriptor of a packet */
	if (status & CMDSTS_MORE) {
		cmn_err(CE_NOTE, CONS
		    "%s: tx status bits incorrect: slot:%d, status:0x%x",
		    dp->name, slot, status);
	}
#if DEBUG_LEVEL > 3
	delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
	if (delay >= 50) {
		DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
		    dp->name, delay, slot));
	}
#endif
#if DEBUG_LEVEL > 3
	/*
	 * Sanity check on multi-descriptor packets: every descriptor
	 * before the last must have MORE set and OWN cleared.
	 * (BUGFIX: this loop referenced the undefined identifier
	 * "nfrag"; the descriptor count parameter is "ndesc".)
	 */
	for (i = 0; i < ndesc-1; i++) {
		uint32_t s;
		int n;

		n = SLOT(slot + i, tx_ring_size);
		s = LE_32(
		    ((struct sfe_desc *)((void *)
		    &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);
		ASSERT(s & CMDSTS_MORE);
		ASSERT((s & CMDSTS_OWN) == 0);
	}
#endif
	if ((status & CMDSTS_OK) == 0) {
		/* transmission failed: classify the error */
		DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
		    dp->name, status, TXSTAT_BITS));
		dp->stats.errxmt++;

		if (status & CMDSTS_TFU) {
			dp->stats.underflow++;
		} else if (status & CMDSTS_CRS) {
			dp->stats.nocarrier++;
		} else if (status & CMDSTS_OWC) {
			dp->stats.xmtlatecoll++;
		} else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
			dp->stats.excoll++;
			dp->stats.collisions += 16;
		} else {
			dp->stats.xmit_internal_err++;
		}
	} else if (!dp->full_duplex) {
		/* success: account collisions/deferrals in half duplex */
		cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;
		if (cols > 0) {
			if (cols == 1) {
				dp->stats.first_coll++;
			} else {
				dp->stats.multi_coll++;
			}
			dp->stats.collisions += cols;
		} else if (status & CMDSTS_TD) {
			dp->stats.defer++;
		}
	}
	return (GEM_TX_DONE);
}
/*
 * Examine the rx descriptor at `slot`.
 *
 * Returns 0 while OWN is still clear (the chip has not filled the
 * descriptor yet); otherwise GEM_RX_DONE, or'ed with GEM_RX_ERR on
 * receive errors, or with the received length (FCS stripped) in the
 * GEM_RX_LEN field on success.
 */
static uint64_t
sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
{
	struct sfe_desc *rdp;
	uint_t len;
	uint_t flag;
	uint32_t status;

	flag = GEM_RX_DONE;

	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	status = rdp->d_cmdsts;
	status = LE_32(status);

	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
	    dp->name, ddi_get_lbolt(), __func__,
	    slot, status, RXSTAT_BITS));

	if ((status & CMDSTS_OWN) == 0) {
		/* the chip is still using this descriptor */
		return (0);
	}

/* any of these indicates a corrupted or oversized frame */
#define	RX_ERR_BITS \
	(CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
	CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)

	if (status & RX_ERR_BITS) {
		/* classify and count the receive error */
		DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
		    "received, buffer status: %b",
		    dp->name, status, RXSTAT_BITS));
		dp->stats.errrcv++;

		if (status & CMDSTS_RXO) {
			dp->stats.overflow++;
		} else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
			dp->stats.frame_too_long++;
		} else if (status & CMDSTS_RUNT) {
			dp->stats.runt++;
		} else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
			dp->stats.frame++;
		} else if (status & CMDSTS_CRCE) {
			dp->stats.crc++;
		} else {
			dp->stats.rcv_internal_err++;
		}
		return (flag | GEM_RX_ERR);
	}

	/* strip the trailing FCS from the reported length */
	if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
		len -= ETHERFCSL;
	}
#if DEBUG_LEVEL > 10
	{
		int i;
		uint8_t *bp = dp->rx_buf_head->rxb_buf;

		cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);
		/*
		 * NOTE(review): bp is never advanced inside this loop,
		 * so the same first 10 bytes are printed each pass.
		 */
		for (i = 0; i < 60; i += 10) {
			cmn_err(CE_CONT, CONS
			    "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
			    bp[0], bp[1], bp[2], bp[3], bp[4],
			    bp[5], bp[6], bp[7], bp[8], bp[9]);
		}
		bp += 10;
	}
#endif
	return (flag | (len & GEM_RX_LEN));
}
/*
 * Initialize tx descriptor `slot`: mark it driver-owned (cmdsts
 * cleared) and chain the previous descriptor's link field to its
 * physical address, forming the ring.
 */
static void
sfe_tx_desc_init(struct gem_dev *dp, int slot)
{
	uint_t		ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc	*descp;
	struct sfe_desc	*prevp;
	uint32_t	my_paddr;

	/* this slot starts out owned by the driver */
	descp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
	descp->d_cmdsts = 0;

	/* link the previous descriptor to this one */
	my_paddr = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE * slot;
	prevp = (void *)
	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, ring_size)];
	prevp->d_link = LE_32(my_paddr);
}
/*
 * Initialize rx descriptor `slot`: set OWN so the chip will not use
 * it until a buffer is attached, and chain the previous descriptor's
 * link field to its physical address, forming the ring.
 */
static void
sfe_rx_desc_init(struct gem_dev *dp, int slot)
{
	uint_t		ring_size = dp->gc.gc_rx_ring_size;
	struct sfe_desc	*descp;
	struct sfe_desc	*prevp;
	uint32_t	my_paddr;

	/* park this slot: OWN set means not available for receive */
	descp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
	descp->d_cmdsts = LE_32(CMDSTS_OWN);

	/* link the previous descriptor to this one */
	my_paddr = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE * slot;
	prevp = (void *)
	    &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, ring_size)];
	prevp->d_link = LE_32(my_paddr);
}
/*
 * Return tx descriptor `slot` to its initial driver-owned state.
 */
static void
sfe_tx_desc_clean(struct gem_dev *dp, int slot)
{
	struct sfe_desc	*descp;

	descp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
	descp->d_cmdsts = 0;
}
/*
 * Park rx descriptor `slot`: with OWN set the chip will not fill it.
 */
static void
sfe_rx_desc_clean(struct gem_dev *dp, int slot)
{
	struct sfe_desc	*descp;

	descp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
	descp->d_cmdsts = LE_32(CMDSTS_OWN);
}
/*
 * Interrupt service routine.
 *
 * Merges ISR causes latched into lp->isr_pended while interrupts were
 * masked (see the stop/reset routines) with the live ISR value, so no
 * edge is lost.  Returns DDI_INTR_UNCLAIMED when neither shows one of
 * our causes; otherwise DDI_INTR_CLAIMED, possibly or'ed with
 * INTR_RESTART_TX when transmission should be rescheduled.
 */
static uint_t
sfe_interrupt(struct gem_dev *dp)
{
	uint_t rx_ring_size = dp->gc.gc_rx_ring_size;
	uint32_t isr;
	uint32_t isr_bogus;
	uint_t flags = 0;
	struct sfe_dev *lp = dp->private;

	isr = INL(dp, ISR);

	/* fold in causes seen while interrupts were masked */
	isr_bogus = lp->isr_pended;
	lp->isr_pended = 0;

	if (((isr | isr_bogus) & lp->our_intr_bits) == 0) {
		/* not our interrupt */
		return (DDI_INTR_UNCLAIMED);
	}

	DPRINTF(3, (CE_CONT,
	    CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
	    dp->name, ddi_get_lbolt(), __func__,
	    isr, INTR_BITS, dp->rx_active_head));

	if (!dp->mac_active) {
		/* the MAC is going down: ignore further causes */
		lp->our_intr_bits = 0;
		return (DDI_INTR_CLAIMED);
	}

	isr &= lp->our_intr_bits;

	if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
	    ISR_RXDESC | ISR_RXOK)) {
		/* harvest received packets */
		(void) gem_receive(dp);

		if (isr & (ISR_RXSOVR | ISR_RXORN)) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx fifo overrun: isr %b",
			    dp->name, isr, INTR_BITS));
			dp->stats.overflow++;
		}

		if (isr & ISR_RXIDLE) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx buffer ran out: isr %b",
			    dp->name, isr, INTR_BITS));
			dp->stats.norcvbuf++;
			/*
			 * The receiver went idle; point RXDP back at
			 * the first active descriptor and restart it.
			 */
			OUTL(dp, RXDP, dp->rx_ring_dma +
			    SFE_DESC_SIZE *
			    SLOT(dp->rx_active_head, rx_ring_size));
			OUTL(dp, CR, lp->cr | CR_RXE);
		}
	}

	if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
	    ISR_TXIDLE | ISR_TXOK)) {
		/* reclaim completed tx descriptors */
		if (gem_tx_done(dp)) {
			flags |= INTR_RESTART_TX;
		}
	}

	if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
		/* fatal bus error: restart the chip, keeping buffers */
		cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
		    dp->name, isr, INTR_BITS);
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		flags |= INTR_RESTART_TX;
	}

	DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
	    dp->name, __func__, isr, INTR_BITS));

	return (DDI_INTR_CLAIMED | flags);
}
/*
 * MII bus sync for DP83815: nothing to do, because the internal PHY
 * is reached through memory-mapped registers (see
 * sfe_mii_read_dp83815()) rather than a bit-banged MII bus.
 */
static void
sfe_mii_sync_dp83815(struct gem_dev *dp)
{
	/* EMPTY */
}
/*
 * Read an internal-PHY register on DP83815: the PHY registers are
 * memory-mapped at MII_REGS_BASE, one 32-bit slot per register.
 */
static uint16_t
sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
{
	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x",
	    dp->name, __func__, offset));
	return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
}
/*
 * Write an internal-PHY register on DP83815 through its
 * memory-mapped slot (see sfe_mii_read_dp83815()).
 */
static void
sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
{
	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x",
	    dp->name, __func__, offset, val));
	OUTL(dp, MII_REGS_BASE + offset*4, val);
}
/*
 * PHY configuration hook for DP83815.
 *
 * Applies a silicon-revision specific patch sequence to undocumented
 * PHY support registers before falling through to the default gem
 * configuration.  NOTE(review): the register offsets/values are magic
 * numbers, presumably from the vendor's errata/app-note for these
 * revisions -- confirm against National's documentation.
 */
static int
sfe_mii_config_dp83815(struct gem_dev *dp)
{
	uint32_t srr;

	srr = INL(dp, SRR) & SRR_REV;

	DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
	    dp->name, srr,
	    INW(dp, 0x00cc),
	    INW(dp, 0x00e4),
	    INW(dp, 0x00fc),
	    INW(dp, 0x00f4),
	    INW(dp, 0x00f8)));

	if (srr == SRR_REV_DP83815CVNG) {
		/* full patch set for the CVNG revision */
		OUTW(dp, 0x00cc, 0x0001);	/* unlock the patch regs */
		OUTW(dp, 0x00e4, 0x189c);
		OUTW(dp, 0x00fc, 0x0000);
		OUTW(dp, 0x00f4, 0x5040);
		OUTW(dp, 0x00f8, 0x008c);
		OUTW(dp, 0x00cc, 0x0000);	/* re-lock */

		DPRINTF(0, (CE_CONT,
		    CONS "%s: PHY patched %04x %04x %04x %04x %04x",
		    dp->name,
		    INW(dp, 0x00cc),
		    INW(dp, 0x00e4),
		    INW(dp, 0x00fc),
		    INW(dp, 0x00f4),
		    INW(dp, 0x00f8)));
	} else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 ||
	    ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) {
		/* reduced patch for DVNG/83816 AVNG families */
		OUTW(dp, 0x00cc, 0x0001);	/* unlock the patch regs */
		OUTW(dp, 0x00e4, 0x189c);
		OUTW(dp, 0x00cc, 0x0000);	/* re-lock */

		DPRINTF(0, (CE_CONT,
		    CONS "%s: PHY patched %04x %04x",
		    dp->name,
		    INW(dp, 0x00cc),
		    INW(dp, 0x00e4)));
	}

	return (gem_mii_config_default(dp));
}
/*
 * PHY probe for DP83815 boards.
 *
 * First tries an external PHY at address 0, using the SiS900-style
 * bit-banged MII routines with the external-PHY mode enabled in CFG.
 * If that fails, switches to the internal memory-mapped PHY: selects
 * the internal access routines, pulses CFG_PHY_RST, and retries.
 */
static int
sfe_mii_probe_dp83815(struct gem_dev *dp)
{
	uint32_t val;

	/* try external phy first */
	DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy",
	    dp->name, __func__));
	dp->mii_phy_addr = 0;
	dp->gc.gc_mii_sync = &sfe_mii_sync_sis900;
	dp->gc.gc_mii_read = &sfe_mii_read_sis900;
	dp->gc.gc_mii_write = &sfe_mii_write_sis900;

	/* enable external-PHY access, disable the internal PHY */
	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);

	if (gem_mii_probe_default(dp) == GEM_SUCCESS) {
		return (GEM_SUCCESS);
	}

	/* switch to the internal phy */
	DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy",
	    dp->name, __func__));
	dp->mii_phy_addr = -1;
	dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815;
	dp->gc.gc_mii_read = &sfe_mii_read_dp83815;
	dp->gc.gc_mii_write = &sfe_mii_write_dp83815;

	/* pulse the internal PHY reset, then let it settle */
	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST);
	drv_usecwait(100);	/* keep the reset asserted for 100uS */
	OUTL(dp, CFG, val | CFG_PAUSE_ADV);
	delay(drv_usectohz(10000));

	return (gem_mii_probe_default(dp));
}
/*
 * Re-arm the CFG PHY mode for the PHY selected at probe time:
 * internal PHY (mii_phy_addr == -1) advertises pause; an external
 * PHY needs CFG_EXT_PHY with the internal one disabled.  Only the
 * autoneg/strap bits of CFG are preserved.
 */
static int
sfe_mii_init_dp83815(struct gem_dev *dp)
{
	uint32_t	cfg_keep;
	uint32_t	cfg_new;

	cfg_keep = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);

	if (dp->mii_phy_addr == -1) {
		/* internal PHY */
		cfg_new = cfg_keep | CFG_PAUSE_ADV;
	} else {
		/* external PHY */
		cfg_new = cfg_keep | CFG_EXT_PHY | CFG_PHY_DIS;
	}
	OUTL(dp, CFG, cfg_new);

	return (GEM_SUCCESS);
}
#define MDIO_DELAY(dp) {(void) INL(dp, MEAR); (void) INL(dp, MEAR); }
/*
 * MII bus sync for SiS900: bit-bang 32 consecutive '1' bits on MDIO
 * (the standard MII preamble) to resynchronize the PHY's management
 * interface before a frame is sent.
 */
static void
sfe_mii_sync_sis900(struct gem_dev *dp)
{
	int i;

	/* send 32 ONE's to make MII line idle */
	for (i = 0; i < 32; i++) {
		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
		MDIO_DELAY(dp);
	}
}
/*
 * PHY configuration hook for SiS900.
 *
 * Applies vendor-specific PHY fixups (an ICS1893 quirk register and,
 * on SiS630E, conservative autoneg/config defaults -- NOTE(review):
 * the values are vendor magic, confirm against the respective PHY
 * documentation), runs the SiS630 equalizer workaround, then falls
 * through to the default gem configuration.
 */
static int
sfe_mii_config_sis900(struct gem_dev *dp)
{
	struct sfe_dev *lp = dp->private;

	/* ICS1893: workaround for PHY beyond the mask revision */
	if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
		gem_mii_write(dp, 0x0018, 0xD200);
	}

	/* SiS630E has a PHY that needs explicit default parameters */
	if (lp->revid == SIS630E_900_REV) {
		gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
		gem_mii_write(dp, MII_CONFIG1, 0x0022);
		gem_mii_write(dp, MII_CONFIG2, 0xff00);
		gem_mii_write(dp, MII_MASK, 0xffc0);
	}
	sfe_set_eq_sis630(dp);

	return (gem_mii_config_default(dp));
}
/*
 * Read a PHY register over the bit-banged MII bus (MEAR register).
 *
 * The 14 command bits (start, READ opcode, PHY address, register) sit
 * in bits 31..18 of the frame built by MII_READ_CMD() and are shifted
 * out MSB first; after the turnaround the 16 data bits are shifted in
 * MSB first, followed by two idle clocks.
 *
 * BUGFIX: the PHY-response check below was guarded by the misspelled
 * "#if DEBUG_LEBEL > 0", so it could never be compiled in; it now
 * correctly tests DEBUG_LEVEL.
 */
static uint16_t
sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
{
	uint32_t cmd;
	uint16_t ret;
	int i;
	uint32_t data;

	cmd = MII_READ_CMD(dp->mii_phy_addr, reg);

	/* shift out the 14 command bits, MSB first */
	for (i = 31; i >= 18; i--) {
		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
		OUTL(dp, MEAR, data | MEAR_MDDIR);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* turnaround: release MDIO and give the PHY one clock */
	OUTL(dp, MEAR, 0);
	MDIO_DELAY(dp);
	OUTL(dp, MEAR, MEAR_MDC);
	MDIO_DELAY(dp);

	/* get response from PHY */
	OUTL(dp, MEAR, 0);
#if DEBUG_LEVEL > 0
	(void) INL(dp, MEAR);
	if (INL(dp, MEAR) & MEAR_MDIO) {
		/* the PHY should be driving MDIO low here */
		cmn_err(CE_WARN, "%s: PHY@%d not responded",
		    dp->name, dp->mii_phy_addr);
	}
#else
	MDIO_DELAY(dp);
#endif
	OUTL(dp, MEAR, MEAR_MDC);
	MDIO_DELAY(dp);

	/* shift in the 16 data bits, MSB first */
	ret = 0;
	for (i = 16; i > 0; i--) {
		OUTL(dp, MEAR, 0);
		(void) INL(dp, MEAR);
		ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
		OUTL(dp, MEAR, MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* two idle clocks to finish the frame */
	for (i = 0; i < 2; i++) {
		OUTL(dp, MEAR, 0);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDC);
		MDIO_DELAY(dp);
	}
	return (ret);
}
/*
 * Write a PHY register over the bit-banged MII bus (MEAR register):
 * shift out the full 32-bit write frame built by MII_WRITE_CMD(),
 * MSB first, then two idle clocks.  The preamble is supplied
 * separately by sfe_mii_sync_sis900().
 */
static void
sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
{
	uint32_t cmd;
	int i;
	uint32_t data;

	cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);

	/* shift out all 32 frame bits, MSB first */
	for (i = 31; i >= 0; i--) {
		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
		OUTL(dp, MEAR, data | MEAR_MDDIR);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* two idle clocks to finish the frame */
	for (i = 0; i < 2; i++) {
		OUTL(dp, MEAR, 0);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDC);
		MDIO_DELAY(dp);
	}
}
#undef MDIO_DELAY
/*
 * sfe_set_eq_sis630: tune the equalizer of the embedded PHY on
 * SiS630-family chipsets (630E/630EA1/630A/630ET revisions only;
 * a no-op for anything else).
 *
 * While the link is up, the equalizer value is sampled 10 times from
 * the vendor-reserved register MII_RESV (0x14) and a new value is
 * derived from the observed min/max; while the link is down, the PHY
 * is put into its auto-equalization mode instead.  The constants are
 * vendor magic - NOTE(review): taken as-is, no public datasheet to
 * confirm against.
 */
static void
sfe_set_eq_sis630(struct gem_dev *dp)
{
uint16_t reg14h;
uint16_t eq_value;
uint16_t max_value;
uint16_t min_value;
int i;
uint8_t rev;
struct sfe_dev *lp = dp->private;
rev = lp->revid;
/* Only the SiS630 family needs this workaround. */
if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
return;
}
if (dp->mii_state == MII_STATE_LINKUP) {
/* Freeze the equalizer so we can sample a stable value. */
reg14h = gem_mii_read(dp, MII_RESV);
gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);
/* Sample the equalizer field (bits 7..3) 10 times. */
eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
max_value = min_value = eq_value;
for (i = 1; i < 10; i++) {
eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
max_value = max(eq_value, max_value);
min_value = min(eq_value, min_value);
}
/* Derive the new equalizer value from the observed range. */
if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
rev == SIS630ET_900_REV) {
if (max_value < 5) {
eq_value = max_value;
} else if (5 <= max_value && max_value < 15) {
eq_value =
max(max_value + 1,
min_value + 2);
} else if (15 <= max_value) {
eq_value =
max(max_value + 5,
min_value + 6);
}
}
else
/* 630A with B0/B1 south bridge uses the midpoint instead. */
if (rev == SIS630A_900_REV &&
(lp->bridge_revid == SIS630B0 ||
lp->bridge_revid == SIS630B1)) {
if (max_value == 0) {
eq_value = 3;
} else {
eq_value = (max_value + min_value + 1)/2;
}
}
/* Write the chosen value back and re-enable the equalizer. */
reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
reg14h |= 0x6000 | (eq_value << 3);
gem_mii_write(dp, MII_RESV, reg14h);
} else {
/* Link down: let the PHY auto-equalize. */
reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
if (rev == SIS630A_900_REV &&
(lp->bridge_revid == SIS630B0 ||
lp->bridge_revid == SIS630B1)) {
reg14h |= 0x0200;
}
gem_mii_write(dp, MII_RESV, reg14h);
}
}
/*
 * sfe_chipinfo_init_sis900: per-revision setup for SiS900-family macs.
 * Selects the mac-address access routine, and for SiS630-based chips
 * records the revision of the host bridge (pci1039,630), which the
 * equalizer tuning code consults later.
 */
static void
sfe_chipinfo_init_sis900(struct gem_dev *dp)
{
	struct sfe_dev		*lp = (struct sfe_dev *)dp->private;
	dev_info_t		*bridge;
	ddi_acc_handle_t	bridge_handle;
	int			revision = lp->revid;

	/* SiS962 stores its factory mac address differently. */
	lp->get_mac_addr = (revision == SIS962_900_REV) ?
	    &sfe_get_mac_addr_sis962 : &sfe_get_mac_addr_sis900;

	lp->bridge_revid = 0;

	if (revision != SIS630E_900_REV && revision != SIS630EA1_900_REV &&
	    revision != SIS630A_900_REV && revision != SIS630ET_900_REV) {
		return;
	}

	/* Locate the SiS630 host bridge and read its revision id. */
	if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
		cmn_err(CE_WARN,
		    "%s: cannot find host bridge (pci1039,630)",
		    dp->name);
		return;
	}

	if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: pci_config_setup failed",
		    dp->name);
		return;
	}

	lp->bridge_revid =
	    pci_config_get8(bridge_handle, PCI_CONF_REVID);
	pci_config_teardown(&bridge_handle);
}
/*
 * sfe_attach_chip: gem framework attach hook.  Resolves the factory
 * mac address and applies chip-type specific tweaks.
 *
 * Returns GEM_SUCCESS, or GEM_FAILURE when no factory mac address
 * could be read.
 */
static int
sfe_attach_chip(struct gem_dev *dp)
{
	struct sfe_dev	*lp = (struct sfe_dev *)dp->private;

	DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__));

	/* Pick the mac-address access routine for this chip type. */
	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
		sfe_chipinfo_init_sis900(dp);
	} else {
		lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
	}

	if (!(*lp->get_mac_addr)(dp)) {
		cmn_err(CE_WARN,
		    "!%s: %s: failed to get factory mac address"
		    " please specify a mac address in sfe.conf",
		    dp->name, __func__);
		return (GEM_FAILURE);
	}

	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		/* NOTE(review): -1 appears to denote the built-in PHY. */
		dp->mii_phy_addr = -1;
		dp->misc_flag |= GEM_VLAN_SOFT;
		dp->txthr += 4;
	}

	/* Keep the tx threshold within the fifo. */
	dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);

	return (GEM_SUCCESS);
}
/*
 * sfeattach: attach(9E) entry point.  Identifies the chip from pci
 * config space, enables bus access, and for DDI_ATTACH builds the gem
 * framework configuration and attaches the device; DDI_RESUME is
 * forwarded to gem_resume().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * BUGFIXES:
 *  - PCI_CONF_REVID is an 8-bit config register; it was read with
 *    pci_config_get16 (cf. the correct pci_config_get8 usage in
 *    sfe_chipinfo_init_sis900).
 *  - The error message for a failed pci_config_setup wrongly named
 *    ddi_regs_map_setup.
 */
static int
sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			unit;
	const char		*drv_name;
	int			i;
	ddi_acc_handle_t	conf_handle;
	uint16_t		vid;
	uint16_t		did;
	uint8_t			rev;
#ifdef DEBUG_LEVEL
	uint32_t		iline;
	uint8_t			latim;
#endif
	struct chip_info	*p;
	struct gem_dev		*dp;
	struct sfe_dev		*lp;
	caddr_t			base;
	ddi_acc_handle_t	regs_ha;
	struct gem_conf		*gcp;

	unit = ddi_get_instance(dip);
	drv_name = ddi_driver_name(dip);

	DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));

	/* Map pci config space to identify the chip. */
	if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: pci_config_setup failed",
		    drv_name, unit);
		goto err;
	}

	vid = pci_config_get16(conf_handle, PCI_CONF_VENID);
	did = pci_config_get16(conf_handle, PCI_CONF_DEVID);
	rev = pci_config_get8(conf_handle, PCI_CONF_REVID);
#ifdef DEBUG_LEVEL
	iline = pci_config_get32(conf_handle, PCI_CONF_ILINE);
	latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);
#endif
#ifdef DEBUG_BUILT_IN_SIS900
	rev = SIS630E_900_REV;
#endif
	for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
		if (p->venid == vid && p->devid == did) {
			goto chip_found;
		}
	}

	/* Unsupported device. */
	cmn_err(CE_WARN,
	    "%s%d: sfe_attach: wrong PCI venid/devid (0x%x, 0x%x)",
	    drv_name, unit, vid, did);
	pci_config_teardown(&conf_handle);
	goto err;

chip_found:
	/* Enable I/O space, memory space and bus mastering. */
	pci_config_put16(conf_handle, PCI_CONF_COMM,
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
	    pci_config_get16(conf_handle, PCI_CONF_COMM));

	/* Ensure the device is in the D0 power state. */
	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);

	pci_config_teardown(&conf_handle);

	switch (cmd) {
	case DDI_RESUME:
		return (gem_resume(dip));

	case DDI_ATTACH:
		DPRINTF(0, (CE_CONT,
		    CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
		    drv_name, unit, iline, latim));

		/*
		 * Map the device registers: memory space for DP83815
		 * when sfe_use_pcimemspace is set, I/O space otherwise.
		 */
		if (gem_pci_regs_map_setup(dip,
		    (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
		    ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
		    &sfe_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s%d: ddi_regs_map_setup failed",
			    drv_name, unit);
			goto err;
		}

		/* Construct the gem framework configuration. */
		gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);

		(void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);

		/* Tx ring and buffer configuration. */
		gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
		gcp->gc_tx_max_frags = MAXTXFRAGS;
		gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
		gcp->gc_tx_desc_unit_shift = 4;	/* 16-byte descriptors */
		gcp->gc_tx_buf_size = TX_BUF_SIZE;
		gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
		gcp->gc_tx_ring_size = TX_RING_SIZE;
		gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
		gcp->gc_tx_auto_pad = B_TRUE;
		gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
		gcp->gc_tx_desc_write_oo = B_TRUE;

		/* Rx ring and buffer configuration. */
		gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
		gcp->gc_rx_max_frags = MAXRXFRAGS;
		gcp->gc_rx_desc_unit_shift = 4;
		gcp->gc_rx_ring_size = RX_RING_SIZE;
		gcp->gc_rx_buf_max = RX_BUF_SIZE;
		gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;

		/* DMA and register access attributes. */
		gcp->gc_dev_attr = sfe_dev_attr;
		gcp->gc_buf_attr = sfe_buf_attr;
		gcp->gc_desc_attr = sfe_buf_attr;
		gcp->gc_dma_attr_desc = sfe_dma_attr_desc;
		gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
		gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
		gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;
		gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
		gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
		gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;

		/* Timeouts and MII polling intervals. */
		gcp->gc_tx_timeout = 3*ONESEC;
		gcp->gc_tx_timeout_interval = ONESEC;
		if (p->chip_type == CHIPTYPE_DP83815) {
			gcp->gc_tx_timeout_interval = ONESEC/20;
		}
		gcp->gc_mii_link_watch_interval = ONESEC;
		gcp->gc_mii_an_watch_interval = ONESEC/5;
		gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT;
		gcp->gc_mii_an_timeout = MII_AN_TIMEOUT;
		gcp->gc_mii_an_wait = 0;
		gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
		gcp->gc_mii_an_delay = 0;
		gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
		gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
		gcp->gc_mii_dont_reset = B_FALSE;

		/* I/O method vectors, chosen per chip type. */
		gcp->gc_attach_chip = &sfe_attach_chip;
		if (p->chip_type == CHIPTYPE_DP83815) {
			gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
		} else {
			gcp->gc_reset_chip = &sfe_reset_chip_sis900;
		}
		gcp->gc_init_chip = &sfe_init_chip;
		gcp->gc_start_chip = &sfe_start_chip;
		gcp->gc_stop_chip = &sfe_stop_chip;
#ifdef USE_MULTICAST_HASHTBL
		gcp->gc_multicast_hash = &sfe_mcast_hash;
#endif
		if (p->chip_type == CHIPTYPE_DP83815) {
			gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
		} else {
			gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
		}
		gcp->gc_set_media = &sfe_set_media;
		gcp->gc_get_stats = &sfe_get_stats;
		gcp->gc_interrupt = &sfe_interrupt;

		/* Descriptor manipulation vectors. */
		gcp->gc_tx_desc_write = &sfe_tx_desc_write;
		gcp->gc_tx_start = &sfe_tx_start;
		gcp->gc_rx_desc_write = &sfe_rx_desc_write;
		gcp->gc_rx_start = NULL;
		gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
		gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
		gcp->gc_tx_desc_init = &sfe_tx_desc_init;
		gcp->gc_rx_desc_init = &sfe_rx_desc_init;
		gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
		gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;

		/* MII operation vectors. */
		if (p->chip_type == CHIPTYPE_DP83815) {
			gcp->gc_mii_probe = &sfe_mii_probe_dp83815;
			gcp->gc_mii_init = &sfe_mii_init_dp83815;
			gcp->gc_mii_config = &sfe_mii_config_dp83815;
			gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
			gcp->gc_mii_read = &sfe_mii_read_dp83815;
			gcp->gc_mii_write = &sfe_mii_write_dp83815;
			gcp->gc_mii_tune_phy = NULL;
			gcp->gc_flow_control = FLOW_CONTROL_NONE;
		} else {
			gcp->gc_mii_probe = &gem_mii_probe_default;
			gcp->gc_mii_init = NULL;
			gcp->gc_mii_config = &sfe_mii_config_sis900;
			gcp->gc_mii_sync = &sfe_mii_sync_sis900;
			gcp->gc_mii_read = &sfe_mii_read_sis900;
			gcp->gc_mii_write = &sfe_mii_write_sis900;
			gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
			gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
		}

		/* Allocate and initialize the private soft state. */
		lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
		lp->chip = p;
		lp->revid = rev;
		lp->our_intr_bits = 0;
		lp->isr_pended = 0;

		cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
		    drv_name, unit, p->chip_name, rev);

		/* gem_do_attach takes ownership of lp on success. */
		dp = gem_do_attach(dip, 0, gcp, base, &regs_ha,
		    lp, sizeof (*lp));
		kmem_free(gcp, sizeof (*gcp));

		if (dp == NULL) {
			goto err_freelp;
		}

		return (DDI_SUCCESS);

err_freelp:
		kmem_free(lp, sizeof (struct sfe_dev));
err:
		return (DDI_FAILURE);
	}
	return (DDI_FAILURE);
}
/*
 * sfedetach: detach(9E) entry point.  Forwards DDI_SUSPEND and
 * DDI_DETACH to the gem framework; any other command fails.
 */
static int
sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_SUSPEND:
		return (gem_suspend(dip));

	case DDI_DETACH:
		return (gem_do_detach(dip));

	default:
		return (DDI_FAILURE);
	}
}
#ifdef __sparc
#define	sfe_quiesce	ddi_quiesce_not_supported
#else
/*
 * sfe_quiesce: quiesce(9E) entry point.  Stops the chip without
 * using locks or interrupts.
 */
static int
sfe_quiesce(dev_info_t *dip)
{
	struct gem_dev	*dp;

	if ((dp = GEM_GET_DEV(dip)) == NULL) {
		return (DDI_FAILURE);
	}

	return (sfe_stop_chip_quiesce(dp));
}
#endif
/*
 * Solaris loadable-module linkage.
 */
DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
nodev, NULL, D_MP, NULL, sfe_quiesce);
static struct modldrv modldrv = {
&mod_driverops, /* type of module: this is a device driver */
ident, /* description string (built from the version above) */
&sfe_ops, /* driver ops */
};
static struct modlinkage modlinkage = {
MODREV_1, &modldrv, NULL
};
/*
 * _init(9E): register the gem framework state, then install the
 * module; unwind the gem registration if installation fails.
 */
int
_init(void)
{
	int	rv;

	DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));

	gem_mod_init(&sfe_ops, "sfe");
	if ((rv = mod_install(&modlinkage)) != DDI_SUCCESS) {
		gem_mod_fini(&sfe_ops);
	}
	return (rv);
}
/*
 * _fini(9E): remove the module, and release the gem framework state
 * only once removal has actually succeeded.
 */
int
_fini(void)
{
	int	rv;

	DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));

	if ((rv = mod_remove(&modlinkage)) == DDI_SUCCESS) {
		gem_mod_fini(&sfe_ops);
	}
	return (rv);
}
/*
 * _info(9E): report module information through the common linkage.
 */
int
_info(struct modinfo *modinfop)
{
return (mod_info(&modlinkage, modinfop));
}