#include <sys/cdefs.h>
#include "igc_api.h"
static s32 igc_wait_autoneg(struct igc_hw *hw);
/**
 * igc_init_phy_ops_generic - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Points every PHY operation at a matching no-op stub so that callers
 * can safely invoke any op before a MAC-specific init replaces them.
 */
void igc_init_phy_ops_generic(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;

	DEBUGFUNC("igc_init_phy_ops_generic");

	/* Generic control ops */
	phy->ops.init_params = igc_null_ops_generic;
	phy->ops.acquire = igc_null_ops_generic;
	phy->ops.release = igc_null_phy_generic;
	phy->ops.check_reset_block = igc_null_ops_generic;
	phy->ops.reset = igc_null_ops_generic;
	phy->ops.force_speed_duplex = igc_null_ops_generic;
	phy->ops.get_info = igc_null_ops_generic;

	/* Register access ops */
	phy->ops.set_page = igc_null_set_page;
	phy->ops.read_reg = igc_null_read_reg;
	phy->ops.read_reg_locked = igc_null_read_reg;
	phy->ops.read_reg_page = igc_null_read_reg;
	phy->ops.write_reg = igc_null_write_reg;
	phy->ops.write_reg_locked = igc_null_write_reg;
	phy->ops.write_reg_page = igc_null_write_reg;

	/* Power management ops */
	phy->ops.set_d0_lplu_state = igc_null_lplu_state;
	phy->ops.set_d3_lplu_state = igc_null_lplu_state;
	phy->ops.power_up = igc_null_phy_generic;
	phy->ops.power_down = igc_null_phy_generic;
}
/**
 * igc_null_set_page - No-op stub for the set_page PHY op
 * @hw: pointer to the HW structure (unused)
 * @data: page value (unused)
 *
 * Always returns IGC_SUCCESS.
 */
s32 igc_null_set_page(struct igc_hw IGC_UNUSEDARG *hw,
u16 IGC_UNUSEDARG data)
{
DEBUGFUNC("igc_null_set_page");
return IGC_SUCCESS;
}
/**
 * igc_null_read_reg - No-op stub for PHY register reads
 * @hw: pointer to the HW structure (unused)
 * @offset: register offset (unused)
 * @data: output pointer; NOTE(review): left unmodified by this stub
 *
 * Always returns IGC_SUCCESS.
 */
s32 igc_null_read_reg(struct igc_hw IGC_UNUSEDARG *hw,
u32 IGC_UNUSEDARG offset, u16 IGC_UNUSEDARG *data)
{
DEBUGFUNC("igc_null_read_reg");
return IGC_SUCCESS;
}
/**
 * igc_null_phy_generic - No-op stub for void PHY ops (release/power)
 * @hw: pointer to the HW structure (unused)
 */
void igc_null_phy_generic(struct igc_hw IGC_UNUSEDARG *hw)
{
DEBUGFUNC("igc_null_phy_generic");
return;
}
/**
 * igc_null_lplu_state - No-op stub for the LPLU-state PHY ops
 * @hw: pointer to the HW structure (unused)
 * @active: requested LPLU state (unused)
 *
 * Always returns IGC_SUCCESS.
 */
s32 igc_null_lplu_state(struct igc_hw IGC_UNUSEDARG *hw,
bool IGC_UNUSEDARG active)
{
DEBUGFUNC("igc_null_lplu_state");
return IGC_SUCCESS;
}
/**
 * igc_null_write_reg - No-op stub for PHY register writes
 * @hw: pointer to the HW structure (unused)
 * @offset: register offset (unused)
 * @data: value to write (unused)
 *
 * Always returns IGC_SUCCESS.
 */
s32 igc_null_write_reg(struct igc_hw IGC_UNUSEDARG *hw,
u32 IGC_UNUSEDARG offset, u16 IGC_UNUSEDARG data)
{
DEBUGFUNC("igc_null_write_reg");
return IGC_SUCCESS;
}
/**
 * igc_check_reset_block_generic - Check if PHY resets are blocked
 * @hw: pointer to the HW structure
 *
 * Reads the MANC register and reports IGC_BLK_PHY_RESET when the
 * manageability block is holding off PHY resets, IGC_SUCCESS otherwise.
 */
s32 igc_check_reset_block_generic(struct igc_hw *hw)
{
	u32 manc;

	DEBUGFUNC("igc_check_reset_block");

	manc = IGC_READ_REG(hw, IGC_MANC);

	if (manc & IGC_MANC_BLK_PHY_RST_ON_IDE)
		return IGC_BLK_PHY_RESET;
	return IGC_SUCCESS;
}
/**
 * igc_get_phy_id - Retrieve the PHY identifier and revision
 * @hw: pointer to the HW structure
 *
 * Reads PHY_ID1 and PHY_ID2 and assembles phy->id (ID2's revision bits
 * masked off) and phy->revision.  A no-op (returns IGC_SUCCESS) when no
 * read_reg op is installed.
 */
s32 igc_get_phy_id(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u16 reg_word;
	s32 ret_val;

	DEBUGFUNC("igc_get_phy_id");

	if (!phy->ops.read_reg)
		return IGC_SUCCESS;

	/* Upper half of the ID comes from PHY_ID1. */
	ret_val = phy->ops.read_reg(hw, PHY_ID1, &reg_word);
	if (ret_val)
		return ret_val;
	phy->id = (u32)(reg_word << 16);

	usec_delay(200);

	/* Lower half and revision come from PHY_ID2. */
	ret_val = phy->ops.read_reg(hw, PHY_ID2, &reg_word);
	if (ret_val)
		return ret_val;
	phy->id |= (u32)(reg_word & PHY_REVISION_MASK);
	phy->revision = (u32)(reg_word & ~PHY_REVISION_MASK);

	return IGC_SUCCESS;
}
/**
 * igc_read_phy_reg_mdic - Read a PHY register via the MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read (must be <= MAX_PHY_REG_ADDRESS)
 * @data: receives the 16-bit register value on success
 *
 * Issues a read command through IGC_MDIC, polls for completion, and
 * validates the ready/error/offset fields of the result.
 */
s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
{
	struct igc_phy_info *phy = &hw->phy;
	u32 remaining, mdic = 0;

	DEBUGFUNC("igc_read_phy_reg_mdic");

	if (offset > MAX_PHY_REG_ADDRESS) {
		DEBUGOUT1("PHY Address %d is out of range\n", offset);
		return -IGC_ERR_PARAM;
	}

	/* Compose the read command and kick it off. */
	mdic = (offset << IGC_MDIC_REG_SHIFT) |
	       (phy->addr << IGC_MDIC_PHY_SHIFT) |
	       IGC_MDIC_OP_READ;
	IGC_WRITE_REG(hw, IGC_MDIC, mdic);

	/* Poll until the hardware reports the transaction finished. */
	remaining = IGC_GEN_POLL_TIMEOUT * 3;
	while (remaining--) {
		usec_delay_irq(50);
		mdic = IGC_READ_REG(hw, IGC_MDIC);
		if (mdic & IGC_MDIC_READY)
			break;
	}
	if (!(mdic & IGC_MDIC_READY)) {
		DEBUGOUT("MDI Read did not complete\n");
		return -IGC_ERR_PHY;
	}
	if (mdic & IGC_MDIC_ERROR) {
		DEBUGOUT("MDI Error\n");
		return -IGC_ERR_PHY;
	}
	/* The echoed offset must match what we asked for. */
	if (((mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT) != offset) {
		DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
			  offset,
			  (mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT);
		return -IGC_ERR_PHY;
	}

	*data = (u16)mdic;

	return IGC_SUCCESS;
}
/**
 * igc_write_phy_reg_mdic - Write a PHY register via the MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write (must be <= MAX_PHY_REG_ADDRESS)
 * @data: 16-bit value to write
 *
 * Issues a write command through IGC_MDIC, polls for completion, and
 * validates the ready/error/offset fields of the result.
 */
s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
{
	struct igc_phy_info *phy = &hw->phy;
	u32 remaining, mdic = 0;

	DEBUGFUNC("igc_write_phy_reg_mdic");

	if (offset > MAX_PHY_REG_ADDRESS) {
		DEBUGOUT1("PHY Address %d is out of range\n", offset);
		return -IGC_ERR_PARAM;
	}

	/* Compose the write command and kick it off. */
	mdic = ((u32)data) |
	       (offset << IGC_MDIC_REG_SHIFT) |
	       (phy->addr << IGC_MDIC_PHY_SHIFT) |
	       IGC_MDIC_OP_WRITE;
	IGC_WRITE_REG(hw, IGC_MDIC, mdic);

	/* Poll until the hardware reports the transaction finished. */
	remaining = IGC_GEN_POLL_TIMEOUT * 3;
	while (remaining--) {
		usec_delay_irq(50);
		mdic = IGC_READ_REG(hw, IGC_MDIC);
		if (mdic & IGC_MDIC_READY)
			break;
	}
	if (!(mdic & IGC_MDIC_READY)) {
		DEBUGOUT("MDI Write did not complete\n");
		return -IGC_ERR_PHY;
	}
	if (mdic & IGC_MDIC_ERROR) {
		DEBUGOUT("MDI Error\n");
		return -IGC_ERR_PHY;
	}
	/* The echoed offset must match what we asked for. */
	if (((mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT) != offset) {
		DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
			  offset,
			  (mdic & IGC_MDIC_REG_MASK) >> IGC_MDIC_REG_SHIFT);
		return -IGC_ERR_PHY;
	}

	return IGC_SUCCESS;
}
static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val;
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg = 0;
u16 aneg_multigbt_an_ctrl = 0;
DEBUGFUNC("igc_phy_setup_autoneg");
phy->autoneg_advertised &= phy->autoneg_mask;
ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
&mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
}
if (phy->autoneg_mask & ADVERTISE_2500_FULL) {
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
&aneg_multigbt_an_ctrl);
if (ret_val)
return ret_val;
}
mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
NWAY_AR_100TX_HD_CAPS |
NWAY_AR_10T_FD_CAPS |
NWAY_AR_10T_HD_CAPS);
mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
DEBUGOUT("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
DEBUGOUT("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
DEBUGOUT("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
DEBUGOUT("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
DEBUGOUT("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
DEBUGOUT("Advertise 2500mb Half duplex request denied!\n");
if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
DEBUGOUT("Advertise 2500mb Full duplex\n");
aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
} else {
aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
}
switch (hw->fc.current_mode) {
case igc_fc_none:
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case igc_fc_rx_pause:
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case igc_fc_tx_pause:
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case igc_fc_full:
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
DEBUGOUT("Flow control param set incorrectly\n");
return -IGC_ERR_CONFIG;
}
ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
if (phy->autoneg_mask & ADVERTISE_1000_FULL)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
if (phy->autoneg_mask & ADVERTISE_2500_FULL)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
aneg_multigbt_an_ctrl);
return ret_val;
}
/**
 * igc_copper_link_autoneg - Setup and restart auto-negotiation
 * @hw: pointer to the HW structure
 *
 * Programs the advertisement registers via igc_phy_setup_autoneg(),
 * restarts auto-negotiation through PHY_CONTROL, and optionally waits
 * for completion when phy->autoneg_wait_to_complete is set.  Always
 * flags mac.get_link_status so the next link check re-reads hardware.
 */
static s32 igc_copper_link_autoneg(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u16 ctrl_reg;
	s32 ret_val;

	DEBUGFUNC("igc_copper_link_autoneg");

	/* Advertise only what the PHY supports; if nothing was requested,
	 * advertise everything supported.
	 */
	phy->autoneg_advertised &= phy->autoneg_mask;
	if (!phy->autoneg_advertised)
		phy->autoneg_advertised = phy->autoneg_mask;

	DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
	ret_val = igc_phy_setup_autoneg(hw);
	if (ret_val) {
		DEBUGOUT("Error Setting up Auto-Negotiation\n");
		return ret_val;
	}

	/* Set the AN-enable and restart bits in PHY_CONTROL. */
	DEBUGOUT("Restarting Auto-Neg\n");
	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &ctrl_reg);
	if (ret_val)
		return ret_val;

	ctrl_reg |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, ctrl_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_wait_to_complete) {
		ret_val = igc_wait_autoneg(hw);
		if (ret_val) {
			DEBUGOUT("Error while waiting for autoneg to complete\n");
			return ret_val;
		}
	}

	hw->mac.get_link_status = true;

	return ret_val;
}
/**
 * igc_setup_copper_link_generic - Configure the copper link
 * @hw: pointer to the HW structure
 *
 * Either runs auto-negotiation or forces speed/duplex depending on
 * mac.autoneg, then polls for link and, if link comes up, configures
 * the collision distance and flow control.
 */
s32 igc_setup_copper_link_generic(struct igc_hw *hw)
{
	bool link_up;
	s32 ret_val;

	DEBUGFUNC("igc_setup_copper_link_generic");

	if (hw->mac.autoneg) {
		ret_val = igc_copper_link_autoneg(hw);
		if (ret_val)
			return ret_val;
	} else {
		/* Forced mode: the PHY-specific op handles the details. */
		DEBUGOUT("Forcing Speed and Duplex\n");
		ret_val = hw->phy.ops.force_speed_duplex(hw);
		if (ret_val) {
			DEBUGOUT("Error Forcing Speed and Duplex\n");
			return ret_val;
		}
	}

	/* Poll up to COPPER_LINK_UP_LIMIT times for link. */
	ret_val = igc_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
					   &link_up);
	if (ret_val)
		return ret_val;

	if (link_up) {
		DEBUGOUT("Valid link established!!!\n");
		hw->mac.ops.config_collision_dist(hw);
		ret_val = igc_config_fc_after_link_up_generic(hw);
	} else {
		DEBUGOUT("Unable to establish link!!!\n");
	}

	return ret_val;
}
/**
 * igc_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
 * @hw: pointer to the HW structure
 * @phy_ctrl: current PHY_CONTROL value; updated in place
 *
 * Disables auto-negotiation and forces the speed/duplex selected in
 * mac->forced_speed_duplex into both the MAC control register and the
 * supplied PHY control value.  The caller writes *phy_ctrl back to the
 * PHY.  Flow control is disabled since it is meaningless on a forced
 * link.
 */
void igc_phy_force_speed_duplex_setup(struct igc_hw *hw, u16 *phy_ctrl)
{
	struct igc_mac_info *mac = &hw->mac;
	u32 ctrl_reg;

	DEBUGFUNC("igc_phy_force_speed_duplex_setup");

	/* Turn off flow control when forcing speed/duplex. */
	hw->fc.current_mode = igc_fc_none;

	ctrl_reg = IGC_READ_REG(hw, IGC_CTRL);

	/* Force speed/duplex in the MAC and stop auto-speed detection. */
	ctrl_reg |= IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX;
	ctrl_reg &= ~IGC_CTRL_SPD_SEL;
	ctrl_reg &= ~IGC_CTRL_ASDE;

	/* Disable auto-negotiation on the PHY side as well. */
	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;

	if (mac->forced_speed_duplex & IGC_ALL_HALF_DUPLEX) {
		ctrl_reg &= ~IGC_CTRL_FD;
		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
		DEBUGOUT("Half Duplex\n");
	} else {
		ctrl_reg |= IGC_CTRL_FD;
		*phy_ctrl |= MII_CR_FULL_DUPLEX;
		DEBUGOUT("Full Duplex\n");
	}

	if (mac->forced_speed_duplex & IGC_ALL_100_SPEED) {
		ctrl_reg |= IGC_CTRL_SPD_100;
		*phy_ctrl |= MII_CR_SPEED_100;
		*phy_ctrl &= ~MII_CR_SPEED_1000;
		DEBUGOUT("Forcing 100mb\n");
	} else {
		/* Neither 100 nor 1000 selected: force 10 Mb. */
		ctrl_reg &= ~(IGC_CTRL_SPD_1000 | IGC_CTRL_SPD_100);
		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
		DEBUGOUT("Forcing 10mb\n");
	}

	hw->mac.ops.config_collision_dist(hw);

	IGC_WRITE_REG(hw, IGC_CTRL, ctrl_reg);
}
/**
 * igc_set_d3_lplu_state_generic - Set Low Power Link Up (D3) state
 * @hw: pointer to the HW structure
 * @active: true to enable LPLU for D3, false to disable it
 *
 * When @active is false: clears the D3 LPLU bit in the power-management
 * register, then enables or disables SmartSpeed in the port-config
 * register according to phy->smart_speed.
 *
 * When @active is true: only if ALL advertised speeds equal one of the
 * full, not-gig, or 10-speed masks, sets the D3 LPLU bit and disables
 * SmartSpeed (LPLU and SmartSpeed are mutually exclusive per this code
 * path).
 *
 * A no-op (returns IGC_SUCCESS) when no read_reg op is installed.
 */
s32 igc_set_d3_lplu_state_generic(struct igc_hw *hw, bool active)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
DEBUGFUNC("igc_set_d3_lplu_state_generic");
if (!hw->phy.ops.read_reg)
return IGC_SUCCESS;
ret_val = phy->ops.read_reg(hw, IGP02IGC_PHY_POWER_MGMT, &data);
if (ret_val)
return ret_val;
if (!active) {
/* Disable D3 LPLU. */
data &= ~IGP02IGC_PM_D3_LPLU;
ret_val = phy->ops.write_reg(hw, IGP02IGC_PHY_POWER_MGMT,
data);
if (ret_val)
return ret_val;
/* LPLU and SmartSpeed are mutually exclusive; with LPLU off,
 * apply the configured SmartSpeed preference.
 */
if (phy->smart_speed == igc_smart_speed_on) {
ret_val = phy->ops.read_reg(hw,
IGP01IGC_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data |= IGP01IGC_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
IGP01IGC_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
} else if (phy->smart_speed == igc_smart_speed_off) {
ret_val = phy->ops.read_reg(hw,
IGP01IGC_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data &= ~IGP01IGC_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
IGP01IGC_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
}
/* For igc_smart_speed_default, leave the register untouched. */
} else if ((phy->autoneg_advertised == IGC_ALL_SPEED_DUPLEX) ||
(phy->autoneg_advertised == IGC_ALL_NOT_GIG) ||
(phy->autoneg_advertised == IGC_ALL_10_SPEED)) {
/* Enable D3 LPLU and turn SmartSpeed off. */
data |= IGP02IGC_PM_D3_LPLU;
ret_val = phy->ops.write_reg(hw, IGP02IGC_PHY_POWER_MGMT,
data);
if (ret_val)
return ret_val;
ret_val = phy->ops.read_reg(hw, IGP01IGC_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data &= ~IGP01IGC_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw, IGP01IGC_PHY_PORT_CONFIG,
data);
}
return ret_val;
}
/**
 * igc_check_downshift_generic - Check for PHY downshift
 * @hw: pointer to the HW structure
 *
 * The i225 PHY (the only type handled here) has no downshift status to
 * report, so speed_downgraded is simply cleared.
 *
 * Fix: removed the uninitialized `ret_val` local and the unreachable
 * `return ret_val` after the switch — every switch path already
 * returned, and the trailing return would have yielded an
 * indeterminate value had it ever executed.
 */
s32 igc_check_downshift_generic(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;

	DEBUGFUNC("igc_check_downshift_generic");

	switch (phy->type) {
	case igc_phy_i225:
	default:
		/* No downshift indication on this PHY family. */
		phy->speed_downgraded = false;
		return IGC_SUCCESS;
	}
}
/**
 * igc_wait_autoneg - Wait for auto-negotiation to complete
 * @hw: pointer to the HW structure
 *
 * Polls PHY_STATUS until MII_SR_AUTONEG_COMPLETE is set or
 * PHY_AUTO_NEG_LIMIT iterations (100 ms apart) have elapsed.
 *
 * NOTE(review): a timeout is NOT reported as an error — the function
 * returns IGC_SUCCESS even if auto-negotiation never completed; only a
 * register-read failure produces a non-zero return.
 */
static s32 igc_wait_autoneg(struct igc_hw *hw)
{
s32 ret_val = IGC_SUCCESS;
u16 i, phy_status;
DEBUGFUNC("igc_wait_autoneg");
/* Nothing to wait on if register access is not wired up. */
if (!hw->phy.ops.read_reg)
return IGC_SUCCESS;
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
/* PHY_STATUS is deliberately read twice back-to-back —
 * presumably because some status bits are latched and the
 * second read reflects current state; TODO confirm against
 * the PHY datasheet.
 */
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_AUTONEG_COMPLETE)
break;
msec_delay(100);
}
return ret_val;
}
/**
 * igc_phy_has_link_generic - Poll the PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polls, in microseconds
 * @success: set true if link was detected within @iterations polls
 *
 * Polls PHY_STATUS for MII_SR_LINK_STATUS.  A no-op (returns
 * IGC_SUCCESS, *success untouched) when no read_reg op is installed.
 */
s32 igc_phy_has_link_generic(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success)
{
s32 ret_val = IGC_SUCCESS;
u16 i, phy_status;
DEBUGFUNC("igc_phy_has_link_generic");
if (!hw->phy.ops.read_reg)
return IGC_SUCCESS;
for (i = 0; i < iterations; i++) {
/* If the first read fails, wait out the interval and try a
 * second read below rather than giving up immediately —
 * presumably to tolerate a PHY that is momentarily
 * inaccessible; TODO confirm original intent.
 */
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val) {
if (usec_interval >= 1000)
msec_delay(usec_interval/1000);
else
usec_delay(usec_interval);
}
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_LINK_STATUS)
break;
if (usec_interval >= 1000)
msec_delay(usec_interval/1000);
else
usec_delay(usec_interval);
}
/* NOTE(review): on a read error that breaks the loop, i < iterations
 * still holds, so *success reads true; callers must check ret_val
 * before trusting *success.
 */
*success = (i < iterations);
return ret_val;
}
/**
 * igc_phy_hw_reset_generic - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verifies the PHY reset is not blocked, acquires the PHY semaphore,
 * pulses IGC_CTRL_PHY_RST, then polls the I225 PHPM register for the
 * reset-complete bit (up to ~10 ms).  A poll timeout is only logged,
 * not returned as an error.
 */
s32 igc_phy_hw_reset_generic(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val;
u32 ctrl, timeout = 10000, phpm = 0;
DEBUGFUNC("igc_phy_hw_reset_generic");
if (phy->ops.check_reset_block) {
ret_val = phy->ops.check_reset_block(hw);
/* If reset is blocked, silently skip the reset (success). */
if (ret_val)
return IGC_SUCCESS;
}
ret_val = phy->ops.acquire(hw);
if (ret_val)
return ret_val;
/* NOTE(review): this initial PHPM read is overwritten by the poll
 * loop below and looks redundant; kept as-is in case the read has
 * a hardware side effect — confirm against the I225 datasheet.
 */
phpm = IGC_READ_REG(hw, IGC_I225_PHPM);
/* Assert PHY reset, hold it for reset_delay_us, then deassert. */
ctrl = IGC_READ_REG(hw, IGC_CTRL);
IGC_WRITE_REG(hw, IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
IGC_WRITE_FLUSH(hw);
usec_delay(phy->reset_delay_us);
IGC_WRITE_REG(hw, IGC_CTRL, ctrl);
IGC_WRITE_FLUSH(hw);
usec_delay(150);
/* Poll for reset completion (up to ~10000 us). */
do {
phpm = IGC_READ_REG(hw, IGC_I225_PHPM);
timeout--;
usec_delay(1);
} while (!(phpm & IGC_I225_PHPM_RST_COMPL) && timeout);
if (!timeout)
DEBUGOUT("Timeout expired after a phy reset\n");
phy->ops.release(hw);
return ret_val;
}
/**
 * igc_power_up_phy_copper - Power up the copper PHY
 * @hw: pointer to the HW structure
 *
 * Clears the power-down bit in PHY_CONTROL and allows the PHY a short
 * settling delay.
 */
void igc_power_up_phy_copper(struct igc_hw *hw)
{
	u16 ctrl_reg = 0;

	hw->phy.ops.read_reg(hw, PHY_CONTROL, &ctrl_reg);
	ctrl_reg &= ~MII_CR_POWER_DOWN;
	hw->phy.ops.write_reg(hw, PHY_CONTROL, ctrl_reg);

	usec_delay(300);
}
/**
 * igc_power_down_phy_copper - Power down the copper PHY
 * @hw: pointer to the HW structure
 *
 * Sets the power-down bit in PHY_CONTROL and allows the PHY a short
 * settling delay.
 */
void igc_power_down_phy_copper(struct igc_hw *hw)
{
	u16 ctrl_reg = 0;

	hw->phy.ops.read_reg(hw, PHY_CONTROL, &ctrl_reg);
	ctrl_reg |= MII_CR_POWER_DOWN;
	hw->phy.ops.write_reg(hw, PHY_CONTROL, ctrl_reg);

	msec_delay(1);
}
/**
 * igc_write_phy_reg_gpy - Write a GPY PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset; the GPY_MMD field selects the MMD device
 * @data: value to write
 *
 * Writes directly via MDIC when no MMD device is encoded in @offset,
 * otherwise goes through the XMDIO indirect-access registers.
 *
 * Fix: the PHY semaphore acquired via phy.ops.acquire was leaked when
 * igc_write_phy_reg_mdic failed (early return before release).  The
 * semaphore is now released on all paths.
 */
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
{
	u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
	s32 ret_val;

	DEBUGFUNC("igc_write_phy_reg_gpy");

	offset = offset & GPY_REG_MASK;

	if (!dev_addr) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = igc_write_phy_reg_mdic(hw, offset, data);
		/* Always release the semaphore, even on MDIC failure. */
		hw->phy.ops.release(hw);
	} else {
		ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
					      data);
	}

	return ret_val;
}
/**
 * igc_read_phy_reg_gpy - Read a GPY PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset; the GPY_MMD field selects the MMD device
 * @data: receives the register value on success
 *
 * Reads directly via MDIC when no MMD device is encoded in @offset,
 * otherwise goes through the XMDIO indirect-access registers.
 *
 * Fix: the PHY semaphore acquired via phy.ops.acquire was leaked when
 * igc_read_phy_reg_mdic failed (early return before release).  The
 * semaphore is now released on all paths.
 */
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
{
	u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
	s32 ret_val;

	DEBUGFUNC("igc_read_phy_reg_gpy");

	offset = offset & GPY_REG_MASK;

	if (!dev_addr) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = igc_read_phy_reg_mdic(hw, offset, data);
		/* Always release the semaphore, even on MDIC failure. */
		hw->phy.ops.release(hw);
	} else {
		ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
					     data);
	}

	return ret_val;
}
/**
 * __igc_access_xmdio_reg - Read or write an XMDIO (MMD) register
 * @hw: pointer to the HW structure
 * @address: register offset within the MMD device
 * @dev_addr: MMD device address
 * @data: value to write, or buffer to read into
 * @read: true to read, false to write
 *
 * Performs the standard four-step indirect MMD access through the
 * MMDAC/MMDAAD register pair, then restores MMDAC to address mode.
 */
static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val;

	DEBUGFUNC("__igc_access_xmdio_reg");

	/* Step 1: select the MMD device (address function). */
	ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	/* Step 2: latch the register offset. */
	ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
	if (ret_val)
		return ret_val;

	/* Step 3: switch MMDAC to the data function. */
	ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC,
					IGC_MMDAC_FUNC_DATA | dev_addr);
	if (ret_val)
		return ret_val;

	/* Step 4: transfer the register contents. */
	if (read)
		ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Restore MMDAC to address mode. */
	return hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
}
/**
 * igc_read_xmdio_reg - Read an XMDIO (MMD) register
 * @hw: pointer to the HW structure
 * @addr: register offset within the MMD device
 * @dev_addr: MMD device address
 * @data: receives the register value on success
 *
 * Thin read wrapper around __igc_access_xmdio_reg().
 */
s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
DEBUGFUNC("igc_read_xmdio_reg");
return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
/**
 * igc_write_xmdio_reg - Write an XMDIO (MMD) register
 * @hw: pointer to the HW structure
 * @addr: register offset within the MMD device
 * @dev_addr: MMD device address
 * @data: value to write
 *
 * Thin write wrapper around __igc_access_xmdio_reg().
 */
s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
DEBUGFUNC("igc_write_xmdio_reg");
return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}