#include "ixgbe_type.h"
#include "ixgbe_mbx.h"
static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id);
static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id);
/**
 * ixgbe_read_mbx - Reads a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @mbx_id: id of mailbox to read
 *
 * returns IXGBE_SUCCESS if it successfully read a message, or
 * IXGBE_ERR_CONFIG if no read op is installed for this mailbox.
 **/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	DEBUGFUNC("ixgbe_read_mbx");

	/* Clamp oversized requests to the hardware mailbox size. */
	if (size > mbx->size) {
		ERROR_REPORT3(IXGBE_ERROR_ARGUMENT,
			      "Invalid mailbox message size %u, changing to %u",
			      size, mbx->size);
		size = mbx->size;
	}

	if (!mbx->ops[mbx_id].read)
		return IXGBE_ERR_CONFIG;

	return mbx->ops[mbx_id].read(hw, msg, size, mbx_id);
}
/**
 * ixgbe_poll_mbx - Wait for a message, then read it
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @mbx_id: id of mailbox to poll
 *
 * Blocks (polling) until a message arrives or the configured timeout
 * elapses, then reads it into @msg.
 *
 * returns IXGBE_SUCCESS on a successful read, IXGBE_ERR_CONFIG if the
 * required ops or timeout are not configured, else the poll error.
 **/
s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val;

	DEBUGFUNC("ixgbe_poll_mbx");

	if (!mbx->ops[mbx_id].read || !mbx->ops[mbx_id].check_for_msg ||
	    !mbx->timeout)
		return IXGBE_ERR_CONFIG;

	/* Clamp oversized requests to the hardware mailbox size. */
	if (size > mbx->size) {
		ERROR_REPORT3(IXGBE_ERROR_ARGUMENT,
			      "Invalid mailbox message size %u, changing to %u",
			      size, mbx->size);
		size = mbx->size;
	}

	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
	if (ret_val)
		return ret_val;

	return mbx->ops[mbx_id].read(hw, msg, size, mbx_id);
}
/**
 * ixgbe_write_mbx - Write a message to the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @mbx_id: id of mailbox to write
 *
 * returns the write op's result, IXGBE_ERR_CONFIG if the required ops
 * or timeout are not configured, or IXGBE_ERR_PARAM for an oversized
 * message (oversized messages are rejected, not clamped, on write).
 **/
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	DEBUGFUNC("ixgbe_write_mbx");

	if (!mbx->ops[mbx_id].write || !mbx->ops[mbx_id].check_for_ack ||
	    !mbx->ops[mbx_id].release || !mbx->timeout)
		return IXGBE_ERR_CONFIG;

	if (size > mbx->size) {
		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
			      "Invalid mailbox message size %u", size);
		return IXGBE_ERR_PARAM;
	}

	return mbx->ops[mbx_id].write(hw, msg, size, mbx_id);
}
/**
 * ixgbe_check_for_msg - checks to see if someone sent us mail
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns IXGBE_SUCCESS if a message is pending, IXGBE_ERR_CONFIG if
 * no check_for_msg op is installed.
 **/
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	DEBUGFUNC("ixgbe_check_for_msg");

	if (!mbx->ops[mbx_id].check_for_msg)
		return IXGBE_ERR_CONFIG;

	return mbx->ops[mbx_id].check_for_msg(hw, mbx_id);
}
/**
 * ixgbe_check_for_ack - checks to see if someone acked our message
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns IXGBE_SUCCESS if an ack is pending, IXGBE_ERR_CONFIG if no
 * check_for_ack op is installed.
 **/
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	DEBUGFUNC("ixgbe_check_for_ack");

	if (!mbx->ops[mbx_id].check_for_ack)
		return IXGBE_ERR_CONFIG;

	return mbx->ops[mbx_id].check_for_ack(hw, mbx_id);
}
/**
 * ixgbe_check_for_rst - checks to see if other side has reset
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to check
 *
 * returns IXGBE_SUCCESS if a reset occurred, IXGBE_ERR_CONFIG if no
 * check_for_rst op is installed.
 **/
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	DEBUGFUNC("ixgbe_check_for_rst");

	if (!mbx->ops[mbx_id].check_for_rst)
		return IXGBE_ERR_CONFIG;

	return mbx->ops[mbx_id].check_for_rst(hw, mbx_id);
}
/**
 * ixgbe_clear_mbx - Clear mailbox memory
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to clear
 *
 * returns the clear op's result, or IXGBE_ERR_CONFIG if no clear op is
 * installed (e.g. the VF-side ops tables leave it NULL).
 **/
s32 ixgbe_clear_mbx(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	DEBUGFUNC("ixgbe_clear_mbx");

	if (!mbx->ops[mbx_id].clear)
		return IXGBE_ERR_CONFIG;

	return mbx->ops[mbx_id].clear(hw, mbx_id);
}
/**
 * ixgbe_poll_for_msg - Wait for message notification
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to poll
 *
 * Spins until check_for_msg reports a pending message (returns 0) or
 * mbx->timeout attempts are exhausted, sleeping mbx->usec_delay
 * between attempts.
 *
 * returns IXGBE_SUCCESS if a message arrived, IXGBE_ERR_CONFIG if
 * polling is not configured, IXGBE_ERR_TIMEOUT on timeout.
 **/
static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	DEBUGFUNC("ixgbe_poll_for_msg");

	if (!countdown || !mbx->ops[mbx_id].check_for_msg)
		return IXGBE_ERR_CONFIG;

	/* The delay is skipped on the final attempt so the total wait
	 * does not overshoot the budget by one interval.
	 */
	while (countdown && mbx->ops[mbx_id].check_for_msg(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;
		usec_delay(mbx->usec_delay);
	}

	if (countdown == 0) {
		ERROR_REPORT2(IXGBE_ERROR_POLLING,
			      "Polling for VF%u mailbox message timedout", mbx_id);
		return IXGBE_ERR_TIMEOUT;
	}

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_poll_for_ack - Wait for message acknowledgement
 * @hw: pointer to the HW structure
 * @mbx_id: id of mailbox to poll
 *
 * Spins until check_for_ack reports an ack (returns 0) or
 * mbx->timeout attempts are exhausted, sleeping mbx->usec_delay
 * between attempts.
 *
 * returns IXGBE_SUCCESS if an ack arrived, IXGBE_ERR_CONFIG if
 * polling is not configured, IXGBE_ERR_TIMEOUT on timeout.
 **/
static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	DEBUGFUNC("ixgbe_poll_for_ack");

	if (!countdown || !mbx->ops[mbx_id].check_for_ack)
		return IXGBE_ERR_CONFIG;

	/* The delay is skipped on the final attempt so the total wait
	 * does not overshoot the budget by one interval.
	 */
	while (countdown && mbx->ops[mbx_id].check_for_ack(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			break;
		usec_delay(mbx->usec_delay);
	}

	if (countdown == 0) {
		ERROR_REPORT2(IXGBE_ERROR_POLLING,
			      "Polling for VF%u mailbox ack timedout", mbx_id);
		return IXGBE_ERR_TIMEOUT;
	}

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_read_mailbox_vf - read VF's mailbox register
 * @hw: pointer to the HW structure
 *
 * Returns the VFMAILBOX register contents merged with the driver's
 * cached copy (hw->mbx.vf_mailbox).  Bits covered by
 * IXGBE_VFMAILBOX_R2C_BITS are latched into the cache on every read.
 * NOTE(review): presumably those bits are read-to-clear in hardware,
 * so the cache keeps a notification visible until software explicitly
 * drops it (see the clear_* helpers) — confirm against the datasheet.
 */
static u32 ixgbe_read_mailbox_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);

	/* fold in previously latched bits, then latch any new ones */
	vf_mailbox |= hw->mbx.vf_mailbox;
	hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;

	return vf_mailbox;
}
/* Drop a latched PFSTS (message pending) indication, counting it in
 * the request statistics.  No-op when no message indication is latched.
 */
static void ixgbe_clear_msg_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);

	if (!(vf_mailbox & IXGBE_VFMAILBOX_PFSTS))
		return;

	hw->mbx.stats.reqs++;
	hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
}
/* Drop a latched PFACK indication, counting it in the ack statistics.
 * No-op when no ack indication is latched.
 */
static void ixgbe_clear_ack_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);

	if (!(vf_mailbox & IXGBE_VFMAILBOX_PFACK))
		return;

	hw->mbx.stats.acks++;
	hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
}
/* Drop latched reset indications (RSTI - reset in progress, RSTD -
 * reset done), counting them in the reset statistics.  No-op when
 * neither indication is latched.
 */
static void ixgbe_clear_rst_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);

	if (!(vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)))
		return;

	hw->mbx.stats.rsts++;
	hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
				IXGBE_VFMAILBOX_RSTD);
}
/* Test whether any bit in @mask is set in the (cache-merged) VF
 * mailbox register.  Returns IXGBE_SUCCESS when set, IXGBE_ERR_MBX
 * otherwise.
 */
static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
	u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);

	return (vf_mailbox & mask) ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
/**
 * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
 * @hw: pointer to the HW structure
 * @mbx_id: unused mailbox id
 *
 * returns IXGBE_SUCCESS if the PF has posted a message (PFSTS set).
 * Does not consume the indication.
 **/
static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
	UNREFERENCED_1PARAMETER(mbx_id);
	DEBUGFUNC("ixgbe_check_for_msg_vf");

	if (ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS))
		return IXGBE_ERR_MBX;

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
 * @hw: pointer to the HW structure
 * @mbx_id: unused mailbox id
 *
 * returns IXGBE_SUCCESS if the PF has set the ACK bit (PFACK).  The
 * indication is consumed so it is reported only once.
 **/
static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
	UNREFERENCED_1PARAMETER(mbx_id);
	DEBUGFUNC("ixgbe_check_for_ack_vf");

	if (ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK))
		return IXGBE_ERR_MBX;

	ixgbe_clear_ack_vf(hw);
	return IXGBE_SUCCESS;
}
/**
 * ixgbe_check_for_rst_vf - checks to see if the PF has reset
 * @hw: pointer to the HW structure
 * @mbx_id: unused mailbox id
 *
 * returns IXGBE_SUCCESS if a reset is in progress (RSTI) or has
 * completed (RSTD).  The indication is consumed so it is reported
 * only once.
 **/
static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
	UNREFERENCED_1PARAMETER(mbx_id);
	DEBUGFUNC("ixgbe_check_for_rst_vf");

	if (ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_RSTI |
				       IXGBE_VFMAILBOX_RSTD))
		return IXGBE_ERR_MBX;

	ixgbe_clear_rst_vf(hw);
	return IXGBE_SUCCESS;
}
/**
 * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
 * @hw: pointer to the HW structure
 *
 * Attempts to set the VFU (VF-owned) bit in VFMAILBOX and reads the
 * register back to confirm the bit stuck, retrying up to mbx->timeout
 * times with mbx->usec_delay microseconds between attempts.
 * NOTE(review): presumably the hardware refuses to latch VFU while the
 * PF holds the mailbox, which is what makes the read-back a valid
 * ownership check — confirm against the datasheet.
 *
 * returns IXGBE_SUCCESS if the lock was obtained, IXGBE_ERR_CONFIG if
 * mbx->timeout is zero, IXGBE_ERR_TIMEOUT if every attempt failed.
 **/
static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;
	s32 ret_val = IXGBE_ERR_MBX;
	u32 vf_mailbox;

	DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");

	if (!mbx->timeout)
		return IXGBE_ERR_CONFIG;

	while (countdown--) {
		/* Reserve mailbox for VF use */
		vf_mailbox = ixgbe_read_mailbox_vf(hw);
		vf_mailbox |= IXGBE_VFMAILBOX_VFU;
		IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);

		/* Verify that the VF now owns the lock */
		if (ixgbe_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
			ret_val = IXGBE_SUCCESS;
			break;
		}

		/* Wait a bit before trying again */
		usec_delay(mbx->usec_delay);
	}

	if (ret_val != IXGBE_SUCCESS) {
		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
			      "Failed to obtain mailbox lock");
		ret_val = IXGBE_ERR_TIMEOUT;
	}

	return ret_val;
}
/**
 * ixgbe_release_mbx_lock_dummy - no-op mailbox lock release
 * @hw: pointer to the HW structure (unused)
 * @mbx_id: id of mailbox (unused)
 *
 * Placeholder release op installed by the legacy (init-time) ops
 * tables, which perform no explicit lock hand-back.
 */
static void ixgbe_release_mbx_lock_dummy(struct ixgbe_hw *hw, u16 mbx_id)
{
	UNREFERENCED_2PARAMETER(hw, mbx_id);

	DEBUGFUNC("ixgbe_release_mbx_lock_dummy");
}
/**
 * ixgbe_release_mbx_lock_vf - release mailbox lock
 * @hw: pointer to the HW structure
 * @mbx_id: unused mailbox id
 *
 * Clears the VFU (VF-owned) bit in VFMAILBOX, handing the mailbox back.
 */
static void ixgbe_release_mbx_lock_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
	u32 vf_mailbox;

	UNREFERENCED_1PARAMETER(mbx_id);
	DEBUGFUNC("ixgbe_release_mbx_lock_vf");

	vf_mailbox = ixgbe_read_mailbox_vf(hw) & ~IXGBE_VFMAILBOX_VFU;
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
}
/**
 * ixgbe_write_mbx_vf_legacy - Write a message to the mailbox (v1.0 protocol)
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @mbx_id: unused mailbox id
 *
 * returns IXGBE_SUCCESS if the message was copied and the REQ bit set,
 * else the error from ixgbe_obtain_mbx_lock_vf().
 **/
static s32 ixgbe_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
	s32 ret_val;
	u16 i;

	UNREFERENCED_1PARAMETER(mbx_id);
	DEBUGFUNC("ixgbe_write_mbx_vf_legacy");

	/* lock the mailbox to prevent a pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
	if (ret_val)
		return ret_val;

	/* flush any stale msg/ack indications before overwriting the
	 * buffer; check_for_* latches the hardware bit into the cached
	 * copy (see ixgbe_read_mailbox_vf) so clear_* can account for
	 * and drop it.
	 */
	ixgbe_check_for_msg_vf(hw, 0);
	ixgbe_clear_msg_vf(hw);
	ixgbe_check_for_ack_vf(hw, 0);
	ixgbe_clear_ack_vf(hw);

	/* copy the caller-specified message into the mailbox memory */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	/* set REQ to signal the PF that a message is waiting */
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_write_mbx_vf - Write a message to the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @mbx_id: unused mailbox id (the VF has a single mailbox)
 *
 * returns IXGBE_SUCCESS if the message was copied and REQ set, else
 * the error from ixgbe_obtain_mbx_lock_vf().
 **/
static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
	u32 vf_mailbox;
	s32 ret_val;
	u16 i;

	UNREFERENCED_1PARAMETER(mbx_id);
	DEBUGFUNC("ixgbe_write_mbx_vf");

	/* lock the mailbox to prevent a pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
	if (ret_val)
		goto out;

	/* flush stale msg/ack indications before overwriting the buffer */
	ixgbe_clear_msg_vf(hw);
	ixgbe_clear_ack_vf(hw);

	/* copy the caller-specified message into the mailbox memory */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	/* set REQ (preserving other latched bits) to signal the PF */
	vf_mailbox = ixgbe_read_mailbox_vf(hw);
	vf_mailbox |= IXGBE_VFMAILBOX_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);

	/* wait for the PF's ack; the poll result is deliberately not
	 * folded into ret_val — NOTE(review): confirm a missing ack
	 * should not fail the write.
	 */
	ixgbe_poll_for_ack(hw, mbx_id);

out:
	/* NOTE(review): release is invoked even when the lock was never
	 * obtained; ixgbe_release_mbx_lock_vf clears VFU unconditionally.
	 * Confirm this is intended.
	 */
	hw->mbx.ops[mbx_id].release(hw, mbx_id);

	return ret_val;
}
/**
 * ixgbe_read_mbx_vf_legacy - Read a message from the mailbox (v1.0 protocol)
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @mbx_id: unused mailbox id
 *
 * returns IXGBE_SUCCESS if it successfully read the message, else the
 * error from ixgbe_obtain_mbx_lock_vf().
 **/
static s32 ixgbe_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_read_mbx_vf_legacy");
	UNREFERENCED_1PARAMETER(mbx_id);

	/* lock the mailbox to prevent a pf/vf race condition.
	 * NOTE(review): the lock is not released in this function; the
	 * legacy ops table installs the no-op release
	 * (ixgbe_release_mbx_lock_dummy) — confirm the ACK write below
	 * is what frees the mailbox in the v1.0 protocol.
	 */
	ret_val = ixgbe_obtain_mbx_lock_vf(hw);
	if (ret_val)
		return ret_val;

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);

	/* acknowledge receipt to the PF */
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_read_mbx_vf - Read a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @mbx_id: unused mailbox id
 *
 * returns IXGBE_SUCCESS if it successfully read the message, or
 * IXGBE_ERR_MBX_NOMSG when the PF has not posted one.
 **/
static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
	u32 vf_mailbox;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_read_mbx_vf");
	UNREFERENCED_1PARAMETER(mbx_id);

	/* nothing to do if the PF has not posted a message */
	ret_val = ixgbe_check_for_msg_vf(hw, 0);
	if (ret_val != IXGBE_SUCCESS)
		return IXGBE_ERR_MBX_NOMSG;

	/* consume the PFSTS indication */
	ixgbe_clear_msg_vf(hw);

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);

	/* set ACK (preserving other latched bits) to acknowledge receipt */
	vf_mailbox = ixgbe_read_mailbox_vf(hw);
	vf_mailbox |= IXGBE_VFMAILBOX_ACK;
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_init_mbx_params_vf - set initial values for VF mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct for VF use: polling parameters,
 * mailbox size, zeroed statistics, and the legacy (v1.0) protocol ops
 * (later replaceable via ixgbe_upgrade_mbx_params_vf()).
 */
void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* start with everything counted from zero */
	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;

	/* polling parameters and hardware mailbox size */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
	mbx->size = IXGBE_VFMAILBOX_SIZE;

	/* legacy protocol handlers; clear is not supported on the VF side */
	mbx->ops[0].read = ixgbe_read_mbx_vf_legacy;
	mbx->ops[0].write = ixgbe_write_mbx_vf_legacy;
	mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
	mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
	mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
	mbx->ops[0].release = ixgbe_release_mbx_lock_dummy;
	mbx->ops[0].clear = NULL;
}
/**
 * ixgbe_upgrade_mbx_params_vf - set improved values for VF mailbox
 * @hw: pointer to the HW structure
 *
 * Re-initializes hw->mbx with the upgraded protocol handlers (explicit
 * lock release, latched-bit-aware read/write) and zeroed statistics.
 */
void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* reset the counters */
	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;

	/* polling parameters and hardware mailbox size */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
	mbx->size = IXGBE_VFMAILBOX_SIZE;

	/* upgraded protocol handlers; clear is not supported on the VF side */
	mbx->ops[0].read = ixgbe_read_mbx_vf;
	mbx->ops[0].write = ixgbe_write_mbx_vf;
	mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
	mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
	mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
	mbx->ops[0].release = ixgbe_release_mbx_lock_vf;
	mbx->ops[0].clear = NULL;
}
/* Clear the VFREQ (message pending) bit for @vf_id in the PF mailbox
 * interrupt-cause register, counting it in the request statistics if
 * it was set.
 */
static void ixgbe_clear_msg_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
	u32 vfreq_bit = IXGBE_PFMBICR_VFREQ_VF1 << vf_shift;

	if (IXGBE_READ_REG(hw, IXGBE_PFMBICR(index)) & vfreq_bit)
		hw->mbx.stats.reqs++;

	IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index), vfreq_bit);
}
/* Clear the VFACK bit for @vf_id in the PF mailbox interrupt-cause
 * register, counting it in the ack statistics if it was set.
 */
static void ixgbe_clear_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
	u32 vfack_bit = IXGBE_PFMBICR_VFACK_VF1 << vf_shift;

	if (IXGBE_READ_REG(hw, IXGBE_PFMBICR(index)) & vfack_bit)
		hw->mbx.stats.acks++;

	IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index), vfack_bit);
}
/* Test whether any bit in @mask is set in PFMBICR[@index].  Returns
 * IXGBE_SUCCESS when set, IXGBE_ERR_MBX otherwise.
 */
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
	u32 pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));

	return (pfmbicr & mask) ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
/**
 * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * returns IXGBE_SUCCESS if the VF has posted a message (VFREQ set).
 * Does not consume the indication.
 **/
static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);

	DEBUGFUNC("ixgbe_check_for_msg_pf");

	if (ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFREQ_VF1 << vf_shift,
				   index))
		return IXGBE_ERR_MBX;

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * returns IXGBE_SUCCESS if the VF has set the ACK bit (VFACK).  The
 * indication is consumed so it is reported only once.
 **/
static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	s32 index = IXGBE_PFMBICR_INDEX(vf_id);
	u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);

	DEBUGFUNC("ixgbe_check_for_ack_pf");

	if (ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFACK_VF1 << vf_shift,
				   index))
		return IXGBE_ERR_MBX;

	ixgbe_clear_ack_pf(hw, vf_id);
	return IXGBE_SUCCESS;
}
/**
 * ixgbe_check_for_rst_pf - check to see if the VF has reset
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Reads the per-VF reset latch — PFVFLRE on 82599, PFVFLREC on
 * X540/X550 family — and, if the VF's bit is set, clears it and bumps
 * the rsts counter.
 *
 * returns IXGBE_SUCCESS if the VF has been reset
 **/
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	u32 vf_shift = IXGBE_PFVFLRE_SHIFT(vf_id);
	u32 index = IXGBE_PFVFLRE_INDEX(vf_id);
	s32 ret_val = IXGBE_ERR_MBX;
	u32 vflre = 0;

	DEBUGFUNC("ixgbe_check_for_rst_pf");

	/* the reset-status register differs per MAC generation */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLRE(index));
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
	case ixgbe_mac_X540:
		vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index));
		break;
	default:
		/* unsupported MAC: vflre stays 0, reported as no reset */
		break;
	}

	if (vflre & (1 << vf_shift)) {
		ret_val = IXGBE_SUCCESS;
		/* NOTE(review): the clear is written to PFVFLREC for all
		 * MAC types, including 82599 which was read via PFVFLRE —
		 * confirm against the datasheet.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift));
		hw->mbx.stats.rsts++;
	}

	return ret_val;
}
/**
 * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Attempts to set the PFU (PF-owned) bit in the per-VF PFMAILBOX
 * register and reads it back to confirm the bit stuck, retrying up to
 * mbx->timeout times with mbx->usec_delay microseconds between
 * attempts.  An attempt is skipped entirely (goto retry) when PFU is
 * already observed set, i.e. the mailbox is in use.
 *
 * returns IXGBE_SUCCESS if the lock was obtained, IXGBE_ERR_CONFIG if
 * mbx->timeout is zero, IXGBE_ERR_TIMEOUT if every attempt failed.
 **/
static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;
	s32 ret_val = IXGBE_ERR_MBX;
	u32 pf_mailbox;

	DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");

	if (!mbx->timeout)
		return IXGBE_ERR_CONFIG;

	while (countdown--) {
		/* skip the attempt if the mailbox is already held */
		pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
		if (pf_mailbox & IXGBE_PFMAILBOX_PFU)
			goto retry;

		/* reserve mailbox for PF use */
		pf_mailbox |= IXGBE_PFMAILBOX_PFU;
		IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);

		/* verify that the PF now owns the lock */
		pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
		if (pf_mailbox & IXGBE_PFMAILBOX_PFU) {
			ret_val = IXGBE_SUCCESS;
			break;
		}

	retry:
		/* wait a bit before trying again */
		usec_delay(mbx->usec_delay);
	}

	if (ret_val != IXGBE_SUCCESS) {
		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
			      "Failed to obtain mailbox lock");
		ret_val = IXGBE_ERR_TIMEOUT;
	}

	return ret_val;
}
/**
 * ixgbe_release_mbx_lock_pf - release mailbox lock
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Clears the PFU (PF-owned) bit in the per-VF PFMAILBOX register,
 * handing the mailbox back.
 */
static void ixgbe_release_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	u32 pf_mailbox;

	DEBUGFUNC("ixgbe_release_mbx_lock_pf");

	pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id)) &
		     ~IXGBE_PFMAILBOX_PFU;
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
}
/**
 * ixgbe_write_mbx_pf_legacy - Write a message to the mailbox (v1.0 protocol)
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @vf_id: the VF index
 *
 * returns IXGBE_SUCCESS if the message was copied and STS set, else
 * the error from ixgbe_obtain_mbx_lock_pf().
 **/
static s32 ixgbe_write_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_id)
{
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_write_mbx_pf_legacy");

	/* lock the mailbox to prevent a pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
	if (ret_val)
		return ret_val;

	/* flush any stale msg/ack indications before overwriting the
	 * buffer
	 */
	ixgbe_check_for_msg_pf(hw, vf_id);
	ixgbe_clear_msg_pf(hw, vf_id);
	ixgbe_check_for_ack_pf(hw, vf_id);
	ixgbe_clear_ack_pf(hw, vf_id);

	/* copy the caller-specified message into the mailbox memory */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);

	/* set STS to signal the VF that a message is waiting */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_write_mbx_pf - Write a message to the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @vf_id: the VF index
 *
 * returns IXGBE_SUCCESS if the message was copied and STS set, else
 * the error from ixgbe_obtain_mbx_lock_pf().
 **/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_id)
{
	u32 pf_mailbox;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_write_mbx_pf");

	/* lock the mailbox to prevent a pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
	if (ret_val)
		goto out;

	/* flush stale msg/ack indications before overwriting the buffer */
	ixgbe_clear_msg_pf(hw, vf_id);
	ixgbe_clear_ack_pf(hw, vf_id);

	/* copy the caller-specified message into the mailbox memory */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);

	/* set STS (preserving other bits) to signal the VF */
	pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
	pf_mailbox |= IXGBE_PFMAILBOX_STS;
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);

	/* poll for the VF's ack only on IXGBE_VT_MSGTYPE_CTS messages;
	 * the poll result is deliberately not folded into ret_val —
	 * NOTE(review): confirm a missing ack should not fail the write.
	 */
	if (msg[0] & IXGBE_VT_MSGTYPE_CTS)
		ixgbe_poll_for_ack(hw, vf_id);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

out:
	/* NOTE(review): release is invoked even when the lock was never
	 * obtained — confirm this is intended.
	 */
	hw->mbx.ops[vf_id].release(hw, vf_id);

	return ret_val;
}
/**
 * ixgbe_read_mbx_pf_legacy - Read a message from the mailbox (v1.0 protocol)
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @vf_id: the VF index
 *
 * returns IXGBE_SUCCESS if it successfully read the message, else the
 * error from ixgbe_obtain_mbx_lock_pf().
 **/
static s32 ixgbe_read_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_id)
{
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_read_mbx_pf_legacy");

	/* lock the mailbox to prevent a pf/vf race condition.
	 * NOTE(review): the lock is not released in this function; the
	 * legacy ops table installs the no-op release
	 * (ixgbe_release_mbx_lock_dummy) — confirm the ACK write below
	 * is what frees the mailbox in the v1.0 protocol.
	 */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);

	/* acknowledge receipt to the VF */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_read_mbx_pf - Read a message from the mailbox
 * @hw: pointer to the HW structure
 * @msg: The message buffer
 * @size: Length of buffer in dwords
 * @vf_id: the VF index
 *
 * returns IXGBE_SUCCESS if it successfully read the message, or
 * IXGBE_ERR_MBX_NOMSG when the VF has not posted one.
 **/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_id)
{
	u32 pf_mailbox;
	s32 ret_val;
	u16 i;

	DEBUGFUNC("ixgbe_read_mbx_pf");

	/* nothing to do if the VF has not posted a message */
	ret_val = ixgbe_check_for_msg_pf(hw, vf_id);
	if (ret_val != IXGBE_SUCCESS)
		return IXGBE_ERR_MBX_NOMSG;

	/* consume the VFREQ indication */
	ixgbe_clear_msg_pf(hw, vf_id);

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);

	/* set ACK (preserving other bits) to acknowledge receipt */
	pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
	pf_mailbox |= IXGBE_PFMAILBOX_ACK;
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_clear_mbx_pf - Clear the mailbox memory of a VF
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Zeroes every dword of the VF's mailbox memory so no stale message
 * remains.
 *
 * returns IXGBE_ERR_PARAM for an out-of-range VF index (indices 0-63
 * are valid, matching the 64-entry ops tables set up by
 * ixgbe_init_mbx_params_pf()), IXGBE_SUCCESS otherwise.
 **/
static s32 ixgbe_clear_mbx_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	u16 mbx_size = hw->mbx.size;
	u16 i;

	/* Fix: trace function entry like every other routine in this file */
	DEBUGFUNC("ixgbe_clear_mbx_pf");

	if (vf_id > 63)
		return IXGBE_ERR_PARAM;

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, 0x0);

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_init_mbx_params_pf_id - set initial values for PF mailbox
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Installs the legacy (v1.0) protocol handlers for one VF's mailbox;
 * they may later be replaced via ixgbe_upgrade_mbx_params_pf().
 */
void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	mbx->ops[vf_id].read = ixgbe_read_mbx_pf_legacy;
	mbx->ops[vf_id].write = ixgbe_write_mbx_pf_legacy;
	mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
	mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
	mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
	mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
	/* legacy protocol has no explicit lock hand-back */
	mbx->ops[vf_id].release = ixgbe_release_mbx_lock_dummy;
}
/**
 * ixgbe_init_mbx_params_pf - set initial values for PF mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct for PF use on SR-IOV capable MACs:
 * polling parameters, mailbox size, zeroed statistics, and legacy
 * protocol handlers for all 64 possible VFs.  Does nothing on other
 * MAC types.
 */
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u16 i;

	/* only these MAC generations have the PF/VF mailbox */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		break;
	default:
		return;
	}

	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
	mbx->size = IXGBE_VFMAILBOX_SIZE;

	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;

	for (i = 0; i < 64; i++)
		ixgbe_init_mbx_params_pf_id(hw, i);
}
/**
 * ixgbe_upgrade_mbx_params_pf - set improved values for PF mailbox
 * @hw: pointer to the HW structure
 * @vf_id: the VF index
 *
 * Installs the upgraded protocol handlers (explicit lock release,
 * STS/ACK bit-preserving read/write) for one VF's mailbox and resets
 * the shared statistics.  Does nothing on MACs without the mailbox.
 */
void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* only these MAC generations have the PF/VF mailbox */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		break;
	default:
		return;
	}

	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
	mbx->size = IXGBE_VFMAILBOX_SIZE;

	mbx->ops[vf_id].read = ixgbe_read_mbx_pf;
	mbx->ops[vf_id].write = ixgbe_write_mbx_pf;
	mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
	mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
	mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
	mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
	mbx->ops[vf_id].release = ixgbe_release_mbx_lock_pf;

	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;
}