#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"
#include "xdp.h"
#include "xsk.h"
static const struct net_device_ops idpf_netdev_ops;
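
/**
 * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
 * @adapter: private data struct
 *
 * Return 0 on success, error on failure
 */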
static int idpf_init_vector_stack(struct idpf_adapter *adapter)
{
struct idpf_vector_lifo *stack;
u16 min_vec;
u32 i;
mutex_lock(&adapter->vector_lock);
min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
stack = &adapter->vector_stack;
stack->size = adapter->num_msix_entries;
stack->base = min_vec;
stack->top = min_vec;
stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
if (!stack->vec_idx) {
mutex_unlock(&adapter->vector_lock);
return -ENOMEM;
}
for (i = 0; i < stack->size; i++)
stack->vec_idx[i] = i;
mutex_unlock(&adapter->vector_lock);
return 0;
}
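
/**
 * idpf_deinit_vector_stack - zero out the MSIX vector stack
 * @adapter: private data struct
 */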
static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
{
struct idpf_vector_lifo *stack;
mutex_lock(&adapter->vector_lock);
stack = &adapter->vector_stack;
kfree(stack->vec_idx);
stack->vec_idx = NULL;
mutex_unlock(&adapter->vector_lock);
}
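
/**
 * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
 * @adapter: adapter structure
 *
 * This will also disable interrupt mode and queue up the mailbox task. The
 * mailbox task will reschedule itself if not in interrupt mode.
 */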
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
kfree(free_irq(adapter->msix_entries[0].vector, adapter));
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}
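
/**
 * idpf_intr_rel - Release interrupt capabilities and free memory
 * @adapter: adapter to disable interrupts on
 */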
void idpf_intr_rel(struct idpf_adapter *adapter)
{
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
idpf_send_dealloc_vectors_msg(adapter);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
kfree(adapter->rdma_msix_entries);
adapter->rdma_msix_entries = NULL;
}
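
/**
 * idpf_mb_intr_clean - Interrupt handler for the mailbox
 * @irq: interrupt number
 * @data: pointer to the adapter structure
 */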
static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
{
struct idpf_adapter *adapter = (struct idpf_adapter *)data;
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
return IRQ_HANDLED;
}
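
/**
 * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
 * @adapter: adapter to get the vector for
 */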
static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
{
struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
u32 val;
val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
writel(val, intr->dyn_ctl);
writel(intr->icr_ena_ctlq_m, intr->icr_ena);
}
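
/**
 * idpf_mb_intr_req_irq - Request IRQ for the mailbox interrupt
 * @adapter: adapter structure to pass to the mailbox IRQ handler
 */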
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
int irq_num, mb_vidx = 0, err;
char *name;
irq_num = adapter->msix_entries[mb_vidx].vector;
name = kasprintf(GFP_KERNEL, "%s-%s-%d",
dev_driver_string(&adapter->pdev->dev),
"Mailbox", mb_vidx);
err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err);
return err;
}
set_bit(IDPF_MB_INTR_MODE, adapter->flags);
return 0;
}
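
/**
 * idpf_mb_intr_init - Initialize the mailbox interrupt
 * @adapter: adapter structure to store the mailbox vector
 */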
static int idpf_mb_intr_init(struct idpf_adapter *adapter)
{
adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
adapter->irq_mb_handler = idpf_mb_intr_clean;
return idpf_mb_intr_req_irq(adapter);
}
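
/**
 * idpf_vector_lifo_push - push MSIX vector index onto the stack
 * @adapter: private data struct
 * @vec_idx: vector index to store
 */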
static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
{
struct idpf_vector_lifo *stack = &adapter->vector_stack;
lockdep_assert_held(&adapter->vector_lock);
if (stack->top == stack->base) {
dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
stack->top);
return -EINVAL;
}
stack->vec_idx[--stack->top] = vec_idx;
return 0;
}
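
/**
 * idpf_vector_lifo_pop - pop MSIX vector index from the stack
 * @adapter: private data struct
 */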
static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
{
struct idpf_vector_lifo *stack = &adapter->vector_stack;
lockdep_assert_held(&adapter->vector_lock);
if (stack->top == stack->size) {
dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
return -EINVAL;
}
return stack->vec_idx[stack->top++];
}
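
/**
 * idpf_vector_stash - Store the vector indexes onto the stack
 * @adapter: private data struct
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This function is a no-op if there are no vector indexes to be stashed
 */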
static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
struct idpf_vector_info *vec_info)
{
int i, base = 0;
u16 vec_idx;
lockdep_assert_held(&adapter->vector_lock);
if (!vec_info->num_curr_vecs)
return;
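	/* For default vports, no need to stash vectors allocated from the
	 * default pool onto the stack
	 */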
if (vec_info->default_vport)
base = IDPF_MIN_Q_VEC;
for (i = vec_info->num_curr_vecs - 1; i >= base ; i--) {
vec_idx = q_vector_idxs[i];
idpf_vector_lifo_push(adapter, vec_idx);
adapter->num_avail_msix++;
}
}
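
/**
 * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
 * @adapter: driver specific private structure
 * @q_vector_idxs: vector index array
 * @vec_info: info related to the number of vectors
 *
 * This is the core function to distribute the MSIX vectors acquired from the
 * OS. It expects the caller to pass the number of vectors required and also
 * previously allocated. First, it stashes previously allocated vector indexes
 * onto the stack and then figures out if it can allocate the requested
 * vectors. It can wait on acquiring the mutex lock. If the caller passes 0 as
 * requested vectors, then this function just stashes the already allocated
 * vectors and returns 0.
 *
 * Returns actual number of vectors allocated on success, error value on
 * failure. If 0 is returned, it implies the stack has no vectors to allocate,
 * which is also a failure case for the caller
 */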
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
u16 *q_vector_idxs,
struct idpf_vector_info *vec_info)
{
u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
struct idpf_vector_lifo *stack;
int i, j, vecid;
mutex_lock(&adapter->vector_lock);
stack = &adapter->vector_stack;
num_req_vecs = vec_info->num_req_vecs;
idpf_vector_stash(adapter, q_vector_idxs, vec_info);
if (!num_req_vecs)
goto rel_lock;
if (vec_info->default_vport) {
j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
num_req_vecs--;
}
}
max_vecs = min(adapter->num_avail_msix, num_req_vecs);
for (j = 0; j < max_vecs; j++) {
vecid = idpf_vector_lifo_pop(adapter);
q_vector_idxs[num_alloc_vecs++] = vecid;
}
adapter->num_avail_msix -= max_vecs;
rel_lock:
mutex_unlock(&adapter->vector_lock);
return num_alloc_vecs;
}
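
/**
 * idpf_intr_req - Request interrupt capabilities
 * @adapter: adapter to enable interrupts on
 *
 * Returns 0 on success, negative on failure
 */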
int idpf_intr_req(struct idpf_adapter *adapter)
{
u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
u16 default_vports = idpf_get_default_vports(adapter);
int num_q_vecs, total_vecs, num_vec_ids;
int min_vectors, actual_vecs, err;
unsigned int vector;
u16 *vecids;
int i;
total_vecs = idpf_get_reserved_vecs(adapter);
num_lan_vecs = total_vecs;
if (idpf_is_rdma_cap_ena(adapter)) {
num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
min_rdma_vecs = IDPF_MIN_RDMA_VEC;
if (!num_rdma_vecs) {
num_rdma_vecs = min_rdma_vecs;
} else if (num_rdma_vecs < min_rdma_vecs) {
dev_err(&adapter->pdev->dev,
"Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
min_rdma_vecs, num_rdma_vecs);
return -EINVAL;
}
}
num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to allocate %d vectors: %d\n", num_q_vecs, err);
return -EAGAIN;
}
min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
min_vectors = min_lan_vecs + min_rdma_vecs;
actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
total_vecs, PCI_IRQ_MSIX);
if (actual_vecs < 0) {
dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
min_vectors);
err = actual_vecs;
goto send_dealloc_vecs;
}
if (idpf_is_rdma_cap_ena(adapter)) {
if (actual_vecs < total_vecs) {
dev_warn(&adapter->pdev->dev,
"Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
num_rdma_vecs = IDPF_MIN_RDMA_VEC;
}
		adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
						     sizeof(struct msix_entry),
						     GFP_KERNEL);
if (!adapter->rdma_msix_entries) {
err = -ENOMEM;
goto free_irq;
}
}
num_lan_vecs = actual_vecs - num_rdma_vecs;
	adapter->msix_entries = kcalloc(num_lan_vecs,
					sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries) {
err = -ENOMEM;
goto free_rdma_msix;
}
adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
err = -ENOMEM;
goto free_msix;
}
num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
&adapter->req_vec_chunks->vchunks);
if (num_vec_ids < actual_vecs) {
err = -EINVAL;
goto free_vecids;
}
for (vector = 0; vector < num_lan_vecs; vector++) {
adapter->msix_entries[vector].entry = vecids[vector];
adapter->msix_entries[vector].vector =
pci_irq_vector(adapter->pdev, vector);
}
for (i = 0; i < num_rdma_vecs; vector++, i++) {
adapter->rdma_msix_entries[i].entry = vecids[vector];
adapter->rdma_msix_entries[i].vector =
pci_irq_vector(adapter->pdev, vector);
}
adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
adapter->num_msix_entries = num_lan_vecs;
if (idpf_is_rdma_cap_ena(adapter))
adapter->num_rdma_msix_entries = num_rdma_vecs;
err = idpf_init_vector_stack(adapter);
if (err)
goto free_vecids;
err = idpf_mb_intr_init(adapter);
if (err)
goto deinit_vec_stack;
idpf_mb_irq_enable(adapter);
kfree(vecids);
return 0;
deinit_vec_stack:
idpf_deinit_vector_stack(adapter);
free_vecids:
kfree(vecids);
free_msix:
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
free_rdma_msix:
kfree(adapter->rdma_msix_entries);
adapter->rdma_msix_entries = NULL;
free_irq:
pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
idpf_send_dealloc_vectors_msg(adapter);
return err;
}
static void idpf_del_all_flow_steer_filters(struct idpf_vport *vport)
{
struct idpf_vport_config *vport_config;
struct idpf_fsteer_fltr *f, *ftmp;
vport_config = vport->adapter->vport_config[vport->idx];
spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry_safe(f, ftmp, &vport_config->user_config.flow_steer_list,
list) {
list_del(&f->list);
kfree(f);
}
vport_config->user_config.num_fsteer_fltrs = 0;
spin_unlock_bh(&vport_config->flow_steer_list_lock);
}
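
/**
 * idpf_find_mac_filter - Search filter list for specific mac filter
 * @vconfig: Vport config structure
 * @macaddr: The MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_filter_list_lock.
 **/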
static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
const u8 *macaddr)
{
struct idpf_mac_filter *f;
if (!macaddr)
return NULL;
list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
if (ether_addr_equal(macaddr, f->macaddr))
return f;
}
return NULL;
}
static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
const u8 *macaddr)
{
struct idpf_mac_filter *f;
spin_lock_bh(&vport_config->mac_filter_list_lock);
f = idpf_find_mac_filter(vport_config, macaddr);
if (f) {
list_del(&f->list);
kfree(f);
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
return 0;
}
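
/**
 * idpf_del_mac_filter - Delete a MAC filter from the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Removes filter from list and if interface is up, tells hardware about the
 * removed filter.
 **/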
static int idpf_del_mac_filter(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
const u8 *macaddr, bool async)
{
struct idpf_vport_config *vport_config;
struct idpf_mac_filter *f;
vport_config = np->adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
f = idpf_find_mac_filter(vport_config, macaddr);
if (f) {
f->remove = true;
} else {
spin_unlock_bh(&vport_config->mac_filter_list_lock);
return -EINVAL;
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
err = idpf_add_del_mac_filters(np->adapter, vport_config,
vport->default_mac_addr,
np->vport_id, false, async);
if (err)
return err;
}
return __idpf_del_mac_filter(vport_config, macaddr);
}
static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
const u8 *macaddr)
{
struct idpf_mac_filter *f;
spin_lock_bh(&vport_config->mac_filter_list_lock);
f = idpf_find_mac_filter(vport_config, macaddr);
if (f) {
f->remove = false;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
return 0;
}
	f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f) {
spin_unlock_bh(&vport_config->mac_filter_list_lock);
return -ENOMEM;
}
ether_addr_copy(f->macaddr, macaddr);
list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
f->add = true;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
return 0;
}
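
/**
 * idpf_add_mac_filter - Add a mac filter to the filter list
 * @vport: Main vport structure
 * @np: Netdev private structure
 * @macaddr: The MAC address
 * @async: Don't wait for return message
 *
 * Returns 0 on success, error on failure. If the interface is up, this will
 * also send the virtchnl message to tell hardware about the filter.
 **/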
static int idpf_add_mac_filter(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
const u8 *macaddr, bool async)
{
struct idpf_vport_config *vport_config;
int err;
vport_config = np->adapter->vport_config[np->vport_idx];
err = __idpf_add_mac_filter(vport_config, macaddr);
if (err)
return err;
if (test_bit(IDPF_VPORT_UP, np->state))
err = idpf_add_del_mac_filters(np->adapter, vport_config,
vport->default_mac_addr,
np->vport_id, true, async);
return err;
}
static void idpf_del_all_mac_filters(struct idpf_vport *vport)
{
struct idpf_vport_config *vport_config;
struct idpf_mac_filter *f, *ftmp;
vport_config = vport->adapter->vport_config[vport->idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
list) {
list_del(&f->list);
kfree(f);
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
}
static void idpf_restore_mac_filters(struct idpf_vport *vport)
{
struct idpf_vport_config *vport_config;
struct idpf_mac_filter *f;
vport_config = vport->adapter->vport_config[vport->idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
f->add = true;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
idpf_add_del_mac_filters(vport->adapter, vport_config,
vport->default_mac_addr, vport->vport_id,
true, false);
}
static void idpf_remove_mac_filters(struct idpf_vport *vport)
{
struct idpf_vport_config *vport_config;
struct idpf_mac_filter *f;
vport_config = vport->adapter->vport_config[vport->idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
f->remove = true;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
idpf_add_del_mac_filters(vport->adapter, vport_config,
vport->default_mac_addr, vport->vport_id,
false, false);
}
static void idpf_deinit_mac_addr(struct idpf_vport *vport)
{
struct idpf_vport_config *vport_config;
struct idpf_mac_filter *f;
vport_config = vport->adapter->vport_config[vport->idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
if (f) {
list_del(&f->list);
kfree(f);
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
}
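
/**
 * idpf_init_mac_addr - initialize mac address for vport
 * @vport: main vport structure
 * @netdev: pointer to netdev struct associated with this vport
 */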
static int idpf_init_mac_addr(struct idpf_vport *vport,
struct net_device *netdev)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_adapter *adapter = vport->adapter;
int err;
if (is_valid_ether_addr(vport->default_mac_addr)) {
eth_hw_addr_set(netdev, vport->default_mac_addr);
ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
false);
}
if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_MACFILTER)) {
dev_err(&adapter->pdev->dev,
"MAC address is not provided and capability is not set\n");
return -EINVAL;
}
eth_hw_addr_random(netdev);
err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
if (err)
return err;
dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
vport->default_mac_addr, netdev->dev_addr);
ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
return 0;
}
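
/**
 * idpf_detach_and_close - detach all netdevs and close any that are running
 * @adapter: driver specific private structure
 */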
static void idpf_detach_and_close(struct idpf_adapter *adapter)
{
int max_vports = adapter->max_vports;
for (int i = 0; i < max_vports; i++) {
struct net_device *netdev = adapter->netdevs[i];
		if (!netdev || !netif_device_present(netdev))
continue;
rtnl_lock();
netif_device_detach(netdev);
if (netif_running(netdev)) {
set_bit(IDPF_VPORT_UP_REQUESTED,
adapter->vport_config[i]->flags);
dev_close(netdev);
}
rtnl_unlock();
}
}
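
/**
 * idpf_attach_and_open - attach all netdevs and open any that were running
 * @adapter: driver specific private structure
 */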
static void idpf_attach_and_open(struct idpf_adapter *adapter)
{
int max_vports = adapter->max_vports;
for (int i = 0; i < max_vports; i++) {
struct idpf_vport *vport = adapter->vports[i];
struct idpf_vport_config *vport_config;
struct net_device *netdev;
if (!vport)
continue;
netdev = adapter->netdevs[i];
netif_device_attach(netdev);
vport_config = adapter->vport_config[vport->idx];
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
vport_config->flags)) {
rtnl_lock();
dev_open(netdev, NULL);
rtnl_unlock();
}
}
}
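
/**
 * idpf_cfg_netdev - Allocate, configure and register a netdev
 * @vport: main vport structure
 *
 * Returns 0 on success, negative value on failure.
 */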
static int idpf_cfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
netdev_features_t other_offloads = 0;
netdev_features_t csum_offloads = 0;
netdev_features_t tso_offloads = 0;
netdev_features_t dflt_features;
struct idpf_netdev_priv *np;
struct net_device *netdev;
u16 idx = vport->idx;
int err;
vport_config = adapter->vport_config[idx];
if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
netdev = adapter->netdevs[idx];
np = netdev_priv(netdev);
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev);
}
netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
vport_config->max_q.max_txq,
vport_config->max_q.max_rxq);
if (!netdev)
return -ENOMEM;
vport->netdev = netdev;
np = netdev_priv(netdev);
np->vport = vport;
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
spin_lock_init(&np->stats_lock);
err = idpf_init_mac_addr(vport, netdev);
if (err) {
free_netdev(vport->netdev);
vport->netdev = NULL;
return err;
}
netdev->netdev_ops = &idpf_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
netdev->dev_port = idx;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = vport->max_mtu;
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA;
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_FLOW_STEER) &&
idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
dflt_features |= NETIF_F_NTUPLE;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
csum_offloads |= NETIF_F_IP_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
csum_offloads |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
csum_offloads |= NETIF_F_RXCSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
csum_offloads |= NETIF_F_SCTP_CRC;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
tso_offloads |= NETIF_F_TSO;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
tso_offloads |= NETIF_F_TSO6;
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_IPV4_UDP |
VIRTCHNL2_CAP_SEG_IPV6_UDP))
tso_offloads |= NETIF_F_GSO_UDP_L4;
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
other_offloads |= NETIF_F_GRO_HW;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
other_offloads |= NETIF_F_LOOPBACK;
netdev->features |= dflt_features | csum_offloads | tso_offloads;
netdev->hw_features |= netdev->features | other_offloads;
netdev->vlan_features |= netdev->features | other_offloads;
netdev->hw_enc_features |= dflt_features | other_offloads;
idpf_xdp_set_features(vport);
idpf_set_ethtool_ops(netdev);
netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->netdevs[idx] = netdev;
return 0;
}
static int idpf_get_free_slot(struct idpf_adapter *adapter)
{
unsigned int i;
for (i = 0; i < adapter->max_vports; i++) {
if (!adapter->vports[i])
return i;
}
return IDPF_NO_FREE_SLOT;
}
static void idpf_remove_features(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
idpf_remove_mac_filters(vport);
}
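
/**
 * idpf_vport_stop - Disable a vport
 * @vport: vport to disable
 * @rtnl: whether to take the RTNL lock
 */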
static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_queue_id_reg_info *chunks;
u32 vport_id = vport->vport_id;
if (!test_bit(IDPF_VPORT_UP, np->state))
return;
if (rtnl)
rtnl_lock();
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
chunks = &adapter->vport_config[vport->idx]->qid_reg_info;
idpf_send_disable_vport_msg(adapter, vport_id);
idpf_send_disable_queues_msg(vport);
idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
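	/* Normally we ask for queues in create_vport, but if the number of
	 * initially requested queues have changed, for example via ethtool
	 * set channels, we do delete queues and then add the queues back
	 * instead of deleting and reallocating the vport.
	 */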
if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
idpf_send_delete_queues_msg(adapter, chunks, vport_id);
idpf_remove_features(vport);
vport->link_up = false;
idpf_vport_intr_deinit(vport, rsrc);
idpf_xdp_rxq_info_deinit_all(rsrc);
idpf_vport_queues_rel(vport, rsrc);
idpf_vport_intr_rel(rsrc);
clear_bit(IDPF_VPORT_UP, np->state);
if (rtnl)
rtnl_unlock();
}
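
/**
 * idpf_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */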
static int idpf_stop(struct net_device *netdev)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport *vport;
if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
return 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
idpf_vport_stop(vport, false);
idpf_vport_ctrl_unlock(netdev);
return 0;
}
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
u16 idx = vport->idx;
if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
adapter->vport_config[idx]->flags)) {
unregister_netdev(vport->netdev);
free_netdev(vport->netdev);
}
vport->netdev = NULL;
adapter->netdevs[idx] = NULL;
}
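
/**
 * idpf_vport_rel - Delete a vport and free the resources associated with it
 * @vport: the vport being removed
 */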
static void idpf_vport_rel(struct idpf_vport *vport)
{
struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_vector_info vec_info;
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
vport_config = adapter->vport_config[vport->idx];
rss_data = &vport_config->user_config.rss_data;
idpf_deinit_rss_lut(rss_data);
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
idpf_send_destroy_vport_msg(adapter, vport->vport_id);
max_q.max_rxq = vport_config->max_q.max_rxq;
max_q.max_txq = vport_config->max_q.max_txq;
max_q.max_bufq = vport_config->max_q.max_bufq;
max_q.max_complq = vport_config->max_q.max_complq;
idpf_vport_dealloc_max_qs(adapter, &max_q);
vec_info.num_req_vecs = 0;
vec_info.num_curr_vecs = rsrc->num_q_vectors;
vec_info.default_vport = vport->default_vport;
idpf_req_rel_vector_indexes(adapter, rsrc->q_vector_idxs, &vec_info);
kfree(rsrc->q_vector_idxs);
rsrc->q_vector_idxs = NULL;
idpf_vport_deinit_queue_reg_chunks(vport_config);
kfree(adapter->vport_params_recvd[idx]);
adapter->vport_params_recvd[idx] = NULL;
kfree(adapter->vport_params_reqd[idx]);
adapter->vport_params_reqd[idx] = NULL;
kfree(vport);
adapter->num_alloc_vports--;
}
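
/**
 * idpf_vport_dealloc - cleanup and release a given vport
 * @vport: pointer to idpf vport structure
 */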
static void idpf_vport_dealloc(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
unsigned int i = vport->idx;
idpf_idc_deinit_vport_aux_device(vport->vdev_info);
idpf_deinit_mac_addr(vport);
if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) {
idpf_vport_stop(vport, true);
idpf_decfg_netdev(vport);
}
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
idpf_del_all_mac_filters(vport);
idpf_del_all_flow_steer_filters(vport);
}
if (adapter->netdevs[i]) {
struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
np->vport = NULL;
}
idpf_vport_rel(vport);
adapter->vports[i] = NULL;
adapter->next_vport = idpf_get_free_slot(adapter);
}
static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
{
return idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model) &&
idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
IDPF_CAP_HSPLIT);
}
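
/**
 * idpf_vport_get_hsplit - get current hsplit state
 * @vport: virtual port
 *
 * Return: ETHTOOL_TCP_DATA_SPLIT_UNKNOWN if not supported,
 *         ETHTOOL_TCP_DATA_SPLIT_ENABLED/DISABLED otherwise.
 */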
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
{
const struct idpf_vport_user_config_data *config;
if (!idpf_is_hsplit_supported(vport))
return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
config = &vport->adapter->vport_config[vport->idx]->user_config;
return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
ETHTOOL_TCP_DATA_SPLIT_ENABLED :
ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
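
/**
 * idpf_vport_set_hsplit - set current hsplit state
 * @vport: virtual port
 * @val: new hsplit state
 *
 * Return: true on success, false otherwise.
 */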
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
{
struct idpf_vport_user_config_data *config;
if (!idpf_is_hsplit_supported(vport))
return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
config = &vport->adapter->vport_config[vport->idx]->user_config;
switch (val) {
case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
__set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
return true;
case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
__clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
return true;
default:
return false;
}
}
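
/**
 * idpf_vport_alloc - Allocates the next available struct vport in the adapter
 * @adapter: board private structure
 * @max_q: vport max queue info
 *
 * returns a pointer to a vport on success, NULL on failure.
 */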
static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q)
{
struct idpf_rss_data *rss_data;
u16 idx = adapter->next_vport;
struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
u16 num_max_q;
int err;
if (idx == IDPF_NO_FREE_SLOT)
return NULL;
	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
if (!vport)
return vport;
num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
struct idpf_q_coalesce *q_coal;
		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
if (!vport_config) {
kfree(vport);
return NULL;
}
		q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
if (!q_coal) {
kfree(vport_config);
kfree(vport);
return NULL;
}
for (int i = 0; i < num_max_q; i++) {
q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
}
vport_config->user_config.q_coalesce = q_coal;
adapter->vport_config[idx] = vport_config;
}
vport->idx = idx;
vport->adapter = adapter;
vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
rsrc = &vport->dflt_qv_rsrc;
rsrc->dev = &adapter->pdev->dev;
rsrc->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
if (!rsrc->q_vector_idxs)
goto free_vport;
err = idpf_vport_init(vport, max_q);
if (err)
goto free_vector_idxs;
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
if (!rss_data->rss_key)
goto free_qreg_chunks;
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
err = idpf_init_rss_lut(vport, rss_data);
if (err)
goto free_rss_key;
adapter->vports[idx] = vport;
adapter->vport_ids[idx] = idpf_get_vport_id(vport);
adapter->num_alloc_vports++;
adapter->next_vport = idpf_get_free_slot(adapter);
return vport;
free_rss_key:
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
free_qreg_chunks:
idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]);
free_vector_idxs:
kfree(rsrc->q_vector_idxs);
free_vport:
kfree(vport);
return NULL;
}
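
/**
 * idpf_get_stats64 - get statistics for network device structure
 * @netdev: network interface device structure
 * @stats: main device statistics structure
 */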
static void idpf_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
spin_lock_bh(&np->stats_lock);
*stats = np->netstats;
spin_unlock_bh(&np->stats_lock);
}
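
/**
 * idpf_statistics_task - Delayed task to get statistics over mailbox
 * @work: work_struct handle to our data
 */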
void idpf_statistics_task(struct work_struct *work)
{
struct idpf_adapter *adapter;
int i;
adapter = container_of(work, struct idpf_adapter, stats_task.work);
for (i = 0; i < adapter->max_vports; i++) {
struct idpf_vport *vport = adapter->vports[i];
if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
idpf_send_get_stats_msg(netdev_priv(vport->netdev),
&vport->port_stats);
}
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
msecs_to_jiffies(10000));
}
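
/**
 * idpf_mbx_task - Delayed task to handle mailbox responses
 * @work: work_struct handle
 */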
void idpf_mbx_task(struct work_struct *work)
{
struct idpf_adapter *adapter;
adapter = container_of(work, struct idpf_adapter, mbx_task.work);
if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
idpf_mb_irq_enable(adapter);
else
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
				   msecs_to_jiffies(300));
idpf_recv_mb_msg(adapter, adapter->hw.arq);
}
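
/**
 * idpf_service_task - Delayed task for detecting hardware resets
 * @work: work_struct handle to our data
 */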
void idpf_service_task(struct work_struct *work)
{
struct idpf_adapter *adapter;
adapter = container_of(work, struct idpf_adapter, serv_task.work);
if (idpf_is_reset_detected(adapter) &&
!idpf_is_reset_in_prog(adapter) &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
dev_info(&adapter->pdev->dev, "HW reset detected\n");
set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
queue_delayed_work(adapter->vc_event_wq,
&adapter->vc_event_task,
msecs_to_jiffies(10));
}
queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
msecs_to_jiffies(300));
}
static void idpf_restore_features(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
idpf_restore_mac_filters(vport);
}
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
int err, txq = vport->dflt_qv_rsrc.num_txq - vport->num_xdp_txq;
err = netif_set_real_num_rx_queues(vport->netdev,
vport->dflt_qv_rsrc.num_rxq);
if (err)
return err;
return netif_set_real_num_tx_queues(vport->netdev, txq);
}
static void idpf_up_complete(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
netif_carrier_on(vport->netdev);
netif_tx_start_all_queues(vport->netdev);
}
set_bit(IDPF_VPORT_UP, np->state);
}
static void idpf_rx_init_buf_tail(struct idpf_q_vec_rsrc *rsrc)
{
for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
if (idpf_is_queue_model_split(rsrc->rxq_model)) {
for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
for (unsigned int j = 0; j < grp->singleq.num_rxq; j++) {
const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
writel(q->next_to_alloc, q->tail);
}
}
}
}
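
/**
 * idpf_vport_open - Bring up a vport
 * @vport: vport to bring up
 * @rtnl: whether to take the RTNL lock
 */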
static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_queue_id_reg_info *chunks;
struct idpf_rss_data *rss_data;
u32 vport_id = vport->vport_id;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
return -EBUSY;
if (rtnl)
rtnl_lock();
netif_carrier_off(vport->netdev);
err = idpf_vport_intr_alloc(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
goto err_rtnl_unlock;
}
err = idpf_vport_queues_alloc(vport, rsrc);
if (err)
goto intr_rel;
vport_config = adapter->vport_config[vport->idx];
chunks = &vport_config->qid_reg_info;
err = idpf_vport_queue_ids_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
err = idpf_vport_intr_init(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
err = idpf_queue_reg_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
err = idpf_rx_bufs_init_all(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
idpf_rx_init_buf_tail(rsrc);
err = idpf_xdp_rxq_info_init_all(rsrc);
if (err) {
netdev_err(vport->netdev,
"Failed to initialize XDP RxQ info for vport %u: %pe\n",
vport->vport_id, ERR_PTR(err));
goto intr_deinit;
}
idpf_vport_intr_ena(vport, rsrc);
err = idpf_send_config_queues_msg(adapter, rsrc, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
goto rxq_deinit;
}
err = idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id,
true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
goto rxq_deinit;
}
err = idpf_send_enable_queues_msg(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
vport->vport_id, err);
goto unmap_queue_vectors;
}
err = idpf_send_enable_vport_msg(adapter, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
vport->vport_id, err);
err = -EAGAIN;
goto disable_queues;
}
idpf_restore_features(vport);
rss_data = &vport_config->user_config.rss_data;
err = idpf_config_rss(vport, rss_data);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
vport->vport_id, err);
goto disable_vport;
}
idpf_up_complete(vport);
if (rtnl)
rtnl_unlock();
return 0;
disable_vport:
idpf_send_disable_vport_msg(adapter, vport_id);
disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
rxq_deinit:
idpf_xdp_rxq_info_deinit_all(rsrc);
intr_deinit:
idpf_vport_intr_deinit(vport, rsrc);
queues_rel:
idpf_vport_queues_rel(vport, rsrc);
intr_rel:
idpf_vport_intr_rel(rsrc);
err_rtnl_unlock:
if (rtnl)
rtnl_unlock();
return err;
}
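
/**
 * idpf_init_task - Delayed initialization task
 * @work: work_struct handle to our data
 *
 * Init task finishes up pending work started in probe. Due to the asynchronous
 * nature in which the device communicates with hardware, we may have to wait
 * several milliseconds to get a response. Instead of busy polling in probe,
 * pulling it out into a delayed work task prevents us from bogging down the
 * whole system waiting for a response from hardware.
 */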
void idpf_init_task(struct work_struct *work)
{
struct idpf_vport_config *vport_config;
struct idpf_vport_max_q max_q;
struct idpf_adapter *adapter;
struct idpf_vport *vport;
u16 num_default_vports;
struct pci_dev *pdev;
bool default_vport;
int index, err;
adapter = container_of(work, struct idpf_adapter, init_task.work);
num_default_vports = idpf_get_default_vports(adapter);
if (adapter->num_alloc_vports < num_default_vports)
default_vport = true;
else
default_vport = false;
err = idpf_vport_alloc_max_qs(adapter, &max_q);
if (err)
goto unwind_vports;
err = idpf_send_create_vport_msg(adapter, &max_q);
if (err) {
idpf_vport_dealloc_max_qs(adapter, &max_q);
goto unwind_vports;
}
pdev = adapter->pdev;
vport = idpf_vport_alloc(adapter, &max_q);
if (!vport) {
err = -EFAULT;
dev_err(&pdev->dev, "failed to allocate vport: %d\n",
err);
idpf_vport_dealloc_max_qs(adapter, &max_q);
goto unwind_vports;
}
index = vport->idx;
vport_config = adapter->vport_config[index];
spin_lock_init(&vport_config->mac_filter_list_lock);
spin_lock_init(&vport_config->flow_steer_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
err = idpf_check_supported_desc_ids(vport);
if (err) {
dev_err(&pdev->dev, "failed to get required descriptor ids\n");
goto unwind_vports;
}
if (idpf_cfg_netdev(vport))
goto unwind_vports;
if (adapter->num_alloc_vports < num_default_vports) {
queue_delayed_work(adapter->init_wq, &adapter->init_task,
msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
return;
}
for (index = 0; index < adapter->max_vports; index++) {
struct net_device *netdev = adapter->netdevs[index];
struct idpf_vport_config *vport_config;
vport_config = adapter->vport_config[index];
if (!netdev ||
test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
continue;
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
index, ERR_PTR(err));
continue;
}
set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
clear_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
return;
unwind_vports:
if (default_vport) {
for (index = 0; index < adapter->max_vports; index++) {
if (adapter->vports[index])
idpf_vport_dealloc(adapter->vports[index]);
}
}
if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
}
idpf_ptp_release(adapter);
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
}
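
/**
 * idpf_sriov_ena - Enable or change number of VFs
 * @adapter: private data struct
 * @num_vfs: number of VFs to allocate
 *
 * Returns number of VFs on success, negative on failure
 */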
static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
{
struct device *dev = &adapter->pdev->dev;
int err;
err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
if (err) {
dev_err(dev, "Failed to allocate VFs: %d\n", err);
return err;
}
err = pci_enable_sriov(adapter->pdev, num_vfs);
if (err) {
idpf_send_set_sriov_vfs_msg(adapter, 0);
dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
return err;
}
adapter->num_vfs = num_vfs;
return num_vfs;
}
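
/**
 * idpf_sriov_configure - Configure the requested VFs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/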
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct idpf_adapter *adapter = pci_get_drvdata(pdev);
if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
return -EOPNOTSUPP;
}
if (num_vfs)
return idpf_sriov_ena(adapter, num_vfs);
if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
return -EBUSY;
}
pci_disable_sriov(adapter->pdev);
idpf_send_set_sriov_vfs_msg(adapter, 0);
adapter->num_vfs = 0;
return 0;
}
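
/**
 * idpf_deinit_task - Device deinit routine
 * @adapter: Driver specific private structure
 *
 * Extended remove logic which will be used for hard reset as well
 */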
void idpf_deinit_task(struct idpf_adapter *adapter)
{
unsigned int i;
cancel_delayed_work_sync(&adapter->init_task);
if (!adapter->vports)
return;
cancel_delayed_work_sync(&adapter->stats_task);
for (i = 0; i < adapter->max_vports; i++) {
if (adapter->vports[i])
idpf_vport_dealloc(adapter->vports[i]);
}
}
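
/**
 * idpf_check_reset_complete - check that reset is complete
 * @hw: pointer to hw struct
 * @reset_reg: struct with reset registers
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/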
static int idpf_check_reset_complete(struct idpf_hw *hw,
struct idpf_reset_reg *reset_reg)
{
struct idpf_adapter *adapter = hw->back;
int i;
for (i = 0; i < 2000; i++) {
u32 reg_val = readl(reset_reg->rstat);
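		/* 0xFFFFFFFF might be read if the other side hasn't cleared
		 * the register for us yet and 0xFFFFFFFF is not a valid value
		 * for the register, so treat that as invalid.
		 */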
if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
return 0;
usleep_range(5000, 10000);
}
dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
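	/* Clear the reset flag unconditionally here since the reset
	 * technically isn't in progress anymore from the driver's perspective
	 */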
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
return -EBUSY;
}
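
/**
 * idpf_init_hard_reset - Initiate a hardware reset
 * @adapter: Driver specific private structure
 *
 * Deallocate the vports and all the resources associated with them and
 * reallocate. Also reinitialize the mailbox.
 */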
static void idpf_init_hard_reset(struct idpf_adapter *adapter)
{
struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
struct device *dev = &adapter->pdev->dev;
int err;
idpf_detach_and_close(adapter);
mutex_lock(&adapter->vport_ctrl_lock);
dev_info(dev, "Device HW Reset initiated\n");
if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
idpf_idc_issue_reset_event(adapter->cdev_info);
idpf_vc_core_deinit(adapter);
if (!is_reset)
reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
} else {
dev_err(dev, "Unhandled hard reset cause\n");
err = -EBADRQC;
goto unlock_mutex;
}
err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
if (err) {
dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n",
adapter->state);
goto unlock_mutex;
}
err = idpf_init_dflt_mbx(adapter);
if (err) {
dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
goto unlock_mutex;
}
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
err = idpf_vc_core_init(adapter);
if (err) {
cancel_delayed_work_sync(&adapter->mbx_task);
idpf_deinit_dflt_mbx(adapter);
goto unlock_mutex;
}
while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
msleep(100);
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
if (!err) {
idpf_attach_and_open(adapter);
idpf_idc_init(adapter);
}
}
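
/**
 * idpf_vc_event_task - Handle virtchannel event logic
 * @work: work queue struct
 */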
void idpf_vc_event_task(struct work_struct *work)
{
struct idpf_adapter *adapter;
adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return;
if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
goto func_reset;
if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
goto drv_load;
return;
func_reset:
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
drv_load:
set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
idpf_init_hard_reset(adapter);
}
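
/**
 * idpf_initiate_soft_reset - Initiate a software reset
 * @vport: virtual port data struct
 * @reset_cause: reason for the soft reset
 *
 * Soft reset only reallocs vport queue resources. Returns 0 on success,
 * negative on failure.
 */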
int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_q_vec_rsrc *new_rsrc;
u32 vport_id = vport->vport_id;
struct idpf_vport *new_vport;
int err, tmp_err = 0;
	new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
if (!new_vport)
return -ENOMEM;
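	/* This purposely avoids copying the end of the struct because it
	 * contains wait_queues and mutexes and other stuff we don't want to
	 * mess with. Nothing below should use those variables from new_vport
	 * and should instead always refer to them in vport if they need to.
	 */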
memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
new_rsrc = &new_vport->dflt_qv_rsrc;
switch (reset_cause) {
case IDPF_SR_Q_CHANGE:
err = idpf_vport_adjust_qs(new_vport, new_rsrc);
if (err)
goto free_vport;
break;
case IDPF_SR_Q_DESC_CHANGE:
idpf_vport_calc_num_q_desc(new_vport, new_rsrc);
break;
case IDPF_SR_MTU_CHANGE:
idpf_idc_vdev_mtu_event(vport->vdev_info,
IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
break;
case IDPF_SR_RSC_CHANGE:
break;
default:
dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
err = -EINVAL;
goto free_vport;
}
vport_config = adapter->vport_config[vport->idx];
if (!vport_is_up) {
idpf_send_delete_queues_msg(adapter, &vport_config->qid_reg_info,
vport_id);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
idpf_vport_stop(vport, false);
}
err = idpf_send_add_queues_msg(adapter, vport_config, new_rsrc,
vport_id);
if (err)
goto err_reset;
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport, &vport->dflt_qv_rsrc);
err = idpf_set_real_num_queues(vport);
if (err)
goto err_open;
if (reset_cause == IDPF_SR_Q_CHANGE &&
!netif_is_rxfh_configured(vport->netdev)) {
struct idpf_rss_data *rss_data;
rss_data = &vport_config->user_config.rss_data;
idpf_fill_dflt_rss_lut(vport, rss_data);
}
if (vport_is_up)
err = idpf_vport_open(vport, false);
goto free_vport;
err_reset:
tmp_err = idpf_send_add_queues_msg(adapter, vport_config, rsrc,
vport_id);
err_open:
if (!tmp_err && vport_is_up)
idpf_vport_open(vport, false);
free_vport:
kfree(new_vport);
if (reset_cause == IDPF_SR_MTU_CHANGE)
idpf_idc_vdev_mtu_event(vport->vdev_info,
IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
return err;
}
static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
return idpf_add_mac_filter(np->vport, np, addr, true);
}
static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
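	/* Under some circumstances, we might get a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the default MAC filter list, ignore such
	 * requests and do not delete it from the list.
	 */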
if (ether_addr_equal(addr, netdev->dev_addr))
return 0;
idpf_del_mac_filter(np->vport, np, addr, true);
return 0;
}
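
/**
 * idpf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We
 * cannot sleep in this context.
 */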
static void idpf_set_rx_mode(struct net_device *netdev)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *config_data;
struct idpf_adapter *adapter;
bool changed = false;
struct device *dev;
int err;
adapter = np->adapter;
dev = &adapter->pdev->dev;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
__dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
__dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
}
if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
return;
config_data = &adapter->vport_config[np->vport_idx]->user_config;
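	/* IFF_PROMISC enables both unicast and multicast promiscuous,
	 * while IFF_ALLMULTI only enables multicast such that:
	 *
	 * promisc  + allmulti		= unicast | multicast
	 * promisc  + !allmulti		= unicast | multicast
	 * !promisc + allmulti		= multicast
	 */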
if ((netdev->flags & IFF_PROMISC) &&
!test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
changed = true;
		dev_info(dev, "Entering promiscuous mode\n");
		if (!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags))
dev_info(dev, "Entering multicast promiscuous mode\n");
}
if (!(netdev->flags & IFF_PROMISC) &&
test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
changed = true;
dev_info(dev, "Leaving promiscuous mode\n");
}
if (netdev->flags & IFF_ALLMULTI &&
!test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
changed = true;
dev_info(dev, "Entering multicast promiscuous mode\n");
}
if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
changed = true;
dev_info(dev, "Leaving multicast promiscuous mode\n");
}
if (!changed)
return;
err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
if (err)
dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
}
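
/**
 * idpf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 */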
static int idpf_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
struct idpf_adapter *adapter;
struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
adapter = vport->adapter;
if (idpf_is_reset_in_prog(adapter)) {
dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
err = -EBUSY;
goto unlock_mutex;
}
if (changed & NETIF_F_RXHASH) {
struct idpf_netdev_priv *np = netdev_priv(netdev);
netdev->features ^= NETIF_F_RXHASH;
if (test_bit(IDPF_VPORT_UP, np->state)) {
struct idpf_vport_config *vport_config;
struct idpf_rss_data *rss_data;
vport_config = adapter->vport_config[vport->idx];
rss_data = &vport_config->user_config.rss_data;
err = idpf_config_rss(vport, rss_data);
if (err)
goto unlock_mutex;
}
}
if (changed & NETIF_F_GRO_HW) {
netdev->features ^= NETIF_F_GRO_HW;
err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
if (err)
goto unlock_mutex;
}
if (changed & NETIF_F_LOOPBACK) {
bool loopback_ena;
netdev->features ^= NETIF_F_LOOPBACK;
loopback_ena = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
err = idpf_send_ena_dis_loopback_msg(adapter, vport->vport_id,
loopback_ena);
}
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
return err;
}
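
/**
 * idpf_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made active by
 * the system (IP on, etc). At this point all resources needed for transmit
 * and receive operations are allocated and the interrupt handlers are
 * registered with the OS.
 *
 * Returns 0 on success, negative value on failure
 */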
static int idpf_open(struct net_device *netdev)
{
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
err = idpf_set_real_num_queues(vport);
if (err)
goto unlock;
err = idpf_vport_open(vport, false);
unlock:
idpf_vport_ctrl_unlock(netdev);
return err;
}
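
/**
 * idpf_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */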
static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
WRITE_ONCE(netdev->mtu, new_mtu);
err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
idpf_vport_ctrl_unlock(netdev);
return err;
}
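
/**
 * idpf_chk_tso_segment - Check if a TSO skb needs stack segmentation
 * @skb: send buffer
 * @max_bufs: maximum number of scatter-gather buffers per packet
 *
 * Walks the fragment list to verify that every window of max_bufs - 1
 * fragments makes at least gso_size worth of forward progress, so that no
 * single TSO segment can span more than max_bufs descriptors.
 *
 * Returns true if the skb must be segmented by the stack, false otherwise.
 */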
static bool idpf_chk_tso_segment(const struct sk_buff *skb,
unsigned int max_bufs)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
const skb_frag_t *frag, *stale;
int nr_frags, sum;
nr_frags = shinfo->nr_frags;
if (nr_frags < (max_bufs - 1))
return false;
nr_frags -= max_bufs - 2;
frag = &shinfo->frags[0];
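	/* Initialize sum to 1 - gso_size: each window of fragments must then
	 * contribute at least gso_size bytes for the running sum to stay
	 * non-negative.
	 */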
sum = 1 - shinfo->gso_size;
sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++);
sum += skb_frag_size(frag++);
for (stale = &shinfo->frags[0];; stale++) {
int stale_size = skb_frag_size(stale);
sum += skb_frag_size(frag++);
if (stale_size > IDPF_TX_MAX_DESC_DATA) {
int align_pad = -(skb_frag_off(stale)) &
(IDPF_TX_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
stale_size -= align_pad;
do {
sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
} while (stale_size > IDPF_TX_MAX_DESC_DATA);
}
if (sum < 0)
return true;
if (!nr_frags--)
break;
sum -= stale_size;
}
return false;
}
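
/**
 * idpf_features_check - Validate packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */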
static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
if (skb_is_gso(skb)) {
if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
features &= ~NETIF_F_GSO_MASK;
else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
features &= ~NETIF_F_GSO_MASK;
}
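	/* MACLEN can support at most 63 words (126 bytes, even length) */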
len = skb_network_offset(skb);
if (unlikely(len & ~(126)))
goto unsupported;
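	/* Network header length is capped by the max Tx header size */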
len = skb_network_header_len(skb);
if (unlikely(len > max_tx_hdr_size))
goto unsupported;
if (!skb->encapsulation)
return features;
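	/* Tunnel header length must be an even number of bytes, at most 254 */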
len = skb_inner_network_header(skb) - skb_transport_header(skb);
if (unlikely(len & ~(127 * 2)))
goto unsupported;
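	/* Inner network header length is likewise capped */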
len = skb_inner_network_header_len(skb);
if (unlikely(len > max_tx_hdr_size))
goto unsupported;
return features;
unsupported:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
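
/**
 * idpf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/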
static int idpf_set_mac(struct net_device *netdev, void *p)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
struct sockaddr *addr = p;
u8 old_mac_addr[ETH_ALEN];
struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_MACFILTER)) {
dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
err = -EOPNOTSUPP;
goto unlock_mutex;
}
if (!is_valid_ether_addr(addr->sa_data)) {
dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
addr->sa_data);
err = -EADDRNOTAVAIL;
goto unlock_mutex;
}
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
goto unlock_mutex;
ether_addr_copy(old_mac_addr, vport->default_mac_addr);
ether_addr_copy(vport->default_mac_addr, addr->sa_data);
vport_config = vport->adapter->vport_config[vport->idx];
err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
if (err) {
__idpf_del_mac_filter(vport_config, addr->sa_data);
ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
goto unlock_mutex;
}
if (is_valid_ether_addr(old_mac_addr))
__idpf_del_mac_filter(vport_config, old_mac_addr);
eth_hw_addr_set(netdev, addr->sa_data);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
return err;
}
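
/**
 * idpf_alloc_dma_mem - Allocate dma memory
 * @hw: pointer to hw struct
 * @mem: pointer to dma_mem struct
 * @size: size of the memory to allocate
 */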
void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
{
struct idpf_adapter *adapter = hw->back;
size_t sz = ALIGN(size, 4096);
mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = sz;
return mem->va;
}
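
/**
 * idpf_free_dma_mem - Free the allocated dma memory
 * @hw: pointer to hw struct
 * @mem: pointer to dma_mem struct
 */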
void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
struct idpf_adapter *adapter = hw->back;
dma_free_attrs(&adapter->pdev->dev, mem->size,
mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
mem->size = 0;
mem->va = NULL;
mem->pa = 0;
}
static int idpf_hwtstamp_set(struct net_device *netdev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack)
{
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
if (!vport->link_up) {
idpf_vport_ctrl_unlock(netdev);
return -EPERM;
}
if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
!idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
idpf_vport_ctrl_unlock(netdev);
return -EOPNOTSUPP;
}
err = idpf_ptp_set_timestamp_mode(vport, config);
idpf_vport_ctrl_unlock(netdev);
return err;
}
static int idpf_hwtstamp_get(struct net_device *netdev,
struct kernel_hwtstamp_config *config)
{
struct idpf_vport *vport;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
if (!vport->link_up) {
idpf_vport_ctrl_unlock(netdev);
return -EPERM;
}
if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
!idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
idpf_vport_ctrl_unlock(netdev);
return 0;
}
*config = vport->tstamp_config;
idpf_vport_ctrl_unlock(netdev);
return 0;
}
static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
.ndo_start_xmit = idpf_tx_start,
.ndo_features_check = idpf_features_check,
.ndo_set_rx_mode = idpf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = idpf_set_mac,
.ndo_change_mtu = idpf_change_mtu,
.ndo_get_stats64 = idpf_get_stats64,
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
.ndo_hwtstamp_get = idpf_hwtstamp_get,
.ndo_hwtstamp_set = idpf_hwtstamp_set,
.ndo_bpf = idpf_xdp,
.ndo_xdp_xmit = idpf_xdp_xmit,
.ndo_xsk_wakeup = idpf_xsk_wakeup,
};