#ifndef _GVE_FBSD_H
#define _GVE_FBSD_H
#include "gve_desc.h"
#include "gve_plat.h"
#include "gve_register.h"
#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif
#define PCI_DEV_ID_GVNIC 0x0042
#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2
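/*
 * Each packet may consume at most 4 descriptors (e.g. one packet descriptor
 * plus optional metadata and segment descriptors); this also bounds the
 * number of TX FIFO iovecs tracked per packet in struct gve_tx_buffer_state.
 */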
#define GVE_TX_MAX_DESCS 4
#define GVE_TX_BUFRING_ENTRIES 4096
#define GVE_TX_TIMEOUT_PKT_SEC 5
#define GVE_TX_TIMEOUT_CHECK_CADENCE_SEC 5
#define GVE_TX_TIMEOUT_MAX_TX_QUEUES 16
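/*
 * Cooldown between successive kicks of the same timed-out queue (tracked in
 * tx->last_kicked): two full check rounds across the maximum number of TX
 * queues, i.e. 2 * 5 s * 16 = 160 s.
 */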
#define GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC \
(2 * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC * GVE_TX_TIMEOUT_MAX_TX_QUEUES)
#define GVE_TIMESTAMP_INVALID (-1)
#define ADMINQ_SIZE PAGE_SIZE
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_4K_RX_BUFFER_SIZE_DQO 4096
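/* Each RX bounce buffer page fits two default-sized packet buffers. */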
#define GVE_DEFAULT_RX_BUFFER_OFFSET (PAGE_SIZE / 2)
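/* The 1024-entry table corresponds to 10-bit packet-type IDs. */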
#define GVE_NUM_PTYPES 1024
#define GVE_QPL_DIVISOR 16
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");
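/*
 * The RX buffer sizes must fit in the mbuf clusters backing them: regular
 * 2 KB clusters for the default size, page-sized jumbo clusters for the
 * 4 KB DQO size.
 */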
_Static_assert(MCLBYTES >= GVE_DEFAULT_RX_BUFFER_SIZE,
"gve: bad MCLBYTES length");
_Static_assert(MJUMPAGESIZE >= GVE_4K_RX_BUFFER_SIZE_DQO,
"gve: bad MJUMPAGESIZE length");
struct gve_dma_handle {
bus_addr_t bus_addr;
void *cpu_addr;
bus_dma_tag_t tag;
bus_dmamap_t map;
};
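/*
 * Sketch of typical usage (hypothetical caller), pairing the coherent
 * alloc/free helpers declared at the bottom of this header:
 *
 *	struct gve_dma_handle dma;
 *
 *	if (gve_dma_alloc_coherent(priv, ADMINQ_SIZE, PAGE_SIZE, &dma) != 0)
 *		return (ENOMEM);
 *	... device DMAs to/from dma.bus_addr; driver uses dma.cpu_addr ...
 *	gve_dma_free_coherent(&dma);
 */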
union gve_tx_desc {
struct gve_tx_pkt_desc pkt;
struct gve_tx_mtd_desc mtd;
struct gve_tx_seg_desc seg;
};
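/*
 * An (offset, length) region of the GQI TX FIFO occupied by one segment of
 * a packet.
 */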
struct gve_tx_iovec {
uint32_t iov_offset;
uint32_t iov_len;
uint32_t iov_padding;
};
struct gve_queue_config {
uint16_t max_queues;
uint16_t num_queues;
};
struct gve_irq_db {
__be32 index;
} __aligned(CACHE_LINE_SIZE);
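/*
 * Queue formats the device may report. GQI consumes descriptors in ring
 * order; DQO splits each queue into a descriptor ring and a completion ring
 * and may complete out of order. QPL variants bounce packet data through
 * pre-registered queue page lists, while RDA variants DMA directly to and
 * from arbitrary guest addresses.
 */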
enum gve_queue_format {
GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
GVE_GQI_RDA_FORMAT = 0x1,
GVE_GQI_QPL_FORMAT = 0x2,
GVE_DQO_RDA_FORMAT = 0x3,
GVE_DQO_QPL_FORMAT = 0x4,
};
enum gve_state_flags_bit {
GVE_STATE_FLAG_ADMINQ_OK,
GVE_STATE_FLAG_RESOURCES_OK,
GVE_STATE_FLAG_QPLREG_OK,
GVE_STATE_FLAG_RX_RINGS_OK,
GVE_STATE_FLAG_TX_RINGS_OK,
GVE_STATE_FLAG_QUEUES_UP,
GVE_STATE_FLAG_LINK_UP,
GVE_STATE_FLAG_DO_RESET,
GVE_STATE_FLAG_IN_RESET,
GVE_NUM_STATE_FLAGS
};
BITSET_DEFINE(gve_state_flags, GVE_NUM_STATE_FLAGS);
#define GVE_DEVICE_STATUS_RESET (0x1 << 1)
#define GVE_DEVICE_STATUS_LINK_STATUS (0x1 << 2)
#define GVE_RING_LOCK(ring) mtx_lock(&(ring)->ring_mtx)
#define GVE_RING_TRYLOCK(ring) mtx_trylock(&(ring)->ring_mtx)
#define GVE_RING_UNLOCK(ring) mtx_unlock(&(ring)->ring_mtx)
#define GVE_RING_ASSERT(ring) mtx_assert(&(ring)->ring_mtx, MA_OWNED)
#define GVE_IFACE_LOCK_INIT(lock) sx_init(&lock, "gve interface lock")
#define GVE_IFACE_LOCK_DESTROY(lock) sx_destroy(&lock)
#define GVE_IFACE_LOCK_LOCK(lock) sx_xlock(&lock)
#define GVE_IFACE_LOCK_UNLOCK(lock) sx_unlock(&lock)
#define GVE_IFACE_LOCK_ASSERT(lock) sx_assert(&lock, SA_XLOCKED)
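/*
 * Typical TX locking pattern (sketch): the transmit path tries the ring
 * lock and defers to the xmit taskqueue on contention, e.g.:
 *
 *	if (GVE_RING_TRYLOCK(tx) != 0) {
 *		... drain tx->br and post descriptors ...
 *		GVE_RING_UNLOCK(tx);
 *	} else
 *		taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
 */

/*
 * A queue page list: driver-allocated pages registered with the device up
 * front and used as bounce buffers by the QPL queue formats.
 */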
struct gve_queue_page_list {
uint32_t id;
uint32_t num_dmas;
uint32_t num_pages;
vm_offset_t kva;
vm_page_t *pages;
struct gve_dma_handle *dmas;
};
struct gve_irq {
struct resource *res;
void *cookie;
};
struct gve_rx_slot_page_info {
void *page_address;
vm_page_t page;
uint32_t page_offset;
uint16_t pad;
};
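/* Reassembly state for an RX packet spanning multiple buffer fragments. */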
struct gve_rx_ctx {
struct mbuf *mbuf_head;
struct mbuf *mbuf_tail;
uint32_t total_size;
uint8_t frag_cnt;
bool is_tcp;
bool drop_pkt;
};
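/*
 * State common to RX and TX rings: the owning softc, admin-queue resources,
 * doorbell and interrupt-doorbell offsets, the ring's QPL (if any), and the
 * per-ring cleanup task.
 */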
struct gve_ring_com {
struct gve_priv *priv;
uint32_t id;
struct gve_queue_resources *q_resources;
struct gve_dma_handle q_resources_mem;
uint32_t irq_db_offset;
uint32_t db_offset;
uint32_t counter_idx;
int ntfy_id;
struct gve_queue_page_list *qpl;
struct task cleanup_task;
struct taskqueue *cleanup_tq;
} __aligned(CACHE_LINE_SIZE);
struct gve_rxq_stats {
counter_u64_t rbytes;
counter_u64_t rpackets;
counter_u64_t rx_dropped_pkt;
counter_u64_t rx_copybreak_cnt;
counter_u64_t rx_frag_flip_cnt;
counter_u64_t rx_frag_copy_cnt;
counter_u64_t rx_dropped_pkt_desc_err;
counter_u64_t rx_dropped_pkt_buf_post_fail;
counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
counter_u64_t rx_mbuf_dmamap_err;
counter_u64_t rx_mbuf_mclget_null;
};
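/* Relies on struct gve_rxq_stats containing only counter_u64_t fields. */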
#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))
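/*
 * DQO RX buffer tag: an 11-bit index into rx->dqo.bufs plus a 5-bit
 * fragment number, packed into 16 bits.
 */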
union gve_rx_qpl_buf_id_dqo {
struct {
uint16_t buf_id:11;
uint8_t frag_num:5;
};
uint16_t all;
} __packed;
_Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
"gve: bad dqo qpl rx buf id length");
struct gve_rx_buf_dqo {
union {
struct {
struct mbuf *mbuf;
bus_dmamap_t dmamap;
uint64_t addr;
bool mapped;
};
struct {
uint8_t num_nic_frags;
uint8_t next_idx;
STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
};
};
SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
};
struct gve_rx_ring {
struct gve_ring_com com;
struct gve_dma_handle desc_ring_mem;
uint32_t cnt;
uint32_t fill_cnt;
union {
struct {
struct gve_dma_handle data_ring_mem;
struct gve_rx_desc *desc_ring;
union gve_rx_data_slot *data_ring;
struct gve_rx_slot_page_info *page_info;
uint32_t mask;
uint8_t seq_no;
};
struct {
struct gve_dma_handle compl_ring_mem;
struct gve_rx_compl_desc_dqo *compl_ring;
struct gve_rx_desc_dqo *desc_ring;
struct gve_rx_buf_dqo *bufs;
bus_dma_tag_t buf_dmatag;
uint32_t buf_cnt;
uint32_t mask;
uint32_t head;
uint32_t tail;
uint8_t cur_gen_bit;
SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;
STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
} dqo;
};
struct lro_ctrl lro;
struct gve_rx_ctx ctx;
struct gve_rxq_stats stats;
} __aligned(CACHE_LINE_SIZE);
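/*
 * GQI TX bounce FIFO carved out of the queue page list: head is the next
 * byte offset to copy into, and available counts bytes not currently owned
 * by in-flight packets.
 */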
struct gve_tx_fifo {
vm_offset_t base;
uint32_t size;
volatile int available;
uint32_t head;
};
struct gve_tx_buffer_state {
struct mbuf *mbuf;
int64_t enqueue_time_sec;
struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
};
struct gve_txq_stats {
counter_u64_t tbytes;
counter_u64_t tpackets;
counter_u64_t tso_packet_cnt;
counter_u64_t tx_dropped_pkt;
counter_u64_t tx_delayed_pkt_nospace_device;
counter_u64_t tx_dropped_pkt_nospace_bufring;
counter_u64_t tx_delayed_pkt_nospace_descring;
counter_u64_t tx_delayed_pkt_nospace_compring;
counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
counter_u64_t tx_delayed_pkt_tsoerr;
counter_u64_t tx_dropped_pkt_vlan;
counter_u64_t tx_mbuf_collapse;
counter_u64_t tx_mbuf_defrag;
counter_u64_t tx_mbuf_defrag_err;
counter_u64_t tx_mbuf_dmamap_enomem_err;
counter_u64_t tx_mbuf_dmamap_err;
counter_u64_t tx_timeout;
};
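/* Relies on struct gve_txq_stats containing only counter_u64_t fields. */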
#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))
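/*
 * Tracks one in-flight DQO TX packet until its completion arrives: RDA mode
 * keeps the busdma map to unload, QPL mode keeps the chain of QPL buffers
 * to recycle.
 */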
struct gve_tx_pending_pkt_dqo {
struct mbuf *mbuf;
int64_t enqueue_time_sec;
union {
bus_dmamap_t dmamap;
struct {
int32_t qpl_buf_head;
uint32_t num_qpl_bufs;
};
};
uint8_t state;
int next;
};
struct gve_tx_ring {
struct gve_ring_com com;
struct gve_dma_handle desc_ring_mem;
struct task xmit_task;
struct taskqueue *xmit_tq;
bool stopped;
struct buf_ring *br;
struct mtx ring_mtx;
uint32_t req;
uint32_t done;
int64_t last_kicked;
union {
struct {
union gve_tx_desc *desc_ring;
struct gve_tx_buffer_state *info;
struct gve_tx_fifo fifo;
uint32_t mask;
};
struct {
struct gve_dma_handle compl_ring_mem;
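		/* Fields used when posting descriptors. */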
struct {
union gve_tx_desc_dqo *desc_ring;
uint32_t desc_mask;
uint32_t desc_head;
uint32_t desc_tail;
uint32_t last_re_idx;
int32_t free_pending_pkts_csm;
int32_t free_qpl_bufs_csm;
uint32_t qpl_bufs_consumed;
uint32_t qpl_bufs_produced_cached;
bus_dma_tag_t buf_dmatag;
} __aligned(CACHE_LINE_SIZE);
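		/* Fields used when processing completions. */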
struct {
struct gve_tx_compl_desc_dqo *compl_ring;
uint32_t compl_mask;
uint32_t compl_head;
uint8_t cur_gen_bit;
uint32_t hw_tx_head;
int32_t free_pending_pkts_prd;
int32_t free_qpl_bufs_prd;
uint32_t qpl_bufs_produced;
} __aligned(CACHE_LINE_SIZE);
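		/* Fields shared by the posting and completion paths. */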
struct {
struct gve_tx_pending_pkt_dqo *pending_pkts;
uint16_t num_pending_pkts;
int32_t *qpl_bufs;
} __aligned(CACHE_LINE_SIZE);
} dqo;
};
struct gve_txq_stats stats;
} __aligned(CACHE_LINE_SIZE);
enum gve_packet_state {
GVE_PACKET_STATE_UNALLOCATED,
GVE_PACKET_STATE_FREE,
GVE_PACKET_STATE_PENDING_DATA_COMPL,
};
struct gve_ptype {
uint8_t l3_type;
uint8_t l4_type;
};
struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
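/* Per-device driver state (softc). */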
struct gve_priv {
if_t ifp;
device_t dev;
struct ifmedia media;
uint8_t mac[ETHER_ADDR_LEN];
struct gve_dma_handle aq_mem;
struct resource *reg_bar;
struct resource *db_bar;
struct resource *msix_table;
uint32_t mgmt_msix_idx;
uint32_t rx_copybreak;
uint16_t num_event_counters;
uint16_t default_num_queues;
uint16_t tx_desc_cnt;
uint16_t max_tx_desc_cnt;
uint16_t min_tx_desc_cnt;
uint16_t rx_desc_cnt;
uint16_t max_rx_desc_cnt;
uint16_t min_rx_desc_cnt;
uint16_t rx_pages_per_qpl;
uint64_t max_registered_pages;
uint64_t num_registered_pages;
uint32_t supported_features;
uint16_t max_mtu;
bool modify_ringsize_enabled;
struct gve_dma_handle counter_array_mem;
__be32 *counters;
struct gve_dma_handle irqs_db_mem;
struct gve_irq_db *irq_db_indices;
enum gve_queue_format queue_format;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
uint32_t num_queues;
struct gve_irq *irq_tbl;
struct gve_tx_ring *tx;
struct gve_rx_ring *rx;
struct gve_ptype_lut *ptype_lut_dqo;
struct gve_adminq_command *adminq;
vm_paddr_t adminq_bus_addr;
uint32_t adminq_mask;
uint32_t adminq_prod_cnt;
uint32_t adminq_cmd_fail;
uint32_t adminq_timeouts;
uint32_t adminq_describe_device_cnt;
uint32_t adminq_cfg_device_resources_cnt;
uint32_t adminq_register_page_list_cnt;
uint32_t adminq_unregister_page_list_cnt;
uint32_t adminq_create_tx_queue_cnt;
uint32_t adminq_create_rx_queue_cnt;
uint32_t adminq_destroy_tx_queue_cnt;
uint32_t adminq_destroy_rx_queue_cnt;
uint32_t adminq_dcfg_device_resources_cnt;
uint32_t adminq_set_driver_parameter_cnt;
uint32_t adminq_verify_driver_compatibility_cnt;
uint32_t adminq_get_ptype_map_cnt;
uint32_t interface_up_cnt;
uint32_t interface_down_cnt;
uint32_t reset_cnt;
struct task service_task;
struct taskqueue *service_tq;
struct gve_state_flags state_flags;
struct sx gve_iface_lock;
struct callout tx_timeout_service;
uint16_t check_tx_queue_idx;
uint16_t rx_buf_size_dqo;
};
static inline bool
gve_get_state_flag(struct gve_priv *priv, int pos)
{
return (BIT_ISSET(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags));
}
static inline void
gve_set_state_flag(struct gve_priv *priv, int pos)
{
BIT_SET_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
static inline void
gve_clear_state_flag(struct gve_priv *priv, int pos)
{
BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
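/*
 * Example (sketch): fast paths typically bail out while a reset is in
 * flight, e.g.:
 *
 *	if (__predict_false(gve_get_state_flag(priv,
 *	    GVE_STATE_FLAG_IN_RESET)))
 *		return (ENXIO);
 */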
static inline bool
gve_is_gqi(struct gve_priv *priv)
{
return (priv->queue_format == GVE_GQI_QPL_FORMAT);
}
static inline bool
gve_is_qpl(struct gve_priv *priv)
{
return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
priv->queue_format == GVE_DQO_QPL_FORMAT);
}
static inline bool
gve_is_4k_rx_buf(struct gve_priv *priv)
{
return (priv->rx_buf_size_dqo == GVE_4K_RX_BUFFER_SIZE_DQO);
}
static inline bus_size_t
gve_rx_dqo_mbuf_segment_size(struct gve_priv *priv)
{
return (gve_is_4k_rx_buf(priv) ? MJUMPAGESIZE : MCLBYTES);
}
void gve_schedule_reset(struct gve_priv *priv);
int gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
int gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
int gve_adjust_ring_sizes(struct gve_priv *priv, uint16_t new_desc_cnt, bool is_rx);
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
struct gve_queue_page_list *gve_alloc_qpl(struct gve_priv *priv, uint32_t id,
int npages, bool single_kva);
void gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);
void gve_mextadd_free(struct mbuf *mbuf);
int gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
void gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
int gve_check_tx_timeout_gqi(struct gve_priv *priv, struct gve_tx_ring *tx);
int gve_tx_intr(void *arg);
int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
void gve_qflush(if_t ifp);
void gve_xmit_tq(void *arg, int pending);
void gve_tx_cleanup_tq(void *arg, int pending);
int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
int gve_check_tx_timeout_dqo(struct gve_priv *priv, struct gve_tx_ring *tx);
int gve_tx_intr_dqo(void *arg);
int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
void gve_tx_cleanup_tq_dqo(void *arg, int pending);
int gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
void gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);
void gve_rx_cleanup_tq(void *arg, int pending);
int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_free_ring_dqo(struct gve_priv *priv, int i);
void gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx);
void gve_clear_rx_ring_dqo(struct gve_priv *priv, int i);
int gve_rx_intr_dqo(void *arg);
void gve_rx_cleanup_tq_dqo(void *arg, int pending);
int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
struct gve_dma_handle *dma);
void gve_dma_free_coherent(struct gve_dma_handle *dma);
int gve_dmamap_create(struct gve_priv *priv, int size, int align,
struct gve_dma_handle *dma);
void gve_dmamap_destroy(struct gve_dma_handle *dma);
void gve_free_irqs(struct gve_priv *priv);
int gve_alloc_irqs(struct gve_priv *priv);
void gve_unmask_all_queue_irqs(struct gve_priv *priv);
void gve_mask_all_queue_irqs(struct gve_priv *priv);
void gve_invalidate_timestamp(int64_t *timestamp_sec);
int64_t gve_seconds_since(int64_t *timestamp_sec);
void gve_set_timestamp(int64_t *timestamp_sec);
bool gve_timestamp_valid(int64_t *timestamp_sec);
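/*
 * Sketch of the TX-timeout bookkeeping these helpers support: the xmit path
 * stamps each packet and the timeout callout compares against the limit:
 *
 *	gve_set_timestamp(&info->enqueue_time_sec);
 *	...
 *	if (gve_timestamp_valid(&info->enqueue_time_sec) &&
 *	    gve_seconds_since(&info->enqueue_time_sec) >
 *	    GVE_TX_TIMEOUT_PKT_SEC)
 *		... kick the queue or schedule a reset ...
 */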
extern bool gve_disable_hw_lro;
extern bool gve_allow_4k_rx_buffers;
extern char gve_queue_format[8];
extern char gve_version[8];
void gve_setup_sysctl(struct gve_priv *priv);
void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
uint64_t *tbytes, uint64_t *tx_dropped_pkt);
void gve_alloc_counters(counter_u64_t *stat, int num_stats);
void gve_free_counters(counter_u64_t *stat, int num_stats);
#endif