/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

#define XDP_UMEM_SG_FLAG (1 << 1)

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
        void *addrs;                    /* vmap()ed view of the pinned pages */
        u64 size;                       /* umem size in bytes */
        u32 headroom;
        u32 chunk_size;
        u32 chunks;
        u32 npgs;                       /* number of pinned pages */
        struct user_struct *user;       /* for locked-memory accounting */
        refcount_t users;               /* sockets sharing this umem */
        u8 flags;
        u8 tx_metadata_len;
        bool zc;                        /* zero-copy mode */
        struct page **pgs;              /* pinned user pages backing the umem */
        int id;
        struct list_head xsk_dma_list;
        struct work_struct work;        /* deferred release */
};

struct xsk_map {
        struct bpf_map map;
        spinlock_t lock; /* Synchronize map updates */
        atomic_t count;
        struct xdp_sock __rcu *xsk_map[];
};

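/*
 * For orientation (a sketch, not part of this header's API): an XSKMAP
 * (BPF_MAP_TYPE_XSKMAP) holds xdp_sock entries, conventionally keyed by RX
 * queue id, and an XDP program redirects frames into it with
 * bpf_redirect_map(), falling back to XDP_PASS when the slot is empty.
 * Assuming a map named "xsks_map" populated from userspace:
 *
 *      SEC("xdp")
 *      int xsk_redirect_prog(struct xdp_md *ctx)
 *      {
 *              return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
 *                                      XDP_PASS);
 *      }
 */
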
struct xdp_sock {
        /* struct sock must be the first member of struct xdp_sock */
        struct sock sk;
        struct xsk_queue *rx ____cacheline_aligned_in_smp;
        struct net_device *dev;
        struct xdp_umem *umem;
        struct list_head flush_node;
        struct xsk_buff_pool *pool;
        u16 queue_id;
        bool zc;
        bool sg;
        enum {
                XSK_READY = 0,
                XSK_BOUND,
                XSK_UNBOUND,
        } state;

        struct xsk_queue *tx ____cacheline_aligned_in_smp;
        struct list_head tx_list;
        /* record the number of tx descriptors sent by this xsk and
         * when it exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs
         * to be given to other xsks for sending tx descriptors, thereby
         * preventing other XSKs from being starved.
         */
        u32 tx_budget_spent;

        /* Statistics */
        u64 rx_dropped;
        u64 rx_queue_full;

        /* When __xsk_generic_xmit() must return before it sees the EOP
         * descriptor for the current packet, the partially built skb is saved
         * here so that packet building can resume in the next call of
         * __xsk_generic_xmit().
         */
        struct sk_buff *skb;

        struct list_head map_list;
        /* Protects map_list */
        spinlock_t map_list_lock;
        u32 max_tx_budget;
        /* Protects multiple processes in the control path */
        struct mutex mutex;
        struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
        struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

/**
 * struct xsk_tx_metadata_ops - driver hooks for AF_XDP TX metadata offloads
 * @tmo_request_timestamp: request a TX hardware timestamp for this frame
 * @tmo_fill_timestamp: return the TX timestamp gathered at completion time
 * @tmo_request_checksum: request L4 checksum offload; checksumming starts at
 *      @csum_start and the result is stored at @csum_offset relative to it
 * @tmo_request_launch_time: request that the frame not be transmitted before
 *      @launch_time (in nanoseconds)
 */
struct xsk_tx_metadata_ops {
        void    (*tmo_request_timestamp)(void *priv);
        u64     (*tmo_fill_timestamp)(void *priv);
        void    (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);
        void    (*tmo_request_launch_time)(u64 launch_time, void *priv);
};

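/*
 * A minimal sketch of how a driver might provide these callbacks; all
 * "mydrv" names are hypothetical, and the net_device::xsk_tx_metadata_ops
 * hook is assumed to be the registration point:
 *
 *      static void mydrv_tmo_request_timestamp(void *priv)
 *      {
 *              struct mydrv_tx_desc *desc = priv;
 *
 *              desc->cmd |= MYDRV_TX_DESC_REPORT_TSTAMP;
 *      }
 *
 *      static u64 mydrv_tmo_fill_timestamp(void *priv)
 *      {
 *              struct mydrv_tx_compl *c = priv;
 *
 *              return mydrv_cycles_to_ns(c->hw_tstamp);
 *      }
 *
 *      static const struct xsk_tx_metadata_ops mydrv_xsk_tmo = {
 *              .tmo_request_timestamp  = mydrv_tmo_request_timestamp,
 *              .tmo_fill_timestamp     = mydrv_tmo_fill_timestamp,
 *      };
 *
 *      ...
 *      netdev->xsk_tx_metadata_ops = &mydrv_xsk_tmo;
 */
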
#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(struct list_head *flush_list);
INDIRECT_CALLABLE_DECLARE(void xsk_destruct_skb(struct sk_buff *));

/**
 * xsk_tx_metadata_to_compl - Save enough relevant metadata information
 * to perform tx completion in the future.
 * @meta: pointer to AF_XDP metadata area
 * @compl: pointer to output struct xsk_tx_metadata_compl
 *
 * This function should be called by the networking device when
 * it prepares an AF_XDP egress packet. The value of @compl should be
 * stored and passed upon completion to xsk_tx_metadata_complete().
 */
static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
                                            struct xsk_tx_metadata_compl *compl)
{
        if (!meta)
                return;

        if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
                compl->tx_timestamp = &meta->completion.tx_timestamp;
        else
                compl->tx_timestamp = NULL;
}

/**
 * xsk_tx_metadata_request - Evaluate AF_XDP TX metadata at submission
 * and call the appropriate xsk_tx_metadata_ops operation.
 * @meta: pointer to AF_XDP metadata area
 * @ops: pointer to struct xsk_tx_metadata_ops
 * @priv: pointer to driver-specific aux data
 *
 * This function should be called by the networking device when
 * it prepares an AF_XDP egress packet.
 */
static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
                                           const struct xsk_tx_metadata_ops *ops,
                                           void *priv)
{
        if (!meta)
                return;

        if (ops->tmo_request_launch_time)
                if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
                        ops->tmo_request_launch_time(meta->request.launch_time,
                                                     priv);

        if (ops->tmo_request_timestamp)
                if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
                        ops->tmo_request_timestamp(priv);

        if (ops->tmo_request_checksum)
                if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM)
                        ops->tmo_request_checksum(meta->request.csum_start,
                                                  meta->request.csum_offset, priv);
}

/**
 * xsk_tx_metadata_complete - Evaluate AF_XDP TX metadata at completion
 * and call the appropriate xsk_tx_metadata_ops operation.
 * @compl: pointer to completion metadata produced by xsk_tx_metadata_to_compl()
 * @ops: pointer to struct xsk_tx_metadata_ops
 * @priv: pointer to driver-specific aux data
 *
 * This function should be called by the networking device upon
 * AF_XDP egress completion.
 */
static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
                                            const struct xsk_tx_metadata_ops *ops,
                                            void *priv)
{
        if (!compl)
                return;
        if (!compl->tx_timestamp)
                return;

        *compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
}

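/*
 * A minimal sketch of where the three helpers above fit in a driver TX
 * path; the "mydrv"/"tx_buf" names are hypothetical, and
 * xsk_buff_get_metadata() (from xdp_sock_drv.h) is assumed for locating
 * the per-frame metadata area:
 *
 *      on submission:
 *              meta = xsk_buff_get_metadata(pool, desc.addr);
 *              xsk_tx_metadata_request(meta, &mydrv_xsk_tmo, hw_desc);
 *              xsk_tx_metadata_to_compl(meta, &tx_buf->xsk_compl);
 *
 *      on TX completion (e.g. in the NAPI clean path):
 *              xsk_tx_metadata_complete(&tx_buf->xsk_compl, &mydrv_xsk_tmo,
 *                                       hw_compl);
 */
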
#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(struct list_head *flush_list)
{
}

/* INDIRECT_CALLABLE_DECLARE() only declares xsk_destruct_skb() when
 * retpolines are enabled; in that case INDIRECT_CALL_*() sites reference
 * the symbol directly, so a stub must exist even without CONFIG_XDP_SOCKETS.
 */
#ifdef CONFIG_MITIGATION_RETPOLINE
static inline void xsk_destruct_skb(struct sk_buff *skb)
{
}
#endif

static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
                                            struct xsk_tx_metadata_compl *compl)
{
}

static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
                                           const struct xsk_tx_metadata_ops *ops,
                                           void *priv)
{
}

static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
                                            const struct xsk_tx_metadata_ops *ops,
                                            void *priv)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */