#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H
#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>
/* Flags for the task_work queueing helpers. */
enum {
	/*
	 * Wakeup of the target task may be done lazily/batched rather than
	 * immediately — NOTE(review): confirm exact semantics against the
	 * task_work queueing implementation.
	 */
	IOU_F_TWQ_LAZY_WAKE = 1,
};
/* Flags describing the context a request is being issued under. */
enum io_uring_cmd_flags {
	/* completion may be deferred to the batched completion path */
	IO_URING_F_COMPLETE_DEFER = 1,
	IO_URING_F_UNLOCKED = 2,
	IO_URING_F_MULTISHOT = 4,
	/* issued from the io-wq async worker path */
	IO_URING_F_IOWQ = 8,
	IO_URING_F_INLINE = 16,
	/* int's sign bit; sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK = INT_MIN,

	/* ctx state flags, mirroring ring setup properties */
	IO_URING_F_SQE128 = (1 << 8),
	IO_URING_F_CQE32 = (1 << 9),
	IO_URING_F_IOPOLL = (1 << 10),
	IO_URING_F_CANCEL = (1 << 11),
	IO_URING_F_COMPAT = (1 << 12),
};
/* Intrusive singly linked list node for io-wq work items. */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

/* Singly linked list keeping a tail pointer for O(1) append. */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
/* A unit of work as queued to io-wq. */
struct io_wq_work {
	struct io_wq_work_node list;
	/* atomic_t: flags may be updated concurrently with workers */
	atomic_t flags;
	/* place in global cancel sequence, used to match cancel requests */
	int cancel_seq;
};
/*
 * Array of registered resource nodes; used for both the file table and
 * the buffer table (see struct io_ring_ctx).
 */
struct io_rsrc_data {
	unsigned int nr;		/* number of slots in ->nodes */
	struct io_rsrc_node **nodes;
};
/* Table of registered files. */
struct io_file_table {
	struct io_rsrc_data data;
	/* slot-usage bitmap — presumably one bit per entry in ->data */
	unsigned long *bitmap;
	/* hint for where to start the next free-slot search */
	unsigned int alloc_hint;
};
/* One hash bucket, cacheline aligned to limit false sharing. */
struct io_hash_bucket {
	struct hlist_head list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket *hbs;
	/* presumably 1 << hash_bits buckets — confirm at the alloc site */
	unsigned hash_bits;
};
/* A pinned region of memory, described page-by-page. */
struct io_mapped_region {
	struct page **pages;
	void *ptr;		/* kernel-side mapping of the region */
	unsigned nr_pages;
	unsigned flags;
};
/*
 * Result of a provided-buffer selection: the buffer list the selection
 * came from, plus either the chosen user address or a signed value
 * (result/error) — NOTE(review): which union member is live depends on
 * the caller; confirm against the buffer-select helpers.
 */
struct io_br_sel {
	struct io_buffer_list *buf_list;
	union {
		void __user *addr;
		ssize_t val;
	};
};
/* Max number of ring fds a task can register for direct (offset) use */
#define IO_RINGFD_REG_MAX 16

/* Per-task io_uring state. */
struct io_uring_task {
	/* task-local cache of request references */
	int cached_refs;
	/* presumably the most recently used ctx — TODO confirm */
	const struct io_ring_ctx *last;
	struct task_struct *task;
	struct io_wq *io_wq;
	struct file *registered_rings[IO_RINGFD_REG_MAX];

	struct xarray xa;
	struct wait_queue_head wait;
	atomic_t in_cancel;
	atomic_t inflight_tracked;
	struct percpu_counter inflight;

	/* hot task_work queueing state on its own cacheline */
	struct {
		struct llist_head task_list;
		struct callback_head task_work;
	} ____cacheline_aligned_in_smp;
};
/*
 * Vector of iovecs or bio_vecs. Which union member is active is not
 * tracked here; the user of the struct must know.
 */
struct iou_vec {
	union {
		struct iovec *iovec;
		struct bio_vec *bvec;
	};
	unsigned nr;	/* number of elements in the active array */
};
/* A head/tail index pair for one ring (SQ or CQ), see struct io_rings. */
struct io_uring {
	u32 head;
	u32 tail;
};
/*
 * Ring layout shared with userspace: SQ/CQ head-tail pairs, ring masks
 * and sizes, status counters, and the CQE array itself as a flexible
 * array member.
 */
struct io_rings {
	struct io_uring sq, cq;
	/* ring masks — presumably entries-1, used for index wrapping */
	u32 sq_ring_mask, cq_ring_mask;
	/* ring sizes */
	u32 sq_ring_entries, cq_ring_entries;
	/* counter of SQEs the kernel could not consume — TODO confirm */
	u32 sq_dropped;
	/* atomic_t: written by the kernel, read by userspace */
	atomic_t sq_flags;
	u32 cq_flags;
	/* number of CQEs that overflowed the CQ ring — TODO confirm */
	u32 cq_overflow;
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
struct io_bpf_filter;

/*
 * Refcounted set of BPF filters used by ring restrictions. ->filters is
 * an RCU-protected array of filter pointers (presumably indexed by
 * opcode — confirm at the lookup site); ->lock serializes updates and
 * ->rcu_head is used for deferred freeing.
 */
struct io_bpf_filters {
	refcount_t refs;
	spinlock_t lock;
	struct io_bpf_filter __rcu **filters;
	struct rcu_head rcu_head;
};
/* Restrictions on which register ops, SQE opcodes and flags are allowed. */
struct io_restriction {
	/* allowed io_uring_register() opcodes */
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	/* allowed SQE opcodes */
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	struct io_bpf_filters *bpf_filters;
	/* filters are shared and must be copied before modification — TODO confirm */
	bool bpf_filters_cow;
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	/* opcode restrictions have been registered */
	bool op_registered;
	/* register-op restrictions have been registered */
	bool reg_registered;
};
/* Head and tail of the request link chain currently being assembled. */
struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};
/* State kept across one submission batch. */
struct io_submit_state {
	/* cache of free requests — TODO confirm with the alloc path */
	struct io_wq_work_node free_list;
	/* requests whose completion is batched/deferred */
	struct io_wq_work_list compl_reqs;
	/* link chain under construction */
	struct io_submit_link link;

	bool plug_started;
	bool need_plug;
	bool cq_flush;
	unsigned short submit_nr;
	/* block-layer plugging held across the batch */
	struct blk_plug plug;
};
/* Simple array-based object cache. */
struct io_alloc_cache {
	void **entries;
	unsigned int nr_cached;		/* current number of cached objects */
	unsigned int max_cached;	/* capacity limit */
	unsigned int elem_size;		/* size of one cached object */
	/* bytes cleared on allocation — NOTE(review): confirm semantics */
	unsigned int init_clear;
};
/* The main per-ring context. Grouped into cacheline-aligned sections. */
struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int flags;
		unsigned int drain_next: 1;
		unsigned int op_restricted: 1;
		unsigned int reg_restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
		unsigned int has_evfd: 1;
		/* all CQEs are posted only by the submitter task */
		unsigned int task_complete: 1;
		unsigned int lockless_cq: 1;
		unsigned int syscall_iopoll: 1;
		unsigned int poll_activated: 1;
		unsigned int drain_disabled: 1;
		unsigned int compat: 1;
		unsigned int iowq_limits_set : 1;

		struct task_struct *submitter_task;
		struct io_rings *rings;
		/* RCU-protected filter array, see struct io_bpf_filters */
		struct io_bpf_filter __rcu **bpf_filters;
		struct percpu_ref refs;

		clockid_t clockid;
		enum tk_offsets clock_offset;

		enum task_work_notify_mode notify_method;
		unsigned sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;

		atomic_t cancel_seq;

		bool poll_multi_queue;
		/*
		 * iopoll side: presumably protected by uring_lock for
		 * non-SQPOLL rings — TODO confirm against the iopoll code.
		 */
		struct list_head iopoll_list;

		struct io_file_table file_table;
		struct io_rsrc_data buf_table;
		struct io_alloc_cache node_cache;
		struct io_alloc_cache imu_cache;

		struct io_submit_state submit_state;

		/* provided buffer lists */
		struct xarray io_bl_xa;

		struct io_hash_table cancel_table;
		struct io_alloc_cache apoll_cache;
		struct io_alloc_cache netmsg_cache;
		struct io_alloc_cache rw_cache;
		struct io_alloc_cache cmd_cache;

		/* deferred cancelations for uring_cmd */
		struct hlist_head cancelable_uring_cmd;

		u64 hybrid_poll_time;
	} ____cacheline_aligned_in_smp;

	/* CQE posting side */
	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup — see cqe_sentinel.
		 */
		struct io_uring_cqe *cqe_cached;
		struct io_uring_cqe *cqe_sentinel;

		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct io_ev_fd __rcu *io_ev_fd;

		void *cq_wait_arg;
		size_t cq_wait_size;
	} ____cacheline_aligned_in_smp;

	/* task_work queueing and CQ waiting */
	struct {
		struct io_rings __rcu *rings_rcu;

		struct llist_head work_llist;
		struct llist_head retry_llist;
		unsigned long check_cq;
		atomic_t cq_wait_nr;
		atomic_t cq_timeouts;
		struct wait_queue_head cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		raw_spinlock_t timeout_lock;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t completion_lock;

	/* overflowed CQEs, see struct io_overflow_cqe */
	struct list_head cq_overflow_list;

	struct hlist_head waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head futex_list;
	struct io_alloc_cache futex_cache;
#endif

	/* SQPOLL thread state */
	const struct cred *sq_creds;
	struct io_sq_data *sq_data;
	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	/* range for fixed-file slot allocation */
	unsigned int file_alloc_start;
	unsigned int file_alloc_end;

	struct wait_queue_head poll_wq;
	struct io_restriction restrictions;

	struct xarray zcrx_ctxs;

	/* personality (credential) registrations */
	u32 pers_next;
	struct xarray personalities;

	/* slow path, rarely used fields below */
	struct io_wq_hash *hash_map;
	struct user_struct *user;
	struct mm_struct *mm_account;

	struct list_head tctx_list;
	struct mutex tctx_lock;

	/* fallback execution when task_work cannot run — TODO confirm */
	struct llist_head fallback_llist;
	struct delayed_work fallback_work;
	struct work_struct exit_work;
	struct completion ref_comp;

	/* io-wq limits, e.g. worker counts — confirm against io-wq code */
	u32 iowq_limits[2];

	struct callback_head poll_wq_task_work;
	struct list_head defer_list;

	unsigned nr_drained;
	unsigned nr_req_allocated;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head napi_list;
	spinlock_t napi_lock;	/* guards napi_list */
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
	u8 napi_track_mode;
	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protects the mapped regions below against concurrent mmap — confirm */
	struct mutex mmap_lock;
	struct io_mapped_region sq_region;
	struct io_mapped_region ring_region;
	/* used for request parameter / wait argument passing */
	struct io_mapped_region param_region;
};
/*
 * Token passed by value to task-work handlers, proving the caller runs
 * in task-work context. 'cancel' signals cancelation — NOTE(review):
 * confirm exact meaning against the task-work handlers.
 */
struct io_tw_state {
	bool cancel;
};
typedef struct io_tw_state io_tw_token_t;
/* Bit numbers for per-request flags; masks are built via IO_REQ_FLAG(). */
enum {
	/* bits 0..6 mirror the userspace-visible IOSQE_* flag bits */
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first bit after the sqe->flags bits */
	REQ_F_FAIL_BIT = 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_HYBRID_IOPOLL_STATE_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_MULTISHOT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,
	REQ_F_BUF_NODE_BIT,
	REQ_F_BUF_MORE_BIT,
	REQ_F_HAS_METADATA_BIT,
	REQ_F_IMPORT_BUFFER_BIT,
	REQ_F_SQE_COPIED_BIT,

	/* keep last: number of flag bits in use */
	__REQ_F_LAST_BIT,
};
/* Request flag masks are a distinct bitwise type to catch misuse via sparse. */
typedef u64 __bitwise io_req_flags_t;
/* Turn a REQ_F_*_BIT bit number into its io_req_flags_t mask. */
#define IO_REQ_FLAG(bitno) ((__force io_req_flags_t) BIT_ULL((bitno)))
/* Per-request flag masks, derived from the REQ_F_*_BIT numbers above. */
enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP = IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),
	/* fail rest of links */
	REQ_F_FAIL = IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT = IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT = IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* hybrid iopoll state — TODO confirm exact semantics */
	REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING = IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE = IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT = IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG = IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS = IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT = IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT = IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA = IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES = IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* request posts multiple completions, should be set at prep time */
	REQ_F_MULTISHOT = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
	/* buf node is valid */
	REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
	/* buffer "more" semantics — NOTE(review): confirm usage */
	REQ_F_BUF_MORE = IO_REQ_FLAG(REQ_F_BUF_MORE_BIT),
	/* request has read/write metadata assigned */
	REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
	/* resolve/import buffers at issue time — TODO confirm semantics */
	REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
	/* the SQE has been copied out of the shared ring — TODO confirm */
	REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
};
/* Wrapper carrying the request into a task-work callback. */
struct io_tw_req {
	struct io_kiocb *req;
};

/* Task-work handler; 'tw' is the context token, see io_tw_token_t. */
typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw);

/* Per-request task_work node and its handler. */
struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};
/* Kernel-internal CQE representation: user_data, result, flags (or an fd). */
struct io_cqe {
	__u64 user_data;
	__s32 res;
	union {
		__u32 flags;
		int fd;
	};
};
/*
 * Each request type casts this space to its own per-opcode command
 * struct via io_kiocb_to_cmd(); 'file' must stay the first member so it
 * overlays io_kiocb's file pointer (see the union in struct io_kiocb).
 */
struct io_cmd_data {
	struct file *file;
	/* each command gets 56 bytes of data */
	__u8 data[56];
};
/* Compile-time check that a command type fits inside io_cmd_data. */
static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
/* Cast a request's command space to the per-opcode type, size-checked. */
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
/*
 * Recover the io_kiocb from a command-data pointer. The command space is
 * the first member of struct io_kiocb (unioned with ->file), so the two
 * share the same address and a plain conversion suffices.
 */
static inline struct io_kiocb *cmd_to_io_kiocb(void *ptr)
{
	struct io_kiocb *req = ptr;

	return req;
}
/* The in-kernel request. */
struct io_kiocb {
	union {
		/*
		 * NOTE! The command data (struct io_cmd_data) has the file
		 * pointer as its first entry, so 'file' can be accessed
		 * either directly or through the command struct.
		 */
		struct file *file;
		struct io_cmd_data cmd;
	};

	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;
	/* fixed buffer index / selected buffer ID — TODO confirm which */
	u16 buf_index;

	unsigned nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t flags;

	struct io_cqe cqe;

	struct io_ring_ctx *ctx;
	struct io_uring_task *tctx;

	union {
		/* provided/selected buffer */
		struct io_buffer *kbuf;
		/* registered buffer node */
		struct io_rsrc_node *buf_node;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};

	struct io_rsrc_node *file_node;

	atomic_t refs;
	bool cancel_seq_set;
	union {
		struct io_task_work io_task_work;
		/* start timestamp for iopoll — NOTE(review): confirm units */
		u64 iopoll_start;
	};
	union {
		/* node in ctx->cancel_table for poll cancelation */
		struct hlist_node hash_node;
		/* node in ctx->iopoll_list */
		struct list_head iopoll_node;
		struct rcu_head rcu_head;
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll *apoll;
	/* opcode-allocated, if it needs to store data for async defer */
	void *async_data;
	atomic_t poll_refs;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb *link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred *creds;
	struct io_wq_work work;

	/* extra CQE data for 32-byte CQEs */
	struct io_big_cqe {
		u64 extra1;
		u64 extra2;
	} big_cqe;
};
/*
 * A CQE stashed in memory when it could not be posted — presumably
 * linked on ctx->cq_overflow_list (see struct io_ring_ctx).
 */
struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};
#endif