Symbol: sqe
block/ioctl.c:956: const struct io_uring_sqe *sqe = cmd->sqe;
block/ioctl.c:960: if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
block/ioctl.c:961: sqe->rw_flags || sqe->file_index))
block/ioctl.c:967: start = READ_ONCE(sqe->addr);
block/ioctl.c:968: len = READ_ONCE(sqe->addr3);
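
The block/ioctl.c hits above show the canonical uring_cmd parsing pattern: SQE fields the command gives no meaning to are rejected up front, and the fields it does use are fetched with READ_ONCE(), since the SQE can sit in memory the submitter still sees. A minimal sketch (the "foo" opcode and foo_handle_range() are hypothetical; everything else follows the hits):

        static int foo_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
        {
                const struct io_uring_sqe *sqe = cmd->sqe;
                u64 start, len;

                /* fields this command does not define must be zero */
                if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
                             sqe->rw_flags || sqe->file_index))
                        return -EINVAL;

                /* fetch each used field exactly once */
                start = READ_ONCE(sqe->addr);
                len = READ_ONCE(sqe->addr3);

                return foo_handle_range(cmd, start, len);       /* hypothetical */
        }
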
drivers/block/ublk_drv.c:2980: buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
drivers/block/ublk_drv.c:3258: const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe,
drivers/block/ublk_drv.c:3617: .uaddr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)),
drivers/block/ublk_drv.c:3698: .uaddr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)),
drivers/block/ublk_drv.c:3837: const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe,
drivers/block/ublk_drv.c:3867: const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe,
drivers/block/ublk_drv.c:5268: const struct ublksrv_ctrl_cmd *ub_src = io_uring_sqe128_cmd(cmd->sqe,
drivers/block/ublk_drv.c:717: fcmd->buf_group = READ_ONCE(cmd->sqe->buf_index);
drivers/crypto/hisilicon/debugfs.c:325: void *sqe;
drivers/crypto/hisilicon/debugfs.c:332: sqe = kzalloc(qm->sqe_size, GFP_KERNEL);
drivers/crypto/hisilicon/debugfs.c:333: if (!sqe)
drivers/crypto/hisilicon/debugfs.c:337: memcpy(sqe, qp->sqe + sqe_id * qm->sqe_size, qm->sqe_size);
drivers/crypto/hisilicon/debugfs.c:338: memset(sqe + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
drivers/crypto/hisilicon/debugfs.c:341: dump_show(qm, sqe, qm->sqe_size, name);
drivers/crypto/hisilicon/debugfs.c:343: kfree(sqe);
drivers/crypto/hisilicon/hpre/hpre_crypto.c:1404: struct hpre_sqe *sqe = &req->req;
drivers/crypto/hisilicon/hpre/hpre_crypto.c:1407: dma = le64_to_cpu(sqe->in);
drivers/crypto/hisilicon/hpre/hpre_crypto.c:1414: dma = le64_to_cpu(sqe->out);
drivers/crypto/hisilicon/hpre/hpre_crypto.c:245: struct hpre_sqe *sqe = &req->req;
drivers/crypto/hisilicon/hpre/hpre_crypto.c:248: tmp = le64_to_cpu(sqe->in);
drivers/crypto/hisilicon/hpre/hpre_crypto.c:259: tmp = le64_to_cpu(sqe->out);
drivers/crypto/hisilicon/hpre/hpre_crypto.c:273: static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
drivers/crypto/hisilicon/hpre/hpre_crypto.c:283: *kreq = (void *)le64_to_cpu(sqe->tag);
drivers/crypto/hisilicon/hpre/hpre_crypto.c:285: err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
drivers/crypto/hisilicon/hpre/hpre_crypto.c:287: done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
drivers/crypto/hisilicon/hpre/hpre_crypto.c:292: alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
drivers/crypto/hisilicon/hpre/hpre_crypto.c:371: struct hpre_sqe *sqe = resp;
drivers/crypto/hisilicon/hpre/hpre_crypto.c:373: h_req = (struct hpre_asym_request *)le64_to_cpu(sqe->tag);
drivers/crypto/hisilicon/hpre/hpre_crypto.c:64: typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
drivers/crypto/hisilicon/qm.c:1004: qp->req_cb(qp, qp->sqe + qm->sqe_size *
drivers/crypto/hisilicon/qm.c:2040: return qp->sqe + sq_tail * qp->qm->sqe_size;
drivers/crypto/hisilicon/qm.c:2296: qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
drivers/crypto/hisilicon/qm.c:2448: void *sqe;
drivers/crypto/hisilicon/qm.c:2459: sqe = qm_get_avail_sqe(qp);
drivers/crypto/hisilicon/qm.c:2460: if (!sqe) {
drivers/crypto/hisilicon/qm.c:2467: memcpy(sqe, msg, qp->qm->sqe_size);
drivers/crypto/hisilicon/qm.c:3042: qp->sqe = qp->qdma.va;
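
The qm.c hits reduce to one invariant: qp->sqe is the CPU address of a DMA ring (qp->qdma.va) of fixed-size elements, so slot N is plain byte arithmetic off that base. A sketch of the submit side, under stated assumptions (qm_ring_full() and the sq_depth tail bookkeeping are stand-ins, not the driver's real helpers):

        static void *qm_sqe_slot(struct hisi_qp *qp, u16 tail)
        {
                return qp->sqe + (size_t)tail * qp->qm->sqe_size;
        }

        static int qm_queue_msg(struct hisi_qp *qp, const void *msg, u16 *tail)
        {
                void *sqe;

                if (qm_ring_full(qp))   /* assumed helper */
                        return -EBUSY;

                sqe = qm_sqe_slot(qp, *tail);
                memcpy(sqe, msg, qp->qm->sqe_size);     /* as in qm.c:2467 */
                *tail = (*tail + 1) % qp->sq_depth;     /* sq_depth is assumed */
                return 0;
        }
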
drivers/crypto/hisilicon/sec2/sec_crypto.c:291: const struct sec_sqe *sqe = qp->msg[qp->qp_status.cq_head];
drivers/crypto/hisilicon/sec2/sec_crypto.c:292: struct sec_req *req = container_of(sqe, struct sec_req, sec_sqe);
drivers/crypto/hisilicon/zip/zip_crypto.c:176: static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
drivers/crypto/hisilicon/zip/zip_crypto.c:178: sqe->source_addr_l = lower_32_bits(req->dma_src);
drivers/crypto/hisilicon/zip/zip_crypto.c:179: sqe->source_addr_h = upper_32_bits(req->dma_src);
drivers/crypto/hisilicon/zip/zip_crypto.c:180: sqe->dest_addr_l = lower_32_bits(req->dma_dst);
drivers/crypto/hisilicon/zip/zip_crypto.c:181: sqe->dest_addr_h = upper_32_bits(req->dma_dst);
drivers/crypto/hisilicon/zip/zip_crypto.c:184: static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
drivers/crypto/hisilicon/zip/zip_crypto.c:188: sqe->input_data_length = a_req->slen;
drivers/crypto/hisilicon/zip/zip_crypto.c:189: sqe->dest_avail_out = a_req->dlen;
drivers/crypto/hisilicon/zip/zip_crypto.c:192: static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
drivers/crypto/hisilicon/zip/zip_crypto.c:196: val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
drivers/crypto/hisilicon/zip/zip_crypto.c:198: sqe->dw9 = val;
drivers/crypto/hisilicon/zip/zip_crypto.c:201: static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
drivers/crypto/hisilicon/zip/zip_crypto.c:205: val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
drivers/crypto/hisilicon/zip/zip_crypto.c:207: sqe->dw9 = val;
drivers/crypto/hisilicon/zip/zip_crypto.c:210: static void hisi_zip_fill_win_size(struct hisi_zip_sqe *sqe, u8 win_size)
drivers/crypto/hisilicon/zip/zip_crypto.c:214: val = sqe->dw9 & ~HZIP_WIN_SIZE_M;
drivers/crypto/hisilicon/zip/zip_crypto.c:216: sqe->dw9 = val;
drivers/crypto/hisilicon/zip/zip_crypto.c:219: static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
drivers/crypto/hisilicon/zip/zip_crypto.c:221: sqe->dw26 = lower_32_bits((u64)req);
drivers/crypto/hisilicon/zip/zip_crypto.c:222: sqe->dw27 = upper_32_bits((u64)req);
drivers/crypto/hisilicon/zip/zip_crypto.c:225: static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
drivers/crypto/hisilicon/zip/zip_crypto.c:229: val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
drivers/crypto/hisilicon/zip/zip_crypto.c:231: sqe->dw7 = val;
drivers/crypto/hisilicon/zip/zip_crypto.c:234: static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
drivers/crypto/hisilicon/zip/zip_crypto.c:239: memset(sqe, 0, sizeof(struct hisi_zip_sqe));
drivers/crypto/hisilicon/zip/zip_crypto.c:241: ops->fill_addr(sqe, req);
drivers/crypto/hisilicon/zip/zip_crypto.c:242: ops->fill_buf_size(sqe, req);
drivers/crypto/hisilicon/zip/zip_crypto.c:243: ops->fill_buf_type(sqe, HZIP_SGL);
drivers/crypto/hisilicon/zip/zip_crypto.c:244: ops->fill_req_type(sqe, req_type);
drivers/crypto/hisilicon/zip/zip_crypto.c:245: ops->fill_win_size(sqe, HZIP_16K_WINSZ);
drivers/crypto/hisilicon/zip/zip_crypto.c:246: ops->fill_tag(sqe, req);
drivers/crypto/hisilicon/zip/zip_crypto.c:247: ops->fill_sqe_type(sqe, ops->sqe_type);
drivers/crypto/hisilicon/zip/zip_crypto.c:304: static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
drivers/crypto/hisilicon/zip/zip_crypto.c:306: return sqe->dw3 & HZIP_BD_STATUS_M;
drivers/crypto/hisilicon/zip/zip_crypto.c:309: static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
drivers/crypto/hisilicon/zip/zip_crypto.c:311: return sqe->produced;
drivers/crypto/hisilicon/zip/zip_crypto.c:316: struct hisi_zip_sqe *sqe = data;
drivers/crypto/hisilicon/zip/zip_crypto.c:317: struct hisi_zip_req *req = (struct hisi_zip_req *)GET_REQ_FROM_SQE(sqe);
drivers/crypto/hisilicon/zip/zip_crypto.c:327: status = ops->get_status(sqe);
drivers/crypto/hisilicon/zip/zip_crypto.c:331: sqe->produced);
drivers/crypto/hisilicon/zip/zip_crypto.c:339: acomp_req->dlen = ops->get_dstlen(sqe);
drivers/crypto/hisilicon/zip/zip_crypto.c:46: #define GET_REQ_FROM_SQE(sqe) ((u64)(sqe)->dw26 | (u64)(sqe)->dw27 << 32)
drivers/crypto/hisilicon/zip/zip_crypto.c:79: void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
drivers/crypto/hisilicon/zip/zip_crypto.c:80: void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
drivers/crypto/hisilicon/zip/zip_crypto.c:81: void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
drivers/crypto/hisilicon/zip/zip_crypto.c:82: void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
drivers/crypto/hisilicon/zip/zip_crypto.c:83: void (*fill_win_size)(struct hisi_zip_sqe *sqe, u8 win_size);
drivers/crypto/hisilicon/zip/zip_crypto.c:84: void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
drivers/crypto/hisilicon/zip/zip_crypto.c:85: void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
drivers/crypto/hisilicon/zip/zip_crypto.c:86: u32 (*get_status)(struct hisi_zip_sqe *sqe);
drivers/crypto/hisilicon/zip/zip_crypto.c:87: u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
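
Two of the zip_crypto.c hits form a round trip worth spelling out: hisi_zip_fill_tag() splits the request pointer across the dw26/dw27 SQE words on submission, and GET_REQ_FROM_SQE() reassembles it in the completion callback. A sketch of just that pairing (the dw26/dw27 layout comes from the hits; the helper names are illustrative):

        static void zip_tag_store(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
        {
                sqe->dw26 = lower_32_bits((u64)(uintptr_t)req);
                sqe->dw27 = upper_32_bits((u64)(uintptr_t)req);
        }

        static struct hisi_zip_req *zip_tag_load(const struct hisi_zip_sqe *sqe)
        {
                u64 tag = (u64)sqe->dw26 | ((u64)sqe->dw27 << 32);

                return (struct hisi_zip_req *)(uintptr_t)tag;
        }
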
drivers/dma/hisi_dma.c:141: struct hisi_dma_sqe sqe;
drivers/dma/hisi_dma.c:492: desc->sqe.length = cpu_to_le32(len);
drivers/dma/hisi_dma.c:493: desc->sqe.src_addr = cpu_to_le64(src);
drivers/dma/hisi_dma.c:494: desc->sqe.dst_addr = cpu_to_le64(dst);
drivers/dma/hisi_dma.c:508: struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;
drivers/dma/hisi_dma.c:522: memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
drivers/dma/hisi_dma.c:525: sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
drivers/dma/hisi_dma.c:526: sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
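
The hisi_dma.c hits illustrate the endianness rule for hardware-visible SQEs: every multi-byte field is stored via cpu_to_le*(), and the packed dw0 word is composed with FIELD_PREP() before conversion. Condensed into one illustrative helper (mask and flag names as quoted above; the function itself is a sketch):

        static void hisi_dma_fill_m2m(struct hisi_dma_sqe *sqe, dma_addr_t src,
                                      dma_addr_t dst, u32 len)
        {
                sqe->length = cpu_to_le32(len);
                sqe->src_addr = cpu_to_le64(src);
                sqe->dst_addr = cpu_to_le64(dst);
                /* opcode and completion-interrupt request share the packed dw0 */
                sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M) |
                                       LOCAL_IRQ_EN);
        }
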
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1936: struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1940: sqe->wqe_type = wqe->type;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1941: sqe->flags = wqe->flags;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1942: sqe->wqe_size = wqe_sz;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1943: sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1944: sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1945: sqe->length = cpu_to_le32(data_len);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1957: struct sq_send_hdr *sqe = base_hdr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1959: sqe->wqe_type = wqe->type;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1960: sqe->flags = wqe->flags;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1961: sqe->wqe_size = wqe_sz;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1962: sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1965: sqe->q_key = cpu_to_le32(wqe->send.q_key);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1966: sqe->length = cpu_to_le32(data_len);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1974: sqe->length = cpu_to_le32(data_len);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1988: struct sq_rdma_hdr *sqe = base_hdr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1990: sqe->wqe_type = wqe->type;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1991: sqe->flags = wqe->flags;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1992: sqe->wqe_size = wqe_sz;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1993: sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:1994: sqe->length = cpu_to_le32((u32)data_len);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2008: struct sq_atomic_hdr *sqe = base_hdr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2010: sqe->wqe_type = wqe->type;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2011: sqe->flags = wqe->flags;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2012: sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2013: sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2025: struct sq_localinvalidate *sqe = base_hdr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2027: sqe->wqe_type = wqe->type;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2028: sqe->flags = wqe->flags;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2029: sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2036: struct sq_fr_pmr_hdr *sqe = base_hdr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2038: sqe->wqe_type = wqe->type;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2039: sqe->flags = wqe->flags;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2040: sqe->access_cntl = wqe->frmr.access_cntl |
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2042: sqe->zero_based_page_size_log =
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2046: sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2048: memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2049: sqe->numlevels_pbl_page_size_log =
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2069: struct sq_bind_hdr *sqe = base_hdr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2071: sqe->wqe_type = wqe->type;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2072: sqe->flags = wqe->flags;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2073: sqe->access_cntl = wqe->bind.access_cntl;
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2074: sqe->mw_type_zero_based = wqe->bind.mw_type |
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2076: sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
drivers/infiniband/hw/bnxt_re/qplib_fp.c:2077: sqe->l_key = cpu_to_le32(wqe->bind.r_key);
drivers/infiniband/hw/cxgb4/restrack.c:100: if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
drivers/infiniband/hw/cxgb4/restrack.c:102: if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
drivers/infiniband/hw/cxgb4/restrack.c:104: if (sqe->complete &&
drivers/infiniband/hw/cxgb4/restrack.c:105: rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
drivers/infiniband/hw/cxgb4/restrack.c:107: if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
drivers/infiniband/hw/cxgb4/restrack.c:109: if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
drivers/infiniband/hw/cxgb4/restrack.c:96: struct t4_swsqe *sqe)
drivers/infiniband/hw/erdma/erdma_cmdq.c:287: u64 *sqe;
drivers/infiniband/hw/erdma/erdma_cmdq.c:299: sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
drivers/infiniband/hw/erdma/erdma_cmdq.c:301: ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
drivers/infiniband/hw/erdma/erdma_qp.c:403: static void init_send_sqe_rc(struct erdma_qp *qp, struct erdma_send_sqe_rc *sqe,
drivers/infiniband/hw/erdma/erdma_qp.c:410: sqe->imm_data = wr->ex.imm_data;
drivers/infiniband/hw/erdma/erdma_qp.c:413: sqe->invalid_stag = cpu_to_le32(wr->ex.invalidate_rkey);
drivers/infiniband/hw/erdma/erdma_qp.c:419: static void init_send_sqe_ud(struct erdma_qp *qp, struct erdma_send_sqe_ud *sqe,
drivers/infiniband/hw/erdma/erdma_qp.c:428: sqe->imm_data = wr->ex.imm_data;
drivers/infiniband/hw/erdma/erdma_qp.c:433: sqe->ahn = cpu_to_le32(ah->ahn);
drivers/infiniband/hw/erdma/erdma_qp.c:434: sqe->dst_qpn = cpu_to_le32(uwr->remote_qpn);
drivers/infiniband/hw/erdma/erdma_qp.c:437: sqe->qkey = cpu_to_le32(qp->attrs.rocev2.qkey);
drivers/infiniband/hw/erdma/erdma_qp.c:439: sqe->qkey = cpu_to_le32(uwr->remote_qkey);
drivers/infiniband/sw/siw/siw.h:189: struct siw_sqe sqe;
drivers/infiniband/sw/siw/siw.h:475: #define tx_type(wqe) ((wqe)->sqe.opcode)
drivers/infiniband/sw/siw/siw.h:477: #define tx_flags(wqe) ((wqe)->sqe.flags)
drivers/infiniband/sw/siw/siw.h:521: void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
drivers/infiniband/sw/siw/siw.h:522: int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
drivers/infiniband/sw/siw/siw.h:626: struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
drivers/infiniband/sw/siw/siw.h:628: return READ_ONCE(sqe->flags) == 0;
drivers/infiniband/sw/siw/siw.h:633: struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
drivers/infiniband/sw/siw/siw.h:635: if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
drivers/infiniband/sw/siw/siw.h:636: return sqe;
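
The siw.h inline helpers quoted above encode the software ring's ownership protocol: sqe->flags == 0 means the slot belongs to the producer, SIW_WQE_VALID means it holds a posted work request, and the accesses go through READ_ONCE()/smp_store_mb() because the queue can also be mapped by user space. Restated as a pair of sketches (consumer side only):

        static struct siw_sqe *sq_peek(struct siw_qp *qp)
        {
                struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

                return (READ_ONCE(sqe->flags) & SIW_WQE_VALID) ? sqe : NULL;
        }

        static void sq_release(struct siw_sqe *sqe)
        {
                /* order all reads of the SQE before handing it back */
                smp_store_mb(sqe->flags, 0);
        }
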
drivers/infiniband/sw/siw/siw_mem.c:239: if (!(wqe->sqe.flags & SIW_WQE_INLINE))
drivers/infiniband/sw/siw/siw_mem.c:240: siw_unref_mem_sgl(wqe->mem, wqe->sqe.num_sge);
drivers/infiniband/sw/siw/siw_qp.c:1031: int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
drivers/infiniband/sw/siw/siw_qp.c:1038: u32 sqe_flags = sqe->flags;
drivers/infiniband/sw/siw/siw_qp.c:1051: cqe->id = sqe->id;
drivers/infiniband/sw/siw/siw_qp.c:1052: cqe->opcode = sqe->opcode;
drivers/infiniband/sw/siw/siw_qp.c:1065: smp_store_mb(sqe->flags, 0);
drivers/infiniband/sw/siw/siw_qp.c:1084: smp_store_mb(sqe->flags, 0);
drivers/infiniband/sw/siw/siw_qp.c:1161: struct siw_sqe *sqe;
drivers/infiniband/sw/siw/siw_qp.c:1169: sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
drivers/infiniband/sw/siw/siw_qp.c:1170: if (!READ_ONCE(sqe->flags))
drivers/infiniband/sw/siw/siw_qp.c:1173: if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
drivers/infiniband/sw/siw/siw_qp.c:1176: WRITE_ONCE(sqe->flags, 0);
drivers/infiniband/sw/siw/siw_qp.c:1196: siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
drivers/infiniband/sw/siw/siw_qp.c:1205: sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
drivers/infiniband/sw/siw/siw_qp.c:1206: if (!READ_ONCE(sqe->flags))
drivers/infiniband/sw/siw/siw_qp.c:1210: if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
drivers/infiniband/sw/siw/siw_qp.c:1217: WRITE_ONCE(sqe->flags, 0);
drivers/infiniband/sw/siw/siw_qp.c:1254: siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);
drivers/infiniband/sw/siw/siw_qp.c:251: wqe->sqe.flags = 0;
drivers/infiniband/sw/siw/siw_qp.c:252: wqe->sqe.num_sge = 1;
drivers/infiniband/sw/siw/siw_qp.c:253: wqe->sqe.sge[0].length = 0;
drivers/infiniband/sw/siw/siw_qp.c:254: wqe->sqe.sge[0].laddr = 0;
drivers/infiniband/sw/siw/siw_qp.c:255: wqe->sqe.sge[0].lkey = 0;
drivers/infiniband/sw/siw/siw_qp.c:260: wqe->sqe.rkey = 1;
drivers/infiniband/sw/siw/siw_qp.c:261: wqe->sqe.raddr = 0;
drivers/infiniband/sw/siw/siw_qp.c:265: wqe->sqe.opcode = SIW_OP_WRITE;
drivers/infiniband/sw/siw/siw_qp.c:269: wqe->sqe.opcode = SIW_OP_READ;
drivers/infiniband/sw/siw/siw_qp.c:276: siw_read_to_orq(rreq, &wqe->sqe);
drivers/infiniband/sw/siw/siw_qp.c:456: rreq->ddp_msn = htonl(wqe->sqe.sge[0].length);
drivers/infiniband/sw/siw/siw_qp.c:459: rreq->sink_stag = htonl(wqe->sqe.rkey);
drivers/infiniband/sw/siw/siw_qp.c:460: rreq->sink_to = cpu_to_be64(wqe->sqe.raddr);
drivers/infiniband/sw/siw/siw_qp.c:461: rreq->read_size = htonl(wqe->sqe.sge[0].length);
drivers/infiniband/sw/siw/siw_qp.c:462: rreq->source_stag = htonl(wqe->sqe.sge[0].lkey);
drivers/infiniband/sw/siw/siw_qp.c:464: cpu_to_be64(wqe->sqe.sge[0].laddr);
drivers/infiniband/sw/siw/siw_qp.c:840: void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
drivers/infiniband/sw/siw/siw_qp.c:842: rreq->id = sqe->id;
drivers/infiniband/sw/siw/siw_qp.c:843: rreq->opcode = sqe->opcode;
drivers/infiniband/sw/siw/siw_qp.c:844: rreq->sge[0].laddr = sqe->sge[0].laddr;
drivers/infiniband/sw/siw/siw_qp.c:845: rreq->sge[0].length = sqe->sge[0].length;
drivers/infiniband/sw/siw/siw_qp.c:846: rreq->sge[0].lkey = sqe->sge[0].lkey;
drivers/infiniband/sw/siw/siw_qp.c:847: rreq->sge[1].lkey = sqe->sge[1].lkey;
drivers/infiniband/sw/siw/siw_qp.c:848: rreq->flags = sqe->flags | SIW_WQE_VALID;
drivers/infiniband/sw/siw/siw_qp.c:854: struct siw_sqe *sqe;
drivers/infiniband/sw/siw/siw_qp.c:858: sqe = sq_get_next(qp);
drivers/infiniband/sw/siw/siw_qp.c:859: if (!sqe)
drivers/infiniband/sw/siw/siw_qp.c:866: memcpy(&wqe->sqe, sqe, sizeof(*sqe));
drivers/infiniband/sw/siw/siw_qp.c:868: if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
drivers/infiniband/sw/siw/siw_qp.c:872: if (wqe->sqe.flags & SIW_WQE_INLINE) {
drivers/infiniband/sw/siw/siw_qp.c:873: if (wqe->sqe.opcode != SIW_OP_SEND &&
drivers/infiniband/sw/siw/siw_qp.c:874: wqe->sqe.opcode != SIW_OP_WRITE) {
drivers/infiniband/sw/siw/siw_qp.c:878: if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
drivers/infiniband/sw/siw/siw_qp.c:882: wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
drivers/infiniband/sw/siw/siw_qp.c:883: wqe->sqe.sge[0].lkey = 0;
drivers/infiniband/sw/siw/siw_qp.c:884: wqe->sqe.num_sge = 1;
drivers/infiniband/sw/siw/siw_qp.c:886: if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
drivers/infiniband/sw/siw/siw_qp.c:888: if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
drivers/infiniband/sw/siw/siw_qp.c:889: wqe->sqe.opcode ==
drivers/infiniband/sw/siw/siw_qp.c:903: } else if (wqe->sqe.opcode == SIW_OP_READ ||
drivers/infiniband/sw/siw/siw_qp.c:904: wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
drivers/infiniband/sw/siw/siw_qp.c:912: wqe->sqe.num_sge = 1;
drivers/infiniband/sw/siw/siw_qp.c:922: siw_read_to_orq(rreq, &wqe->sqe);
drivers/infiniband/sw/siw/siw_qp.c:932: smp_store_mb(sqe->flags, 0);
drivers/infiniband/sw/siw/siw_qp.c:973: wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
drivers/infiniband/sw/siw/siw_qp.c:974: wqe->sqe.flags = 0;
drivers/infiniband/sw/siw/siw_qp.c:976: wqe->sqe.num_sge = 1;
drivers/infiniband/sw/siw/siw_qp.c:977: wqe->sqe.sge[0].length = irqe->sge[0].length;
drivers/infiniband/sw/siw/siw_qp.c:978: wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
drivers/infiniband/sw/siw/siw_qp.c:979: wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
drivers/infiniband/sw/siw/siw_qp.c:981: wqe->sqe.num_sge = 0;
drivers/infiniband/sw/siw/siw_qp.c:987: wqe->sqe.sge[1].length = irqe->sge[1].length;
drivers/infiniband/sw/siw/siw_qp.c:989: wqe->sqe.rkey = irqe->rkey;
drivers/infiniband/sw/siw/siw_qp.c:990: wqe->sqe.raddr = irqe->raddr;
drivers/infiniband/sw/siw/siw_qp_rx.c:1152: if (tx_waiting->sqe.opcode == SIW_OP_READ ||
drivers/infiniband/sw/siw/siw_qp_rx.c:1153: tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
drivers/infiniband/sw/siw/siw_qp_rx.c:1162: siw_read_to_orq(rreq, &tx_waiting->sqe);
drivers/infiniband/sw/siw/siw_qp_rx.c:1265: rv = siw_invalidate_stag(qp->pd, wqe->sqe.sge[0].lkey);
drivers/infiniband/sw/siw/siw_qp_rx.c:1280: if ((wqe->sqe.flags & SIW_WQE_SIGNALLED) || error != 0)
drivers/infiniband/sw/siw/siw_qp_rx.c:1281: rv = siw_sqe_complete(qp, &wqe->sqe, wqe->processed,
drivers/infiniband/sw/siw/siw_qp_rx.c:176: srx->ddp_stag = wqe->sqe.sge[0].lkey;
drivers/infiniband/sw/siw/siw_qp_rx.c:177: srx->ddp_to = wqe->sqe.sge[0].laddr;
drivers/infiniband/sw/siw/siw_qp_rx.c:691: resp = &tx_work->sqe;
drivers/infiniband/sw/siw/siw_qp_rx.c:754: wqe->sqe.id = orqe->id;
drivers/infiniband/sw/siw/siw_qp_rx.c:755: wqe->sqe.opcode = orqe->opcode;
drivers/infiniband/sw/siw/siw_qp_rx.c:756: wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;
drivers/infiniband/sw/siw/siw_qp_rx.c:757: wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;
drivers/infiniband/sw/siw/siw_qp_rx.c:758: wqe->sqe.sge[0].length = orqe->sge[0].length;
drivers/infiniband/sw/siw/siw_qp_rx.c:759: wqe->sqe.flags = orqe->flags;
drivers/infiniband/sw/siw/siw_qp_rx.c:760: wqe->sqe.num_sge = 1;
drivers/infiniband/sw/siw/siw_qp_rx.c:794: qp_id(qp), wqe->wr_status, wqe->sqe.opcode);
drivers/infiniband/sw/siw/siw_qp_rx.c:823: sge = wqe->sqe.sge; /* there is only one */
drivers/infiniband/sw/siw/siw_qp_tx.c:1061: siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
drivers/infiniband/sw/siw/siw_qp_tx.c:1146: siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
drivers/infiniband/sw/siw/siw_qp_tx.c:135: c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:137: cpu_to_be64(wqe->sqe.sge[0].laddr);
drivers/infiniband/sw/siw/siw_qp_tx.c:138: c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:139: c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
drivers/infiniband/sw/siw/siw_qp_tx.c:140: c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
drivers/infiniband/sw/siw/siw_qp_tx.c:183: c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:195: c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:196: c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
drivers/infiniband/sw/siw/siw_qp_tx.c:209: c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:210: c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);
drivers/infiniband/sw/siw/siw_qp_tx.c:244: cpu_to_be64(wqe->sqe.raddr);
drivers/infiniband/sw/siw/siw_qp_tx.c:441: struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
drivers/infiniband/sw/siw/siw_qp_tx.c:52: struct siw_sge *sge = &wqe->sqe.sge[0];
drivers/infiniband/sw/siw/siw_qp_tx.c:55: if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
drivers/infiniband/sw/siw/siw_qp_tx.c:593: rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
drivers/infiniband/sw/siw/siw_qp_tx.c:62: memcpy(paddr, &wqe->sqe.sge[1], bytes);
drivers/infiniband/sw/siw/siw_qp_tx.c:653: sge = &wqe->sqe.sge[c_tx->sge_idx];
drivers/infiniband/sw/siw/siw_qp_tx.c:723: cpu_to_be64(wqe->sqe.raddr + wqe->processed);
drivers/infiniband/sw/siw/siw_qp_tx.c:766: struct siw_sge *sge = &wqe->sqe.sge[0];
drivers/infiniband/sw/siw/siw_qp_tx.c:767: int i, len, num_sge = wqe->sqe.num_sge;
drivers/infiniband/sw/siw/siw_qp_tx.c:807: if (!(wqe->sqe.flags & SIW_WQE_INLINE)) {
drivers/infiniband/sw/siw/siw_qp_tx.c:809: wqe->sqe.num_sge = 1;
drivers/infiniband/sw/siw/siw_qp_tx.c:831: wqe->bytes = wqe->sqe.sge[0].length;
drivers/infiniband/sw/siw/siw_qp_tx.c:837: wqe->sqe.sge[0].laddr =
drivers/infiniband/sw/siw/siw_qp_tx.c:838: (u64)(uintptr_t)&wqe->sqe.sge[1];
drivers/infiniband/sw/siw/siw_qp_tx.c:860: wqe->sqe.id);
drivers/infiniband/sw/siw/siw_qp_tx.c:931: static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
drivers/infiniband/sw/siw/siw_qp_tx.c:933: struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
drivers/infiniband/sw/siw/siw_qp_tx.c:938: siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:941: pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:945: if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
drivers/infiniband/sw/siw/siw_qp_tx.c:946: pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:950: mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
drivers/infiniband/sw/siw/siw_qp_tx.c:952: pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:962: pr_warn("siw: fastreg: STag 0x%08x already valid\n", sqe->rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:967: mem->stag = sqe->rkey;
drivers/infiniband/sw/siw/siw_qp_tx.c:968: mem->perms = sqe->access;
drivers/infiniband/sw/siw/siw_qp_tx.c:970: siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
drivers/infiniband/sw/siw/siw_qp_tx.c:984: rv = siw_fastreg_mr(qp->pd, &wqe->sqe);
drivers/infiniband/sw/siw/siw_qp_tx.c:988: rv = siw_invalidate_stag(qp->pd, wqe->sqe.rkey);
drivers/infiniband/sw/siw/siw_verbs.c:652: struct siw_sqe *sqe)
drivers/infiniband/sw/siw/siw_verbs.c:655: void *kbuf = &sqe->sge[1];
drivers/infiniband/sw/siw/siw_verbs.c:658: sqe->sge[0].laddr = (uintptr_t)kbuf;
drivers/infiniband/sw/siw/siw_verbs.c:659: sqe->sge[0].lkey = 0;
drivers/infiniband/sw/siw/siw_verbs.c:677: sqe->sge[0].length = max(bytes, 0);
drivers/infiniband/sw/siw/siw_verbs.c:678: sqe->num_sge = bytes > 0 ? 1 : 0;
drivers/infiniband/sw/siw/siw_verbs.c:690: struct siw_sqe sqe = {};
drivers/infiniband/sw/siw/siw_verbs.c:694: sqe.opcode = SIW_OP_WRITE;
drivers/infiniband/sw/siw/siw_verbs.c:697: sqe.opcode = SIW_OP_READ;
drivers/infiniband/sw/siw/siw_verbs.c:700: sqe.opcode = SIW_OP_READ_LOCAL_INV;
drivers/infiniband/sw/siw/siw_verbs.c:703: sqe.opcode = SIW_OP_SEND;
drivers/infiniband/sw/siw/siw_verbs.c:706: sqe.opcode = SIW_OP_SEND_WITH_IMM;
drivers/infiniband/sw/siw/siw_verbs.c:709: sqe.opcode = SIW_OP_SEND_REMOTE_INV;
drivers/infiniband/sw/siw/siw_verbs.c:712: sqe.opcode = SIW_OP_INVAL_STAG;
drivers/infiniband/sw/siw/siw_verbs.c:715: sqe.opcode = SIW_OP_REG_MR;
drivers/infiniband/sw/siw/siw_verbs.c:722: sqe.id = wr->wr_id;
drivers/infiniband/sw/siw/siw_verbs.c:723: rv = siw_sqe_complete(qp, &sqe, 0,
drivers/infiniband/sw/siw/siw_verbs.c:827: struct siw_sqe *sqe = &qp->sendq[idx];
drivers/infiniband/sw/siw/siw_verbs.c:829: if (sqe->flags) {
drivers/infiniband/sw/siw/siw_verbs.c:839: sqe->id = wr->wr_id;
drivers/infiniband/sw/siw/siw_verbs.c:843: sqe->flags |= SIW_WQE_SIGNALLED;
drivers/infiniband/sw/siw/siw_verbs.c:846: sqe->flags |= SIW_WQE_READ_FENCE;
drivers/infiniband/sw/siw/siw_verbs.c:852: sqe->flags |= SIW_WQE_SOLICITED;
drivers/infiniband/sw/siw/siw_verbs.c:855: siw_copy_sgl(wr->sg_list, sqe->sge,
drivers/infiniband/sw/siw/siw_verbs.c:857: sqe->num_sge = wr->num_sge;
drivers/infiniband/sw/siw/siw_verbs.c:859: rv = siw_copy_inline_sgl(wr, sqe);
drivers/infiniband/sw/siw/siw_verbs.c:864: sqe->flags |= SIW_WQE_INLINE;
drivers/infiniband/sw/siw/siw_verbs.c:865: sqe->num_sge = 1;
drivers/infiniband/sw/siw/siw_verbs.c:868: sqe->opcode = SIW_OP_SEND;
drivers/infiniband/sw/siw/siw_verbs.c:870: sqe->opcode = SIW_OP_SEND_REMOTE_INV;
drivers/infiniband/sw/siw/siw_verbs.c:871: sqe->rkey = wr->ex.invalidate_rkey;
drivers/infiniband/sw/siw/siw_verbs.c:888: siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
drivers/infiniband/sw/siw/siw_verbs.c:892: sqe->raddr = rdma_wr(wr)->remote_addr;
drivers/infiniband/sw/siw/siw_verbs.c:893: sqe->rkey = rdma_wr(wr)->rkey;
drivers/infiniband/sw/siw/siw_verbs.c:894: sqe->num_sge = 1;
drivers/infiniband/sw/siw/siw_verbs.c:897: sqe->opcode = SIW_OP_READ;
drivers/infiniband/sw/siw/siw_verbs.c:899: sqe->opcode = SIW_OP_READ_LOCAL_INV;
drivers/infiniband/sw/siw/siw_verbs.c:904: siw_copy_sgl(wr->sg_list, &sqe->sge[0],
drivers/infiniband/sw/siw/siw_verbs.c:906: sqe->num_sge = wr->num_sge;
drivers/infiniband/sw/siw/siw_verbs.c:908: rv = siw_copy_inline_sgl(wr, sqe);
drivers/infiniband/sw/siw/siw_verbs.c:913: sqe->flags |= SIW_WQE_INLINE;
drivers/infiniband/sw/siw/siw_verbs.c:914: sqe->num_sge = 1;
drivers/infiniband/sw/siw/siw_verbs.c:916: sqe->raddr = rdma_wr(wr)->remote_addr;
drivers/infiniband/sw/siw/siw_verbs.c:917: sqe->rkey = rdma_wr(wr)->rkey;
drivers/infiniband/sw/siw/siw_verbs.c:918: sqe->opcode = SIW_OP_WRITE;
drivers/infiniband/sw/siw/siw_verbs.c:922: sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
drivers/infiniband/sw/siw/siw_verbs.c:923: sqe->rkey = reg_wr(wr)->key;
drivers/infiniband/sw/siw/siw_verbs.c:924: sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
drivers/infiniband/sw/siw/siw_verbs.c:925: sqe->opcode = SIW_OP_REG_MR;
drivers/infiniband/sw/siw/siw_verbs.c:929: sqe->rkey = wr->ex.invalidate_rkey;
drivers/infiniband/sw/siw/siw_verbs.c:930: sqe->opcode = SIW_OP_INVAL_STAG;
drivers/infiniband/sw/siw/siw_verbs.c:940: sqe->opcode, sqe->flags,
drivers/infiniband/sw/siw/siw_verbs.c:941: (void *)(uintptr_t)sqe->id);
drivers/infiniband/sw/siw/siw_verbs.c:948: sqe->flags |= SIW_WQE_VALID;
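
The siw_verbs.c tail of the list is the producer half of the same protocol: siw_post_send() fills id, opcode, SGL, rkey and friends into a free slot and only then raises SIW_WQE_VALID (the final hit above), so a consumer polling the flag does not pick up a half-written SQE. Reduced to its skeleton (the wr field handling is elided, not the driver's full logic):

        static void sq_publish(struct siw_sqe *sqe, const struct ib_send_wr *wr)
        {
                sqe->id = wr->wr_id;
                sqe->num_sge = wr->num_sge;
                /* ... opcode, sge[], rkey, raddr setup as in the hits ... */
                sqe->flags |= SIW_WQE_VALID;    /* publish last */
        }
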
drivers/net/ethernet/broadcom/cnic_defs.h:3105: struct fcoe_sqe sqe;
drivers/net/ethernet/intel/idpf/xsk.c:183: struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
drivers/net/ethernet/intel/idpf/xsk.c:185: if (sqe->type)
drivers/net/ethernet/intel/idpf/xsk.c:186: libeth_xdp_complete_tx(sqe, &cp);
drivers/net/ethernet/intel/idpf/xsk.c:218: struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
drivers/net/ethernet/intel/idpf/xsk.c:220: if (sqe->type)
drivers/net/ethernet/intel/idpf/xsk.c:221: libeth_xdp_complete_tx(sqe, &cp);
drivers/net/ethernet/intel/libeth/tx.c:24: void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp)
drivers/net/ethernet/intel/libeth/tx.c:26: if (sqe->type >= __LIBETH_SQE_XDP_START)
drivers/net/ethernet/intel/libeth/tx.c:27: __libeth_xdp_complete_tx(sqe, cp, static_call(bulk),
drivers/net/ethernet/intel/libeth/tx.c:30: libeth_tx_complete(sqe, cp);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c:1007: sq->sqe_base = sq->sqe->base;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c:973: err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c:1575: if (!sq->sqe)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c:1577: qmem_free(pf->dev, sq->sqe);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:1392: int sq_idx, sqe;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:1396: for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:1397: sg = &sq->sg[sqe];
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:54: sq->sqe_base = sq->sqe->base;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:848: struct sk_buff *skb, int sqe, int hdr_len)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:851: struct sg_list *sg = &sq->sg[sqe];
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:881: u64 seg_addr, int hdr_len, int sqe)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c:883: struct sg_list *sg = &sq->sg[sqe];
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h:99: struct qmem *sqe;
drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c:141: qmem_free(pfvf->dev, sq->sqe);
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:101: SET_FIELD(task_params->sqe->contlen_cdbsize,
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:106: SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:110: SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:112: SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:119: SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:68: if (!task_params->sqe)
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:71: memset(task_params->sqe, 0, sizeof(*task_params->sqe));
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:72: task_params->sqe->task_id = cpu_to_le16(task_params->itid);
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:79: SET_FIELD(task_params->sqe->contlen_cdbsize,
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:81: SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:94: SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:95: SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c:99: SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
drivers/nvme/host/fc.c:1904: struct nvme_command *sqe = &op->cmd_iu.sqe;
drivers/nvme/host/fc.c:2008: sqe->common.command_id != cqe->command_id)) {
drivers/nvme/host/fc.c:2018: sqe->common.command_id,
drivers/nvme/host/fc.c:2126: nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
drivers/nvme/host/fc.c:2135: struct nvme_command *sqe;
drivers/nvme/host/fc.c:2149: sqe = &cmdiu->sqe;
drivers/nvme/host/fc.c:2161: memset(sqe, 0, sizeof(*sqe));
drivers/nvme/host/fc.c:2162: sqe->common.opcode = nvme_admin_async_event;
drivers/nvme/host/fc.c:2164: sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
drivers/nvme/host/fc.c:2537: struct nvme_command *sqe = &cmdiu->sqe;
drivers/nvme/host/fc.c:2546: ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
drivers/nvme/host/fc.c:2547: nvme_fabrics_opcode_str(qnum, sqe),
drivers/nvme/host/fc.c:2548: sqe->common.cdw10, sqe->common.cdw11);
drivers/nvme/host/fc.c:2641: struct nvme_command *sqe = &cmdiu->sqe;
drivers/nvme/host/fc.c:2679: WARN_ON_ONCE(sqe->common.metadata);
drivers/nvme/host/fc.c:2680: sqe->common.flags |= NVME_CMD_SGL_METABUF;
drivers/nvme/host/fc.c:2689: sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
drivers/nvme/host/fc.c:2691: sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
drivers/nvme/host/fc.c:2692: sqe->rw.dptr.sgl.addr = 0;
drivers/nvme/host/ioctl.c:450: const struct nvme_uring_cmd *cmd = io_uring_sqe128_cmd(ioucmd->sqe,
drivers/nvme/host/rdma.c:1583: container_of(qe, struct nvme_rdma_request, sqe);
drivers/nvme/host/rdma.c:1668: struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
drivers/nvme/host/rdma.c:1669: struct nvme_command *cmd = sqe->data;
drivers/nvme/host/rdma.c:1673: ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
drivers/nvme/host/rdma.c:1681: sqe->cqe.done = nvme_rdma_async_done;
drivers/nvme/host/rdma.c:1683: ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
drivers/nvme/host/rdma.c:1686: ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
drivers/nvme/host/rdma.c:1999: struct nvme_rdma_qe *sqe = &req->sqe;
drivers/nvme/host/rdma.c:2013: req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
drivers/nvme/host/rdma.c:2016: err = ib_dma_mapping_error(dev, req->sqe.dma);
drivers/nvme/host/rdma.c:2020: ib_dma_sync_single_for_cpu(dev, sqe->dma,
drivers/nvme/host/rdma.c:2045: sqe->cqe.done = nvme_rdma_send_done;
drivers/nvme/host/rdma.c:2047: ib_dma_sync_single_for_device(dev, sqe->dma,
drivers/nvme/host/rdma.c:2050: err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
drivers/nvme/host/rdma.c:2068: ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
drivers/nvme/host/rdma.c:2121: ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
drivers/nvme/host/rdma.c:290: kfree(req->sqe.data);
drivers/nvme/host/rdma.c:303: req->sqe.data = kzalloc_obj(struct nvme_command);
drivers/nvme/host/rdma.c:304: if (!req->sqe.data)
drivers/nvme/host/rdma.c:314: nvme_req(rq)->cmd = req->sqe.data;
drivers/nvme/host/rdma.c:65: struct nvme_rdma_qe sqe;
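
The rdma.c hits bracket every command fill with DMA syncs: the nvme_command lives in a long-lived mapping (req->sqe.dma), so it is synced to the CPU before being written and back to the device before the send is posted. The shape, sketched under the assumption that only the identifiers quoted above are the driver's own (the wrapper function is illustrative):

        static int rdma_fill_and_post(struct nvme_rdma_queue *queue,
                                      struct nvme_rdma_qe *sqe, struct ib_sge *sge)
        {
                struct ib_device *dev = queue->device->dev;
                struct nvme_command *cmd = sqe->data;

                ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd),
                                           DMA_TO_DEVICE);
                /* ... write the command into *cmd ... */
                sqe->cqe.done = nvme_rdma_send_done;
                ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
                                              DMA_TO_DEVICE);
                return nvme_rdma_post_send(queue, sqe, sge, 1, NULL);
        }
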
drivers/nvme/target/fc.c:2152: struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
drivers/nvme/target/fc.c:2184: nvme_is_fabrics((struct nvme_command *) sqe) ||
drivers/nvme/target/fc.c:2187: (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
drivers/nvme/target/fc.c:2458: struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
drivers/nvme/target/fc.c:2482: cqe->command_id = sqe->command_id;
drivers/nvme/target/fc.c:2542: if (!nvme_is_write(&cmdiu->sqe))
drivers/nvme/target/fc.c:2546: if (nvme_is_write(&cmdiu->sqe))
drivers/nvme/target/fc.c:2554: fod->req.cmd = &fod->cmdiubuf.sqe;
drivers/nvme/target/fcloop.c:633: struct nvme_command *sqe = &cmdiu->sqe;
drivers/nvme/target/fcloop.c:640: __func__, sqe->common.opcode, sqe->fabrics.fctype,
drivers/nvme/target/fcloop.c:645: (sqe->common.opcode != nvme_fabrics_command ||
drivers/nvme/target/fcloop.c:646: sqe->fabrics.fctype != drop_opcode)) ||
drivers/nvme/target/fcloop.c:647: (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
drivers/scsi/bnx2fc/bnx2fc_hwi.c:1415: struct fcoe_sqe *sqe;
drivers/scsi/bnx2fc/bnx2fc_hwi.c:1417: sqe = &tgt->sq[tgt->sq_prod_idx];
drivers/scsi/bnx2fc/bnx2fc_hwi.c:1420: sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
drivers/scsi/bnx2fc/bnx2fc_hwi.c:1421: sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
drivers/scsi/bnx2i/bnx2i.h:634: struct sqe *sq_virt;
drivers/scsi/bnx2i/bnx2i.h:638: struct sqe *sq_prod_qe;
drivers/scsi/bnx2i/bnx2i.h:639: struct sqe *sq_cons_qe;
drivers/scsi/bnx2i/bnx2i.h:640: struct sqe *sq_first_qe;
drivers/scsi/bnx2i/bnx2i.h:641: struct sqe *sq_last_qe;
drivers/scsi/lpfc/lpfc_nvme.c:1016: cid = cp->sqe.common.command_id;
drivers/scsi/lpfc/lpfc_nvme.c:1085: cp->sqe.common.opcode,
drivers/scsi/lpfc/lpfc_nvme.c:1086: cp->sqe.common.command_id,
drivers/scsi/lpfc/lpfc_nvme.c:1098: cp->sqe.common.opcode,
drivers/scsi/lpfc/lpfc_nvme.c:1099: cp->sqe.common.command_id,
drivers/scsi/lpfc/lpfc_nvme.c:1214: struct nvme_common_command *sqe;
drivers/scsi/lpfc/lpfc_nvme.c:1270: sqe = &((struct nvme_fc_cmd_iu *)
drivers/scsi/lpfc/lpfc_nvme.c:1271: nCmd->cmdaddr)->sqe.common;
drivers/scsi/lpfc/lpfc_nvme.c:1272: if (sqe->opcode == nvme_admin_async_event)
drivers/scsi/lpfc/lpfc_nvme.c:1537: struct nvme_common_command *sqe;
drivers/scsi/lpfc/lpfc_nvme.c:1626: sqe = &((struct nvme_fc_cmd_iu *)
drivers/scsi/lpfc/lpfc_nvme.c:1627: pnvme_fcreq->cmdaddr)->sqe.common;
drivers/scsi/lpfc/lpfc_nvme.c:1628: if (sqe->opcode == nvme_admin_keep_alive)
drivers/scsi/qedf/drv_fcoe_fw_funcs.c:13: memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
drivers/scsi/qedf/drv_fcoe_fw_funcs.c:14: SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
drivers/scsi/qedf/drv_fcoe_fw_funcs.c:16: task_params->sqe->task_id = task_params->itid;
drivers/scsi/qedf/drv_fcoe_fw_funcs.c:167: task_params->sqe->additional_info_union.burst_length =
drivers/scsi/qedf/drv_fcoe_fw_funcs.c:169: SET_FIELD(task_params->sqe->flags,
drivers/scsi/qedf/drv_fcoe_fw_funcs.c:171: SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
drivers/scsi/qedf/drv_fcoe_fw_funcs.c:193: task_params->sqe->additional_info_union.seq_rec_updated_offset =
drivers/scsi/qedf/drv_fcoe_fw_funcs.h:16: struct fcoe_wqe *sqe;
drivers/scsi/qedf/qedf.h:516: struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
drivers/scsi/qedf/qedf_els.c:120: sqe = &fcport->sq[sqe_idx];
drivers/scsi/qedf/qedf_els.c:121: memset(sqe, 0, sizeof(struct fcoe_wqe));
drivers/scsi/qedf/qedf_els.c:125: qedf_init_mp_task(els_req, task, sqe);
drivers/scsi/qedf/qedf_els.c:23: struct fcoe_wqe *sqe;
drivers/scsi/qedf/qedf_els.c:702: struct fcoe_wqe *sqe;
drivers/scsi/qedf/qedf_els.c:732: sqe = &fcport->sq[sqe_idx];
drivers/scsi/qedf/qedf_els.c:733: memset(sqe, 0, sizeof(struct fcoe_wqe));
drivers/scsi/qedf/qedf_els.c:734: orig_io_req->task_params->sqe = sqe;
drivers/scsi/qedf/qedf_io.c:1856: struct fcoe_wqe *sqe;
drivers/scsi/qedf/qedf_io.c:1938: sqe = &fcport->sq[sqe_idx];
drivers/scsi/qedf/qedf_io.c:1939: memset(sqe, 0, sizeof(struct fcoe_wqe));
drivers/scsi/qedf/qedf_io.c:1940: io_req->task_params->sqe = sqe;
drivers/scsi/qedf/qedf_io.c:2153: struct fcoe_wqe *sqe;
drivers/scsi/qedf/qedf_io.c:2223: sqe = &fcport->sq[sqe_idx];
drivers/scsi/qedf/qedf_io.c:2224: memset(sqe, 0, sizeof(struct fcoe_wqe));
drivers/scsi/qedf/qedf_io.c:2225: io_req->task_params->sqe = sqe;
drivers/scsi/qedf/qedf_io.c:2297: struct fcoe_wqe *sqe;
drivers/scsi/qedf/qedf_io.c:2349: sqe = &fcport->sq[sqe_idx];
drivers/scsi/qedf/qedf_io.c:2350: memset(sqe, 0, sizeof(struct fcoe_wqe));
drivers/scsi/qedf/qedf_io.c:2352: qedf_init_task(fcport, lport, io_req, task, sqe);
drivers/scsi/qedf/qedf_io.c:588: struct fcoe_wqe *sqe)
drivers/scsi/qedf/qedf_io.c:624: io_req->task_params->sqe = sqe;
drivers/scsi/qedf/qedf_io.c:677: struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
drivers/scsi/qedf/qedf_io.c:703: io_req->task_params->sqe = sqe;
drivers/scsi/qedf/qedf_io.c:854: struct fcoe_wqe *sqe;
drivers/scsi/qedf/qedf_io.c:901: sqe = &fcport->sq[sqe_idx];
drivers/scsi/qedf/qedf_io.c:902: memset(sqe, 0, sizeof(struct fcoe_wqe));
drivers/scsi/qedf/qedf_io.c:915: qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
drivers/scsi/qedi/qedi_fw.c:1042: task_params.sqe = &ep->sq[sq_idx];
drivers/scsi/qedi/qedi_fw.c:1044: memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
drivers/scsi/qedi/qedi_fw.c:1116: task_params.sqe = &ep->sq[sq_idx];
drivers/scsi/qedi/qedi_fw.c:1117: memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
drivers/scsi/qedi/qedi_fw.c:1490: task_params.sqe = &ep->sq[sq_idx];
drivers/scsi/qedi/qedi_fw.c:1492: memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
drivers/scsi/qedi/qedi_fw.c:1614: task_params.sqe = &ep->sq[sq_idx];
drivers/scsi/qedi/qedi_fw.c:1616: memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
drivers/scsi/qedi/qedi_fw.c:1731: task_params.sqe = &ep->sq[sq_idx];
drivers/scsi/qedi/qedi_fw.c:1733: memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
drivers/scsi/qedi/qedi_fw.c:2093: task_params.sqe = &ep->sq[sq_idx];
drivers/scsi/qedi/qedi_fw.c:2104: memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
drivers/scsi/qedi/qedi_fw.c:2148: task_params.sqe = &ep->sq[sq_idx];
drivers/scsi/qedi/qedi_fw.c:2149: memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
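
Every qedi_fw.c hit is the same two-step slot claim, repeated once per opcode: point task_params.sqe at the WQE under the SQ producer index, then zero it before the task-init helpers fill it in. Factored out as a sketch (the params struct name and the index helper are assumptions, not the driver's API):

        static void qedi_claim_sqe(struct qedi_endpoint *ep,
                                   struct iscsi_task_params *task_params)
        {
                u16 sq_idx = qedi_get_sq_idx(ep);       /* assumed helper */

                task_params->sqe = &ep->sq[sq_idx];
                memset(task_params->sqe, 0, sizeof(struct iscsi_wqe));
        }
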
drivers/scsi/qedi/qedi_fw_api.c:101: memset(task_params->sqe, 0, sizeof(*task_params->sqe));
drivers/scsi/qedi/qedi_fw_api.c:102: task_params->sqe->task_id = cpu_to_le16(task_params->itid);
drivers/scsi/qedi/qedi_fw_api.c:104: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
drivers/scsi/qedi/qedi_fw_api.c:115: init_dif_context_flags(&task_params->sqe->prot_flags,
drivers/scsi/qedi/qedi_fw_api.c:118: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
drivers/scsi/qedi/qedi_fw_api.c:134: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
drivers/scsi/qedi/qedi_fw_api.c:136: SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
drivers/scsi/qedi/qedi_fw_api.c:141: SET_FIELD(task_params->sqe->contlen_cdbsize,
drivers/scsi/qedi/qedi_fw_api.c:147: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
drivers/scsi/qedi/qedi_fw_api.c:152: SET_FIELD(task_params->sqe->contlen_cdbsize,
drivers/scsi/qedi/qedi_fw_api.c:162: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
drivers/scsi/qedi/qedi_fw_api.c:165: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
drivers/scsi/qedi/qedi_fw_api.c:178: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
drivers/scsi/qedi/qedi_fw_api.c:182: SET_FIELD(task_params->sqe->contlen_cdbsize,
drivers/scsi/qedi/qedi_fw_api.c:187: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
drivers/scsi/qedi/qedi_fw_api.c:190: SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
drivers/scsi/qedi/qedi_fw_api.c:98: if (!task_params->sqe)
drivers/scsi/qedi/qedi_fw_iscsi.h:14: struct iscsi_wqe *sqe;
drivers/scsi/qla2xxx/qla_nvme.c:620: if (cmd->sqe.common.opcode == nvme_admin_async_event) {
drivers/scsi/qla2xxx/qla_nvme.c:671: cmd->sqe.common.opcode == nvme_admin_async_event) {
fs/btrfs/ioctl.c:4705: sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
fs/btrfs/ioctl.c:4846: sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
fs/fuse/dev_uring.c:1013: static int fuse_uring_get_iovec_from_sqe(const struct io_uring_sqe *sqe,
fs/fuse/dev_uring.c:1016: struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr));
fs/fuse/dev_uring.c:1020: if (sqe->len != FUSE_URING_IOV_SEGS)
fs/fuse/dev_uring.c:1045: err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov);
fs/fuse/dev_uring.c:1087: const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe128_cmd(cmd->sqe,
fs/fuse/dev_uring.c:882: const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe128_cmd(cmd->sqe,
include/linux/hisi_acc_qm.h:447: int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
include/linux/hisi_acc_qm.h:462: void *sqe;
include/linux/io_uring/cmd.h:16: const struct io_uring_sqe *sqe;
include/linux/io_uring/cmd.h:23: #define io_uring_sqe128_cmd(sqe, type) ({ \
include/linux/io_uring/cmd.h:26: (const type *)(sqe)->cmd; \
include/linux/io_uring/cmd.h:29: #define io_uring_sqe_cmd(sqe, type) ({ \
include/linux/io_uring/cmd.h:32: (const type *)(sqe)->cmd; \
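
The two macros closing the cmd.h group overlay a driver-defined command struct on the SQE's cmd area; the 128-byte variant covers payloads that only fit the big SQE, which is how the ublk, nvme and fuse hits use it. Minimal usage sketch (struct foo_cmd and foo_dispatch() are assumptions for illustration):

        struct foo_cmd {
                __u64 addr;
                __u32 len;
                __u32 flags;
        };

        static int foo_parse(struct io_uring_cmd *cmd)
        {
                const struct foo_cmd *fc = io_uring_sqe_cmd(cmd->sqe, struct foo_cmd);

                return foo_dispatch(fc->addr, fc->len, fc->flags);      /* hypothetical */
        }
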
include/linux/nvme-fc.h:56: struct nvme_command sqe;
include/linux/qed/qed_nvmetcp_if.h:98: struct nvmetcp_wqe *sqe;
include/net/libeth/tx.h:124: static inline void libeth_tx_complete(struct libeth_sqe *sqe,
include/net/libeth/tx.h:127: switch (sqe->type) {
include/net/libeth/tx.h:133: dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
include/net/libeth/tx.h:134: dma_unmap_len(sqe, len), DMA_TO_DEVICE);
include/net/libeth/tx.h:140: switch (sqe->type) {
include/net/libeth/tx.h:142: cp->ss->packets += sqe->packets;
include/net/libeth/tx.h:143: cp->ss->bytes += sqe->bytes;
include/net/libeth/tx.h:145: napi_consume_skb(sqe->skb, cp->napi);
include/net/libeth/tx.h:148: kfree(sqe->raw);
include/net/libeth/tx.h:154: sqe->type = LIBETH_SQE_EMPTY;
include/net/libeth/tx.h:157: void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp);
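
libeth_tx_complete() above is a type-dispatched completion walk: unmap or free according to sqe->type, fold the packet/byte counts into the stats carried in cp, then recycle the slot by resetting the type. Abbreviated to a single case here; the header's real switch covers more types:

        static void tx_complete_one(struct libeth_sqe *sqe, struct libeth_cq_pp *cp)
        {
                switch (sqe->type) {
                case LIBETH_SQE_SKB:
                        dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
                                       dma_unmap_len(sqe, len), DMA_TO_DEVICE);
                        napi_consume_skb(sqe->skb, cp->napi);
                        break;
                default:
                        break;
                }

                sqe->type = LIBETH_SQE_EMPTY;   /* slot is free again */
        }
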
include/net/libeth/xdp.h:1003: sqe = &sq->sqes[i];
include/net/libeth/xdp.h:1004: dma_unmap_addr_set(sqe, dma, desc.addr);
include/net/libeth/xdp.h:1005: dma_unmap_len_set(sqe, len, desc.len);
include/net/libeth/xdp.h:1008: sqe->type = LIBETH_SQE_XDP_XMIT_FRAG;
include/net/libeth/xdp.h:1012: sqe->type = LIBETH_SQE_XDP_XMIT;
include/net/libeth/xdp.h:1013: sqe->xdpf = xdpf;
include/net/libeth/xdp.h:1014: libeth_xdp_tx_fill_stats(sqe, &desc,
include/net/libeth/xdp.h:1761: __libeth_xdp_complete_tx(struct libeth_sqe *sqe, struct libeth_cq_pp *cp,
include/net/libeth/xdp.h:1765: enum libeth_sqe_type type = sqe->type;
include/net/libeth/xdp.h:1772: dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
include/net/libeth/xdp.h:1773: dma_unmap_len(sqe, len), DMA_TO_DEVICE);
include/net/libeth/xdp.h:1781: bulk(sqe->sinfo, cp->bq, sqe->nr_frags != 1);
include/net/libeth/xdp.h:1784: xdp_return_frame_bulk(sqe->xdpf, cp->bq);
include/net/libeth/xdp.h:1788: xsk(sqe->xsk);
include/net/libeth/xdp.h:1798: cp->xdp_tx -= sqe->nr_frags;
include/net/libeth/xdp.h:1801: cp->xss->bytes += sqe->bytes;
include/net/libeth/xdp.h:1807: sqe->type = LIBETH_SQE_EMPTY;
include/net/libeth/xdp.h:1810: static inline void libeth_xdp_complete_tx(struct libeth_sqe *sqe,
include/net/libeth/xdp.h:1813: __libeth_xdp_complete_tx(sqe, cp, libeth_xdp_return_buff_bulk,
include/net/libeth/xdp.h:684: #define libeth_xdp_tx_fill_stats(sqe, desc, sinfo) \
include/net/libeth/xdp.h:685: __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, __UNIQUE_ID(sqe_), \
include/net/libeth/xdp.h:688: #define __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, ue, ud, us) do { \
include/net/libeth/xdp.h:691: struct libeth_sqe *ue = (sqe); \
include/net/libeth/xdp.h:720: struct libeth_sqe *sqe;
include/net/libeth/xdp.h:746: sqe = &sq->sqes[i];
include/net/libeth/xdp.h:747: sqe->type = LIBETH_SQE_XDP_TX;
include/net/libeth/xdp.h:748: sqe->sinfo = sinfo;
include/net/libeth/xdp.h:749: libeth_xdp_tx_fill_stats(sqe, &desc, sinfo);
include/net/libeth/xdp.h:991: struct libeth_sqe *sqe;
include/net/libeth/xsk.h:120: struct libeth_sqe *sqe;
include/net/libeth/xsk.h:124: sqe = &sq->sqes[i];
include/net/libeth/xsk.h:125: sqe->xsk = xdp;
include/net/libeth/xsk.h:128: sqe->type = LIBETH_SQE_XSK_TX_FRAG;
include/net/libeth/xsk.h:132: sqe->type = LIBETH_SQE_XSK_TX;
include/net/libeth/xsk.h:133: libeth_xdp_tx_fill_stats(sqe, &desc,
include/trace/events/io_uring.h:492: TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),
include/trace/events/io_uring.h:494: TP_ARGS(sqe, req, error),
include/trace/events/io_uring.h:514: __string( op_str, io_uring_get_opcode(sqe->opcode) )
include/trace/events/io_uring.h:520: __entry->user_data = sqe->user_data;
include/trace/events/io_uring.h:521: __entry->opcode = sqe->opcode;
include/trace/events/io_uring.h:522: __entry->flags = sqe->flags;
include/trace/events/io_uring.h:523: __entry->ioprio = sqe->ioprio;
include/trace/events/io_uring.h:524: __entry->off = sqe->off;
include/trace/events/io_uring.h:525: __entry->addr = sqe->addr;
include/trace/events/io_uring.h:526: __entry->len = sqe->len;
include/trace/events/io_uring.h:527: __entry->op_flags = sqe->poll32_events;
include/trace/events/io_uring.h:528: __entry->buf_index = sqe->buf_index;
include/trace/events/io_uring.h:529: __entry->personality = sqe->personality;
include/trace/events/io_uring.h:530: __entry->file_index = sqe->file_index;
include/trace/events/io_uring.h:531: __entry->pad1 = sqe->__pad2[0];
include/trace/events/io_uring.h:532: __entry->addr3 = sqe->addr3;
io_uring/advise.c:31: int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/advise.c:36: if (sqe->buf_index || sqe->splice_fd_in)
io_uring/advise.c:39: ma->addr = READ_ONCE(sqe->addr);
io_uring/advise.c:40: ma->len = READ_ONCE(sqe->off);
io_uring/advise.c:42: ma->len = READ_ONCE(sqe->len);
io_uring/advise.c:43: ma->advice = READ_ONCE(sqe->fadvise_advice);
io_uring/advise.c:79: int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/advise.c:83: if (sqe->buf_index || sqe->splice_fd_in)
io_uring/advise.c:86: fa->offset = READ_ONCE(sqe->off);
io_uring/advise.c:87: fa->len = READ_ONCE(sqe->addr);
io_uring/advise.c:89: fa->len = READ_ONCE(sqe->len);
io_uring/advise.c:90: fa->advice = READ_ONCE(sqe->fadvise_advice);
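
io_madvise_prep()/io_fadvise_prep() above set the template every ->prep() hook below repeats: reject the SQE fields the opcode assigns no meaning to, then latch the rest into the request with READ_ONCE(). A hypothetical opcode in the same shape (struct io_foo is an assumption; io_kiocb_to_cmd() is the real accessor):

        static int io_foo_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        {
                struct io_foo *foo = io_kiocb_to_cmd(req, struct io_foo);

                if (sqe->buf_index || sqe->splice_fd_in)
                        return -EINVAL;

                foo->addr = READ_ONCE(sqe->addr);
                foo->len = READ_ONCE(sqe->len);
                foo->advice = READ_ONCE(sqe->fadvise_advice);
                return 0;
        }
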
io_uring/advise.h
3
int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/advise.h
6
int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/cancel.c
140
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/cancel.c
146
if (sqe->off || sqe->splice_fd_in)
io_uring/cancel.c
149
cancel->addr = READ_ONCE(sqe->addr);
io_uring/cancel.c
150
cancel->flags = READ_ONCE(sqe->cancel_flags);
io_uring/cancel.c
156
cancel->fd = READ_ONCE(sqe->fd);
io_uring/cancel.c
161
cancel->opcode = READ_ONCE(sqe->len);
io_uring/cancel.h
16
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/cmd_net.c
14
const struct io_uring_sqe *sqe = cmd->sqe;
io_uring/cmd_net.c
140
const struct io_uring_sqe *sqe = cmd->sqe;
io_uring/cmd_net.c
145
if (sqe->ioprio || sqe->__pad1 || sqe->len || sqe->rw_flags)
io_uring/cmd_net.c
148
uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/cmd_net.c
149
ulen = u64_to_user_ptr(READ_ONCE(sqe->addr3));
io_uring/cmd_net.c
150
peer = READ_ONCE(sqe->optlen);
io_uring/cmd_net.c
19
level = READ_ONCE(sqe->level);
io_uring/cmd_net.c
23
optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
io_uring/cmd_net.c
24
optname = READ_ONCE(sqe->optname);
io_uring/cmd_net.c
25
optlen = READ_ONCE(sqe->optlen);
io_uring/cmd_net.c
41
const struct io_uring_sqe *sqe = cmd->sqe;
io_uring/cmd_net.c
47
optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
io_uring/cmd_net.c
48
optname = READ_ONCE(sqe->optname);
io_uring/cmd_net.c
49
optlen = READ_ONCE(sqe->optlen);
io_uring/cmd_net.c
50
level = READ_ONCE(sqe->level);
io_uring/epoll.c
29
int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/epoll.c
33
if (sqe->buf_index || sqe->splice_fd_in)
io_uring/epoll.c
36
epoll->epfd = READ_ONCE(sqe->fd);
io_uring/epoll.c
37
epoll->op = READ_ONCE(sqe->len);
io_uring/epoll.c
38
epoll->fd = READ_ONCE(sqe->off);
io_uring/epoll.c
43
ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/epoll.c
67
int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/epoll.c
71
if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
io_uring/epoll.c
74
iew->maxevents = READ_ONCE(sqe->len);
io_uring/epoll.c
75
iew->events = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/epoll.h
4
int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/epoll.h
6
int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/fdinfo.c
108
sqe = &ctx->sq_sqes[sq_idx << sq_shift];
io_uring/fdinfo.c
109
opcode = READ_ONCE(sqe->opcode);
io_uring/fdinfo.c
135
sq_idx, io_uring_get_opcode(opcode), sqe->fd,
io_uring/fdinfo.c
136
sqe->flags, (unsigned long long) sqe->off,
io_uring/fdinfo.c
137
(unsigned long long) sqe->addr, sqe->rw_flags,
io_uring/fdinfo.c
138
sqe->buf_index, sqe->user_data);
io_uring/fdinfo.c
140
u64 *sqeb = (void *) (sqe + 1);
io_uring/fdinfo.c
96
struct io_uring_sqe *sqe;
io_uring/fs.c
107
int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/fs.c
113
if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)
io_uring/fs.c
118
un->dfd = READ_ONCE(sqe->fd);
io_uring/fs.c
120
un->flags = READ_ONCE(sqe->unlink_flags);
io_uring/fs.c
124
fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/fs.c
159
int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/fs.c
165
if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
io_uring/fs.c
170
mkd->dfd = READ_ONCE(sqe->fd);
io_uring/fs.c
171
mkd->mode = READ_ONCE(sqe->len);
io_uring/fs.c
173
fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/fs.c
205
int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/fs.c
211
if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
io_uring/fs.c
216
sl->new_dfd = READ_ONCE(sqe->fd);
io_uring/fs.c
217
oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/fs.c
218
newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/fs.c
251
int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/fs.c
257
if (sqe->buf_index || sqe->splice_fd_in)
io_uring/fs.c
262
lnk->old_dfd = READ_ONCE(sqe->fd);
io_uring/fs.c
263
lnk->new_dfd = READ_ONCE(sqe->len);
io_uring/fs.c
264
oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/fs.c
265
newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/fs.c
266
lnk->flags = READ_ONCE(sqe->hardlink_flags);
io_uring/fs.c
50
int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/fs.c
56
if (sqe->buf_index || sqe->splice_fd_in)
io_uring/fs.c
61
ren->old_dfd = READ_ONCE(sqe->fd);
io_uring/fs.c
62
oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/fs.c
63
newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/fs.c
64
ren->new_dfd = READ_ONCE(sqe->len);
io_uring/fs.c
65
ren->flags = READ_ONCE(sqe->rename_flags);
io_uring/fs.h
11
int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/fs.h
15
int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/fs.h
18
int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/fs.h
3
int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/fs.h
7
int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/futex.c
127
int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/futex.c
132
if (unlikely(sqe->len || sqe->futex_flags || sqe->buf_index ||
io_uring/futex.c
133
sqe->file_index))
io_uring/futex.c
136
iof->uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/futex.c
137
iof->futex_val = READ_ONCE(sqe->addr2);
io_uring/futex.c
138
iof->futex_mask = READ_ONCE(sqe->addr3);
io_uring/futex.c
139
flags = READ_ONCE(sqe->fd);
io_uring/futex.c
172
int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/futex.c
179
if (unlikely(sqe->fd || sqe->buf_index || sqe->file_index ||
io_uring/futex.c
180
sqe->addr2 || sqe->futex_flags || sqe->addr3))
io_uring/futex.c
183
iof->uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/futex.c
184
iof->futex_nr = READ_ONCE(sqe->len);
io_uring/futex.h
5
int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/futex.h
6
int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/io_uring.c
1716
const struct io_uring_sqe *sqe, unsigned int *left)
io_uring/io_uring.c
1725
req->opcode = opcode = READ_ONCE(sqe->opcode);
io_uring/io_uring.c
1727
sqe_flags = READ_ONCE(sqe->flags);
io_uring/io_uring.c
1729
req->cqe.user_data = READ_ONCE(sqe->user_data);
io_uring/io_uring.c
1748
(unsigned)(sqe - ctx->sq_sqes) >= ctx->sq_entries - 1)
io_uring/io_uring.c
1767
req->buf_index = READ_ONCE(sqe->buf_group);
io_uring/io_uring.c
1791
if (!def->ioprio && sqe->ioprio)
io_uring/io_uring.c
1799
req->cqe.fd = READ_ONCE(sqe->fd);
io_uring/io_uring.c
1812
personality = READ_ONCE(sqe->personality);
io_uring/io_uring.c
1828
return def->prep(req, sqe);
io_uring/io_uring.c
1831
static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
io_uring/io_uring.c
1838
trace_io_uring_req_failed(sqe, req, ret);
io_uring/io_uring.c
1869
const struct io_uring_sqe *sqe, unsigned int *left)
io_uring/io_uring.c
1875
ret = io_init_req(ctx, req, sqe, left);
io_uring/io_uring.c
1877
return io_submit_fail_init(sqe, req, ret);
io_uring/io_uring.c
1882
return io_submit_fail_init(sqe, req, ret);
io_uring/io_uring.c
1976
static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
io_uring/io_uring.c
2004
*sqe = &ctx->sq_sqes[head];
io_uring/io_uring.c
2029
const struct io_uring_sqe *sqe;
io_uring/io_uring.c
2034
if (unlikely(!io_get_sqe(ctx, &sqe))) {
io_uring/io_uring.c
2043
if (unlikely(io_submit_sqe(ctx, req, sqe, &left)) &&
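
Taken together, the io_uring.c entries trace the submission path: io_get_sqe() resolves the ring head to &ctx->sq_sqes[head], io_init_req() copies opcode, flags and user_data out of the SQE, and def->prep() dispatches to the per-opcode prep handlers indexed throughout this document. A condensed sketch of that loop (simplified; request allocation and error accounting are elided):

	do {
		const struct io_uring_sqe *sqe;

		if (unlikely(!io_get_sqe(ctx, &sqe)))
			break;                  /* SQ ring drained */
		/* ... obtain a struct io_kiocb *req ... */
		if (unlikely(io_submit_sqe(ctx, req, sqe, &left)))
			break;                  /* unrecoverable init failure */
	} while (--left);
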
io_uring/kbuf.c
480
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/kbuf.c
485
if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
io_uring/kbuf.c
486
sqe->splice_fd_in)
io_uring/kbuf.c
489
tmp = READ_ONCE(sqe->fd);
io_uring/kbuf.c
495
p->bgid = READ_ONCE(sqe->buf_group);
io_uring/kbuf.c
499
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/kbuf.c
505
if (sqe->rw_flags || sqe->splice_fd_in)
io_uring/kbuf.c
508
tmp = READ_ONCE(sqe->fd);
io_uring/kbuf.c
512
p->addr = READ_ONCE(sqe->addr);
io_uring/kbuf.c
513
p->len = READ_ONCE(sqe->len);
io_uring/kbuf.c
525
p->bgid = READ_ONCE(sqe->buf_group);
io_uring/kbuf.c
526
tmp = READ_ONCE(sqe->off);
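
Userspace counterpart of io_provide_buffers_prep above, as a liburing sketch (ring, base, buf_len, nr_bufs and bgid are placeholders): addr, len and fd carry the buffer base, per-buffer size and buffer count, off the starting buffer id, and buf_group the group id.

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	/* hand nr_bufs buffers of buf_len bytes each, starting at base, to group bgid */
	io_uring_prep_provide_buffers(sqe, base, buf_len, nr_bufs, bgid, 0);
	io_uring_submit(&ring);
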
io_uring/kbuf.h
72
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/kbuf.h
73
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/mock_file.c
213
const struct io_uring_sqe *sqe = cmd->sqe;
io_uring/mock_file.c
225
uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/mock_file.c
226
uarg_size = READ_ONCE(sqe->len);
io_uring/mock_file.c
228
if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index)
io_uring/mock_file.c
280
const struct io_uring_sqe *sqe = cmd->sqe;
io_uring/mock_file.c
284
uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/mock_file.c
285
uarg_size = READ_ONCE(sqe->len);
io_uring/mock_file.c
287
if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index ||
io_uring/mock_file.c
67
const struct io_uring_sqe *sqe = cmd->sqe;
io_uring/mock_file.c
74
ubuf = u64_to_user_ptr(READ_ONCE(sqe->addr3));
io_uring/mock_file.c
75
iovec = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/mock_file.c
76
iovec_len = READ_ONCE(sqe->len);
io_uring/mock_file.c
77
flags = READ_ONCE(sqe->file_index);
io_uring/mock_file.c
79
if (unlikely(sqe->ioprio || sqe->__pad1))
io_uring/msg_ring.c
261
static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe)
io_uring/msg_ring.c
263
if (unlikely(sqe->buf_index || sqe->personality))
io_uring/msg_ring.c
267
msg->user_data = READ_ONCE(sqe->off);
io_uring/msg_ring.c
268
msg->len = READ_ONCE(sqe->len);
io_uring/msg_ring.c
269
msg->cmd = READ_ONCE(sqe->addr);
io_uring/msg_ring.c
270
msg->src_fd = READ_ONCE(sqe->addr3);
io_uring/msg_ring.c
271
msg->dst_fd = READ_ONCE(sqe->file_index);
io_uring/msg_ring.c
272
msg->flags = READ_ONCE(sqe->msg_ring_flags);
io_uring/msg_ring.c
279
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/msg_ring.c
281
return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe);
io_uring/msg_ring.c
315
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
io_uring/msg_ring.c
320
ret = __io_msg_ring_prep(&io_msg, sqe);
io_uring/msg_ring.c
331
CLASS(fd, f)(sqe->fd);
io_uring/msg_ring.h
3
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe);
io_uring/msg_ring.h
4
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
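
From userspace, MSG_RING posts a CQE into another ring: fd names the target ring, and the SQE's off field (filled by the liburing helper's data argument) becomes the target CQE's user_data. A sketch, with src_ring and target_ring_fd as placeholders:

	struct io_uring_sqe *sqe = io_uring_get_sqe(&src_ring);
	io_uring_prep_msg_ring(sqe, target_ring_fd, 0, 0xdeadbeef, 0);
	io_uring_submit(&src_ring);
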
io_uring/net.c
123
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1250
int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1255
if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3))
io_uring/net.c
1258
ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
io_uring/net.c
1263
zc->len = READ_ONCE(sqe->len);
io_uring/net.c
1264
zc->flags = READ_ONCE(sqe->ioprio);
io_uring/net.c
1265
if (READ_ONCE(sqe->msg_flags))
io_uring/net.c
127
if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
io_uring/net.c
128
sqe->buf_index || sqe->splice_fd_in))
io_uring/net.c
131
shutdown->how = READ_ONCE(sqe->len);
io_uring/net.c
1330
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1340
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
io_uring/net.c
1354
zc->flags = READ_ONCE(sqe->ioprio);
io_uring/net.c
1367
zc->len = READ_ONCE(sqe->len);
io_uring/net.c
1368
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
io_uring/net.c
1369
req->buf_index = READ_ONCE(sqe->buf_index);
io_uring/net.c
1381
ret = io_send_setup(req, sqe);
io_uring/net.c
1383
if (unlikely(sqe->addr2 || sqe->file_index))
io_uring/net.c
1385
ret = io_sendmsg_setup(req, sqe);
io_uring/net.c
1615
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1619
if (sqe->len || sqe->buf_index)
io_uring/net.c
1622
accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/net.c
1623
accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/net.c
1624
accept->flags = READ_ONCE(sqe->accept_flags);
io_uring/net.c
1626
accept->iou_flags = READ_ONCE(sqe->ioprio);
io_uring/net.c
1630
accept->file_slot = READ_ONCE(sqe->file_index);
io_uring/net.c
1719
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1723
if (sqe->addr || sqe->rw_flags || sqe->buf_index)
io_uring/net.c
1726
sock->domain = READ_ONCE(sqe->fd);
io_uring/net.c
1727
sock->type = READ_ONCE(sqe->off);
io_uring/net.c
1728
sock->protocol = READ_ONCE(sqe->len);
io_uring/net.c
1729
sock->file_slot = READ_ONCE(sqe->file_index);
io_uring/net.c
1773
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1778
if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
io_uring/net.c
1781
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/net.c
1782
conn->addr_len = READ_ONCE(sqe->addr2);
io_uring/net.c
1844
int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1850
if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
io_uring/net.c
1853
uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/net.c
1854
bind->addr_len = READ_ONCE(sqe->addr2);
io_uring/net.c
1880
int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
1884
if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
io_uring/net.c
1887
listen->backlog = READ_ONCE(sqe->len);
io_uring/net.c
349
static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
357
sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/net.c
359
if (READ_ONCE(sqe->__pad3[0]))
io_uring/net.c
368
addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/net.c
369
addr_len = READ_ONCE(sqe->addr_len);
io_uring/net.c
392
static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
399
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/net.c
418
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
423
sr->len = READ_ONCE(sqe->len);
io_uring/net.c
426
sr->flags = READ_ONCE(sqe->ioprio);
io_uring/net.c
429
sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
io_uring/net.c
447
return io_send_setup(req, sqe);
io_uring/net.c
448
if (unlikely(sqe->addr2 || sqe->file_index))
io_uring/net.c
450
return io_sendmsg_setup(req, sqe);
io_uring/net.c
785
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/net.c
791
if (unlikely(sqe->addr2))
io_uring/net.c
794
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/net.c
795
sr->len = READ_ONCE(sqe->len);
io_uring/net.c
798
sr->flags = READ_ONCE(sqe->ioprio);
io_uring/net.c
801
sr->msg_flags = READ_ONCE(sqe->msg_flags);
io_uring/net.c
816
sr->mshot_total_len = READ_ONCE(sqe->optlen);
io_uring/net.c
819
} else if (sqe->optlen) {
io_uring/net.c
823
} else if (sqe->optlen) {
io_uring/net.h
28
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
32
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
37
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
43
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
46
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
50
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
55
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
58
int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/net.h
61
int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/nop.c
28
int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/nop.c
32
nop->flags = READ_ONCE(sqe->nop_flags);
io_uring/nop.c
37
nop->result = READ_ONCE(sqe->len);
io_uring/nop.c
41
nop->fd = READ_ONCE(sqe->fd);
io_uring/nop.c
45
req->buf_index = READ_ONCE(sqe->buf_index);
io_uring/nop.c
51
nop->extra1 = READ_ONCE(sqe->off);
io_uring/nop.c
52
nop->extra2 = READ_ONCE(sqe->addr);
io_uring/nop.h
3
int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/opdef.c
49
const struct io_uring_sqe *sqe)
io_uring/openclose.c
101
return __io_openat_prep(req, sqe);
io_uring/openclose.c
104
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/openclose.c
111
how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/openclose.c
112
len = READ_ONCE(sqe->len);
io_uring/openclose.c
120
return __io_openat_prep(req, sqe);
io_uring/openclose.c
219
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/openclose.c
223
if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
io_uring/openclose.c
228
close->fd = READ_ONCE(sqe->fd);
io_uring/openclose.c
229
close->file_slot = READ_ONCE(sqe->file_index);
io_uring/openclose.c
275
int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/openclose.c
280
if (sqe->off || sqe->addr || sqe->len || sqe->buf_index ||
io_uring/openclose.c
281
sqe->splice_fd_in || sqe->addr3)
io_uring/openclose.c
288
flags = READ_ONCE(sqe->install_fd_flags);
io_uring/openclose.c
326
int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/openclose.c
330
if (sqe->fd || sqe->off || sqe->addr3)
io_uring/openclose.c
333
p->fds = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/openclose.c
334
p->flags = READ_ONCE(sqe->pipe_flags);
io_uring/openclose.c
338
p->file_slot = READ_ONCE(sqe->file_index);
io_uring/openclose.c
53
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/openclose.c
59
if (unlikely(sqe->buf_index))
io_uring/openclose.c
68
open->dfd = READ_ONCE(sqe->fd);
io_uring/openclose.c
69
fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/openclose.c
75
open->file_slot = READ_ONCE(sqe->file_index);
io_uring/openclose.c
94
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/openclose.c
97
u64 mode = READ_ONCE(sqe->len);
io_uring/openclose.c
98
u64 flags = READ_ONCE(sqe->open_flags);
io_uring/openclose.h
13
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/openclose.h
16
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/openclose.h
19
int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/openclose.h
22
int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/openclose.h
8
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/poll.c
832
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
io_uring/poll.c
837
events = READ_ONCE(sqe->poll32_events);
io_uring/poll.c
849
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/poll.c
854
if (sqe->buf_index || sqe->splice_fd_in)
io_uring/poll.c
856
flags = READ_ONCE(sqe->len);
io_uring/poll.c
864
upd->old_user_data = READ_ONCE(sqe->addr);
io_uring/poll.c
868
upd->new_user_data = READ_ONCE(sqe->off);
io_uring/poll.c
872
upd->events = io_poll_parse_events(sqe, flags);
io_uring/poll.c
873
else if (sqe->poll32_events)
io_uring/poll.c
879
int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/poll.c
884
if (sqe->buf_index || sqe->off || sqe->addr)
io_uring/poll.c
886
flags = READ_ONCE(sqe->len);
io_uring/poll.c
892
poll->events = io_poll_parse_events(sqe, flags);
io_uring/poll.h
35
int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/poll.h
38
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/register.c
985
struct io_uring_sqe sqe;
io_uring/register.c
989
if (copy_from_user(&sqe, arg, sizeof(sqe)))
io_uring/register.c
992
if (sqe.flags)
io_uring/register.c
994
if (sqe.opcode != IORING_OP_MSG_RING)
io_uring/register.c
997
return io_uring_sync_msg_ring(&sqe);
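
The register.c entries show the synchronous variant: a single SQE is copied in from userspace via io_uring_register(2) rather than through a submission queue, validated (flags must be clear, opcode must be IORING_OP_MSG_RING), and handed to io_uring_sync_msg_ring(). A raw-syscall sketch of the userspace side, assuming IORING_REGISTER_SEND_MSG_RING is the matching register opcode:

	struct io_uring_sqe sqe = { 0 };

	sqe.opcode = IORING_OP_MSG_RING;
	sqe.fd = target_ring_fd;   /* ring to post the CQE into */
	sqe.off = 0xcafe;          /* becomes user_data on the target ring */
	syscall(__NR_io_uring_register, ring_fd,
		IORING_REGISTER_SEND_MSG_RING, &sqe, 1);
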
io_uring/rsrc.c
411
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rsrc.c
417
if (sqe->rw_flags || sqe->splice_fd_in)
io_uring/rsrc.c
420
up->offset = READ_ONCE(sqe->off);
io_uring/rsrc.c
421
up->nr_args = READ_ONCE(sqe->len);
io_uring/rsrc.c
424
up->arg = READ_ONCE(sqe->addr);
io_uring/rsrc.h
124
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.c
259
static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
io_uring/rw.c
272
rw->kiocb.ki_pos = READ_ONCE(sqe->off);
io_uring/rw.c
274
req->buf_index = READ_ONCE(sqe->buf_index);
io_uring/rw.c
277
ioprio = READ_ONCE(sqe->ioprio);
io_uring/rw.c
288
rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);
io_uring/rw.c
295
rw->addr = READ_ONCE(sqe->addr);
io_uring/rw.c
296
rw->len = READ_ONCE(sqe->len);
io_uring/rw.c
297
rw->flags = (__force rwf_t) READ_ONCE(sqe->rw_flags);
io_uring/rw.c
299
attr_type_mask = READ_ONCE(sqe->attr_type_mask);
io_uring/rw.c
307
attr_ptr = READ_ONCE(sqe->attr_ptr);
io_uring/rw.c
323
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
io_uring/rw.c
328
ret = __io_prep_rw(req, sqe, ddir);
io_uring/rw.c
335
int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
337
return io_prep_rw(req, sqe, ITER_DEST);
io_uring/rw.c
340
int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
342
return io_prep_rw(req, sqe, ITER_SOURCE);
io_uring/rw.c
345
static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
io_uring/rw.c
350
ret = io_prep_rw(req, sqe, ddir);
io_uring/rw.c
363
int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
365
return io_prep_rwv(req, sqe, ITER_DEST);
io_uring/rw.c
368
int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
370
return io_prep_rwv(req, sqe, ITER_SOURCE);
io_uring/rw.c
389
int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
391
return __io_prep_rw(req, sqe, ITER_DEST);
io_uring/rw.c
394
int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
396
return __io_prep_rw(req, sqe, ITER_SOURCE);
io_uring/rw.c
426
int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
430
ret = __io_prep_rw(req, sqe, ITER_DEST);
io_uring/rw.c
436
int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
440
ret = __io_prep_rw(req, sqe, ITER_SOURCE);
io_uring/rw.c
450
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/rw.c
459
ret = __io_prep_rw(req, sqe, ITER_DEST);
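
The rw.c wrappers differ only in iterator direction (ITER_DEST for reads, ITER_SOURCE for writes) and in whether buf_index names a registered buffer (the *_fixed variants). Userspace sketch of the fixed-buffer read path with liburing (ring, iov and fd are placeholders):

	io_uring_register_buffers(&ring, &iov, 1);   /* buf_index 0 refers to iov */
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);
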
io_uring/rw.h
35
int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
36
int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
37
int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
38
int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
39
int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
40
int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
41
int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
42
int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/rw.h
50
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/splice.c
110
int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/splice.c
114
sp->off_in = READ_ONCE(sqe->splice_off_in);
io_uring/splice.c
115
sp->off_out = READ_ONCE(sqe->off);
io_uring/splice.c
116
return __io_splice_prep(req, sqe);
io_uring/splice.c
29
const struct io_uring_sqe *sqe)
io_uring/splice.c
34
sp->len = READ_ONCE(sqe->len);
io_uring/splice.c
35
sp->flags = READ_ONCE(sqe->splice_flags);
io_uring/splice.c
38
sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
io_uring/splice.c
44
int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/splice.c
46
if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
io_uring/splice.c
48
return __io_splice_prep(req, sqe);
io_uring/splice.h
3
int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/splice.h
7
int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/statx.c
23
int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/statx.c
29
if (sqe->buf_index || sqe->splice_fd_in)
io_uring/statx.c
34
sx->dfd = READ_ONCE(sqe->fd);
io_uring/statx.c
35
sx->mask = READ_ONCE(sqe->len);
io_uring/statx.c
36
path = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/statx.c
37
sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/statx.c
38
sx->flags = READ_ONCE(sqe->statx_flags);
io_uring/statx.h
3
int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/sync.c
25
int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/sync.c
29
if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
io_uring/sync.c
32
sync->off = READ_ONCE(sqe->off);
io_uring/sync.c
33
sync->len = READ_ONCE(sqe->len);
io_uring/sync.c
34
sync->flags = READ_ONCE(sqe->sync_range_flags);
io_uring/sync.c
53
int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/sync.c
57
if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
io_uring/sync.c
60
sync->flags = READ_ONCE(sqe->fsync_flags);
io_uring/sync.c
64
sync->off = READ_ONCE(sqe->off);
io_uring/sync.c
67
sync->len = READ_ONCE(sqe->len);
io_uring/sync.c
87
int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/sync.c
91
if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
io_uring/sync.c
94
sync->off = READ_ONCE(sqe->off);
io_uring/sync.c
95
sync->len = READ_ONCE(sqe->addr);
io_uring/sync.c
96
sync->mode = READ_ONCE(sqe->len);
io_uring/sync.h
10
int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/sync.h
3
int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/sync.h
6
int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/timeout.c
446
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/timeout.c
452
if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
io_uring/timeout.c
456
tr->addr = READ_ONCE(sqe->addr);
io_uring/timeout.c
457
tr->flags = READ_ONCE(sqe->timeout_flags);
io_uring/timeout.c
465
if (get_timespec64(&tr->ts, u64_to_user_ptr(READ_ONCE(sqe->addr2))))
io_uring/timeout.c
516
const struct io_uring_sqe *sqe,
io_uring/timeout.c
522
u32 off = READ_ONCE(sqe->off);
io_uring/timeout.c
524
if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
io_uring/timeout.c
528
flags = READ_ONCE(sqe->timeout_flags);
io_uring/timeout.c
560
if (get_timespec64(&data->ts, u64_to_user_ptr(READ_ONCE(sqe->addr))))
io_uring/timeout.c
585
int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/timeout.c
587
return __io_timeout_prep(req, sqe, false);
io_uring/timeout.c
590
int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/timeout.c
592
return __io_timeout_prep(req, sqe, true);
io_uring/timeout.h
19
int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/timeout.h
20
int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/timeout.h
22
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/truncate.c
23
int io_ftruncate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/truncate.c
27
if (sqe->rw_flags || sqe->addr || sqe->len || sqe->buf_index ||
io_uring/truncate.c
28
sqe->splice_fd_in || sqe->addr3)
io_uring/truncate.c
31
ft->len = READ_ONCE(sqe->off);
io_uring/truncate.h
3
int io_ftruncate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/uring_cmd.c
184
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/uring_cmd.c
189
if (sqe->__pad1)
io_uring/uring_cmd.c
192
ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
io_uring/uring_cmd.c
199
req->buf_index = READ_ONCE(sqe->buf_index);
io_uring/uring_cmd.c
206
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
io_uring/uring_cmd.c
211
ioucmd->sqe = sqe;
io_uring/uring_cmd.c
233
if (WARN_ON_ONCE(ioucmd->sqe == ac->sqes))
io_uring/uring_cmd.c
235
memcpy(ac->sqes, ioucmd->sqe, uring_sqe_size(req));
io_uring/uring_cmd.c
236
ioucmd->sqe = ac->sqes;
io_uring/uring_cmd.c
39
ioucmd->sqe = NULL;
io_uring/uring_cmd.h
12
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
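
The uring_cmd.c entries capture a lifetime detail: ioucmd->sqe initially points straight into the shared SQ ring, which userspace may overwrite once submission returns, so before a command goes fully async the kernel memcpy()s the SQE into per-request storage (ac->sqes) and repoints ioucmd->sqe at the stable copy. A minimal driver-side sketch of a ->uring_cmd() handler consuming such an SQE (MY_CMD_OP and the choice of addr/len fields are illustrative):

	static int my_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
	{
		const struct io_uring_sqe *sqe = cmd->sqe;
		u64 uaddr, len;

		if (cmd->cmd_op != MY_CMD_OP)
			return -ENOTTY;
		/* this command defines addr/len; anything else must be zero */
		if (unlikely(sqe->ioprio || sqe->rw_flags))
			return -EINVAL;
		uaddr = READ_ONCE(sqe->addr);
		len = READ_ONCE(sqe->len);
		/* ... do the work, then complete: */
		io_uring_cmd_done(cmd, 0, 0, issue_flags);
		return -EIOCBQUEUED;
	}
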
io_uring/waitid.c
260
int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/waitid.c
265
if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
io_uring/waitid.c
273
iw->which = READ_ONCE(sqe->len);
io_uring/waitid.c
274
iw->upid = READ_ONCE(sqe->fd);
io_uring/waitid.c
275
iw->options = READ_ONCE(sqe->file_index);
io_uring/waitid.c
277
iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/waitid.h
10
int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/xattr.c
123
const struct io_uring_sqe *sqe)
io_uring/xattr.c
130
name = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/xattr.c
131
ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/xattr.c
133
ix->ctx.size = READ_ONCE(sqe->len);
io_uring/xattr.c
134
ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
io_uring/xattr.c
151
int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/xattr.c
160
ret = __io_setxattr_prep(req, sqe);
io_uring/xattr.c
164
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
io_uring/xattr.c
169
int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/xattr.c
171
return __io_setxattr_prep(req, sqe);
io_uring/xattr.c
43
const struct io_uring_sqe *sqe)
io_uring/xattr.c
51
name = u64_to_user_ptr(READ_ONCE(sqe->addr));
io_uring/xattr.c
52
ix->ctx.value = u64_to_user_ptr(READ_ONCE(sqe->addr2));
io_uring/xattr.c
53
ix->ctx.size = READ_ONCE(sqe->len);
io_uring/xattr.c
54
ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
io_uring/xattr.c
74
int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/xattr.c
76
return __io_getxattr_prep(req, sqe);
io_uring/xattr.c
79
int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
io_uring/xattr.c
88
ret = __io_getxattr_prep(req, sqe);
io_uring/xattr.c
92
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
io_uring/xattr.h
11
int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/xattr.h
14
int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/xattr.h
5
int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/xattr.h
8
int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
io_uring/zcrx.h
107
int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
tools/include/io_uring/mini_liburing.h
259
static inline void io_uring_prep_cmd(struct io_uring_sqe *sqe, int op,
tools/include/io_uring/mini_liburing.h
265
memset(sqe, 0, sizeof(*sqe));
tools/include/io_uring/mini_liburing.h
266
sqe->opcode = (__u8)IORING_OP_URING_CMD;
tools/include/io_uring/mini_liburing.h
267
sqe->fd = sockfd;
tools/include/io_uring/mini_liburing.h
268
sqe->cmd_op = op;
tools/include/io_uring/mini_liburing.h
270
sqe->level = level;
tools/include/io_uring/mini_liburing.h
271
sqe->optname = optname;
tools/include/io_uring/mini_liburing.h
272
sqe->optval = (unsigned long long)optval;
tools/include/io_uring/mini_liburing.h
273
sqe->optlen = optlen;
tools/include/io_uring/mini_liburing.h
287
static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
tools/include/io_uring/mini_liburing.h
290
memset(sqe, 0, sizeof(*sqe));
tools/include/io_uring/mini_liburing.h
291
sqe->opcode = (__u8)IORING_OP_SEND;
tools/include/io_uring/mini_liburing.h
292
sqe->fd = sockfd;
tools/include/io_uring/mini_liburing.h
293
sqe->addr = (unsigned long)buf;
tools/include/io_uring/mini_liburing.h
294
sqe->len = len;
tools/include/io_uring/mini_liburing.h
295
sqe->msg_flags = (__u32)flags;
tools/include/io_uring/mini_liburing.h
298
static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd,
tools/include/io_uring/mini_liburing.h
302
io_uring_prep_send(sqe, sockfd, buf, len, flags);
tools/include/io_uring/mini_liburing.h
303
sqe->opcode = (__u8)IORING_OP_SEND_ZC;
tools/include/io_uring/mini_liburing.h
304
sqe->ioprio = zc_flags;
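
Usage sketch for the mini_liburing helpers above, mirroring the sockopt selftest that follows (SOCKET_URING_OP_GETSOCKOPT and the sockfd/level/optname values are placeholders):

	int val = 0;
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_cmd(sqe, SOCKET_URING_OP_GETSOCKOPT, sockfd,
			  SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	io_uring_submit(&ring);
	/* on success, cqe->res carries the returned optlen */
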
tools/testing/selftests/bpf/prog_tests/sockopt.c
1004
sqe = io_uring_get_sqe(&ring);
tools/testing/selftests/bpf/prog_tests/sockopt.c
1005
if (!ASSERT_NEQ(sqe, NULL, "Get an SQE")) {
tools/testing/selftests/bpf/prog_tests/sockopt.c
1010
io_uring_prep_cmd(sqe, op, fd, level, optname, optval, optlen);
tools/testing/selftests/bpf/prog_tests/sockopt.c
996
struct io_uring_sqe *sqe;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
223
struct io_uring_sqe *sqe;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
225
sqe = io_uring_get_sqe(ring);
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
227
io_uring_prep_accept(sqe, sockfd, NULL, NULL, 0);
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
228
sqe->user_data = 1;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
233
struct io_uring_sqe *sqe;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
235
sqe = io_uring_get_sqe(ring);
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
237
io_uring_prep_rw(IORING_OP_RECV_ZC, sqe, sockfd, NULL, 0, 0);
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
238
sqe->ioprio |= IORING_RECV_MULTISHOT;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
239
sqe->user_data = 2;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
244
struct io_uring_sqe *sqe;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
246
sqe = io_uring_get_sqe(ring);
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
248
io_uring_prep_rw(IORING_OP_RECV_ZC, sqe, sockfd, NULL, len, 0);
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
249
sqe->ioprio |= IORING_RECV_MULTISHOT;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
250
sqe->user_data = 2;
tools/testing/selftests/drivers/net/hw/iou-zcrx.c
272
struct io_uring_sqe *sqe;
tools/testing/selftests/mm/cow.c
415
struct io_uring_sqe *sqe;
tools/testing/selftests/mm/cow.c
517
sqe = io_uring_get_sqe(&ring);
tools/testing/selftests/mm/cow.c
518
if (!sqe) {
tools/testing/selftests/mm/cow.c
523
io_uring_prep_write_fixed(sqe, fd, mem, size, 0, 0);
tools/testing/selftests/net/io_uring_zerocopy_tx.c
132
sqe = io_uring_get_sqe(&ring);
tools/testing/selftests/net/io_uring_zerocopy_tx.c
135
io_uring_prep_send(sqe, fd, payload,
tools/testing/selftests/net/io_uring_zerocopy_tx.c
137
sqe->user_data = NONZC_TAG;
tools/testing/selftests/net/io_uring_zerocopy_tx.c
139
io_uring_prep_sendzc(sqe, fd, payload,
tools/testing/selftests/net/io_uring_zerocopy_tx.c
143
sqe->ioprio |= IORING_RECVSEND_FIXED_BUF;
tools/testing/selftests/net/io_uring_zerocopy_tx.c
144
sqe->buf_index = buf_idx;
tools/testing/selftests/net/io_uring_zerocopy_tx.c
146
sqe->user_data = ZC_TAG;
tools/testing/selftests/net/io_uring_zerocopy_tx.c
96
struct io_uring_sqe *sqe;
tools/testing/selftests/ublk/batch.c
230
struct io_uring_sqe *sqe, unsigned op,
tools/testing/selftests/ublk/batch.c
238
cmd = (struct ublk_batch_io *)ublk_get_sqe_cmd(sqe);
tools/testing/selftests/ublk/batch.c
240
ublk_set_sqe_cmd_op(sqe, op);
tools/testing/selftests/ublk/batch.c
242
sqe->fd = 0; /* dev->fds[0] */
tools/testing/selftests/ublk/batch.c
243
sqe->opcode = IORING_OP_URING_CMD;
tools/testing/selftests/ublk/batch.c
244
sqe->flags = IOSQE_FIXED_FILE;
tools/testing/selftests/ublk/batch.c
253
io_uring_sqe_set_data64(sqe, user_data);
tools/testing/selftests/ublk/batch.c
266
struct io_uring_sqe *sqe,
tools/testing/selftests/ublk/batch.c
271
cmd = (struct ublk_batch_io *)ublk_get_sqe_cmd(sqe);
tools/testing/selftests/ublk/batch.c
282
struct io_uring_sqe *sqe;
tools/testing/selftests/ublk/batch.c
289
ublk_io_alloc_sqes(t, &sqe, 1);
tools/testing/selftests/ublk/batch.c
291
ublk_init_batch_cmd(t, q->q_id, sqe, UBLK_U_IO_FETCH_IO_CMDS, 2, nr_elem,
tools/testing/selftests/ublk/batch.c
294
sqe->rw_flags = IORING_URING_CMD_MULTISHOT;
tools/testing/selftests/ublk/batch.c
295
sqe->buf_group = buf_idx;
tools/testing/selftests/ublk/batch.c
296
sqe->flags |= IOSQE_BUFFER_SELECT;
tools/testing/selftests/ublk/batch.c
358
struct io_uring_sqe *sqe;
tools/testing/selftests/ublk/batch.c
364
ublk_io_alloc_sqes(t, &sqe, 1);
tools/testing/selftests/ublk/batch.c
382
sqe->addr = (__u64)buf;
tools/testing/selftests/ublk/batch.c
383
sqe->len = t->commit_buf_elem_size * nr_elem;
tools/testing/selftests/ublk/batch.c
385
ublk_init_batch_cmd(t, q->q_id, sqe, UBLK_U_IO_PREP_IO_CMDS,
tools/testing/selftests/ublk/batch.c
387
ublk_setup_commit_sqe(t, sqe, buf_idx);
tools/testing/selftests/ublk/batch.c
457
struct io_uring_sqe *sqe;
tools/testing/selftests/ublk/batch.c
467
ublk_io_alloc_sqes(t, &sqe, 1);
tools/testing/selftests/ublk/batch.c
469
sqe->addr = (__u64)cb->elem;
tools/testing/selftests/ublk/batch.c
470
sqe->len = nr_elem * t->commit_buf_elem_size;
tools/testing/selftests/ublk/batch.c
473
ublk_init_batch_cmd(t, cb->q_id, sqe, UBLK_U_IO_COMMIT_IO_CMDS,
tools/testing/selftests/ublk/batch.c
475
ublk_setup_commit_sqe(t, sqe, buf_idx);
tools/testing/selftests/ublk/fault_inject.c
46
struct io_uring_sqe *sqe;
tools/testing/selftests/ublk/fault_inject.c
51
ublk_io_alloc_sqes(t, &sqe, 1);
tools/testing/selftests/ublk/fault_inject.c
52
io_uring_prep_timeout(sqe, &ts, 1, 0);
tools/testing/selftests/ublk/fault_inject.c
53
sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
20
struct io_uring_sqe *sqe[1];
tools/testing/selftests/ublk/file_backed.c
22
ublk_io_alloc_sqes(t, sqe, 1);
tools/testing/selftests/ublk/file_backed.c
23
io_uring_prep_fsync(sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/, IORING_FSYNC_DATASYNC);
tools/testing/selftests/ublk/file_backed.c
24
io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
tools/testing/selftests/ublk/file_backed.c
26
sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
40
struct io_uring_sqe *sqe[3];
tools/testing/selftests/ublk/file_backed.c
45
ublk_io_alloc_sqes(t, sqe, 1);
tools/testing/selftests/ublk/file_backed.c
47
io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 2),
tools/testing/selftests/ublk/file_backed.c
51
sqe[0]->flags = IOSQE_FIXED_FILE;
tools/testing/selftests/ublk/file_backed.c
53
sqe[0]->user_data = build_user_data(tag, ublk_op, 1, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
57
ublk_io_alloc_sqes(t, sqe, 1);
tools/testing/selftests/ublk/file_backed.c
58
if (!sqe[0])
tools/testing/selftests/ublk/file_backed.c
61
io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/,
tools/testing/selftests/ublk/file_backed.c
66
sqe[0]->buf_index = buf_index;
tools/testing/selftests/ublk/file_backed.c
67
io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
tools/testing/selftests/ublk/file_backed.c
69
sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
73
ublk_io_alloc_sqes(t, sqe, 3);
tools/testing/selftests/ublk/file_backed.c
75
io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_index);
tools/testing/selftests/ublk/file_backed.c
76
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
tools/testing/selftests/ublk/file_backed.c
77
sqe[0]->user_data = build_user_data(tag,
tools/testing/selftests/ublk/file_backed.c
78
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
80
io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
tools/testing/selftests/ublk/file_backed.c
83
sqe[1]->buf_index = buf_index;
tools/testing/selftests/ublk/file_backed.c
84
sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
tools/testing/selftests/ublk/file_backed.c
85
sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
87
io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_index);
tools/testing/selftests/ublk/file_backed.c
88
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/kublk.c
45
struct io_uring_sqe *sqe,
tools/testing/selftests/ublk/kublk.c
49
struct ublksrv_ctrl_cmd *cmd = (struct ublksrv_ctrl_cmd *)ublk_get_sqe_cmd(sqe);
tools/testing/selftests/ublk/kublk.c
51
sqe->fd = dev->ctrl_fd;
tools/testing/selftests/ublk/kublk.c
52
sqe->opcode = IORING_OP_URING_CMD;
tools/testing/selftests/ublk/kublk.c
53
sqe->ioprio = 0;
tools/testing/selftests/ublk/kublk.c
635
struct io_uring_sqe *sqe,
tools/testing/selftests/ublk/kublk.c
648
sqe->addr = ublk_auto_buf_reg_to_sqe_addr(&buf);
tools/testing/selftests/ublk/kublk.c
66
ublk_set_sqe_cmd_op(sqe, data->cmd_op);
tools/testing/selftests/ublk/kublk.c
68
io_uring_sqe_set_data(sqe, cmd);
tools/testing/selftests/ublk/kublk.c
701
struct io_uring_sqe *sqe[1];
tools/testing/selftests/ublk/kublk.c
730
ublk_io_alloc_sqes(t, sqe, 1);
tools/testing/selftests/ublk/kublk.c
731
if (!sqe[0]) {
tools/testing/selftests/ublk/kublk.c
737
cmd = (struct ublksrv_io_cmd *)ublk_get_sqe_cmd(sqe[0]);
tools/testing/selftests/ublk/kublk.c
74
struct io_uring_sqe *sqe;
tools/testing/selftests/ublk/kublk.c
743
ublk_set_sqe_cmd_op(sqe[0], cmd_op);
tools/testing/selftests/ublk/kublk.c
744
sqe[0]->fd = ublk_get_registered_fd(q, 0); /* dev->fds[0] */
tools/testing/selftests/ublk/kublk.c
745
sqe[0]->opcode = IORING_OP_URING_CMD;
tools/testing/selftests/ublk/kublk.c
747
sqe[0]->flags = 0; /* Use raw FD, not fixed file */
tools/testing/selftests/ublk/kublk.c
749
sqe[0]->flags = IOSQE_FIXED_FILE;
tools/testing/selftests/ublk/kublk.c
750
sqe[0]->rw_flags = 0;
tools/testing/selftests/ublk/kublk.c
759
ublk_set_auto_buf_reg(t, q, sqe[0], io->tag);
tools/testing/selftests/ublk/kublk.c
762
io_uring_sqe_set_data64(sqe[0], user_data);
tools/testing/selftests/ublk/kublk.c
78
sqe = io_uring_get_sqe(&dev->ring);
tools/testing/selftests/ublk/kublk.c
79
if (!sqe) {
tools/testing/selftests/ublk/kublk.c
84
ublk_ctrl_init_cmd(dev, sqe, data);
tools/testing/selftests/ublk/kublk.h
405
static inline void __io_uring_prep_buf_reg_unreg(struct io_uring_sqe *sqe,
tools/testing/selftests/ublk/kublk.h
408
struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;
tools/testing/selftests/ublk/kublk.h
411
io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
tools/testing/selftests/ublk/kublk.h
412
sqe->opcode = IORING_OP_URING_CMD;
tools/testing/selftests/ublk/kublk.h
414
sqe->flags &= ~IOSQE_FIXED_FILE;
tools/testing/selftests/ublk/kublk.h
416
sqe->flags |= IOSQE_FIXED_FILE;
tools/testing/selftests/ublk/kublk.h
423
static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
tools/testing/selftests/ublk/kublk.h
426
__io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
tools/testing/selftests/ublk/kublk.h
427
sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;
tools/testing/selftests/ublk/kublk.h
430
static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
tools/testing/selftests/ublk/kublk.h
433
__io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
tools/testing/selftests/ublk/kublk.h
434
sqe->cmd_op = UBLK_U_IO_UNREGISTER_IO_BUF;
tools/testing/selftests/ublk/kublk.h
437
static inline void *ublk_get_sqe_cmd(const struct io_uring_sqe *sqe)
tools/testing/selftests/ublk/kublk.h
439
return (void *)&sqe->cmd;
tools/testing/selftests/ublk/kublk.h
463
static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
tools/testing/selftests/ublk/kublk.h
465
__u32 *addr = (__u32 *)&sqe->off;
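
A sketch completing ublk_set_sqe_cmd_op() from the entry above: in the SQE layout the 32-bit cmd_op shares a union with the 64-bit off field, so the helper stores the opcode through the low word and clears the high word:

	static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
	{
		__u32 *addr = (__u32 *)&sqe->off;

		addr[0] = cmd_op;   /* overlays sqe->cmd_op */
		addr[1] = 0;        /* overlays sqe->__pad1 */
	}
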
tools/testing/selftests/ublk/null.c
47
struct io_uring_sqe *sqe, int q_id, unsigned buf_idx)
tools/testing/selftests/ublk/null.c
51
io_uring_prep_nop(sqe);
tools/testing/selftests/ublk/null.c
52
sqe->buf_index = buf_idx;
tools/testing/selftests/ublk/null.c
53
sqe->flags |= IOSQE_FIXED_FILE;
tools/testing/selftests/ublk/null.c
54
sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
tools/testing/selftests/ublk/null.c
55
sqe->len = iod->nr_sectors << 9; /* injected result */
tools/testing/selftests/ublk/null.c
56
sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1);
tools/testing/selftests/ublk/null.c
63
struct io_uring_sqe *sqe[3];
tools/testing/selftests/ublk/null.c
66
ublk_io_alloc_sqes(t, sqe, 3);
tools/testing/selftests/ublk/null.c
68
io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
tools/testing/selftests/ublk/null.c
69
sqe[0]->user_data = build_user_data(tag,
tools/testing/selftests/ublk/null.c
70
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/null.c
71
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
tools/testing/selftests/ublk/null.c
73
__setup_nop_io(tag, iod, sqe[1], q->q_id, buf_idx);
tools/testing/selftests/ublk/null.c
74
sqe[1]->flags |= IOSQE_IO_HARDLINK;
tools/testing/selftests/ublk/null.c
76
io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_idx);
tools/testing/selftests/ublk/null.c
77
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/null.c
87
struct io_uring_sqe *sqe[1];
tools/testing/selftests/ublk/null.c
89
ublk_io_alloc_sqes(t, sqe, 1);
tools/testing/selftests/ublk/null.c
90
__setup_nop_io(tag, iod, sqe[0], q->q_id, ublk_io_buf_idx(t, q, tag));
tools/testing/selftests/ublk/stripe.c
133
struct io_uring_sqe *sqe[NR_STRIPE];
tools/testing/selftests/ublk/stripe.c
143
ublk_io_alloc_sqes(t, sqe, s->nr + extra);
tools/testing/selftests/ublk/stripe.c
146
io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
tools/testing/selftests/ublk/stripe.c
147
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
tools/testing/selftests/ublk/stripe.c
148
sqe[0]->user_data = build_user_data(tag,
tools/testing/selftests/ublk/stripe.c
149
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/stripe.c
155
io_uring_prep_rw(op, sqe[i],
tools/testing/selftests/ublk/stripe.c
160
io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
tools/testing/selftests/ublk/stripe.c
162
sqe[i]->buf_index = buf_idx;
tools/testing/selftests/ublk/stripe.c
164
sqe[i]->flags |= IOSQE_IO_HARDLINK;
tools/testing/selftests/ublk/stripe.c
167
sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, q->q_id, 1);
tools/testing/selftests/ublk/stripe.c
170
struct io_uring_sqe *unreg = sqe[s->nr + 1];
tools/testing/selftests/ublk/stripe.c
185
struct io_uring_sqe *sqe[NR_STRIPE];
tools/testing/selftests/ublk/stripe.c
188
ublk_io_alloc_sqes(t, sqe, conf->nr_files);
tools/testing/selftests/ublk/stripe.c
190
io_uring_prep_fsync(sqe[i], i + 1, IORING_FSYNC_DATASYNC);
tools/testing/selftests/ublk/stripe.c
191
io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
tools/testing/selftests/ublk/stripe.c
192
sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, q->q_id, 1);
tools/testing/selftests/x86/lam.c
610
struct io_uring_sqe *sqe;
tools/testing/selftests/x86/lam.c
640
sqe = &ring->sq_ring.queue.sqes[index];
tools/testing/selftests/x86/lam.c
641
sqe->fd = file_fd;
tools/testing/selftests/x86/lam.c
642
sqe->flags = 0;
tools/testing/selftests/x86/lam.c
643
sqe->opcode = IORING_OP_READV;
tools/testing/selftests/x86/lam.c
644
sqe->addr = (unsigned long)fi->iovecs;
tools/testing/selftests/x86/lam.c
645
sqe->len = blocks;
tools/testing/selftests/x86/lam.c
646
sqe->off = 0;
tools/testing/selftests/x86/lam.c
647
sqe->user_data = (uint64_t)fi;
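
The lam.c entries fill the SQE by hand against a raw ring mapping; with liburing the same readv submission collapses to the following (ring, file_fd, fi and blocks as in the entries above):

	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	io_uring_prep_readv(sqe, file_fd, fi->iovecs, blocks, 0);
	io_uring_sqe_set_data(sqe, fi);
	io_uring_submit(ring);
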
tools/testing/vsock/vsock_uring_test.c
143
struct io_uring_sqe *sqe;
tools/testing/vsock/vsock_uring_test.c
147
sqe = io_uring_get_sqe(&ring);
tools/testing/vsock/vsock_uring_test.c
151
io_uring_prep_readv(sqe, fd, &iovec, 1, 0);
tools/testing/vsock/vsock_uring_test.c
62
struct io_uring_sqe *sqe;
tools/testing/vsock/vsock_uring_test.c
89
sqe = io_uring_get_sqe(&ring);
tools/testing/vsock/vsock_uring_test.c
92
io_uring_prep_sendmsg_zc(sqe, fd, &msg, 0);
tools/testing/vsock/vsock_uring_test.c
94
io_uring_prep_sendmsg(sqe, fd, &msg, 0);
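
Whichever prep call fills the SQE, submission and completion follow the same two steps; a closing liburing sketch:

	io_uring_submit(&ring);

	struct io_uring_cqe *cqe;
	io_uring_wait_cqe(&ring, &cqe);
	/* cqe->user_data echoes sqe->user_data; cqe->res carries the result */
	io_uring_cqe_seen(&ring, cqe);
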